[med-svn] [python-mne] 01/04: Imported Upstream version 0.8

Alexandre Gramfort agramfort-guest at moszumanska.debian.org
Fri Aug 1 22:13:52 UTC 2014


This is an automated email from the git hooks/post-receive script.

agramfort-guest pushed a commit to branch master
in repository python-mne.

commit 653a6825dabe8d7d86f6862d26cb195832ffa380
Author: Alexandre Gramfort <alexandre.gramfort at m4x.org>
Date:   Thu Jul 31 23:31:53 2014 -0400

    Imported Upstream version 0.8
---
 .coveragerc                                        |    8 +
 .gitignore                                         |    4 +
 .mailmap                                           |   14 +
 .travis.yml                                        |   84 +-
 Makefile                                           |    7 +-
 README.rst                                         |    3 +-
 bin/mne                                            |   18 +-
 doc/source/cite.rst                                |    6 +-
 doc/source/contributing.rst                        |    4 +
 doc/source/getting_started.rst                     |  203 +-
 doc/source/index.rst                               |   12 +
 doc/source/manual/cookbook.rst                     |    2 +-
 doc/source/mne-python.rst                          |    1 +
 doc/source/mne_report_tutorial.rst                 |  117 +
 doc/source/python_reference.rst                    |  200 +-
 doc/source/python_tutorial.rst                     |   86 +-
 doc/source/whats_new.rst                           |  227 ++
 doc/sphinxext/gen_rst.py                           |   33 +-
 .../connectivity/plot_cwt_sensor_connectivity.py   |   10 +-
 .../plot_mne_inverse_coherence_epochs.py           |   16 +-
 .../plot_mne_inverse_connectivity_spectrum.py      |   11 +-
 .../plot_mne_inverse_label_connectivity.py         |   43 +-
 .../connectivity/plot_mne_inverse_psi_visual.py    |    8 +-
 examples/connectivity/plot_sensor_connectivity.py  |    8 +-
 examples/{ => datasets}/plot_megsim_data.py        |   18 +-
 .../plot_megsim_data_single_trial.py               |    5 +-
 examples/datasets/plot_spm_faces_dataset.py        |   68 +-
 examples/decoding/plot_decoding_csp_eeg.py         |  150 +
 examples/decoding/plot_decoding_csp_space.py       |   20 +-
 examples/decoding/plot_decoding_sensors.py         |   14 +-
 .../plot_decoding_spatio_temporal_source.py        |   28 +-
 .../decoding/plot_decoding_time_generalization.py  |   91 +
 examples/decoding/plot_ems_filtering.py            |   94 +
 examples/export/plot_epochs_as_data_frame.py       |   16 +-
 examples/export/plot_epochs_to_nitime.py           |   14 +-
 examples/export/plot_evoked_to_nitime.py           |   12 +-
 examples/export/plot_raw_to_nitime.py              |   22 +-
 examples/extract_events_from_raw.py                |   31 -
 examples/inverse/plot_compute_mne_inverse.py       |   18 +-
 .../plot_compute_mne_inverse_epochs_in_label.py    |   73 +-
 .../plot_compute_mne_inverse_raw_in_label.py       |    6 +-
 .../inverse/plot_compute_mne_inverse_volume.py     |    8 +-
 examples/inverse/plot_dics_beamformer.py           |    8 +-
 examples/inverse/plot_dics_source_power.py         |   24 +-
 examples/inverse/plot_dipole_fit_result.py         |   10 +-
 examples/inverse/plot_gamma_map_inverse.py         |   11 +-
 examples/inverse/plot_label_activation_from_stc.py |    2 +-
 examples/inverse/plot_label_from_stc.py            |   17 +-
 examples/inverse/plot_label_source_activations.py  |    9 +-
 examples/inverse/plot_lcmv_beamformer.py           |   10 +-
 examples/inverse/plot_lcmv_beamformer_volume.py    |   10 +-
 examples/inverse/plot_make_inverse_operator.py     |    9 +-
 examples/inverse/plot_mixed_norm_L21_inverse.py    |   21 +-
 examples/inverse/plot_mne_crosstalk_function.py    |   83 +
 examples/inverse/plot_mne_point_spread_function.py |  102 +
 examples/inverse/plot_morph_data.py                |    4 +-
 examples/inverse/plot_read_inverse.py              |   12 +-
 examples/inverse/plot_read_source_space.py         |    6 +-
 examples/inverse/plot_read_stc.py                  |    8 +-
 examples/inverse/plot_tf_dics.py                   |   51 +-
 examples/inverse/plot_tf_lcmv.py                   |   48 +-
 .../plot_time_frequency_mixed_norm_inverse.py      |   19 +-
 examples/plot_bem_contour_mri.py                   |   25 +
 examples/plot_channel_epochs_image.py              |   12 +-
 examples/plot_coregistration_transform.py          |   31 +
 examples/plot_decimate_head_surface.py             |    4 +-
 examples/plot_define_target_events.py              |   16 +-
 .../plot_estimate_covariance_matrix_baseline.py    |   16 +-
 examples/plot_estimate_covariance_matrix_raw.py    |   16 +-
 examples/plot_evoked_delayed_ssp.py                |   17 +-
 examples/plot_evoked_topomap.py                    |   78 +-
 examples/plot_evoked_topomap_delayed_ssp.py        |   14 +-
 examples/plot_evoked_whitening.py                  |   19 +-
 examples/plot_extract_events_from_raw.py           |   41 +
 examples/plot_from_raw_to_epochs_to_evoked.py      |   27 +-
 .../plot_from_raw_to_multiple_epochs_to_evoked.py  |   14 +-
 examples/plot_make_forward.py                      |   27 +-
 examples/plot_meg_eeg_fields_3d.py                 |   37 +
 examples/plot_read_and_write_raw_data.py           |   14 +-
 examples/plot_read_bem_surfaces.py                 |    6 +-
 examples/plot_read_epochs.py                       |   15 +-
 examples/plot_read_evoked.py                       |   19 +-
 examples/plot_read_forward.py                      |    8 +-
 examples/plot_read_noise_covariance_matrix.py      |    6 +-
 examples/plot_shift_evoked.py                      |   15 +-
 examples/plot_simulate_evoked_data.py              |   29 +-
 examples/plot_ssp_projs_sensitivity_map.py         |   15 +-
 examples/plot_ssp_projs_topomaps.py                |   20 +-
 examples/plot_topo_channel_epochs_image.py         |   14 +-
 examples/plot_topo_compare_conditions.py           |   12 +-
 examples/plot_topo_customized.py                   |   62 +
 examples/plot_topography.py                        |    9 +-
 .../preprocessing/plot_eog_artifact_histogram.py   |   49 +
 examples/preprocessing/plot_find_ecg_artifacts.py  |   14 +-
 examples/preprocessing/plot_find_eog_artifacts.py  |   12 +-
 examples/preprocessing/plot_ica_from_epochs.py     |  142 +-
 examples/preprocessing/plot_ica_from_raw.py        |  200 +-
 examples/read_events.py                            |    8 +-
 examples/realtime/ftclient_rt_average.py           |   90 +
 examples/realtime/plot_compute_rt_average.py       |   10 +-
 examples/realtime/plot_compute_rt_decoder.py       |   10 +-
 examples/realtime/rt_feedback_client.py            |    2 +-
 examples/realtime/rt_feedback_server.py            |   12 +-
 .../plot_cluster_1samp_test_time_frequency.py      |   14 +-
 examples/stats/plot_cluster_methods_tutorial.py    |    2 +-
 examples/stats/plot_cluster_stats_evoked.py        |   12 +-
 .../stats/plot_cluster_stats_spatio_temporal.py    |   20 +-
 .../plot_cluster_stats_spatio_temporal_2samp.py    |   12 +-
 ...tats_spatio_temporal_repeated_measures_anova.py |   37 +-
 .../stats/plot_cluster_stats_time_frequency.py     |   12 +-
 ...stats_time_frequency_repeated_measures_anova.py |   16 +-
 examples/stats/plot_fdr_stats_evoked.py            |   12 +-
 examples/stats/plot_sensor_permutation_test.py     |   50 +-
 examples/stats/plot_sensor_regression.py           |   79 +
 .../plot_compute_raw_data_spectrum.py              |   13 +-
 .../plot_compute_source_psd_epochs.py              |    6 +-
 .../time_frequency/plot_single_trial_spectra.py    |   24 +-
 .../plot_source_label_time_frequency.py            |   10 +-
 .../time_frequency/plot_source_power_spectrum.py   |   14 +-
 .../plot_source_space_time_frequency.py            |   10 +-
 examples/time_frequency/plot_temporal_whitening.py |    8 +-
 examples/time_frequency/plot_tfr_topography.py     |   80 -
 examples/time_frequency/plot_time_frequency.py     |   95 -
 .../time_frequency/plot_time_frequency_sensors.py  |   65 +
 mne/__init__.py                                    |   35 +-
 mne/_hdf5.py                                       |  167 +
 mne/baseline.py                                    |    2 +-
 mne/beamformer/_dics.py                            |   19 +-
 mne/beamformer/_lcmv.py                            |   35 +-
 mne/beamformer/tests/test_dics.py                  |   16 +-
 mne/beamformer/tests/test_lcmv.py                  |   40 +-
 mne/channels.py                                    |  343 ++
 mne/commands/mne_browse_raw.py                     |    2 +-
 mne/commands/mne_bti2fiff.py                       |    6 +-
 mne/commands/mne_clean_eog_ecg.py                  |   29 +-
 mne/commands/mne_compute_proj_ecg.py               |   16 +-
 mne/commands/mne_compute_proj_eog.py               |   16 +-
 mne/commands/mne_coreg.py                          |   24 +
 mne/commands/mne_flash_bem_model.py                |    9 +-
 mne/commands/mne_kit2fiff.py                       |   12 +-
 mne/commands/mne_make_scalp_surfaces.py            |   40 +-
 mne/commands/mne_report.py                         |   52 +
 mne/commands/mne_surf2bem.py                       |    5 +-
 mne/commands/utils.py                              |   11 +-
 mne/connectivity/effective.py                      |    7 +-
 mne/connectivity/spectral.py                       |   23 +-
 mne/connectivity/tests/test_spectral.py            |    2 +-
 mne/coreg.py                                       |  237 +-
 mne/cov.py                                         |  206 +-
 mne/cuda.py                                        |   26 +-
 mne/data/helmets/122m.fif.gz                       |  Bin 0 -> 4706 bytes
 mne/data/helmets/306m.fif.gz                       |  Bin 0 -> 9462 bytes
 mne/data/helmets/306m_rt.fif.gz                    |  Bin 0 -> 9443 bytes
 mne/data/helmets/BabySQUID.fif.gz                  |  Bin 0 -> 59505 bytes
 mne/data/helmets/CTF_275.fif.gz                    |  Bin 0 -> 9561 bytes
 mne/data/helmets/KIT.fif.gz                        |  Bin 0 -> 9477 bytes
 mne/data/helmets/Magnes_2500wh.fif.gz              |  Bin 0 -> 9470 bytes
 mne/data/helmets/Magnes_3600wh.fif.gz              |  Bin 0 -> 9475 bytes
 mne/datasets/__init__.py                           |    4 +-
 mne/datasets/eegbci/__init__.py                    |    4 +
 mne/datasets/eegbci/eegbci.py                      |  203 ++
 mne/datasets/megsim/megsim.py                      |   44 +-
 mne/datasets/sample/sample.py                      |   17 +-
 mne/datasets/somato/__init__.py                    |    4 +
 mne/datasets/somato/somato.py                      |   35 +
 mne/datasets/spm_face/spm_data.py                  |   14 +-
 mne/datasets/utils.py                              |   91 +-
 mne/decoding/__init__.py                           |    4 +-
 mne/decoding/classifier.py                         |   29 +-
 mne/decoding/csp.py                                |   15 +-
 mne/decoding/ems.py                                |  117 +
 mne/decoding/tests/test_classifier.py              |   28 +-
 mne/decoding/tests/test_csp.py                     |   14 +-
 mne/decoding/tests/test_ems.py                     |   58 +
 mne/decoding/tests/test_time_gen.py                |   44 +
 mne/decoding/time_gen.py                           |  123 +
 mne/dipole.py                                      |    2 +-
 mne/epochs.py                                      |  738 ++++-
 mne/event.py                                       |   88 +-
 mne/{fiff => }/evoked.py                           |  650 +++-
 mne/externals/FieldTrip.py                         |  508 +++
 mne/externals/__init__.py                          |    4 +
 mne/externals/decorator.py                         |  253 ++
 mne/externals/jdcal.py                             |  116 +
 mne/externals/six.py                               |  577 ++++
 mne/externals/tempita/__init__.py                  | 1303 ++++++++
 mne/externals/tempita/_looper.py                   |  163 +
 mne/externals/tempita/compat3.py                   |   45 +
 mne/fiff/__init__.py                               |   90 +-
 mne/fiff/brainvision/brainvision.py                |  529 ---
 mne/fiff/brainvision/tests/test_brainvision.py     |   75 -
 mne/fiff/bti/__init__.py                           |    5 -
 mne/fiff/channels.py                               |   35 -
 mne/fiff/cov.py                                    |  182 -
 mne/fiff/edf/tests/test_edf.py                     |   92 -
 mne/fiff/tests/test_evoked.py                      |  207 --
 mne/filter.py                                      |  138 +-
 mne/fixes.py                                       |   74 +-
 mne/forward/__init__.py                            |    4 +-
 mne/forward/_compute_forward.py                    |   31 +-
 mne/forward/_field_interpolation.py                |  286 ++
 mne/forward/_lead_dots.py                          |  309 ++
 mne/forward/_make_forward.py                       |   74 +-
 mne/forward/forward.py                             |  154 +-
 mne/forward/tests/test_field_interpolation.py      |  153 +
 mne/forward/tests/test_forward.py                  |   41 +-
 mne/forward/tests/test_make_forward.py             |   47 +-
 mne/gui/__init__.py                                |    8 +-
 mne/gui/_coreg_gui.py                              |  117 +-
 mne/gui/_fiducials_gui.py                          |    9 +-
 mne/gui/_file_traits.py                            |  226 +-
 mne/gui/_kit2fiff_gui.py                           |   66 +-
 mne/gui/_marker_gui.py                             |   17 +-
 mne/gui/_viewer.py                                 |    3 +-
 mne/gui/tests/test_coreg_gui.py                    |   16 +-
 mne/gui/tests/test_file_traits.py                  |   11 +-
 mne/gui/tests/test_kit2fiff_gui.py                 |   31 +-
 mne/gui/tests/test_marker_gui.py                   |   33 +-
 mne/html/bootstrap.min.css                         |    7 +
 mne/html/bootstrap.min.js                          |    7 +
 mne/html/d3.v3.min.js                              |    5 +
 mne/html/jquery-1.10.2.min.js                      |    6 +
 mne/html/jquery-ui.min.css                         |    6 +
 mne/html/jquery-ui.min.js                          |   12 +
 mne/html/mpld3.v0.2.min.js                         |    2 +
 mne/inverse_sparse/__init__.py                     |    2 +-
 mne/inverse_sparse/_gamma_map.py                   |   10 +-
 mne/inverse_sparse/mxne_debiasing.py               |    4 +-
 mne/inverse_sparse/mxne_inverse.py                 |    4 +-
 mne/inverse_sparse/mxne_optim.py                   |   41 +-
 mne/inverse_sparse/tests/test_gamma_map.py         |    8 +-
 mne/inverse_sparse/tests/test_mxne_debiasing.py    |    2 +-
 mne/inverse_sparse/tests/test_mxne_inverse.py      |    9 +-
 mne/inverse_sparse/tests/test_mxne_optim.py        |    6 +-
 mne/io/__init__.py                                 |   33 +
 mne/io/array/__init__.py                           |    5 +
 mne/io/array/array.py                              |   65 +
 mne/{fiff/bti => io/array}/tests/__init__.py       |    0
 mne/io/array/tests/test_array.py                   |  104 +
 mne/{fiff/raw.py => io/base.py}                    |  873 ++---
 mne/{fiff => io}/brainvision/__init__.py           |    0
 mne/io/brainvision/brainvision.py                  |  684 ++++
 mne/{fiff => io}/brainvision/tests/__init__.py     |    0
 mne/{fiff => io}/brainvision/tests/data/test.eeg   |  Bin
 mne/{fiff => io}/brainvision/tests/data/test.vhdr  |    0
 mne/{fiff => io}/brainvision/tests/data/test.vmrk  |    0
 .../brainvision/tests/data/test_bin_raw.fif        |  Bin
 .../brainvision/tests/data/test_elp.txt            |    0
 mne/io/brainvision/tests/test_brainvision.py       |  157 +
 mne/io/bti/__init__.py                             |    5 +
 mne/{fiff/bti/raw.py => io/bti/bti.py}             |  829 ++---
 mne/{fiff => io}/bti/constants.py                  |    4 +-
 mne/{fiff => io}/bti/read.py                       |   10 +-
 mne/{fiff/edf => io/bti}/tests/__init__.py         |    0
 .../bti/tests/data/exported4D_linux_raw.fif}       |  Bin
 .../bti/tests/data/exported4D_solaris_raw.fif}     |  Bin
 mne/{fiff => io}/bti/tests/data/test_config_linux  |  Bin
 .../bti/tests/data/test_config_solaris             |  Bin
 mne/{fiff => io}/bti/tests/data/test_hs_linux      |  Bin
 mne/{fiff => io}/bti/tests/data/test_hs_solaris    |  Bin
 mne/{fiff => io}/bti/tests/data/test_pdf_linux     |  Bin
 mne/{fiff => io}/bti/tests/data/test_pdf_solaris   |  Bin
 mne/{fiff => io}/bti/tests/test_bti.py             |   38 +-
 mne/{fiff => io}/bti/transforms.py                 |    2 +-
 mne/{fiff => io}/compensator.py                    |    0
 mne/{fiff => io}/constants.py                      |   22 +-
 mne/{fiff => io}/ctf.py                            |    5 +-
 mne/{fiff => io}/diff.py                           |    2 +-
 mne/{fiff => io}/edf/__init__.py                   |    0
 mne/{fiff => io}/edf/edf.py                        |  218 +-
 mne/{fiff/bti => io/edf}/tests/__init__.py         |    0
 mne/{fiff => io}/edf/tests/data/biosemi.hpts       |    0
 mne/{fiff => io}/edf/tests/data/test.bdf           |    0
 mne/{fiff => io}/edf/tests/data/test.edf           |   14 +-
 .../edf/tests/data/test_bdf_eeglab.mat             |  Bin
 .../edf/tests/data/test_edf_eeglab.mat             |  Bin
 mne/{fiff => io}/edf/tests/data/test_eeglab.mat    |  Bin
 mne/io/edf/tests/test_edf.py                       |  196 ++
 mne/io/egi/__init__.py                             |    5 +
 mne/io/egi/egi.py                                  |  322 ++
 mne/{fiff/bti => io/egi}/tests/__init__.py         |    0
 mne/io/egi/tests/data/test_egi.raw                 |  Bin 0 -> 80756 bytes
 mne/io/egi/tests/test_egi.py                       |   80 +
 mne/io/fiff/__init__.py                            |    1 +
 mne/io/fiff/raw.py                                 |  598 ++++
 mne/{fiff/bti => io/fiff}/tests/__init__.py        |    0
 mne/{ => io}/fiff/tests/test_raw.py                |  230 +-
 mne/{fiff => io}/kit/__init__.py                   |    3 +-
 mne/{fiff => io}/kit/constants.py                  |    0
 mne/{fiff => io}/kit/coreg.py                      |   47 +-
 mne/{fiff => io}/kit/kit.py                        |   90 +-
 mne/{fiff => io}/kit/tests/__init__.py             |    0
 mne/{fiff => io}/kit/tests/data/sns.txt            |    0
 mne/{fiff => io}/kit/tests/data/test.sqd           |  Bin
 mne/{fiff => io}/kit/tests/data/test_Ykgw.mat      |  Bin
 .../kit/tests/data/test_bin_raw.fif}               |  Bin
 mne/{fiff => io}/kit/tests/data/test_elp.txt       |    0
 mne/{fiff => io}/kit/tests/data/test_hsp.txt       |    0
 mne/{fiff => io}/kit/tests/data/test_mrk.sqd       |  Bin
 mne/{fiff => io}/kit/tests/data/test_mrk_post.sqd  |  Bin
 mne/{fiff => io}/kit/tests/data/test_mrk_pre.sqd   |  Bin
 mne/{fiff => io}/kit/tests/data/trans-sample.fif   |  Bin
 mne/{fiff => io}/kit/tests/test_coreg.py           |    9 +-
 mne/{fiff => io}/kit/tests/test_kit.py             |   40 +-
 mne/{fiff => io}/matrix.py                         |    2 +-
 mne/{fiff => io}/meas_info.py                      |  408 ++-
 mne/{fiff => io}/open.py                           |   47 +-
 mne/{fiff => io}/pick.py                           |   30 +-
 mne/{fiff => io}/proj.py                           |   73 +-
 mne/{fiff => io}/tag.py                            |   55 +-
 mne/{fiff => io}/tests/__init__.py                 |    0
 .../tests/data/fsaverage-fiducials.fif             |  Bin
 mne/{fiff => io}/tests/data/process_raw.sh         |    0
 .../tests/data/sample-audvis-raw-trans.txt         |    0
 mne/io/tests/data/small-src.fif.gz                 |  Bin 0 -> 77784 bytes
 .../tests/data/test-1-eve.fif}                     |  Bin
 mne/{fiff => io}/tests/data/test-ave-2.log         |    0
 mne/{fiff => io}/tests/data/test-ave.fif           |  Bin
 mne/{fiff => io}/tests/data/test-ave.fif.gz        |  Bin
 mne/{fiff => io}/tests/data/test-ave.log           |    0
 mne/{fiff => io}/tests/data/test-cov.fif           |  Bin
 mne/{fiff => io}/tests/data/test-cov.fif.gz        |  Bin
 mne/{fiff => io}/tests/data/test-eve-1.eve         |    0
 mne/{fiff => io}/tests/data/test-eve-old-style.eve |    0
 mne/{fiff => io}/tests/data/test-eve.eve           |    0
 mne/{fiff => io}/tests/data/test-eve.fif           |  Bin
 mne/{fiff => io}/tests/data/test-eve.fif.gz        |  Bin
 mne/{fiff => io}/tests/data/test-km-cov.fif        |  Bin
 mne/{fiff => io}/tests/data/test-lh.label          |    0
 mne/{fiff => io}/tests/data/test-mpr-eve.eve       |    0
 mne/{fiff => io}/tests/data/test-nf-ave.fif        |  Bin
 mne/{fiff => io}/tests/data/test-no-reject.ave     |    0
 .../test_proj.fif => io/tests/data/test-proj.fif}  |  Bin
 .../tests/data/test-proj.fif.gz}                   |  Bin
 mne/{fiff => io}/tests/data/test-rh.label          |    0
 mne/{fiff => io}/tests/data/test.ave               |    0
 mne/{fiff => io}/tests/data/test.cov               |    0
 mne/{fiff => io}/tests/data/test_bads.txt          |    0
 mne/{fiff => io}/tests/data/test_chpi_raw_hp.txt   |    0
 mne/{fiff => io}/tests/data/test_chpi_raw_sss.fif  |  Bin
 mne/{fiff => io}/tests/data/test_ctf_comp_raw.fif  |  Bin
 mne/{fiff => io}/tests/data/test_ctf_raw.fif       |  Bin
 mne/{fiff => io}/tests/data/test_empty_room.cov    |    0
 mne/{fiff => io}/tests/data/test_erm-cov.fif       |  Bin
 mne/{fiff => io}/tests/data/test_ica.lout          |    0
 mne/{fiff => io}/tests/data/test_keepmean.cov      |    0
 mne/{fiff => io}/tests/data/test_raw-eve.fif       |  Bin
 mne/{fiff => io}/tests/data/test_raw.fif           |  Bin
 mne/{fiff => io}/tests/data/test_raw.fif.gz        |  Bin
 mne/{fiff => io}/tests/data/test_raw.lout          |    0
 mne/{fiff => io}/tests/data/test_withbads_raw.fif  |  Bin
 mne/{fiff => io}/tests/data/test_wrong_bads.txt    |    0
 mne/{fiff => io}/tests/test_compensator.py         |   12 +-
 mne/{fiff => io}/tests/test_meas_info.py           |   26 +-
 mne/{fiff => io}/tests/test_pick.py                |    2 +-
 mne/{fiff => io}/tree.py                           |   17 +-
 mne/{fiff => io}/write.py                          |   50 +-
 mne/label.py                                       | 1098 +++++--
 mne/layouts/EEG1005.lay                            |  337 ++
 mne/layouts/EGI256.lout                            |  259 ++
 mne/layouts/KIT-157.lout                           |  158 +
 mne/layouts/layout.py                              |   97 +-
 mne/layouts/tests/test_layout.py                   |   56 +-
 mne/minimum_norm/__init__.py                       |    3 +-
 mne/minimum_norm/inverse.py                        |  141 +-
 mne/minimum_norm/psf_ctf.py                        |  431 +++
 mne/minimum_norm/tests/test_inverse.py             |   38 +-
 mne/minimum_norm/tests/test_psf_ctf.py             |   79 +
 mne/minimum_norm/tests/test_time_frequency.py      |   18 +-
 mne/minimum_norm/time_frequency.py                 |   44 +-
 mne/misc.py                                        |    2 +-
 mne/parallel.py                                    |   22 +-
 mne/preprocessing/__init__.py                      |    9 +-
 mne/preprocessing/bads.py                          |   36 +
 mne/preprocessing/ctps_.py                         |  169 +
 mne/preprocessing/ecg.py                           |  159 +-
 mne/preprocessing/eog.py                           |  137 +-
 mne/preprocessing/ica.py                           | 1741 ++++++----
 mne/preprocessing/infomax_.py                      |  276 ++
 mne/preprocessing/maxfilter.py                     |   11 +-
 mne/preprocessing/ssp.py                           |   19 +-
 mne/preprocessing/stim.py                          |    4 +-
 mne/preprocessing/tests/test_ctps.py               |   84 +
 mne/preprocessing/tests/test_ecg.py                |   13 +-
 mne/preprocessing/tests/test_eog.py                |    6 +-
 mne/preprocessing/tests/test_ica.py                |  385 ++-
 mne/preprocessing/tests/test_infomax.py            |  136 +
 mne/preprocessing/tests/test_ssp.py                |    8 +-
 mne/preprocessing/tests/test_stim.py               |    4 +-
 mne/proj.py                                        |   41 +-
 mne/realtime/__init__.py                           |    1 +
 mne/realtime/client.py                             |   19 +-
 mne/realtime/epochs.py                             |   25 +-
 mne/realtime/fieldtrip_client.py                   |  296 ++
 mne/realtime/mockclient.py                         |   10 +-
 mne/realtime/stim_server_client.py                 |   36 +-
 mne/realtime/tests/test_fieldtrip_client.py        |   68 +
 mne/realtime/tests/test_mockclient.py              |   14 +-
 mne/realtime/tests/test_stim_client_server.py      |   28 +-
 mne/report.py                                      | 1287 ++++++++
 mne/selection.py                                   |    5 +-
 mne/simulation/evoked.py                           |    4 +-
 mne/simulation/source.py                           |    8 +-
 mne/simulation/tests/test_evoked.py                |   27 +-
 mne/source_estimate.py                             |  285 +-
 mne/source_space.py                                |  237 +-
 mne/stats/__init__.py                              |    1 +
 mne/stats/cluster_level.py                         |   59 +-
 mne/stats/multi_comp.py                            |    2 +-
 mne/stats/parametric.py                            |   49 +-
 mne/stats/permutations.py                          |    4 +-
 mne/stats/regression.py                            |  135 +
 mne/stats/tests/test_cluster_level.py              |   35 +-
 mne/stats/tests/test_regression.py                 |   67 +
 mne/surface.py                                     |  160 +-
 mne/tests/test_channels.py                         |  109 +
 mne/tests/test_coreg.py                            |   18 +-
 mne/tests/test_cov.py                              |   53 +-
 mne/tests/test_epochs.py                           |  502 ++-
 mne/tests/test_event.py                            |   75 +-
 mne/tests/test_evoked.py                           |  384 +++
 mne/tests/test_filter.py                           |   34 +-
 mne/tests/test_fixes.py                            |   21 +-
 mne/tests/test_hdf5.py                             |   26 +
 mne/tests/test_label.py                            |  340 +-
 mne/tests/test_misc.py                             |    2 +-
 mne/tests/test_proj.py                             |   49 +-
 mne/tests/test_report.py                           |  119 +
 mne/tests/test_source_estimate.py                  |  167 +-
 mne/tests/test_source_space.py                     |  195 +-
 mne/tests/test_surface.py                          |   40 +-
 mne/tests/test_transforms.py                       |   33 +-
 mne/tests/test_utils.py                            |  177 +-
 mne/tests/test_viz.py                              |  511 ---
 mne/time_frequency/__init__.py                     |    2 +-
 mne/time_frequency/ar.py                           |    6 +-
 mne/time_frequency/csd.py                          |    2 +-
 mne/time_frequency/multitaper.py                   |   24 +-
 mne/time_frequency/psd.py                          |   64 +-
 mne/time_frequency/stft.py                         |   12 +-
 mne/time_frequency/tests/test_ar.py                |   12 +-
 mne/time_frequency/tests/test_csd.py               |   14 +-
 mne/time_frequency/tests/test_multitaper.py        |   16 +-
 mne/time_frequency/tests/test_psd.py               |   22 +-
 mne/time_frequency/tests/test_stft.py              |    2 +-
 mne/time_frequency/tests/test_tfr.py               |   72 +-
 mne/time_frequency/tfr.py                          |  521 ++-
 mne/transforms.py                                  |  190 +-
 mne/utils.py                                       |  580 +++-
 mne/viz.py                                         | 3460 --------------------
 mne/viz/_3d.py                                     |  651 ++++
 mne/viz/__init__.py                                |   20 +
 mne/viz/circle.py                                  |  408 +++
 mne/viz/epochs.py                                  |  451 +++
 mne/viz/evoked.py                                  |  296 ++
 mne/viz/ica.py                                     |  484 +++
 mne/viz/misc.py                                    |  521 +++
 mne/viz/raw.py                                     |  610 ++++
 mne/{fiff/bti => viz}/tests/__init__.py            |    0
 .../bti/tests/__init__.py => viz/tests/__init__py} |    0
 mne/viz/tests/test_3d.py                           |  115 +
 mne/viz/tests/test_circle.py                       |   94 +
 mne/viz/tests/test_epochs.py                       |  117 +
 mne/viz/tests/test_evoked.py                       |  106 +
 mne/viz/tests/test_ica.py                          |  140 +
 mne/viz/tests/test_misc.py                         |  114 +
 mne/viz/tests/test_raw.py                          |  107 +
 mne/viz/tests/test_topo.py                         |  119 +
 mne/viz/tests/test_topomap.py                      |  131 +
 mne/viz/tests/test_utils.py                        |   28 +
 mne/viz/topo.py                                    |  725 ++++
 mne/viz/topomap.py                                 | 1035 ++++++
 mne/viz/utils.py                                   |  364 ++
 setup.cfg                                          |   13 +-
 setup.py                                           |   42 +-
 475 files changed, 31733 insertions(+), 11032 deletions(-)

diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..6b9b8a5
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,8 @@
+[run]
+branch = True
+source = mne
+include = */mne/*
+omit =
+    */mne/externals/*
+    */bin/*
+    */setup.py
diff --git a/.gitignore b/.gitignore
index 0c1303a..a33eecd 100755
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,10 @@ tmp-*.w
 tmtags
 auto_examples
 MNE-sample-data*
+MNE-spm-face*
+MNE-eegbci-data*
+MNE-somato-data*
+MEGSIM*
 build
 coverage
 
diff --git a/.mailmap b/.mailmap
index 8b0f8a7..fbf64aa 100644
--- a/.mailmap
+++ b/.mailmap
@@ -6,6 +6,7 @@ Martin Luessi <mluessi at nmr.mgh.harvard.edu> mluessi at nmr.mgh.harvard.edu <mluessi
 Martin Luessi <mluessi at nmr.mgh.harvard.edu> martin <martin at think.hsd1.ma.comcast.net>
 Martin Luessi <mluessi at nmr.mgh.harvard.edu> martin <martin at think.(none)>
 Matti Hamalainen <msh at nmr.mgh.harvard.edu> Matti Hamalainen <msh at parsley.nmr.mgh.harvard.edu>
+Matti Hamalainen <msh at nmr.mgh.harvard.edu> mshamalainen <msh at nmr.mgh.harvard.edu>
 Christian Brodbeck <christianmbrodbeck at gmail.com> christianmbrodbeck <christianmbrodbeck at gmail.com>
 Louis Thibault <louist87 at gmail.com> = <louist87 at gmail.com>
 Louis Thibault <louist87 at gmail.com> Louis Thibault <louist at ltpc.(none)>
@@ -19,7 +20,9 @@ Denis A. Engemann <denis.engemann at gmail.com> dengemann <d.engemann at fz-juelich.de
 Denis A. Engemann <denis.engemann at gmail.com> Denis Engemann <dengemann at Deniss-MacBook-Pro.local>
 Denis A. Engemann <denis.engemann at gmail.com> Denis A. Engemann <d.engemann at fz-juelich.de>
 Denis A. Engemann <denis.engemann at gmail.com> Denis Engemann <dengemann at pool-186-21-zam037.wlan.kfa-juelich.de>
+Denis A. Engemann <denis.engemann at gmail.com> Denis A. Engemann <denisaengemann at Denis-A-Engemanns-MacBook-Air.local>
 Daniel Strohmeier <daniel.strohmeier at googlemail.com> joewalter <daniel.strohmeier at googlemail.com>
+Daniel Strohmeier <daniel.strohmeier at googlemail.com> Daniel Strohmeier <daniel.strohmeier at googlemail.com>
 Dan G. Wakeman <dgwakeman at gmail.com>
 Teon Brooks <teon.brooks at gmail.com>
 Teon Brooks <teon.brooks at gmail.com> Teon <teon at nyu.edu>
@@ -33,3 +36,14 @@ Mainak Jas <mainakjas at gmail.com> Mainak <mainakjas at gmail.com>
 Alan Leggitt <leggitta3 at gmail.com> leggitta <leggitta3 at gmail.com>
 Praveen Sripad <pravsripad at gmail.com> prav <pravsripad at gmail.com>
 Praveen Sripad <pravsripad at gmail.com> prav <prav at prav-dell.(none)>
+Martin Billinger <martin.billinger at tugraz.at> kazemakase <kazemakase at users.noreply.github.com>
+Martin Billinger <martin.billinger at tugraz.at> Martin Billinger <flkazemakase at gmail.com>
+Martin Billinger <martin.billinger at tugraz.at> Martin <martin.billinger at tugraz.at>
+Mainak Jas <mainakjas at gmail.com> Mainak Jas <mainak at neuro.hut.fi>
+Dan G. Wakeman <dgwakeman at gmail.com> Daniel Wakeman <dwakeman at marcie.nmr.mgh.harvard.edu>
+Marmaduke Woodman <mmwoodman at gmail.com> maedoc <maedoc at mm.st>
+Brad Buran <bburan at galenea.com> Brad Buran <bburan at alum.mit.edu>
+Cathy Nangini <cnangini at gmail.com> CN <cnangini at gmail.com>
+Olaf Hauk <olaf.hauk at mrc-cbu.cam.ac.uk> Olaf Hauk <olaf at mac0086.local>
+Jean-Remi King <jeanremi.kibng+github at gmail.com> kingjr <jeanremi.kibng+github at gmail.com>
+Roan LaPlante <aestrivex at gmail.com> aestrivex <aestrivex at gmail.com>
diff --git a/.travis.yml b/.travis.yml
index c61d0f0..0d16828 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,35 +1,71 @@
 language: python
+
 env:
-    - COVERAGE=--with-coverage MNE_FORCE_SERIAL=1 MNE_SKIP_SAMPLE_DATASET_TESTS=1
-python:
-    - "2.7"
-virtualenv:
-    system_site_packages: true
+    # Enable python 2 and python 3 builds
+    # DEPS=full: build optional dependencies: pandas, statsmodels,
+    #            scikit-learn, patsy, pytables, nibabel; in the case of
+    #            Python 2, also nitime
+    # DEPS=minimal: don't build optional dependencies; tests that require those
+    #               dependencies are supposed to be skipped
+    - PYTHON=2.7 DEPS=full
+    - PYTHON=3.3 DEPS=full
+    - PYTHON=2.6 DEPS=full
+    - PYTHON=2.7 DEPS=minimal
+# Setup anaconda
 before_install:
-    - sudo apt-get update -qq
-    - sudo apt-get install -qq python-scipy python-nose
-    - sudo apt-get install python-pip
-    - sudo apt-get install python-nibabel python-nitime python-pandas
-    - sudo pip install scikit-learn
+  - wget http://repo.continuum.io/miniconda/Miniconda-2.2.2-Linux-x86_64.sh -O miniconda.sh
+  - chmod +x miniconda.sh
+  - ./miniconda.sh -b
+  - export PATH=/home/travis/anaconda/bin:$PATH
+  - conda update --yes conda
+  # The next couple of lines fix a crash with multiprocessing on Travis and are not specific to using Miniconda
+  - sudo rm -rf /dev/shm
+  - sudo ln -s /run/shm /dev/shm
+
 install:
-    - if [ "${COVERAGE}" == "--with-coverage" ]; then sudo pip install coverage; fi
-    - if [ "${COVERAGE}" == "--with-coverage" ]; then sudo pip install coveralls; fi
+    - conda create -n testenv --yes pip python=$PYTHON
+    - source activate testenv
+    - conda install --yes ipython==1.1.0 numpy scipy nose matplotlib
+    - if [ "${DEPS}" == "full" ]; then
+        conda install --yes pandas statsmodels scikit-learn patsy pytables;
+        pip install nibabel;
+        if [ ${PYTHON:0:1} == "2" ]; then
+          pip install nitime;
+        fi;
+      fi;
+    - pip install coverage; pip install coveralls; pip install nose-timer
+    - MNE_FORCE_SERIAL=1
+    - MNE_SKIP_SAMPLE_DATASET_TESTS=1
+    # Skip tests that require large downloads over the network to save bandwidth
+    # usage as travis workers are stateless and therefore traditional local
+    # disk caching does not work.
+    - export MNE_SKIP_NETWORK_TESTS=1
     - python setup.py build
     - python setup.py install
+    - myscripts='browse_raw bti2fiff surf2bem'
+    - for script in $myscripts; do mne $script --help >/dev/null; done;
     - SRC_DIR=$(pwd)
     - cd ~
-    - MNE_DIR=$(python -c 'import mne;print mne.__path__[0]')
-    - ln -s ${SRC_DIR}/mne/fiff/tests/data ${MNE_DIR}/fiff/tests/data
-    - ln -s ${SRC_DIR}/mne/fiff/bti/tests/data ${MNE_DIR}/fiff/bti/tests/data
-    - ln -s ${SRC_DIR}/mne/fiff/edf/tests/data ${MNE_DIR}/fiff/edf/tests/data
-    - ln -s ${SRC_DIR}/mne/fiff/kit/tests/data ${MNE_DIR}/fiff/kit/tests/data
-    - ln -s ${SRC_DIR}/mne/fiff/brainvision/tests/data ${MNE_DIR}/fiff/brainvision/tests/data
+    - MNE_DIR=$(python -c 'import mne;print(mne.__path__[0])')
+    - ln -s ${SRC_DIR}/mne/io/tests/data ${MNE_DIR}/io/tests/data
+    - ln -s ${SRC_DIR}/mne/io/bti/tests/data ${MNE_DIR}/io/bti/tests/data
+    - ln -s ${SRC_DIR}/mne/io/edf/tests/data ${MNE_DIR}/io/edf/tests/data
+    - ln -s ${SRC_DIR}/mne/io/kit/tests/data ${MNE_DIR}/io/kit/tests/data
+    - ln -s ${SRC_DIR}/mne/io/brainvision/tests/data ${MNE_DIR}/io/brainvision/tests/data
+    - ln -s ${SRC_DIR}/mne/io/egi/tests/data ${MNE_DIR}/io/egi/tests/data
+    - ln -s ${SRC_DIR}/setup.cfg ${MNE_DIR}/../setup.cfg
+    - ln -s ${SRC_DIR}/.coveragerc ${MNE_DIR}/../.coveragerc
+    # Link coverage to src dir, coveralls should be run from there (needs git calls)
+    - ln -s ${MNE_DIR}/../.coverage ${SRC_DIR}/.coverage
+
 script:
+    # Suppress the parallel outputs for logging cleanliness
+    - export MNE_LOGGING_LEVEL=warning
     - cd ${MNE_DIR}/../
-    - TEST="nosetests -v --exe mne"
-    - TEST_COVER="nosetests -v --exe --with-coverage --cover-package=mne
-      --cover-html --cover-html-dir=coverage mne"
-    - if [ "${COVERAGE}" == "--with-coverage" ]; then ${TEST};
-      else ${TEST_COVER}; fi
+    - nosetests --with-timer --timer-top-n 30;
+
 after_success:
-    - if [ "${COVERAGE}" == "--with-coverage" ]; then coveralls; fi
+    # Need to run from source dir to execute "git" commands
+    - echo "Running coveralls";
+    - cd ${SRC_DIR};
+    - coveralls;
diff --git a/Makefile b/Makefile
index 8e583bb..2fd3513 100755
--- a/Makefile
+++ b/Makefile
@@ -21,7 +21,10 @@ clean-build:
 clean-ctags:
 	rm -f tags
 
-clean: clean-build clean-pyc clean-so clean-ctags
+clean-cache:
+	find . -name "__pycache__" | xargs rm -rf
+
+clean: clean-build clean-pyc clean-so clean-ctags clean-cache
 
 in: inplace # just a shortcut
 inplace:
@@ -31,7 +34,7 @@ sample_data: $(CURDIR)/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif
 	@echo "Target needs sample data"
 
 $(CURDIR)/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif:
-	wget ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE-sample-data-processed.tar.gz
+	wget -c ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE-sample-data-processed.tar.gz
 	tar xvzf MNE-sample-data-processed.tar.gz
 	mv MNE-sample-data examples/
 	ln -sf ${PWD}/examples/MNE-sample-data ${PWD}/MNE-sample-data
diff --git a/README.rst b/README.rst
index cd7b1db..bc77ba5 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,3 @@
-
 .. -*- mode: rst -*-
 
 
@@ -72,7 +71,7 @@ Dependencies
 ^^^^^^^^^^^^
 
 The required dependencies to build the software are python >= 2.6,
-NumPy >= 1.4, SciPy >= 0.7.2 and matplotlib >= 0.98.4.
+NumPy >= 1.6, SciPy >= 0.7.2 and matplotlib >= 0.98.4.
 
 Some isolated functions require pandas >= 0.7.3 and nitime (multitaper analysis).
 
diff --git a/bin/mne b/bin/mne
index cbd8096..a8169fe 100755
--- a/bin/mne
+++ b/bin/mne
@@ -8,16 +8,18 @@ import os.path as op
 import mne
 
 mne_bin_dir = op.dirname(mne.__file__)
-valid_commands = sorted(glob.glob(op.join(mne_bin_dir, 'commands', 'mne_*.py')))
+valid_commands = sorted(glob.glob(op.join(mne_bin_dir,
+                                          'commands', 'mne_*.py')))
 valid_commands = [c.split(op.sep)[-1][4:-3] for c in valid_commands]
 
+
 def print_help():
-    print "Usage : mne command options\n"
-    print "Accepted commands :\n"
+    print("Usage : mne command options\n")
+    print("Accepted commands :\n")
     for c in valid_commands:
-        print "\t- %s" % c
-    print "\nExample : mne browse_raw --raw sample_audvis_raw.fif"
-    print "\nGetting help example : mne compute_proj_eog -h"
+        print("\t- %s" % c)
+    print("\nExample : mne browse_raw --raw sample_audvis_raw.fif")
+    print("\nGetting help example : mne compute_proj_eog -h")
     sys.exit(0)
 
 if len(sys.argv) == 1:
@@ -25,9 +27,9 @@ if len(sys.argv) == 1:
 elif ("help" in sys.argv[1] or "-h" in sys.argv[1]):
     print_help()
 elif sys.argv[1] == "--version":
-    print "MNE %s" % mne.__version__
+    print("MNE %s" % mne.__version__)
 elif sys.argv[1] not in valid_commands:
-    print 'Invalid command: "%s"\n' % sys.argv[1]
+    print('Invalid command: "%s"\n' % sys.argv[1])
     print_help()
     sys.exit(0)
 else:
diff --git a/doc/source/cite.rst b/doc/source/cite.rst
index e4c3734..14e323a 100644
--- a/doc/source/cite.rst
+++ b/doc/source/cite.rst
@@ -5,6 +5,10 @@ Cite MNE and MNE-Python
 
 If you use the implementations provided by the MNE software in your research, you should cite:
 
-    [1] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, L. Parkkonen, M. Hämäläinen, `MNE software for processing MEG and EEG data <http://www.ncbi.nlm.nih.gov/pubmed/24161808>`_, NeuroImage, 2013, ISSN 1053-8119, `[DOI] <http://dx.doi.org/10.1016/j.neuroimage.2013.10.027>`_
+    [1] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, L. Parkkonen, M. Hämäläinen, `MNE software for processing MEG and EEG data <http://www.ncbi.nlm.nih.gov/pubmed/24161808>`_, NeuroImage, Volume 86, 1 February 2014, Pages 446-460, ISSN 1053-8119, `[DOI] <http://dx.doi.org/10.1016/j.neuroimage.2013.10.027>`_
+
+If you use the Python code, you should also cite:
+
+    [2] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, R. Goj, M. Jas, T. Brooks, L. Parkkonen, M. Hämäläinen, `MEG and EEG data analysis with MNE-Python <http://www.frontiersin.org/Journal/Abstract.aspx?s=1304&name=brain_imaging_methods&ART_DOI=10.3389/fnins.2013.00267>`_, Frontiers in Neuroscience, Volume 7, 2013, ISSN 1662-453X, `[DOI] <http://dx.doi.org/10.3389/fnins.2013.00267>`_
 
 You should also cite the related method papers, some of which are listed in :ref:`ch_reading`.
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index b420a6e..13eb809 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -198,6 +198,10 @@ These steps can be broken out to be more explicit as:
 
     ln -s <path to mne-python>/mne ~/.local/lib/python2.7/site-packages/mne
 
+   Also create a link for the mne-python scripts::
+   
+    ln -s <path to mne-python>/bin/mne /usr/local/bin/mne
+
    Since you make a symbolic link to the local directory, you won't require
    root access while editing the files and the changes in your working
    directory are automatically reflected in the installation directory. To
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
index 826b680..43e0ee3 100644
--- a/doc/source/getting_started.rst
+++ b/doc/source/getting_started.rst
@@ -3,34 +3,106 @@
 Getting Started
 ===============
 
-Inside the Martinos Center
---------------------------
-For people within the MGH/MIT/HMS Martinos Center mne is available on the network.
+This page will help you get started with MNE-Python. If you are new to Python,
+here is a very good place to get started: http://scipy-lectures.github.com.
+If you are at the Martinos Center, please see :ref:`inside_martinos`. If you
+would like to use a custom installation of Python (or have specific questions
+about integrating special tools like IPython notebooks), please see
+:ref:`detailed_notes`.
 
-In a terminal do::
+Outside the Martinos Center
+---------------------------
 
-    setenv PATH /usr/pubsw/packages/python/epd/bin:${PATH}
+For a fast and up to date scientific Python environment that resolves all
+dependencies you can install Enthought Canopy available at:
 
-If you use Bash replace the previous instruction with::
+https://www.enthought.com/products/canopy/
 
-    export PATH=/usr/pubsw/packages/python/epd/bin:${PATH}
+Canopy is free for academic purposes. If you cannot benefit from
+an academic license and you don't want to pay for it, you can
+use Canopy Express, which is a lightweight version (no 3D visualization
+support, for example): https://www.enthought.com/store/.
 
-Then start the python interpreter with:
+To test that everything works properly, open up IPython::
 
-    ipython
+    ipython --pylab qt
 
-Then type::
+Now that you have a working Python environment you can install MNE.
+
+The first decision you must make is whether you want the most recent stable
+version or the development version (the latter contains new features, but the
+function names and usage examples may not be fully settled).
+
+Stable Version Instructions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can install the latest stable version with pip::
+
+    pip install mne --upgrade
+
+Now that you have installed MNE, check and optimize the installation (:ref:`check_and_optimize`).
+
+Development Version Instructions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you know you would like to contribute to the project, please follow the
+instructions here: :ref:`using-git`
+
+If you just want to start using the latest development version (the most up to date)::
+
+    pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev
+
+.. _check_and_optimize:
+
+Check and Optimize Installation
+-------------------------------
+
+To check that everything went fine, in IPython, type::
 
     >>> import mne
 
 If you get a new prompt with no error messages, you should be good to go.
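+
+As an additional quick check, you can print the installed version (the
+``__version__`` attribute is set when the package is imported):
+
+    >>> print(mne.__version__) # doctest: +SKIP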
-Start with the :ref:`examples-index`.
 
-Outside the Martinos Center
----------------------------
+CUDA Optimization
+^^^^^^^^^^^^^^^^^
+
+If you want to use NVIDIA CUDA for filtering (which can yield 3-4x speedups),
+you'll need to install the NVIDIA CUDA toolkit on your system, and then both
+pycuda and scikits.cuda, see:
+
+https://developer.nvidia.com/cuda-downloads
+
+http://mathema.tician.de/software/pycuda
+
+http://wiki.tiker.net/PyCuda/Installation/
+
+https://github.com/lebedov/scikits.cuda
+
+To initialize mne-python CUDA support, after installing these dependencies
+and running their associated unit tests (to ensure your installation is
+correct), you can run:
+
+    >>> mne.cuda.init_cuda() # doctest: +SKIP
+
+If you have everything installed correctly, you should see an INFO-level log
+message telling you your CUDA hardware's available memory. To have CUDA
+initialized on startup, you can do:
+
+    >>> mne.utils.set_config('MNE_USE_CUDA', 'true') # doctest: +SKIP
+
+You can test if MNE CUDA support is working by running the associated test:
+
+    nosetests mne/tests/test_filter.py
+
+If all tests pass with none skipped, then mne-python CUDA support works.
+
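+Once CUDA support works, GPU filtering is requested through the ``n_jobs``
+parameter of the filtering routines. A minimal sketch (assuming ``raw`` is a
+loaded ``Raw`` instance; the band edges here are arbitrary):
+
+    >>> raw.filter(1., 40., n_jobs='cuda') # doctest: +SKIP
+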
 
-MNE is written in pure Python making it easy to setup of
-any machine with Python >=2.6, NumPy >= 1.4, SciPy >= 0.7.2
+.. _detailed_notes:
+
+Detailed Notes
+--------------
+
+MNE is written in pure Python, making it easy to set up on
+any machine with Python >= 2.6, NumPy >= 1.6, SciPy >= 0.7.2
 and matplotlib >= 1.1.0.
 
 Some isolated functions (e.g. filtering with firwin2) require SciPy >= 0.9.
@@ -46,29 +118,22 @@ To run all documentation examples the following additional packages are required
 Note. For optimal performance we recommend installing recent versions of
 NumPy (> 1.7), SciPy (> 0.10) and scikit-learn (>= 0.14).
 
-For a fast and up to date scientific Python environment that resolves all
-dependencies you can install Enthought Canopy available at:
-
-https://www.enthought.com/products/canopy/
-
-Canopy is free for academic purposes. If you cannot benefit from the
-an academic license and you don't want to pay for it, you can
-use Canopy express which is a lightweight version (no 3D visualization
-support for example):
-
-https://www.enthought.com/store/
+Development Environment
+^^^^^^^^^^^^^^^^^^^^^^^
 
 Note that we explicitly support the following Python setups since they reflect our
 development environments, and functionality is best tested on them:
-    
+
     * EPD 7.3 (Mac, Linux)
-    
+
     * Canopy >= 1.0 (Mac, Linux)
 
     * Anaconda (Mac)
-    
+
     * Debian / Ubuntu standard system Python + Scipy stack
 
+Anaconda
+^^^^^^^^
 
 Note for developers. To make Anaconda work with our test suite, a few
 manual adjustments might be necessary. This may require
@@ -76,13 +141,16 @@ manually adjusting the python interpreter invoked by the nosetests and
 the sphinx-build 'binaries' (http://goo.gl/Atqh26).
 Tested on a recent MacBook Pro running Mac OS X 10.8 and Mac OS X 10.9
 
-If you use another Python setup and you encounter some difficulties please 
-report them on the MNE mailing list or on github to get assistance.
+Multi-threading
+^^^^^^^^^^^^^^^
 
-To test that everything works properly, open up IPython::
-
-    ipython
+For optimal performance we recommend using numpy / scipy with the multi-threaded
+ATLAS, GotoBLAS2, or Intel MKL. For example, the Enthought Canopy and the Anaconda
+distributions ship with tested MKL-compiled numpy / scipy versions. Depending on
+the use case and your system, this may speed up operations by a factor greater than 10.
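+
+To see which BLAS / LAPACK implementation your numpy build links against, you
+can inspect its build configuration (``numpy.show_config`` prints the detected
+libraries):
+
+    >>> import numpy; numpy.show_config() # doctest: +SKIP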
 
+pylab
+^^^^^
 
 Although all of the examples in this documentation are in the style
 of the standard Python interpreter, the use of IPython with the pylab option
@@ -96,6 +164,9 @@ On Linux, for example, QT is the only matplotlib backend for which 3D rendering
 will work correctly. On Mac OS X, certain matplotlib functions might not work
 as expected with other backends.
 
+IPython notebooks
+^^^^^^^^^^^^^^^^^
+
 To take full advantage of MNE-Python's visualization capacities in combination
 with IPython notebooks and inline displaying, please explicitly add the
 following magic method invocation to your notebook or configure your notebook
@@ -103,68 +174,32 @@ runtime accordingly.
 
     %pylab inline
 
-Now that you have a working Python environment you can install MNE.
-
-You can manually get the latest version of the code at:
-
-https://github.com/mne-tools/mne-python
+If you use another Python setup and you encounter some difficulties, please
+report them on the MNE mailing list or on GitHub to get assistance.
 
-Then from the mne-python folder (containing a setup.py file) you can install with::
 
-    python setup.py install
+.. _inside_martinos:
 
-or if you don't have admin access to your python setup (permission denied when install) use::
+Inside the Martinos Center
+--------------------------
 
-    python setup.py install --user
 For people within the MGH/MIT/HMS Martinos Center, MNE is available on the network.
 
-You can also install the latest release with easy_install::
+In a terminal do::
 
-    easy_install -U mne
+    setenv PATH /usr/pubsw/packages/python/epd/bin:${PATH}
 
-or with pip::
 If you use Bash, replace the previous instruction with::
 
-    pip install mne --upgrade
+    export PATH=/usr/pubsw/packages/python/epd/bin:${PATH}
 
-For the latest development version (the most up to date)::
 Then start the Python interpreter with::
 
-    pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev
+    ipython
 
-To check that everything went fine, in ipython, type::
+Then type::
 
     >>> import mne
 
 If you get a new prompt with no error messages, you should be good to go.
-
-If you want to use NVIDIA CUDA for filtering (can yield 3-4x speedups), you'll
-need to install the NVIDIA toolkit on your system, and then both pycuda and
-scikits.cuda, see:
-
-https://developer.nvidia.com/cuda-downloads
-http://mathema.tician.de/software/pycuda
-http://wiki.tiker.net/PyCuda/Installation/
-https://github.com/lebedov/scikits.cuda
-
-To initialize mne-python cuda support, after installing these dependencies
-and running their associated unit tests (to ensure your installation is correct)
-you can run:
-
-    >>> mne.cuda.init_cuda() # doctest: +SKIP
-
-If you have everything installed correctly, you should see an INFO-level log
-message telling you your CUDA hardware's available memory. To have CUDA
-initialized on startup, you can do:
-
-    >>> mne.utils.set_config('MNE_USE_CUDA', 'true') # doctest: +SKIP
-
-You can test if MNE CUDA support is working by running the associated test:
-
-    nosetests mne/tests/test_filter.py
-
-If all tests pass with none skipped, then mne-python CUDA support works.
-
-Learning Python
----------------
-
-If you are new to Python here is a very good place to get started:
-
-    * http://scipy-lectures.github.com
+Start with the :ref:`examples-index`.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 47a9194..9a90d84 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,3 +1,15 @@
+==========================
+Google Summer of Code 2014
+==========================
+
+For the Google Summer of Code 2014 (GSOC) we are looking for two ambitious
+students with strong Python skills and a background and / or interest in brain
+imaging research. If this describes you, or you happen to know someone who
+might be interested, please get in touch with us or forward this message to
+them. Here are our `GSOC projects <http://goo.gl/4KkmRC>`_.
+`Registration <http://goo.gl/KMPQRf>`_ opens on March 10, 2014, 7 p.m. UTC.
+
+
 ========
 MNE Home
 ========
diff --git a/doc/source/manual/cookbook.rst b/doc/source/manual/cookbook.rst
index a55e8aa..7613c71 100644
--- a/doc/source/manual/cookbook.rst
+++ b/doc/source/manual/cookbook.rst
@@ -558,7 +558,7 @@ Designating bad channels
 Sometimes some MEG or EEG channels are not functioning properly
 for various reasons. These channels should be excluded from the
 analysis by marking them bad using the mne_mark_bad_channels utility,
-see :ref:`CHDDHBEE`. Especially if a channel is not show
+see :ref:`CHDDHBEE`. Especially if a channel does not show
 a signal at all (flat) it is most important to exclude it from the
 analysis, since its noise estimate will be unrealistically low and
 thus the current estimate calculations will give a strong weight
diff --git a/doc/source/mne-python.rst b/doc/source/mne-python.rst
index 94fe4fd..1380044 100644
--- a/doc/source/mne-python.rst
+++ b/doc/source/mne-python.rst
@@ -9,6 +9,7 @@ MNE with Python
 
    getting_started.rst
    python_tutorial.rst
+   mne_report_tutorial.rst
    auto_examples/index.rst
    python_reference.rst
    whats_new.rst
diff --git a/doc/source/mne_report_tutorial.rst b/doc/source/mne_report_tutorial.rst
new file mode 100644
index 0000000..f74c08d
--- /dev/null
+++ b/doc/source/mne_report_tutorial.rst
@@ -0,0 +1,117 @@
+.. _mne_report_tutorial:
+
+=================================================
+Tutorial: Getting started with MNE report command
+=================================================
+
+This quick start will show you how to run the `mne report` command on the
+sample data set provided with MNE.
+
+First ensure that the files you want to render follow the filename conventions
+defined by MNE:
+
+==================   ====================================================
+Data object          Filename convention (ends with)
+==================   ====================================================
+raw                  -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz)
+events               -eve.fif(.gz)
+epochs               -epo.fif(.gz)
+evoked               -ave.fif(.gz)
+covariance           -cov.fif(.gz)
+trans                -trans.fif(.gz)
+forward              -fwd.fif(.gz)
+inverse              -inv.fif(.gz)
+==================   ====================================================
+
+The command line interface
+--------------------------
+
+To generate a barebones report from all the \*.fif files in the sample dataset,
+invoke the following command::
+
+    mne report --path MNE-sample-data/ --verbose
+
+On successful creation of the report, it will open the HTML in a new browser tab.
+To disable this, use the `--no-browser` option.
+
+If the report is generated for a single subject, give the SUBJECT name and the
+SUBJECTS_DIR; this will also generate the MRI slices (with BEM contours
+overlaid on top, if available)::
+
+    mne report --path MNE-sample-data/ --subject sample --subjects-dir MNE-sample-data/subjects --verbose
+
+To properly render `trans` and `covariance` files, add the measurement information::
+
+    mne report --path MNE-sample-data/ --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ 
+        --subject sample --subjects-dir MNE-sample-data/subjects --verbose
+
+To generate the report in parallel::
+
+    mne report --path MNE-sample-data/ --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ 
+        --subject sample --subjects-dir MNE-sample-data/subjects --verbose --jobs 6
+
+The Python interface
+--------------------
+
+The same functionality can also be achieved using the Python interface. Import
+the required functions:
+
+    >>> from mne.report import Report
+    >>> from mne.datasets import sample
+
+Generate the report:
+
+    >>> path = sample.data_path()
+    >>> report = Report()
+    Embedding : jquery-1.10.2.min.js
+    Embedding : jquery-ui.min.js
+    Embedding : bootstrap.min.js
+    Embedding : jquery-ui.min.css
+    Embedding : bootstrap.min.css
+
+Only include \*-eve.fif files in the report:
+
+    >>> report.parse_folder(data_path=path, pattern='*-eve.fif') # doctest: +SKIP
+    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif
+    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_eog-eve.fif
+    Rendering : .../MNE-sample-data/MEG/sample/ernoise_raw-eve.fif
+    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_raw-eve.fif
+    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_ecg-eve.fif
+
+Save the report as HTML, but do not open it in a browser:
+
+    >>> report.save('report.html', overwrite=True, open_browser=False) # doctest:+SKIP
+    Rendering : Table of Contents...
+
+The Python interface offers greater flexibility than the command line interface.
+Custom plots can be added to the report. Let us first generate a custom plot:
+
+    >>> from mne import read_evokeds
+    >>> fname = path + '/MEG/sample/sample_audvis-ave.fif'
+    >>> evoked = read_evokeds(fname, condition='Left Auditory', baseline=(None, 0)) # doctest:+ELLIPSIS
+    Reading .../MNE-sample-data/MEG/sample/sample_audvis-ave.fif ...
+        Read a total of 4 projection items:
+            PCA-v1 (1 x 102) active
+            PCA-v2 (1 x 102) active
+            PCA-v3 (1 x 102) active
+            Average EEG reference (1 x 60) active
+        Found the data of interest:
+            t =    -199.80 ...     499.49 ms (Left Auditory)
+            0 CTF compensation matrices available
+            nave = 55 - aspect type = 100
+    Projections have already been applied. Doing nothing.
+    Applying baseline correction ... (mode: mean)
+    >>> fig = evoked.plot() # doctest: +SKIP
+
+To add the custom plot to the report, do:
+
+    >>> report.add_section(fig, captions='Left Auditory', section='evoked') # doctest: +SKIP
+    >>> report.save('report.html', overwrite=True) # doctest: +SKIP
+    Rendering : Table of Contents...
+
+The MNE report command internally manages the sections so that plots belonging
+to the same section are rendered consecutively. Within a section, the plots are
+ordered in the same order in which they were added using the `add_section`
+command. Each section is identified by a toggle button in the navigation bar of
+the report, which can be used to show or hide the contents of the section.
+
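+For instance, calling `add_section` twice with the same `section` keyword places
+both plots consecutively under one toggle button (a minimal sketch reusing `fig`
+from above; `fig2` stands in for any second matplotlib figure):
+
+    >>> report.add_section(fig, captions='Left Auditory', section='evoked') # doctest: +SKIP
+    >>> report.add_section(fig2, captions='Right Auditory', section='evoked') # doctest: +SKIP
+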
+That's it!
diff --git a/doc/source/python_reference.rst b/doc/source/python_reference.rst
index 2994986..6101c43 100644
--- a/doc/source/python_reference.rst
+++ b/doc/source/python_reference.rst
@@ -12,6 +12,12 @@ are collected in a separate section. Functions and classes that are not below
 a module heading are found in the :py:mod:`mne` namespace.
 
 
+.. toctree::
+   :maxdepth: 2
+
+   python_reference
+
+
 Classes
 =======
 
@@ -21,9 +27,10 @@ Classes
    :toctree: generated/
    :template: class.rst
 
-   fiff.Raw
+   io.Raw
+   io.RawFIFF
    Epochs
-   fiff.Evoked
+   Evoked
    SourceEstimate
    Covariance
    Label
@@ -39,6 +46,7 @@ Classes
    realtime.MockRtClient
    realtime.StimServer
    realtime.StimClient
+   report.Report
 
 Logging and Configuration
 =========================
@@ -69,10 +77,12 @@ Logging and Configuration
 
    init_cuda
 
-File I/O
-========
+Reading raw data
+================
 
-.. currentmodule:: mne
+:py:mod:`mne.io`:
+
+.. currentmodule:: mne.io
 
 Classes:
 
@@ -80,8 +90,37 @@ Classes:
    :toctree: generated/
    :template: class.rst
 
-   fiff.Evoked
-   fiff.Raw
+   Raw
+
+Functions:
+
+.. autosummary::
+  :toctree: generated/
+  :template: function.rst
+
+  read_raw_bti
+  read_raw_edf
+  read_raw_kit
+  read_raw_brainvision
+  read_raw_egi
+
+:py:mod:`mne.io.kit`:
+
+.. currentmodule:: mne.io.kit
+
+.. autosummary::
+  :toctree: generated/
+  :template: function.rst
+
+  read_elp
+  read_hsp
+  read_mrk
+  write_hsp
+  write_mrk
+
+
+File I/O
+========
 
 Functions:
 
@@ -89,14 +128,18 @@ Functions:
    :toctree: generated/
    :template: function.rst
 
-   parse_config
    decimate_surface
+   get_head_surf
+   get_meg_helmet_surf
+   parse_config
+   read_annot
    read_bem_solution
    read_bem_surfaces
    read_cov
    read_dip
    read_epochs
    read_events
+   read_evokeds
    read_forward_solution
    read_label
    read_morph_map
@@ -108,9 +151,11 @@ Functions:
    read_surface
    read_trans
    save_stc_as_volume
+   write_annot
    write_bem_surface
    write_cov
    write_events
+   write_evokeds
    write_forward_solution
    write_label
    write_proj
@@ -118,52 +163,48 @@ Functions:
    write_surface
    write_trans
 
-.. currentmodule:: mne.fiff.bti
 
-:py:mod:`mne.fiff.bti`:
+Creating data objects from arrays
+=================================
 
-Functions:
-
-.. autosummary::
-  :toctree: generated/
-  :template: function.rst
-
-  read_raw_bti
+Classes:
 
-.. currentmodule:: mne.fiff.kit
+.. currentmodule:: mne
 
-:py:mod:`mne.fiff.kit`:
+:py:mod:`mne`:
 
 .. autosummary::
-  :toctree: generated/
-  :template: function.rst
+   :toctree: generated/
+   :template: class.rst
 
-   read_raw_kit
-   read_elp
-   read_hsp
-   read_mrk
-   write_hsp
-   write_mrk
+   EvokedArray
+   EpochsArray
 
-.. currentmodule:: mne.fiff.edf
+.. currentmodule:: mne.io
 
-:py:mod:`mne.fiff.edf`:
+:py:mod:`mne.io`:
 
 .. autosummary::
-  :toctree: generated/
-  :template: function.rst
+   :toctree: generated/
+   :template: class.rst
 
-   read_raw_edf
+   RawArray
 
-.. currentmodule:: mne.fiff.brainvision
+Functions:
+
+.. currentmodule:: mne
 
-:py:mod:`mne.fiff.brainvision`:
+:py:mod:`mne`:
 
 .. autosummary::
   :toctree: generated/
   :template: function.rst
 
-   read_raw_brainvision
+  create_info
+
+
+Sample datasets
+===============
 
 :py:mod:`mne.datasets.sample`:
 
@@ -230,8 +271,13 @@ Visualization
    plot_cov
    plot_drop_log
    plot_evoked
+   plot_evoked_image
    plot_evoked_topomap
-   plot_ica_panel
+   plot_evoked_field
+   plot_ica_sources
+   plot_ica_components
+   plot_ica_scores
+   plot_ica_overlay
    plot_image_epochs
    plot_raw
    plot_raw_psds
@@ -239,13 +285,11 @@ Visualization
    plot_sparse_source_estimates
    plot_topo
    plot_topo_image_epochs
-   plot_topo_phase_lock
-   plot_topo_power
    plot_topo_tfr
    plot_topomap
    compare_fiff
 
-.. currentmodule:: mne.fiff
+.. currentmodule:: mne.io
 
 .. autosummary::
    :toctree: generated/
@@ -284,12 +328,16 @@ Projections:
 
    compute_proj_ecg
    compute_proj_eog
+   create_ecg_epochs
+   create_eog_epochs
    find_ecg_events
    find_eog_events
+   find_outlier_adaptive
    ica_find_ecg_events
    ica_find_eog_events
    read_ica
    run_ica
+   infomax
 
 :py:mod:`mne.filter`:
 
@@ -344,7 +392,7 @@ Events
 
    combine_event_ids
    equalize_epoch_counts
-
+   add_channels_epochs
 
 Sensor Space Data
 =================
@@ -355,19 +403,21 @@ Sensor Space Data
    :toctree: generated/
    :template: function.rst
 
-   fiff.concatenate_raws
-   fiff.get_chpi_positions
-   fiff.pick_channels
-   fiff.pick_channels_cov
-   fiff.pick_channels_forward
-   fiff.pick_channels_regexp
-   fiff.pick_types
-   fiff.pick_types_evoked
-   fiff.pick_types_forward
-
+   concatenate_raws
+   equalize_channels
+   get_chpi_positions
+   pick_channels
+   pick_channels_cov
+   pick_channels_forward
+   pick_channels_regexp
+   pick_types
+   pick_types_evoked
+   pick_types_forward
+   read_ch_connectivity
    read_epochs
    read_reject_parameters
    read_selection
+   rename_channels
 
 
 Covariance
@@ -392,8 +442,8 @@ MRI Processing
 
 Step by step instructions for using :func:`gui.coregistration`:
 
- - `Coregistration for subjects with structural MRI 
-   <http://www.slideshare.net/mne-python/mnepython-coregistration>`_ 
+ - `Coregistration for subjects with structural MRI
+   <http://www.slideshare.net/mne-python/mnepython-coregistration>`_
  - `Scaling a template MRI for subjects for which no MRI is available
    <http://www.slideshare.net/mne-python/mnepython-scale-mri>`_
 
@@ -405,6 +455,7 @@ Step by step instructions for using :func:`gui.coregistration`:
    gui.fiducials
    create_default_subject
    scale_mri
+   scale_bem
    scale_labels
    scale_source_space
 
@@ -412,8 +463,12 @@ Step by step instructions for using :func:`gui.coregistration`:
 Forward Modeling
 ================
 
+:py:mod:`mne`:
+
 .. currentmodule:: mne
 
+Functions:
+
 .. autosummary::
    :toctree: generated/
    :template: function.rst
@@ -425,6 +480,7 @@ Forward Modeling
    convert_forward_solution
    do_forward_solution
    make_forward_solution
+   make_field_map
    read_bem_surfaces
    read_forward_solution
    read_trans
@@ -457,6 +513,16 @@ Inverse Solutions
 
 .. currentmodule:: mne.minimum_norm
 
+Classes:
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   InverseOperator
+
+Functions:
+
 .. autosummary::
    :toctree: generated/
    :template: function.rst
@@ -470,6 +536,8 @@ Inverse Solutions
    source_band_induced_power
    source_induced_power
    write_inverse_operator
+   point_spread_function
+   cross_talk_function
 
 :py:mod:`mne.inverse_sparse`:
 
@@ -521,17 +589,20 @@ Source Space Data
    grade_to_tris
    grade_to_vertices
    grow_labels
-   labels_from_parc
    label_sign_flip
    morph_data
    morph_data_precomputed
+   read_annot
    read_dip
    read_label
    read_source_estimate
    save_stc_as_volume
+   split_label
    stc_to_label
    transform_coordinates
+   transform_surface_to
    vertex_to_mni
+   write_annot
    write_label
 
 
@@ -554,8 +625,8 @@ Time-Frequency
    compute_raw_psd
    compute_epochs_psd
    iir_filter_raw
-   induced_power
    morlet
+   tfr_morlet
    single_trial_power
    yule_walker
    ar_raw
@@ -605,8 +676,10 @@ Statistics
    permutation_cluster_test
    permutation_cluster_1samp_test
    permutation_t_test
+   spatio_temporal_cluster_test
    spatio_temporal_cluster_1samp_test
    ttest_1samp_no_p
+   linear_regression
 
 Functions to compute connectivity (adjacency) matrices for cluster-level statistics
 
@@ -659,3 +732,22 @@ Realtime
 
 .. automodule:: mne.realtime
    :no-members:
+
+MNE-Report
+==========
+
+:py:mod:`mne.report`:
+
+.. automodule:: mne.report
+   :no-members:
+   :no-inherited-members:
+
+.. currentmodule:: mne.report
+
+Classes:
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   Report
diff --git a/doc/source/python_tutorial.rst b/doc/source/python_tutorial.rst
index 12abf40..539ddde 100644
--- a/doc/source/python_tutorial.rst
+++ b/doc/source/python_tutorial.rst
@@ -45,47 +45,17 @@ Installation of the required materials
 
 See :ref:`getting_started` with Python.
 
-Get the code
-^^^^^^^^^^^^
 
-  You can manually get the latest version of the code at:
+.. note:: The expected location for the MNE-sample data is my-path-to/mne-python/examples.
+    If you downloaded the data and an example asks whether to download it again,
+    make sure the data reside in the examples directory and that you run the
+    script from within that directory.
 
-  https://github.com/mne-tools/mne-python
+    From IPython, e.g., say::
 
-  Then from the mne-python folder (containing a setup.py file) you can install with::
+        cd examples/preprocessing
 
-      python setup.py install
 
-  You can also install the latest release with easy_install::
-
-      easy_install -U mne
-
-  or with pip::
-
-      pip install mne --upgrade
-
-  For the latest development version (the most up to date)::
-
-      pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev
-
-
-Make life easier
-~~~~~~~~~~~~~~~~
-
-  For optimal performance we recommend using numpy / scipy with the multi-threaded
-  ATLAS, gotoblas2, or intel MKL. For example, the Enthought Canopy and the Anaconda distributions
-  ship with tested MKL-compiled numpy / scipy versions. Depending on the use case and your system
-  this may speed up operations by a factor greater than 10.
-
-  The expected location for the MNE-sample data is my-path-to/mne-python/examples.
-  If you downloaded data and an example asks you whether to download it again, make sure
-  the data reside in the examples directory and you run the script from its current directory.
-
-  From IPython e.g. say::
-
-   cd examples/preprocessing
-
-   %run plot_find_ecg_artifacts.py
+        %run plot_find_ecg_artifacts.py
 
 
 From raw data to evoked data
@@ -128,24 +98,24 @@ Access raw data
     >>> from mne.datasets import sample
     >>> data_path = sample.data_path()
     >>> raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-    >>> print raw_fname # doctest: +SKIP
+    >>> print(raw_fname) # doctest: +SKIP
     ./MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw.fif
 
 .. note:: The MNE sample dataset should be downloaded automatically but be patient (approx. 2GB)
 
 Read data from file:
 
-    >>> raw = mne.fiff.Raw(raw_fname) # doctest:+ELLIPSIS
+    >>> raw = mne.io.Raw(raw_fname) # doctest:+ELLIPSIS
     Opening raw data ...
     Ready.
-    >>> print raw
+    >>> print(raw)
     <Raw  |  n_channels x n_times : 376 x 41700>
-    >>> print raw.info # doctest:+ELLIPSIS
-    <Info | 19 non-empty ...
+    >>> print(raw.info) # doctest:+ELLIPSIS
+    <Info | 17 non-empty ...
 
 Look at the channels in raw:
 
-    >>> print raw.ch_names # doctest:+ELLIPSIS
+    >>> print(raw.ch_names) # doctest:+ELLIPSIS
     ['MEG 0113', 'MEG 0112', ...]
 
 Read and plot a segment of raw data
@@ -154,9 +124,9 @@ Read and plot a segment of raw data
     >>> data, times = raw[:, start:stop]
     Reading 15015 ... 17266  =     99.998 ...   114.989 secs...
     [done]
-    >>> print data.shape
+    >>> print(data.shape)
     (376, 2252)
-    >>> print times.shape
+    >>> print(times.shape)
     (2252,)
     >>> data, times = raw[2:20:3, start:stop]  # access underlying data
     Reading 15015 ... 17266  =     99.998 ...   114.989 secs...
@@ -168,7 +138,7 @@ Read and plot a segment of raw data
 
 Save a segment of 150s of raw data (MEG only):
 
-    >>> picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, exclude='bads')
+    >>> picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, exclude='bads')
     >>> raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks, overwrite=True) # doctest: +ELLIPSIS
     Reading ...
 
@@ -182,7 +152,7 @@ First extract events:
     [done]
     319 events found
     Events id: [ 1  2  3  4  5 32]
-    >>> print events[:5]
+    >>> print(events[:5])
     [[6994    0    2]
      [7086    0    3]
      [7192    0    1]
@@ -212,12 +182,12 @@ The variable raw.info['bads'] is just a python list.
 
 Pick the good channels, excluding raw.info['bads']:
 
-    >>> picks = mne.fiff.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False, exclude='bads')
+    >>> picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False, exclude='bads')
 
 Alternatively one can restrict to magnetometers or gradiometers with:
 
-    >>> mag_picks = mne.fiff.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
-    >>> grad_picks = mne.fiff.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
+    >>> mag_picks = mne.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
+    >>> grad_picks = mne.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
 
 Define the baseline period:
 
@@ -233,15 +203,15 @@ Read epochs:
     Created an SSP operator (subspace dimension = 4)
     4 projection items activated
     145 matching events found
-    >>> print epochs
+    >>> print(epochs)
     <Epochs  |  n_events : 145 (good & bad), tmin : -0.2 (s), tmax : 0.5 (s), baseline : (None, 0),
-     'aud_r': 73, 'aud_l': 72>
+     'aud_l': 72, 'aud_r': 73>
 
 Get single epochs for one condition:
 
     >>> epochs_data = epochs['aud_l'].get_data() # doctest: +ELLIPSIS
     Reading ...
-    >>> print epochs_data.shape
+    >>> print(epochs_data.shape)
     (55, 365, 106)
 
 epochs_data is a 3D array of dimension (55 epochs, 365 channels, 106 time instants).
@@ -266,7 +236,7 @@ Compute evoked responses for auditory responses by averaging and plot it:
 
     >>> evoked = epochs['aud_l'].average() # doctest: +ELLIPSIS
     Reading ...
-    >>> print evoked
+    >>> print(evoked)
     <Evoked  |  comment : 'aud_l', time : [-0.199795, 0.499488], n_epochs : 55, n_channels x n_times : 364 x 106>
     >>> evoked.plot() # doctest:+SKIP
 
@@ -279,13 +249,13 @@ Compute evoked responses for auditory responses by averaging and plot it:
 
   >>> max_in_each_epoch = [e.max() for e in epochs['aud_l']] # doctest:+ELLIPSIS
   Reading ...
-  >>> print max_in_each_epoch[:4] # doctest:+ELLIPSIS
+  >>> print(max_in_each_epoch[:4]) # doctest:+ELLIPSIS
   [1.93751...e-05, 1.64055...e-05, 1.85453...e-05, 2.04128...e-05]
 
 It is also possible to read evoked data stored in a fif file:
 
     >>> evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
-    >>> evoked1 = mne.fiff.read_evoked(evoked_fname, setno='Left Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
+    >>> evoked1 = mne.read_evokeds(evoked_fname, condition='Left Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
     Reading .../MNE-sample-data/MEG/sample/sample_audvis-ave.fif ...
         Read a total of 4 projection items:
             PCA-v1 (1 x 102) active
@@ -301,14 +271,14 @@ It is also possible to read evoked data stored in a fif file:
 
 Or another one stored in the same file:
 
-    >>> evoked2 = mne.fiff.read_evoked(evoked_fname, setno='Right Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
+    >>> evoked2 = mne.read_evokeds(evoked_fname, condition='Right Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
     Reading ...
 
 Compute a contrast:
 
     >>> contrast = evoked1 - evoked2
 
-    >>> print contrast
+    >>> print(contrast)
     <Evoked  |  comment : 'Left Auditory - Right Auditory', time : [-0.199795, 0.499488], n_epochs : 116, n_channels x n_times : 376 x 421>
 
 Time-Frequency: Induced power and phase-locking values
@@ -423,4 +393,4 @@ What else can you do?
 Want to know more ?
 ^^^^^^^^^^^^^^^^^^^
 
-Browse :ref:`examples-index` gallery.
\ No newline at end of file
+Browse :ref:`examples-index` gallery.
diff --git a/doc/source/whats_new.rst b/doc/source/whats_new.rst
index a0d743f..b3ce6ad 100644
--- a/doc/source/whats_new.rst
+++ b/doc/source/whats_new.rst
@@ -1,6 +1,205 @@
 What's new
 ==========
 
+.. _changes_0_8:
+
+Version 0.8
+-----------
+
+Changelog
+~~~~~~~~~
+
+   - Add Python3 support by `Nick Ward`_, `Alex Gramfort`_, `Denis Engemann`_, and `Eric Larson`_
+
+   - Add `get_peak` method for evoked and stc objects by `Denis Engemann`_
+
+   - Add `iter_topography` function for radically simplified custom sensor topography plotting by `Denis Engemann`_
+
+   - Add field line interpolation by `Eric Larson`_
+
+   - Add full provenance tracking for epochs and improve `drop_log` by `Tal Linzen`_, `Alex Gramfort`_ and `Denis Engemann`_
+
+   - Add systematic contains method to Raw, Epochs and Evoked for channel type membership testing by `Denis Engemann`_
+
+   - Add fiff unicode writing and reading support by `Denis Engemann`_
+
+   - Add 3D MEG/EEG field plotting function and evoked method by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Add consistent channel-dropping methods to Raw, Epochs and Evoked by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Add `equalize_channels` function to set common channels for a list of Raw, Epochs, or Evoked objects by `Denis Engemann`_
+
+   - Add `plot_events` function to visually display paradigm by `Alex Gramfort`_
+
+   - Improved connectivity circle plot by `Martin Luessi`_
+
+   - Add ability to anonymize measurement info by `Eric Larson`_
+
+   - Add callback to connectivity circle plot to isolate connections to clicked nodes by `Roan LaPlante`_
+
+   - Add ability to add patch information to source spaces by `Eric Larson`_
+
+   - Add `split_label` function to divide labels into multiple parts by `Christian Brodbeck`_
+
+   - Add `color` attribute to `Label` objects by `Christian Brodbeck`_
+
+   - Add 'max' mode for extract_label_time_course by `Mads Jensen`_
+
+   - Add `rename_channels` function to change channel names and types in info object by `Dan Wakeman`_ and `Denis Engemann`_
+
+   - Add `compute_ems` function to extract the time course of experimental effects by `Denis Engemann`_, `Sébastien Marti`_ and `Alex Gramfort`_
+
+   - Add option to expand Labels defined in a source space to the original surface (`Label.fill()`) by `Christian Brodbeck`_
+
+   - GUIs can be invoked from the command line using `$ mne coreg` and `$ mne kit2fiff` by `Christian Brodbeck`_
+
+   - Add `add_channels_epochs` function to combine different recordings at the Epochs level by `Christian Brodbeck`_ and `Denis Engemann`_
+
+   - Add support for EGI Netstation simple binary files by `Denis Engemann`_
+
+   - Add support for treating arbitrary data (numpy ndarray) as a Raw instance by `Eric Larson`_
+
+   - Support for parsing the EDF+ annotation channel by `Martin Billinger`_
+
+   - Add EpochsArray constructor for creating epochs from numpy arrays by `Denis Engemann`_ and `Federico Raimondo`_
+
+   - Add connector to FieldTrip realtime client by `Mainak Jas`_
+
+   - Add color and event_id with legend options in plot_events in viz.py by `Cathy Nangini`_
+
+   - Add `events_list` parameter to `mne.concatenate_raws` to concatenate events corresponding to runs by `Denis Engemann`_
+
+   - Add `read_ch_connectivity` function and `ch_neighbor_connectivity` to read FieldTrip neighbor template .mat files and compute between sensor adjacency matrices by `Denis Engemann`_
+
+   - Add display of head in helmet from -trans.fif file to check coregistration quality by `Mainak Jas`_
+
+   - Add `raw.add_events` to allow adding events to a raw file by `Eric Larson`_
+
+   - Add `plot_image` method to Evoked object to display data as images by `JR King`_ and `Alex Gramfort`_ and `Denis Engemann`_
+
+   - Add BCI demo with CSP on motor imagery by `Martin Billinger`_
+
+   - New ICA API with unified methods for processing Raw, Epochs and Evoked objects by `Denis Engemann`_
+
+   - Apply ICA at the evoked stage by `Denis Engemann`_
+
+   - New ICA methods for visualizing unmixing quality, artifact detection and rejection by `Denis Engemann`_
+
+   - Add 'pick_channels' and 'drop_channels' mixin class to pick and drop channels from Raw, Epochs, and Evoked objects by `Andrew Dykstra`_ and `Denis Engemann`_
+
+   - Add `EvokedArray` class to create an Evoked object from an array by `Andrew Dykstra`_
+
+   - Add `plot_bem` method to visualize BEM contours on MRI anatomical images by `Mainak Jas`_ and `Alex Gramfort`_
+
+   - Add automated ECG detection using cross-trial phase statistics by `Denis Engemann`_ and `Juergen Dammers`_
+
+   - Add Forward class to succinctly display gain matrix info by `Andrew Dykstra`_
+
+   - Add reading and writing of split raw files by `Martin Luessi`_
+
+   - Add OLS regression function by `Tal Linzen`_, `Teon Brooks`_ and `Denis Engemann`_
+
+   - Add computation of point spread and cross-talk functions for MNE type solutions by `Alex Gramfort`_ and `Olaf Hauk`_
+
+   - Add mask parameter to `plot_evoked_topomap` and `evoked.plot_topomap` by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Add infomax and extended infomax ICA by `Denis Engemann`_, `Juergen Dammers`_ and `Lukas Breuer`_ and `Federico Raimondo`_
+
+   - Aesthetically redesign interpolated topography plots by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Simplify sensor space time-frequency analysis API with `tfr_morlet` function by `Alex Gramfort`_ and `Denis Engemann`_
+
+   - Add new somatosensory MEG dataset with nice time-frequency content by `Alex Gramfort`_
+
+   - Add HDF5 write/read support for SourceEstimates by `Eric Larson`_
+
+   - Add InverseOperator class to display inverse operator info by `Mainak Jas`_
+
+   - Add `$ mne report` command to generate html reports of MEG/EEG data analysis pipelines by `Mainak Jas`_, `Alex Gramfort`_ and `Denis Engemann`_
+
+   - Improve ICA verbosity with regard to rank reduction by `Denis Engemann`_
+
+BUG
+~~~
+
+   - Fix incorrect `times` attribute when stc was computed using `apply_inverse` after decimation at the epochs stage for certain arbitrary sample frequencies by `Denis Engemann`_
+
+   - Fix corner case error for step-down-in-jumps permutation test (when step-down threshold was high enough to include all clusters) by `Eric Larson`_
+
+   - Fix selection of total number of components via float when picking ICA sources by `Denis Engemann`_ and `Qunxi Dong`_
+
+   - Fix writing and reading transforms after modification in measurement info by `Denis Engemann`_ and `Martin Luessi`_ and `Eric Larson`_
+
+   - Fix pre-whitening / rescaling when estimating ICA on multiple channels without covariance by `Denis Engemann`_
+
+   - Fix ICA pre-whitening, avoid recomputation when applying ICA to new data by `Denis Engemann`_
+
+API
+~~~
+
+   - The minimum numpy version has been increased to 1.6 from 1.4.
+
+   - Epochs object now has a selection attribute to track provenance of selected Epochs. The length of the drop_log attribute is now the same as the length of the original events passed to Epochs. In earlier versions it had the length of the events filtered by event_id. Epochs has also now a plot_drop_log method.
+
+   - Deprecate Epochs.drop_picks in favor of a new method called drop_channels
+
+   - Deprecate `labels_from_parc` and `parc_from_labels` in favor of `read_annot` and `write_annot`
+
+   - The default of the new add_dist option of `setup_source_space` to add patch information will change from False to True in MNE-Python 0.9
+
+   - Deprecate `read_evoked` and `write_evoked` in favor of `read_evokeds` and `write_evokeds`.
+     `read_evokeds` will return all Evoked instances in a file by default.
+
+   - Deprecate `setno` in favor of `condition` in the initialization of an Evoked instance. This
+     affects `mne.fiff.Evoked` and `read_evokeds`, but not `read_evoked`.
+
+   - Deprecate `mne.fiff` module, use `mne.io` instead e.g. `mne.io.Raw` instead of `mne.fiff.Raw`.
+
+   - Pick functions (e.g., `pick_types`) are now in the mne namespace (e.g. use `mne.pick_types`).
+
+   - Deprecated ICA methods specific to one container type. Use ICA.fit, ICA.get_sources, ICA.apply and ICA.plot_XXX for processing Raw, Epochs and Evoked objects.
+
+   - The default smoothing method for `mne.stc_to_label` will change in v0.9, and the old method is deprecated.
+
+   - By default, for ICA the maximum number of PCA components equals the number of channels passed. The number of PCA components used to reconstruct the sensor space signals now defaults to the maximum number of PCA components estimated.
+
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number
+of commits):
+
+   418  Denis A. Engemann
+   284  Alexandre Gramfort
+   242  Eric Larson
+   155  Christian Brodbeck
+   144  Mainak Jas
+    49  Martin Billinger
+    49  Andrew Dykstra
+    44  Tal Linzen
+    37  Dan G. Wakeman
+    36  Martin Luessi
+    26  Teon Brooks
+    20  Cathy Nangini
+    15  Hari Bharadwaj
+    15  Roman Goj
+    10  Ross Maddox
+     9  Marmaduke Woodman
+     8  Praveen Sripad
+     8  Tanay
+     8  Roan LaPlante
+     5  Saket Choudhary
+     4  Nick Ward
+     4  Mads Jensen
+     3  Olaf Hauk
+     3  Brad Buran
+     2  Daniel Strohmeier
+     2  Federico Raimondo
+     2  Alan Leggitt
+     1  Jean-Remi King
+     1  Matti Hamalainen
+
+
 .. _changes_0_7:
 
 Version 0.7
@@ -584,3 +783,31 @@ of commits):
 .. _Romain Trachel: http://www-sop.inria.fr/athena/Site/RomainTrachel
 
 .. _Christopher Dinh: https://github.com/chdinh
+
+.. _Nick Ward: http://www.ucl.ac.uk/ion/departments/sobell/Research/NWard
+
+.. _Tal Linzen: http://tallinzen.net/
+
+.. _Roan LaPlante: https://github.com/aestrivex
+
+.. _Mads Jensen: http://cnru.dk/people/mads-jensen
+
+.. _Dan Wakeman: https://github.com/dgwakeman
+
+.. _Qunxi Dong: https://github.com/dongqunxi
+
+.. _Martin Billinger: https://github.com/kazemakase
+
+.. _Federico Raimondo: https://github.com/fraimondo
+
+.. _Cathy Nangini: https://github.com/KatiRG
+
+.. _JR King: https://github.com/kingjr
+
+.. _Juergen Dammers: https://github.com/jdammers
+
+.. _Olaf Hauk: http://www.neuroscience.cam.ac.uk/directory/profile.php?olafhauk
+
+.. _Lukas Breuer: http://www.researchgate.net/profile/Lukas_Breuer
diff --git a/doc/sphinxext/gen_rst.py b/doc/sphinxext/gen_rst.py
index 03d5f4b..e204334 100644
--- a/doc/sphinxext/gen_rst.py
+++ b/doc/sphinxext/gen_rst.py
@@ -570,6 +570,27 @@ def make_thumbnail(in_fname, out_fname, width, height):
     thumb.save(out_fname)
 
 
+def scale_image(in_fname, max_width):
+    """Scale image such that width <= max_width
+    """
+    img = Image.open(in_fname)
+    width_in, height_in = img.size
+
+    if width_in <= max_width:
+        return
+
+    scale = max_width / float(width_in)
+
+    width_sc = int(round(scale * width_in))
+    height_sc = int(round(scale * height_in))
+
+    # resize the image
+    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
+
+    # overwrite the image
+    img.save(in_fname)
+
+
 def get_short_module_name(module_name, obj_name):
     """ Get the shortest possible module name """
     parts = module_name.split('.')
@@ -685,17 +706,20 @@ def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
 
                 # find functions so we can later add links to the documentation
                 funregex = re.compile('[\w.]+\(')
+                fun_exclude = ['print']
                 with open(src_file, 'rt') as fid:
                     for line in fid.readlines():
                         if line.startswith('#'):
                             continue
                         for match in funregex.findall(line):
                             fun_name = match[:-1]
+                            if fun_name in fun_exclude:
+                                continue
                             try:
                                 exec('this_fun = %s' % fun_name, my_globals)
                             except Exception as err:
-                                print 'extracting function failed'
-                                print err
+                                print('Error: extracting function %s failed: '
+                                      '%s' % (fun_name, str(err)))
                                 continue
                             this_fun = my_globals['this_fun']
                             if not callable(this_fun):
@@ -761,6 +785,9 @@ def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
                         plt.savefig(image_path % fig_num, facecolor='black')
                     else:
                         plt.savefig(image_path % fig_num)
+
+                    # make sure the image is not too large
+                    scale_image(image_path % fig_num, 850)
                     figure_list.append(image_fname % fig_num)
                     last_fig_num = fig_num
 
@@ -768,6 +795,8 @@ def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
                 for scene in e.scenes:
                     last_fig_num += 1
                     mlab.savefig(image_path % last_fig_num)
+                    # make sure the image is not too large
+                    scale_image(image_path % last_fig_num, 850)
                     figure_list.append(image_fname % last_fig_num)
                     mlab.close(scene)
 
diff --git a/examples/connectivity/plot_cwt_sensor_connectivity.py b/examples/connectivity/plot_cwt_sensor_connectivity.py
index 6a5662c..2109194 100644
--- a/examples/connectivity/plot_cwt_sensor_connectivity.py
+++ b/examples/connectivity/plot_cwt_sensor_connectivity.py
@@ -17,11 +17,11 @@ domain using Morlet wavelets and the debiased Squared Weighted Phase Lag Index
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
-from mne import fiff
+from mne import io
 from mne.connectivity import spectral_connectivity, seed_target_indices
 from mne.datasets import sample
 from mne.viz import plot_topo_tfr
@@ -33,15 +33,15 @@ raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 # Add a bad channel
 raw.info['bads'] += ['MEG 2443']
 
 # Pick MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+                       exclude='bads')
 
 # Create epochs for left-visual condition
 event_id, tmin, tmax = 3, -0.2, 0.5
diff --git a/examples/connectivity/plot_mne_inverse_coherence_epochs.py b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
index 1355a06..03d8bd6 100644
--- a/examples/connectivity/plot_mne_inverse_coherence_epochs.py
+++ b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
 MNE-dSPM inverse solutions.
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.minimum_norm import (apply_inverse, apply_inverse_epochs,
                               read_inverse_operator)
 from mne.connectivity import seed_target_indices, spectral_connectivity
@@ -45,8 +45,8 @@ events = mne.read_events(fname_event)
 raw.info['bads'] += ['MEG 2443']
 
 # pick MEG channels
-picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
-                   exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -98,10 +98,10 @@ coh, freqs, times, n_epochs, n_tapers = spectral_connectivity(stcs,
     method='coh', mode='fourier', indices=indices,
     sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, n_jobs=2)
 
-print 'Frequencies in Hz over which coherence was averaged for alpha: '
-print freqs[0]
-print 'Frequencies in Hz over which coherence was averaged for beta: '
-print freqs[1]
+print('Frequencies in Hz over which coherence was averaged for alpha: ')
+print(freqs[0])
+print('Frequencies in Hz over which coherence was averaged for beta: ')
+print(freqs[1])
 
 # Generate a SourceEstimate with the coherence. This is simple since we
 # used a single seed. For more than one seeds we would have to split coh.
diff --git a/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py b/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
index f6808a7..472375e 100644
--- a/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
+++ b/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
@@ -7,16 +7,15 @@ The connectivity is computed between 4 labels across the spectrum
 between 5 and 40 Hz.
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
-import numpy as np
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
 from mne.connectivity import spectral_connectivity
 
@@ -35,8 +34,8 @@ events = mne.read_events(fname_event)
 raw.info['bads'] += ['MEG 2443']
 
 # Pick MEG channels
-picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
-                   exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                       exclude='bads')
 
 # Define epochs for left-auditory condition
 event_id, tmin, tmax = 1, -0.2, 0.5
diff --git a/examples/connectivity/plot_mne_inverse_label_connectivity.py b/examples/connectivity/plot_mne_inverse_label_connectivity.py
index 015822b..8bb9a21 100644
--- a/examples/connectivity/plot_mne_inverse_label_connectivity.py
+++ b/examples/connectivity/plot_mne_inverse_label_connectivity.py
@@ -10,17 +10,17 @@ is ordered based on the locations of the regions.
 """
 
 # Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
 from mne.connectivity import spectral_connectivity
 from mne.viz import circular_layout, plot_connectivity_circle
@@ -40,8 +40,8 @@ events = mne.read_events(fname_event)
 raw.info['bads'] += ['MEG 2443']
 
 # Pick MEG channels
-picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
-                   exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                       exclude='bads')
 
 # Define epochs for left-auditory condition
 event_id, tmin, tmax = 1, -0.2, 0.5
@@ -58,8 +58,9 @@ stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
                             pick_ori="normal", return_generator=True)
 
 # Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
-labels, label_colors = mne.labels_from_parc('sample', parc='aparc',
-                                            subjects_dir=subjects_dir)
+labels = mne.read_labels_from_annot('sample', parc='aparc',
+                                    subjects_dir=subjects_dir)
+label_colors = [label.color for label in labels]
 
 # Average the source estimates within each label using sign-flips to reduce
 # signal cancellations, also here we return a generator
@@ -79,13 +80,16 @@ label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
 fmin = 8.
 fmax = 13.
 sfreq = raw.info['sfreq']  # the sampling frequency
-
+con_methods = ['pli', 'wpli2_debiased']
 con, freqs, times, n_epochs, n_tapers = spectral_connectivity(label_ts,
-        method='wpli2_debiased', mode='multitaper', sfreq=sfreq, fmin=fmin,
+        method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
         fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=2)
 
 # con is a 3D array, get the connectivity for the first (and only) freq. band
-con = con[:, :, 0]
+# for each method
+con_res = dict()
+for method, c in zip(con_methods, con):
+    con_res[method] = c[:, :, 0]
 
 # Now, we visualize the connectivity using a circular graph layout
 
@@ -112,14 +116,25 @@ node_order = list()
 node_order.extend(lh_labels[::-1])  # reverse the order
 node_order.extend(rh_labels)
 
-node_angles = circular_layout(label_names, node_order, start_pos=90)
+node_angles = circular_layout(label_names, node_order, start_pos=90,
+                              group_boundaries=[0, len(label_names) / 2])
 
 # Plot the graph using node colors from the FreeSurfer parcellation. We only
 # show the 300 strongest connections.
-plot_connectivity_circle(con, label_names, n_lines=300, node_angles=node_angles,
-                         node_colors=label_colors,
+plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
+                         node_angles=node_angles, node_colors=label_colors,
                          title='All-to-All Connectivity left-Auditory '
-                               'Condition')
+                               'Condition (PLI)')
 import matplotlib.pyplot as plt
 plt.savefig('circle.png', facecolor='black')
+
+# Plot connectivity for both methods in the same plot
+fig = plt.figure(num=None, figsize=(8, 4), facecolor='black')
+no_names = [''] * len(label_names)
+for ii, method in enumerate(con_methods):
+    plot_connectivity_circle(con_res[method], no_names, n_lines=300,
+                             node_angles=node_angles, node_colors=label_colors,
+                             title=method, padding=0, fontsize_colorbar=6,
+                             fig=fig, subplot=(1, 2, ii + 1))
+
 plt.show()
diff --git a/examples/connectivity/plot_mne_inverse_psi_visual.py b/examples/connectivity/plot_mne_inverse_psi_visual.py
index f0bbe38..9f73eee 100644
--- a/examples/connectivity/plot_mne_inverse_psi_visual.py
+++ b/examples/connectivity/plot_mne_inverse_psi_visual.py
@@ -24,12 +24,12 @@ pp. 1-4, Jun. 2008.
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs
 from mne.connectivity import seed_target_indices, phase_slope_index
 from mne.viz import mne_analyze_colormap
@@ -51,8 +51,8 @@ raw = Raw(fname_raw)
 events = mne.read_events(fname_event)
 
 # pick MEG channels
-picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
-                   exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
diff --git a/examples/connectivity/plot_sensor_connectivity.py b/examples/connectivity/plot_sensor_connectivity.py
index 984c460..3800ddc 100644
--- a/examples/connectivity/plot_sensor_connectivity.py
+++ b/examples/connectivity/plot_sensor_connectivity.py
 are used which produces strong connectivity in the right occipital sensors.
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 from scipy import linalg
 
 import mne
-from mne import fiff
+from mne import io
 from mne.connectivity import spectral_connectivity
 from mne.datasets import sample
 
@@ -29,14 +29,14 @@ raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 # Add a bad channel
 raw.info['bads'] += ['MEG 2443']
 
 # Pick MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
                         exclude='bads')
 
 # Create epochs for the visual condition
diff --git a/examples/plot_megsim_data.py b/examples/datasets/plot_megsim_data.py
similarity index 73%
rename from examples/plot_megsim_data.py
rename to examples/datasets/plot_megsim_data.py
index d9ceec7..425969b 100644
--- a/examples/plot_megsim_data.py
+++ b/examples/datasets/plot_megsim_data.py
@@ -14,9 +14,10 @@ Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
 (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
 Realistic Simulated and Empirical Data. Neuroinformatics 10:141-158
 """
+print(__doc__)
 
-import matplotlib.pyplot as plt
-import mne
+from mne import find_events, Epochs, pick_types, read_evokeds
+from mne.io import Raw
 from mne.datasets.megsim import load_data
 
 condition = 'visual'  # or 'auditory' or 'somatosensory'
@@ -29,23 +30,22 @@ raw_fnames = load_data(condition=condition, data_format='raw',
 evoked_fnames = load_data(condition=condition, data_format='evoked',
                           data_type='simulation')
 
-raw = mne.fiff.Raw(raw_fnames[0])
-events = mne.find_events(raw, stim_channel="STI 014")
+raw = Raw(raw_fnames[0])
+events = find_events(raw, stim_channel="STI 014", shortest_event=1)
 
 # Visualize raw file
 raw.plot()
 
 # Make an evoked file from the experimental data
-picks = mne.fiff.pick_types(raw.info, meg=True, eog=True, exclude='bads')
+picks = pick_types(raw.info, meg=True, eog=True, exclude='bads')
 
 # Read epochs
 event_id, tmin, tmax = 9, -0.2, 0.5
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0),
-                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+epochs = Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
+                picks=picks, reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
 evoked = epochs.average()  # average epochs and get an Evoked dataset.
 evoked.plot()
 
 # Compare to the simulated data
-evoked_sim = mne.fiff.Evoked(evoked_fnames[0])
+evoked_sim = read_evokeds(evoked_fnames[0], condition=0)
 evoked_sim.plot()
diff --git a/examples/plot_megsim_data_single_trial.py b/examples/datasets/plot_megsim_data_single_trial.py
similarity index 91%
rename from examples/plot_megsim_data_single_trial.py
rename to examples/datasets/plot_megsim_data_single_trial.py
index 7d2e568..d561dc7 100644
--- a/examples/plot_megsim_data_single_trial.py
+++ b/examples/datasets/plot_megsim_data_single_trial.py
@@ -14,8 +14,9 @@ Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
 (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
 Realistic Simulated and Empirical Data. Neuroinformatics 10:141-158
 """
+print(__doc__)
 
-import mne
+from mne import read_evokeds
 from mne.datasets.megsim import load_data
 
 condition = 'visual'  # or 'auditory' or 'somatosensory'
@@ -27,7 +28,7 @@ epochs_fnames = load_data(condition=condition, data_format='single-trial',
 # Take only 10 trials from the same simulation setup.
 epochs_fnames = [f for f in epochs_fnames if 'sim6_trial_' in f][:10]
 
-evokeds = [mne.fiff.read_evoked(f) for f in epochs_fnames]
+evokeds = [read_evokeds(f)[0] for f in epochs_fnames]
 mean_evoked = sum(evokeds[1:], evokeds[0])
 
 # Visualize the average
diff --git a/examples/datasets/plot_spm_faces_dataset.py b/examples/datasets/plot_spm_faces_dataset.py
index e62ac65..b7dad75 100644
--- a/examples/datasets/plot_spm_faces_dataset.py
+++ b/examples/datasets/plot_spm_faces_dataset.py
@@ -10,20 +10,19 @@ Runs a full pipeline using MNE-Python:
 - source reconstruction using dSPM on the contrast : "faces - scrambled"
 
 """
-print __doc__
+print(__doc__)
 
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-import numpy as np
 import matplotlib.pyplot as plt
 
 import mne
 from mne.datasets import spm_face
-from mne.preprocessing import ICA
-from mne import fiff
+from mne.preprocessing import ICA, create_eog_epochs
+from mne import io
 from mne.minimum_norm import make_inverse_operator, apply_inverse
 
 
@@ -35,37 +34,38 @@ subjects_dir = data_path + '/subjects'
 
 raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
 
-raw = fiff.Raw(raw_fname % 1, preload=True) # Take first run
+raw = io.Raw(raw_fname % 1, preload=True)  # Take first run
 
-picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads')
-raw.filter(1, 45, method='iir')
+picks = mne.pick_types(raw.info, meg=True, exclude='bads')
+raw.filter(1, 30, method='iir')
 
 events = mne.find_events(raw, stim_channel='UPPT001')
-event_ids = {"faces":1, "scrambled":2}
+
+# plot the events to get an idea of the paradigm
+mne.viz.plot_events(events, raw.info['sfreq'])
+
+event_ids = {"faces": 1, "scrambled": 2}
 
 tmin, tmax = -0.2, 0.6
 baseline = None  # no baseline as high-pass is applied
-reject = dict(mag=1.5e-12)
+reject = dict(mag=5e-12)
 
 epochs = mne.Epochs(raw, events, event_ids, tmin, tmax,  picks=picks,
                     baseline=baseline, preload=True, reject=reject)
 
 # Fit ICA, find and remove major artifacts
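+# n_components=0.95 keeps enough PCA components to explain 95% of the variance;
+# decim=6 fits the ICA on every 6th time sample to speed up estimation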
+ica = ICA(n_components=0.95).fit(raw, decim=6, reject=reject)
 
-ica = ICA(None, 50).decompose_epochs(epochs, decim=2)
-
-for ch_name in ['MRT51-2908', 'MLF14-2908']:  # ECG, EOG contaminated chs
-    scores = ica.find_sources_epochs(epochs, ch_name, 'pearsonr')
-    ica.exclude += list(np.argsort(np.abs(scores))[-2:])
-
-ica.plot_topomap(np.unique(ica.exclude))  # plot components found
-
-
-# select ICA sources and reconstruct MEG signals, compute clean ERFs
-
-epochs = ica.pick_sources_epochs(epochs)
+# compute correlation scores, get bad indices sorted by score
+eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject)
+eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='MRT31-2908')
+ica.plot_scores(eog_scores, eog_inds)  # see scores the selection is based on
+ica.plot_components(eog_inds)  # view topographic sensitivity of components
+ica.exclude += eog_inds[:1]  # we saw the 2nd EOG component looked too dipolar
+ica.plot_overlay(eog_epochs.average())  # inspect artifact removal
+epochs_cln = ica.apply(epochs, copy=True)  # clean data, default in place
 
-evoked = [epochs[k].average() for k in event_ids]
+evoked = [epochs_cln[k].average() for k in event_ids]
 
 contrast = evoked[1] - evoked[0]
 
@@ -77,7 +77,21 @@ for e in evoked:
 plt.show()
 
 # estimate noise covariance
-noise_cov = mne.compute_covariance(epochs.crop(None, 0, copy=True))
+noise_cov = mne.compute_covariance(epochs_cln, tmax=0)
+
+###############################################################################
+# Visualize fields on MEG helmet
+
+trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_'
+                           'raw-trans.fif')
+
+maps = mne.make_field_map(evoked[0], trans_fname=trans_fname,
+                          subject='spm', subjects_dir=subjects_dir,
+                          n_jobs=1)
+
+
+evoked[0].plot_field(maps, time=0.170)
+
 
 ###############################################################################
 # Compute forward model
@@ -86,7 +100,7 @@ noise_cov = mne.compute_covariance(epochs.crop(None, 0, copy=True))
 src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
                              overwrite=True)
 
-mri = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
+mri = trans_fname
 bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
 forward = mne.make_forward_solution(contrast.info, mri=mri, src=src, bem=bem)
 forward = mne.convert_forward_solution(forward, surf_ori=True)
@@ -94,7 +108,7 @@ forward = mne.convert_forward_solution(forward, surf_ori=True)
 ###############################################################################
 # Compute inverse solution
 
-snr = 5.0
+snr = 3.0
 lambda2 = 1.0 / snr ** 2
 method = 'dSPM'
 
@@ -110,7 +124,7 @@ stc = apply_inverse(contrast, inverse_operator, lambda2, method,
 # Plot brain in 3D with PySurfer if available. Note that the subject name
 # is already known by the SourceEstimate stc object.
 brain = stc.plot(surface='inflated', hemi='both', subjects_dir=subjects_dir)
-brain.set_data_time_index(173)
+brain.set_time(170.0)  # milliseconds
 brain.scale_data_colormap(fmin=4, fmid=6, fmax=8, transparent=True)
 brain.show_view('ventral')
 # brain.save_image('dSPM_map.png')
diff --git a/examples/decoding/plot_decoding_csp_eeg.py b/examples/decoding/plot_decoding_csp_eeg.py
new file mode 100644
index 0000000..6c409ae
--- /dev/null
+++ b/examples/decoding/plot_decoding_csp_eeg.py
@@ -0,0 +1,150 @@
+"""
+===========================================================================
+Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)
+===========================================================================
+
+Decoding of motor imagery applied to EEG data decomposed using CSP.
+Here the classifier is applied to features extracted on CSP filtered signals.
+
+See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]
+
+The EEGBCI dataset is documented in [2]
+The data set is available at PhysioNet [3]
+
+[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
+    of the abnormal components in the clinical EEG. Electroencephalography
+    and Clinical Neurophysiology, 79(6):440--447, December 1991.
+
+[2] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
+    Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
+    (BCI) System. IEEE TBME 51(6):1034-1043
+
+[3] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
+    Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
+    PhysioToolkit, and PhysioNet: Components of a New Research Resource for
+    Complex Physiologic Signals. Circulation 101(23):e215-e220
+"""
+# Authors: Martin Billinger <martin.billinger at tugraz.at>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+import numpy as np
+import matplotlib.pyplot as plt
+
+from mne import Epochs, pick_types
+from mne.io import concatenate_raws
+from mne.io.edf import read_raw_edf
+from mne.datasets import eegbci
+from mne.event import find_events
+from mne.decoding import CSP
+from mne.layouts import read_layout
+
+###############################################################################
+## Set parameters and read data
+
+# avoid classification of evoked responses by using epochs that start 1s after
+# cue onset.
+tmin, tmax = -1., 4.
+event_id = dict(hands=2, feet=3)
+subject = 1
+runs = [6, 10, 14]  # motor imagery: hands vs feet
+
+raw_fnames = eegbci.load_data(subject, runs)
+raw_files = [read_raw_edf(f, tal_channel=-1, preload=True) for f in raw_fnames]
+raw = concatenate_raws(raw_files)
+
+# strip channel names
+raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]
+
+# Apply band-pass filter
+raw.filter(7., 30., method='iir')
+
+events = find_events(raw, shortest_event=0, stim_channel='STI 014')
+
+picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
+                   exclude='bads')
+
+# Read epochs (train will be done only between 1 and 2s)
+# Testing will be done with a running classifier
+epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
+                baseline=None, preload=True, add_eeg_ref=False)
+epochs_train = epochs.crop(tmin=1., tmax=2., copy=True)
+labels = epochs.events[:, -1] - 2
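+# event codes 2 (hands) and 3 (feet) become class labels 0 and 1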
+
+###############################################################################
+# Classification with linear discriminant analysis
+
+from sklearn.lda import LDA
+from sklearn.cross_validation import ShuffleSplit
+
+# Assemble a classifier
+svc = LDA()
+csp = CSP(n_components=4, reg=None, log=True)
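+# with log=True, CSP returns the log-variance of the spatially filtered
+# signals as features for the classifier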
+
+# Define a Monte Carlo cross-validation generator (reduce variance):
+cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
+scores = []
+epochs_data = epochs.get_data()
+epochs_data_train = epochs_train.get_data()
+
+# Use scikit-learn Pipeline with cross_val_score function
+from sklearn.pipeline import Pipeline
+from sklearn.cross_validation import cross_val_score
+clf = Pipeline([('CSP', csp), ('SVC', svc)])
+scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
+
+# Printing the results
+class_balance = np.mean(labels == labels[0])
+class_balance = max(class_balance, 1. - class_balance)
+print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
+                                                          class_balance))
+
+# plot CSP patterns estimated on full data for visualization
+csp.fit_transform(epochs_data, labels)
+
+evoked = epochs.average()
+evoked.data = csp.patterns_.T
+evoked.times = np.arange(evoked.data.shape[0])
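+# wrapping the patterns in an Evoked container lets plot_topomap below render
+# each CSP component as if it were a time point of sensor data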
+
+layout = read_layout('EEG1005')
+evoked.plot_topomap(times=[0, 1, 2, 61, 62, 63], ch_type='eeg', layout=layout,
+                    scale_time=1, time_format='%i', scale=1,
+                    unit='Patterns (AU)', size=1.5)
+
+###############################################################################
+# Look at performance over time
+
+sfreq = raw.info['sfreq']
+w_length = int(sfreq * 0.5)   # running classifier: window length
+w_step = int(sfreq * 0.1)  # running classifier: window step size
+w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
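+# slide a 0.5 s window across each epoch in 0.1 s steps; per CV fold the
+# classifier is trained on the 1-2 s crop and scored within each window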
+
+scores_windows = []
+
+for train_idx, test_idx in cv:
+    y_train, y_test = labels[train_idx], labels[test_idx]
+
+    X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
+    X_test = csp.transform(epochs_data_train[test_idx])
+
+    # fit classifier
+    svc.fit(X_train, y_train)
+
+    # running classifier: test classifier on sliding window
+    score_this_window = []
+    for n in w_start:
+        X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
+        score_this_window.append(svc.score(X_test, y_test))
+    scores_windows.append(score_this_window)
+
+# Plot scores over time
+w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
+plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
+plt.axvline(0, linestyle='--', color='k', label='Onset')
+plt.axhline(0.5, linestyle='-', color='k', label='Chance')
+plt.xlabel('time (s)')
+plt.ylabel('classification accuracy')
+plt.title('Classification score over time')
+plt.legend(loc='lower right')
+plt.show()
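
For readers unfamiliar with the window arithmetic above, here is a
self-contained sketch of how w_start and w_times map sample indices to
seconds; the sampling rate and epoch length are assumed values for
illustration, not read from the data:

    import numpy as np

    sfreq = 160.0                # assumed sampling rate (Hz)
    tmin, n_samples = -1.0, 801  # assumed epoch start (s) and length
    w_length = int(sfreq * 0.5)  # window length: 80 samples
    w_step = int(sfreq * 0.1)    # window step: 16 samples
    w_start = np.arange(0, n_samples - w_length, w_step)

    # centre of each window, in seconds relative to cue onset
    w_times = (w_start + w_length / 2.) / sfreq + tmin
    print(w_times[:3])           # the first windows lie before the cue
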
diff --git a/examples/decoding/plot_decoding_csp_space.py b/examples/decoding/plot_decoding_csp_space.py
index a946f25..6429e55 100644
--- a/examples/decoding/plot_decoding_csp_space.py
+++ b/examples/decoding/plot_decoding_csp_space.py
@@ -13,16 +13,16 @@ See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]
     and Clinical Neurophysiology, 79(6):440--447, December 1991.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Romain Trachel <romain.trachel at inria.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 import numpy as np
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 
 data_path = sample.data_path()
@@ -35,13 +35,13 @@ tmin, tmax = -0.2, 0.5
 event_id = dict(aud_l=1, vis_l=3)
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname, preload=True)
+raw = io.Raw(raw_fname, preload=True)
 raw.filter(2, None, method='iir')  # replace baselining with high-pass
 events = mne.read_events(event_fname)
 
 raw.info['bads'] = ['MEG 2443']  # set bad channels
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=False,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=False,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
@@ -80,8 +80,8 @@ for train_idx, test_idx in cv:
 # Printing the results
 class_balance = np.mean(labels == labels[0])
 class_balance = max(class_balance, 1. - class_balance)
-print "Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
-                                                          class_balance)
+print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
+                                                          class_balance))
 
 # Or use much more convenient scikit-learn cross_val_score function using
 # a Pipeline
@@ -90,13 +90,13 @@ from sklearn.cross_validation import cross_val_score
 cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
 clf = Pipeline([('CSP', csp), ('SVC', svc)])
 scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
-print scores.mean()  # should match results above
+print(scores.mean())  # should match results above
 
 # And using regularized CSP with the Ledoit-Wolf covariance estimator
 csp = CSP(n_components=n_components, reg='lws')
 clf = Pipeline([('CSP', csp), ('SVC', svc)])
 scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
-print scores.mean()  # should get better results than above
+print(scores.mean())  # should get better results than above
 
 # plot CSP patterns estimated on full data for visualization
 csp.fit_transform(epochs_data, labels)
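
The reg='lws' option above shrinks the per-class covariance estimates
before the CSP decomposition. As a rough illustration of why shrinkage
helps when samples are scarce relative to channels, here is a minimal
sketch using scikit-learn's LedoitWolf estimator on synthetic data
(CSP's internal implementation may differ in detail):

    import numpy as np
    from sklearn.covariance import LedoitWolf

    rng = np.random.RandomState(42)
    X = rng.randn(50, 20)              # few samples, many dimensions

    emp_cov = np.cov(X, rowvar=False)  # noisy empirical covariance
    lw = LedoitWolf().fit(X)           # shrinks toward scaled identity

    print(lw.shrinkage_)               # estimated shrinkage intensity
    # shrinkage improves the conditioning of the estimate
    print(np.linalg.cond(emp_cov), np.linalg.cond(lw.covariance_))
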
diff --git a/examples/decoding/plot_decoding_sensors.py b/examples/decoding/plot_decoding_sensors.py
index 1b4fc73..4a43644 100644
--- a/examples/decoding/plot_decoding_sensors.py
+++ b/examples/decoding/plot_decoding_sensors.py
@@ -7,16 +7,16 @@ Decoding, a.k.a MVPA or supervised machine learning applied to MEG
 data in sensor space. Here the classifier is applied to every time
 point.
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 import matplotlib.pyplot as plt
 import numpy as np
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 
 data_path = sample.data_path()
@@ -31,14 +31,14 @@ tmin, tmax = -0.2, 0.5
 event_id = dict(aud_l=1, vis_l=3)
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname, preload=True)
+raw = io.Raw(raw_fname, preload=True)
 raw.filter(2, None, method='iir')  # replace baselining with high-pass
 events = mne.read_events(event_fname)
 
 # Set up pick list: EEG + MEG - bad channels (modify to your needs)
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
@@ -52,7 +52,7 @@ mne.epochs.equalize_epoch_counts(epochs_list)
 # Decoding in sensor space using a linear SVM
 n_times = len(epochs.times)
 # Take only the data channels (here the gradiometers)
-data_picks = fiff.pick_types(epochs.info, meg=True, exclude='bads')
+data_picks = mne.pick_types(epochs.info, meg=True, exclude='bads')
 # Make arrays X and y such that:
 # X is 3d, with X.shape[0] the total number of epochs to classify
 # y is filled with integers coding for the class to predict
diff --git a/examples/decoding/plot_decoding_spatio_temporal_source.py b/examples/decoding/plot_decoding_spatio_temporal_source.py
index 4c7c0f4..dddfd49 100644
--- a/examples/decoding/plot_decoding_spatio_temporal_source.py
+++ b/examples/decoding/plot_decoding_spatio_temporal_source.py
@@ -10,17 +10,17 @@ relevant features. The classifier then is trained on selected features of
 epochs in source space.
 """
 
-# Author: Denis A. Engemann <d.engemann at fz-juelich.de>
-#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 import os
 import numpy as np
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
 
@@ -44,21 +44,21 @@ tmin, tmax = -0.2, 0.5
 event_id = dict(aud_r=2, vis_r=4)  # load contra-lateral conditions
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname, preload=True)
+raw = io.Raw(raw_fname, preload=True)
 raw.filter(2, None, method='iir')  # replace baselining with high-pass
 events = mne.read_events(event_fname)
 
 # Set up pick list: MEG - bad channels (modify to your needs)
 raw.info['bads'] += ['MEG 2443']  # mark bads
-picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                     picks=picks, baseline=None, preload=True,
                     reject=dict(grad=4000e-13, eog=150e-6))
 
-epochs.equalize_event_counts(event_id.keys(), 'mintime', copy=False)
+epochs.equalize_event_counts(list(event_id.keys()), 'mintime', copy=False)
 epochs_list = [epochs[k] for k in event_id]
 
 # Compute inverse solution
@@ -78,8 +78,8 @@ X = np.zeros([n_epochs, n_vertices, n_times])
 # to save memory, we'll load and transform our epochs step by step.
 for condition_count, ep in zip([0, n_epochs / 2], epochs_list):
     stcs = apply_inverse_epochs(ep, inverse_operator, lambda2,
-                            method, pick_ori="normal",  # this saves us memory
-                            return_generator=True)
+                                method, pick_ori="normal",  # saves us memory
+                                return_generator=True)
     for jj, stc in enumerate(stcs):
         X[condition_count + jj] = stc.lh_data
 
@@ -129,8 +129,8 @@ for ii, (train, test) in enumerate(cv):
     feature_weights += feature_selection.inverse_transform(clf.coef_) \
         .reshape(n_vertices, n_times)
 
-print 'Average prediction accuracy: %0.3f | standard deviation:  %0.3f' % \
-    (scores.mean(), scores.std())
+print('Average prediction accuracy: %0.3f | standard deviation:  %0.3f'
+      % (scores.mean(), scores.std()))
 
 # prepare feature weights for visualization
 feature_weights /= (ii + 1)  # create average weights
@@ -145,8 +145,8 @@ feature_weights = np.abs(feature_weights.data) * 10
 
 vertices = [stc.lh_vertno, np.array([])]  # empty array for right hemisphere
 stc_feat = mne.SourceEstimate(feature_weights, vertices=vertices,
-            tmin=stc.tmin, tstep=stc.tstep,
-            subject='sample')
+                              tmin=stc.tmin, tstep=stc.tstep,
+                              subject='sample')
 
 brain = stc_feat.plot(subject=subject, fmin=1, fmid=5.5, fmax=20)
 brain.set_time(100)
diff --git a/examples/decoding/plot_decoding_time_generalization.py b/examples/decoding/plot_decoding_time_generalization.py
new file mode 100644
index 0000000..7c61740
--- /dev/null
+++ b/examples/decoding/plot_decoding_time_generalization.py
@@ -0,0 +1,91 @@
+"""
+========================================================
+Decoding sensor space data with over-time generalization
+========================================================
+
+This example runs the analysis presented in:
+
+Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
+and Stanislas Dehaene, "Two distinct dynamic modes subtend the detection of
+unexpected sounds", PLOS ONE, 2013
+
+The idea is to learn at one time instant and assess if the decoder
+can predict accurately over time.
+"""
+print(__doc__)
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+import mne
+from mne.datasets import spm_face
+from mne.decoding import time_generalization
+
+data_path = spm_face.data_path()
+
+###############################################################################
+# Load and filter data, set up epochs
+
+raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
+
+raw = mne.io.Raw(raw_fname % 1, preload=True)  # Take first run
+raw.append(mne.io.Raw(raw_fname % 2, preload=True))  # Take second run too
+
+picks = mne.pick_types(raw.info, meg=True, exclude='bads')
+raw.filter(1, 45, method='iir')
+
+events = mne.find_events(raw, stim_channel='UPPT001')
+event_id = {"faces": 1, "scrambled": 2}
+tmin, tmax = -0.1, 0.5
+
+# Set up pick list
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                       ref_meg=False, exclude='bads')
+
+# Read epochs
+decim = 4  # decimate to make the example faster to run
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=None, preload=True,
+                    reject=dict(mag=1.5e-12), decim=decim)
+
+epochs_list = [epochs[k] for k in event_id]
+mne.epochs.equalize_epoch_counts(epochs_list)
+
+###############################################################################
+# Run decoding
+
+# Compute the Area Under the Curve (AUC) of the Receiver Operating
+# Characteristic (ROC) as the score of time generalization. A perfect
+# decoding would lead to AUCs of 1.
+# Chance level is at 0.5.
+# The default classifier is a linear SVM (C=1) after feature scaling.
+scores = time_generalization(epochs_list, clf=None, cv=5, scoring="roc_auc",
+                             shuffle=True, n_jobs=2)
+
+###############################################################################
+# Now visualize
+times = 1e3 * epochs.times  # convert times to ms
+
+plt.figure()
+plt.imshow(scores, interpolation='nearest', origin='lower',
+           extent=[times[0], times[-1], times[0], times[-1]],
+           vmin=0.1, vmax=0.9, cmap='RdBu_r')
+plt.xlabel('Times Test (ms)')
+plt.ylabel('Times Train (ms)')
+plt.title('Time generalization (%s vs. %s)' % tuple(event_id.keys()))
+plt.axvline(0, color='k')
+plt.axhline(0, color='k')
+plt.colorbar()
+
+plt.figure()
+plt.plot(times, np.diag(scores), label="Classif. score")
+plt.axhline(0.5, color='k', linestyle='--', label="Chance level")
+plt.axvline(0, color='r', label='stim onset')
+plt.legend()
+plt.xlabel('Time (ms)')
+plt.ylabel('ROC classification score')
+plt.title('Decoding (%s vs. %s)' % tuple(event_id.keys()))
+plt.show()
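
Since time_generalization is used as a black box above, the following
self-contained sketch (synthetic data, hypothetical variable names)
spells out the underlying idea: fit a scaled linear SVM on the samples
of one time point and score it at every other time point, filling a
train-time by test-time matrix like the one plotted above:

    import numpy as np
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import SVC

    rng = np.random.RandomState(0)
    n_trials, n_channels, n_times = 40, 10, 15
    X = rng.randn(n_trials, n_channels, n_times)
    y = rng.randint(0, 2, n_trials)
    X[y == 1, :, 8:] += 1.             # injected effect after "onset"

    train = np.arange(0, n_trials, 2)  # crude split, for illustration
    test = np.arange(1, n_trials, 2)

    scores = np.zeros((n_times, n_times))
    for t_train in range(n_times):
        scaler = StandardScaler().fit(X[train, :, t_train])
        clf = SVC(kernel='linear', C=1)
        clf.fit(scaler.transform(X[train, :, t_train]), y[train])
        for t_test in range(n_times):
            scores[t_train, t_test] = clf.score(
                scaler.transform(X[test, :, t_test]), y[test])
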
diff --git a/examples/decoding/plot_ems_filtering.py b/examples/decoding/plot_ems_filtering.py
new file mode 100644
index 0000000..b37bf0a
--- /dev/null
+++ b/examples/decoding/plot_ems_filtering.py
@@ -0,0 +1,94 @@
+"""
+==============================================
+Compute effect-matched-spatial filtering (EMS)
+==============================================
+
+This example computes EMS to reconstruct the time course of
+the experimental effect as described in:
+
+Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing multi-sensor
+data to a single time course that reveals experimental effects",
+BMC Neuroscience 2013, 14:122
+
+
+This technique is used to create spatial filters based on the
+difference between two conditions. By projecting the trial onto the
+corresponding spatial filters, surrogate single trials are created
+in which multi-sensor activity is reduced to one time series which
+exposes experimental effects, if present.
+
+We will first plot a trials x times image of the single trials and order the
+trials by condition. A second plot shows the average time series for each
+condition. Finally, a topographic plot is created which exhibits the
+temporal evolution of the spatial filters.
+"""
+
+# Author: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import mne
+from mne import io
+from mne.datasets import sample
+from mne.decoding import compute_ems
+data_path = sample.data_path()
+
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_ids = {'AudL': 1, 'VisL': 3, 'AudR': 2, 'VisR': 4}
+tmin = -0.2
+tmax = 0.5
+
+# Read data and create epochs
+raw = io.Raw(raw_fname, preload=True)
+raw.filter(1, 45)
+events = mne.read_events(event_fname)
+
+include = []  # or stim channels ['STI 014']
+ch_type = 'grad'
+picks = mne.pick_types(raw.info, meg=ch_type, eeg=False, stim=False, eog=True,
+                       include=include, exclude='bads')
+
+reject = dict(grad=4000e-13, eog=150e-6)
+
+epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
+                    baseline=None, reject=reject)
+
+# Let's equalize the trial counts in each condition
+epochs.equalize_event_counts(epochs.event_id, copy=False)
+
+# compute surrogate time series
+surrogates, filters, conditions = compute_ems(epochs, ['AudL', 'VisL'])
+
+import matplotlib.pyplot as plt
+
+times = epochs.times * 1e3
+plt.figure()
+plt.title('single trial surrogates')
+plt.imshow(surrogates[conditions.argsort()], origin='lower', aspect='auto',
+           extent=[times[0], times[-1], 1, len(surrogates)],
+           cmap='RdBu_r')
+plt.xlabel('Time (ms)')
+plt.ylabel('Trials (reordered by condition)')
+
+plt.figure()
+plt.title('Average EMS signal')
+
+mappings = [(k, v) for k, v in event_ids.items() if v in conditions]
+for key, value in mappings:
+    ems_ave = surrogates[conditions == value]
+    ems_ave *= 1e13
+    plt.plot(times, ems_ave.mean(0), label=key)
+plt.xlabel('Time (ms)')
+plt.ylabel('fT/cm')
+plt.legend(loc='best')
+
+
+# visualize spatial filters across time
+plt.show()
+evoked = epochs.average()
+evoked.data = filters
+evoked.plot_topomap(ch_type=ch_type)
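
As a rough sketch of what compute_ems does conceptually (the actual
implementation uses a leave-one-out scheme; this toy version, on
synthetic data with hypothetical names, uses a single pass): the
spatial filter at each time point is the normalized difference of the
two condition means, and each trial is projected onto it to give one
surrogate time series per trial:

    import numpy as np

    rng = np.random.RandomState(1)
    n_trials, n_channels, n_times = 30, 8, 20
    X = rng.randn(n_trials, n_channels, n_times)
    conditions = np.repeat([0, 1], n_trials // 2)
    X[conditions == 1, 2] += 0.5  # condition effect on one channel

    # difference of condition means, normalized per time point
    d = X[conditions == 0].mean(0) - X[conditions == 1].mean(0)
    filters = d / np.linalg.norm(d, axis=0)   # (n_channels, n_times)

    # project each trial onto the filters -> one series per trial
    surrogates = np.einsum('ict,ct->it', X, filters)
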
diff --git a/examples/export/plot_epochs_as_data_frame.py b/examples/export/plot_epochs_as_data_frame.py
index f994726..7ad45e9 100644
--- a/examples/export/plot_epochs_as_data_frame.py
+++ b/examples/export/plot_epochs_as_data_frame.py
@@ -85,16 +85,16 @@ Reference
 More information and additional introductory materials can be found at the
 pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
 """
-# Author: Denis Engemann <d.engemann at fz-juelich.de>
+# Author: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 import matplotlib.pyplot as plt
 import numpy as np
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.datasets import sample
 
 
@@ -112,8 +112,8 @@ events = mne.read_events(event_fname)[:10]
 
 # Add a bad channel
 raw.info['bads'] += ['MEG 2443']
-picks = mne.fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                            stim=False, exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=False, exclude='bads')
 
 tmin, tmax = -0.2, 0.5
 baseline = (None, 0)
@@ -150,7 +150,7 @@ df.pop('EOG 061')  # this works just like with a list.
 # Pandas is using a MultiIndex or hierarchical index to handle higher
 # dimensionality while at the same time representing data in a flat 2d manner.
 
-print df.index.names, df.index.levels
+print(df.index.names, df.index.levels)
 
 # Inspecting the index object unveils that 'epoch', 'time' are used
 # for subsetting data. We can take advantage of that by using the
@@ -200,7 +200,7 @@ mne.viz.tight_layout()
 
 max_latency = grouped[sel[2]].apply(lambda x: df.time[x.argmax()])
 
-print max_latency
+print(max_latency)
 
 # To make the plot labels more readable, let's edit the values of 'condition'.
 df.condition = df.condition.apply(lambda name: name + ' ')
@@ -217,7 +217,7 @@ final_df = max_latency.reset_index()
 final_df.rename(columns={0: sel[2]})  # as the index is oblivious of names.
 
 # The index is now written into regular columns so it can be used as factor.
-print final_df
+print(final_df)
 
 # To save as csv file, uncomment the next line.
 # final_df.to_csv('my_epochs.csv')
diff --git a/examples/export/plot_epochs_to_nitime.py b/examples/export/plot_epochs_to_nitime.py
index 200d9ac..037ab13 100644
--- a/examples/export/plot_epochs_to_nitime.py
+++ b/examples/export/plot_epochs_to_nitime.py
@@ -8,16 +8,16 @@ for further signal processing and data analysis.
 
 """
 
-# Author: Denis Engemann <d.engemann at fz-juelich.de>
-#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Denis Engemann <denis.engemann at gmail.com>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -28,13 +28,13 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 # Set up pick list: EEG + MEG - bad channels (modify to your needs)
 raw.info['bads'] += ['MEG 2443', 'EEG 053']
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
diff --git a/examples/export/plot_evoked_to_nitime.py b/examples/export/plot_evoked_to_nitime.py
index 8b4b771..b1ad268 100644
--- a/examples/export/plot_evoked_to_nitime.py
+++ b/examples/export/plot_evoked_to_nitime.py
@@ -4,14 +4,14 @@ Export evoked data to Nitime
 ============================
 
 """
-# Author: Denis Engemann <d.engemann at fz-juelichde>
-#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Denis Engemann <denis.engemann at gmail.com>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
-from mne import fiff
+import mne
 from mne.datasets import sample
 from nitime.viz import plot_tseries
 import matplotlib.pyplot as plt
@@ -22,10 +22,10 @@ data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 
 # Reading
-evoked = fiff.Evoked(fname, setno=0, baseline=(None, 0), proj=True)
+evoked = mne.read_evokeds(fname, condition=0, baseline=(None, 0), proj=True)
 
 # Pick channels to view
-picks = fiff.pick_types(evoked.info, meg='grad', eeg=False, exclude='bads')
+picks = mne.pick_types(evoked.info, meg='grad', eeg=False, exclude='bads')
 
 evoked_ts = evoked.to_nitime(picks=picks)
 
diff --git a/examples/export/plot_raw_to_nitime.py b/examples/export/plot_raw_to_nitime.py
index 0965f96..3f9be8c 100644
--- a/examples/export/plot_raw_to_nitime.py
+++ b/examples/export/plot_raw_to_nitime.py
@@ -8,16 +8,16 @@ for further signal processing and data analysis.
 
 """
 
-# Author: Denis Engemann <d.engemann at fz-juelich.de>
+# Author: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
 
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.datasets import sample
 
 data_path = sample.data_path()
@@ -28,8 +28,8 @@ raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 raw = Raw(raw_fname)
 
 # set picks
-picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
-                            stim=False, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
+                       stim=False, exclude='bads')
 
 # pick times relative to the onset of the MEG measurement.
 start, stop = raw.time_as_index([100, 115], use_first_samp=False)
@@ -41,22 +41,22 @@ raw_ts = raw.to_nitime(start=start, stop=stop, picks=picks, copy=True)
 # explore some nitime timeseries features
 
 # get start
-print raw_ts.t0
+print(raw_ts.t0)
 
 # get duration
-print raw_ts.duration
+print(raw_ts.duration)
 
 # get sample duration (sampling interval)
-print raw_ts.sampling_interval
+print(raw_ts.sampling_interval)
 
 # get exported raw info
-print raw_ts.metadata.keys()
+print(list(raw_ts.metadata.keys()))
 
 # index at certain time
-print raw_ts.at(110.5)
+print(raw_ts.at(110.5))
 
 # get channel names (attribute added during export)
-print raw_ts.ch_names[:3]
+print(raw_ts.ch_names[:3])
 
 ###############################################################################
 # investigate spectral density
diff --git a/examples/extract_events_from_raw.py b/examples/extract_events_from_raw.py
deleted file mode 100644
index cf88a27..0000000
--- a/examples/extract_events_from_raw.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-=========================
-Find events in a raw file
-=========================
-
-Find events from the stimulation/trigger channel in the raw data.
-"""
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#
-# License: BSD (3-clause)
-
-print __doc__
-
-import mne
-from mne.datasets import sample
-from mne.fiff import Raw
-
-data_path = sample.data_path()
-fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
-
-# Reading events
-raw = Raw(fname)
-
-events = mne.find_events(raw, stim_channel='STI 014')
-
-# Writing events
-mne.write_events('events.fif', events)
-
-for ind, before, after in events[:5]:
-    print "At sample %d stim channel went from %d to %d" % (
-                                                    ind, before, after)
diff --git a/examples/inverse/plot_compute_mne_inverse.py b/examples/inverse/plot_compute_mne_inverse.py
index 4a635a5..938cf88 100644
--- a/examples/inverse/plot_compute_mne_inverse.py
+++ b/examples/inverse/plot_compute_mne_inverse.py
@@ -8,15 +8,15 @@ and stores the solution in stc files for visualisation.
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 from mne.datasets import sample
-from mne.fiff import Evoked
+from mne import read_evokeds
 from mne.minimum_norm import apply_inverse, read_inverse_operator
 
 
@@ -30,7 +30,7 @@ lambda2 = 1.0 / snr ** 2
 method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
 
 # Load data
-evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
 inverse_operator = read_inverse_operator(fname_inv)
 
 # Compute inverse solution
@@ -50,7 +50,15 @@ plt.show()
 # Plot brain in 3D with PySurfer if available. Note that the subject name
 # is already known by the SourceEstimate stc object.
 brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir)
-brain.set_data_time_index(180)
 brain.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
 brain.show_view('lateral')
+
+# use the peak getter to move visualization to the time point of the peak
+vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
+
+brain.set_data_time_index(time_idx)
+
+# draw marker at maximum peaking vertex
+brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
+               scale_factor=0.6)
 brain.save_image('dSPM_map.png')
diff --git a/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py b/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
index 225ae0b..30de479 100644
--- a/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
+++ b/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
@@ -8,19 +8,19 @@ to a brain label.
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
-
+from mne.minimum_norm import apply_inverse
 
 data_path = sample.data_path()
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -30,8 +30,11 @@ label_name = 'Aud-lh'
 fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
 
 event_id, tmin, tmax = 1, -0.2, 0.5
-snr = 1.0  # use smaller SNR for raw data
+
+# Use the same inverse operator when inspecting single trials vs. evoked data
+snr = 3.0  # standard assumption for averaged data, reused for single trials
 lambda2 = 1.0 / snr ** 2
+
 method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
 
 # Load data
@@ -47,17 +50,28 @@ include = []
 raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick MEG channels
-picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                    include=include, exclude='bads')
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
                                                     eog=150e-6))
 
+# Get evoked data (averaging across trials in sensor space)
+evoked = epochs.average()
+
 # Compute inverse solution and stcs for each epoch
+# Use the same inverse operator as with evoked data (i.e., set nave)
+# If you use a different nave, dSPM just scales by a factor sqrt(nave)
 stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, label,
-                            pick_ori="normal")
+                            pick_ori="normal", nave=evoked.nave)
+
+stc_evoked = apply_inverse(evoked, inverse_operator, lambda2, method,
+                           pick_ori="normal")
+
+stc_evoked_label = stc_evoked.in_label(label)
 
+# Mean across trials but not across vertices in label
 mean_stc = sum(stcs) / len(stcs)
 
 # compute sign flip to avoid signal cancellation when averaging signed values
@@ -66,14 +80,51 @@ flip = mne.label_sign_flip(label, inverse_operator['src'])
 label_mean = np.mean(mean_stc.data, axis=0)
 label_mean_flip = np.mean(flip[:, np.newaxis] * mean_stc.data, axis=0)
 
+# Get inverse solution by inverting evoked data
+stc_evoked = apply_inverse(evoked, inverse_operator, lambda2, method,
+                           pick_ori="normal")
+
+# apply_inverse() does whole brain, so sub-select label of interest
+stc_evoked_label = stc_evoked.in_label(label)
+
+# Average over label (not caring to align polarities here)
+label_mean_evoked = np.mean(stc_evoked_label.data, axis=0)
+
 ###############################################################################
-# View activation time-series
+# View activation time-series to illustrate the benefit of aligning/flipping
+
+times = 1e3 * stcs[0].times  # times in ms
+
 plt.figure()
-h0 = plt.plot(1e3 * stcs[0].times, mean_stc.data.T, 'k')
-h1, = plt.plot(1e3 * stcs[0].times, label_mean, 'r', linewidth=3)
-h2, = plt.plot(1e3 * stcs[0].times, label_mean_flip, 'g', linewidth=3)
+h0 = plt.plot(times, mean_stc.data.T, 'k')
+h1, = plt.plot(times, label_mean, 'r', linewidth=3)
+h2, = plt.plot(times, label_mean_flip, 'g', linewidth=3)
 plt.legend((h0[0], h1, h2), ('all dipoles in label', 'mean',
                              'mean with sign flip'))
 plt.xlabel('time (ms)')
 plt.ylabel('dSPM value')
 plt.show()
+
+###############################################################################
+# View single-trial dSPM and averaged dSPM for unflipped pooling over label
+# Compare (1) dSPM on single trials then average vs. (2) evoked then dSPM
+
+# Single trial
+plt.figure()
+for k, stc_trial in enumerate(stcs):
+    plt.plot(times, np.mean(stc_trial.data, axis=0).T, 'k--',
+             label='Single Trials' if k == 0 else '_nolegend_',
+             alpha=0.5)
+
+# Single-trial inverse then average; large linewidth so it is not masked
+plt.plot(times, label_mean, 'b', linewidth=6,
+         label='dSPM first, then average')
+
+# Evoked and then inverse
+plt.plot(times, label_mean_evoked, 'r', linewidth=2,
+         label='Average first, then dSPM')
+
+plt.xlabel('time (ms)')
+plt.ylabel('dSPM value')
+plt.legend()
+plt.show()
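
The sign flip used above deserves a tiny illustration: dipoles on
opposite walls of a sulcus can carry the same source signal with
opposite polarity, so a plain mean across vertices cancels it out. A
minimal sketch with synthetic numbers:

    import numpy as np

    signal = np.sin(np.linspace(0, np.pi, 5))
    data = np.array([signal, -signal])  # two "vertices", flipped signs
    flip = np.array([1., -1.])

    print(np.mean(data, axis=0))        # ~0 everywhere: cancellation
    print(np.mean(flip[:, np.newaxis] * data, axis=0))  # signal back
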
diff --git a/examples/inverse/plot_compute_mne_inverse_raw_in_label.py b/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
index 73230b6..ba69df1 100644
--- a/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
+++ b/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
@@ -9,16 +9,16 @@ visualisation.
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.minimum_norm import apply_inverse_raw, read_inverse_operator
 
 
diff --git a/examples/inverse/plot_compute_mne_inverse_volume.py b/examples/inverse/plot_compute_mne_inverse_volume.py
index d0cd1fd..253bd12 100644
--- a/examples/inverse/plot_compute_mne_inverse_volume.py
+++ b/examples/inverse/plot_compute_mne_inverse_volume.py
@@ -8,16 +8,16 @@ space and stores the solution in a nifti file for visualisation.
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 from mne.datasets import sample
-from mne.fiff import Evoked
+from mne import read_evokeds
 from mne.minimum_norm import apply_inverse, read_inverse_operator
 
 data_path = sample.data_path()
@@ -29,7 +29,7 @@ lambda2 = 1.0 / snr ** 2
 method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
 
 # Load data
-evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
 inverse_operator = read_inverse_operator(fname_inv)
 src = inverse_operator['src']
 
diff --git a/examples/inverse/plot_dics_beamformer.py b/examples/inverse/plot_dics_beamformer.py
index a1138be..8920603 100644
--- a/examples/inverse/plot_dics_beamformer.py
+++ b/examples/inverse/plot_dics_beamformer.py
@@ -16,14 +16,14 @@ in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 
 import matplotlib.pyplot as plt
 import numpy as np
 
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.datasets import sample
 from mne.time_frequency import compute_epochs_csd
 from mne.beamformer import dics
@@ -42,8 +42,8 @@ raw = Raw(raw_fname)
 raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
 
 # Set picks
-picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
-                            stim=False, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
+                       stim=False, exclude='bads')
 
 # Read epochs
 event_id, tmin, tmax = 1, -0.2, 0.5
diff --git a/examples/inverse/plot_dics_source_power.py b/examples/inverse/plot_dics_source_power.py
index 716dff4..7eeca11 100644
--- a/examples/inverse/plot_dics_source_power.py
+++ b/examples/inverse/plot_dics_source_power.py
@@ -12,14 +12,15 @@ in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
 """
 
 # Author: Roman Goj <roman.goj at gmail.com>
+#         Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.datasets import sample
 from mne.time_frequency import compute_epochs_csd
 from mne.beamformer import dics_source_power
@@ -36,8 +37,8 @@ raw = Raw(raw_fname)
 raw.info['bads'] = ['MEG 2443']  # 1 bad MEG channel
 
 # Set picks
-picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
-                            stim=False, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
+                       stim=False, exclude='bads')
 
 # Read epochs
 event_id, tmin, tmax = 1, -0.2, 0.5
@@ -56,24 +57,23 @@ forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
 # As fsum is False, compute_epochs_csd returns a list of CrossSpectralDensity
 # instances that can then be passed to dics_source_power
 data_csds = compute_epochs_csd(epochs, mode='multitaper', tmin=0.04, tmax=0.15,
-                               fmin=30, fmax=50, fsum=False)
+                               fmin=15, fmax=30, fsum=False)
 noise_csds = compute_epochs_csd(epochs, mode='multitaper', tmin=-0.11,
-                                tmax=-0.001, fmin=30, fmax=50, fsum=False)
+                                tmax=-0.001, fmin=15, fmax=30, fsum=False)
 
 # Compute DICS spatial filter and estimate source power
 stc = dics_source_power(epochs.info, forward, noise_csds, data_csds)
 
-# Plot source power separately for each frequency of interest
-pow_lim = [[1.88, 2.41, 2.94],   # limits for source power at 36.4 Hz
-           [1.41, 1.65, 1.89]]   # limits for source power at 45.5 Hz
+from scipy.stats import scoreatpercentile  # for thresholding
+
 for i, csd in enumerate(data_csds):
     message = 'DICS source power at %0.1f Hz' % csd.frequencies[0]
     brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
                      time_label=message, figure=i)
-    data = stc.data[:, i]
+    fmin, fmax = [scoreatpercentile(stc.data[:, i], ii) for ii in [95, 100]]
+    fmid = fmin + (fmax - fmin) / 2
     brain.set_data_time_index(i)
-    brain.scale_data_colormap(fmin=pow_lim[i][0], fmid=pow_lim[i][1],
-                              fmax=pow_lim[i][2], transparent=True)
+    brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax, transparent=True)
     brain.show_view('lateral')
     # Uncomment line below to save images
     #brain.save_image('DICS_source_power_freq_%d.png' % csd.frequencies[0])
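
The percentile-based colormap limits above could equally be computed
with NumPy (scipy.stats.scoreatpercentile and np.percentile agree for
this use); a minimal sketch on stand-in data:

    import numpy as np

    rng = np.random.RandomState(0)
    source_power = rng.rand(1000)  # stand-in for stc.data[:, i]
    fmin, fmax = np.percentile(source_power, [95, 100])
    fmid = fmin + (fmax - fmin) / 2.
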
diff --git a/examples/inverse/plot_dipole_fit_result.py b/examples/inverse/plot_dipole_fit_result.py
index 5142834..ab6582c 100644
--- a/examples/inverse/plot_dipole_fit_result.py
+++ b/examples/inverse/plot_dipole_fit_result.py
@@ -14,11 +14,11 @@ $mne_dipole_fit --meas sample_audvis-ave.fif --set 1 --meg --tmin 40 --tmax 95 \
     --dip sample_audvis_set1.dip
 
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
@@ -39,9 +39,9 @@ src = fwd['src']
 # read dipoles
 time, pos, amplitude, ori, gof = mne.read_dip(dip_fname)
 
-print "Time (ms): %s" % time
-print "Amplitude (nAm): %s" % amplitude
-print "GOF (%%): %s" % gof
+print("Time (ms): %s" % time)
+print("Amplitude (nAm): %s" % amplitude)
+print("GOF (%%): %s" % gof)
 
 # only plot those for which GOF is above 50%
 pos = pos[gof > 50.]
diff --git a/examples/inverse/plot_gamma_map_inverse.py b/examples/inverse/plot_gamma_map_inverse.py
index edf2243..fd64fba 100644
--- a/examples/inverse/plot_gamma_map_inverse.py
+++ b/examples/inverse/plot_gamma_map_inverse.py
@@ -10,7 +10,7 @@ NeuroImage, vol. 44, no. 3, pp. 947-66, Mar. 2009.
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 
@@ -26,8 +26,9 @@ evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
 
 # Read the evoked response and crop it
-setno = 'Left visual'
-evoked = mne.fiff.read_evoked(evoked_fname, setno=setno, baseline=(None, 0))
+condition = 'Left visual'
+evoked = mne.read_evokeds(evoked_fname, condition=condition,
+                          baseline=(None, 0))
 evoked.crop(tmin=-50e-3, tmax=300e-3)
 
 # Read the forward solution
@@ -55,10 +56,10 @@ plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
 
 # Show the evoked response and the residual for gradiometers
 ylim = dict(grad=[-120, 120])
-evoked = mne.fiff.pick_types_evoked(evoked, meg='grad', exclude='bads')
+evoked = mne.pick_types_evoked(evoked, meg='grad', exclude='bads')
 evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim,
             proj=True)
 
-residual = mne.fiff.pick_types_evoked(residual, meg='grad', exclude='bads')
+residual = mne.pick_types_evoked(residual, meg='grad', exclude='bads')
 residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim,
               proj=True)
diff --git a/examples/inverse/plot_label_activation_from_stc.py b/examples/inverse/plot_label_activation_from_stc.py
index 71d7db2..e5c17b0 100644
--- a/examples/inverse/plot_label_activation_from_stc.py
+++ b/examples/inverse/plot_label_activation_from_stc.py
@@ -13,7 +13,7 @@ formed through merging two labels.
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import os
 
diff --git a/examples/inverse/plot_label_from_stc.py b/examples/inverse/plot_label_from_stc.py
index cd2c3f4..b03274c 100644
--- a/examples/inverse/plot_label_from_stc.py
+++ b/examples/inverse/plot_label_from_stc.py
@@ -11,6 +11,7 @@ functional label. As expected the time course in the functional
 label yields higher values.
 
 """
+print(__doc__)
 
 # Author: Luke Bloy <luke.bloy at gmail.com>
 #         Alex Gramfort <alexandre.gramfort at telecom-paristech.fr>
@@ -21,7 +22,6 @@ import matplotlib.pyplot as plt
 
 import mne
 from mne.minimum_norm import read_inverse_operator, apply_inverse
-from mne.fiff import Evoked
 from mne.datasets import sample
 
 data_path = sample.data_path()
@@ -41,7 +41,7 @@ aparc_label_name = 'bankssts-lh'
 tmin, tmax = 0.080, 0.120
 
 # Load data
-evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
 inverse_operator = read_inverse_operator(fname_inv)
 src = inverse_operator['src']  # get the source space
 
@@ -55,22 +55,23 @@ stc_mean = stc.copy().crop(tmin, tmax).mean()
 # use the stc_mean to generate a functional label
 # region growing is halted at 60% of the peak value within the
 # anatomical label / ROI specified by aparc_label_name
-label = mne.labels_from_parc(subject, parc='aparc', subjects_dir=subjects_dir,
-                             regexp=aparc_label_name)[0][0]
+label = mne.read_labels_from_annot(subject, parc='aparc',
+                                   subjects_dir=subjects_dir,
+                                   regexp=aparc_label_name)[0]
 stc_mean_label = stc_mean.in_label(label)
 data = np.abs(stc_mean_label.data)
 stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
 
-func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=5,
+func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
                                   subjects_dir=subjects_dir, connected=True)
 
 # take first as func_labels are ordered based on maximum values in stc
 func_label = func_labels[0]
 
 # load the anatomical ROI for comparison
-anat_label = mne.labels_from_parc(subject, parc='aparc',
-                                  subjects_dir=subjects_dir,
-                                  regexp=aparc_label_name)[0][0]
+anat_label = mne.read_labels_from_annot(subject, parc='aparc',
+                                        subjects_dir=subjects_dir,
+                                        regexp=aparc_label_name)[0]
 
 # extract the anatomical time course for each label
 stc_anat_label = stc.in_label(anat_label)
diff --git a/examples/inverse/plot_label_source_activations.py b/examples/inverse/plot_label_source_activations.py
index dc31433..c1f317f 100644
--- a/examples/inverse/plot_label_source_activations.py
+++ b/examples/inverse/plot_label_source_activations.py
 in a label. We compare a simple average with an average
 using the dipoles' normals (flip mode), and then a PCA,
 also using a sign flip.
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, apply_inverse
-from mne.fiff import Evoked
 
 data_path = sample.data_path()
 label = 'Aud-lh'
@@ -32,7 +31,7 @@ lambda2 = 1.0 / snr ** 2
 method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
 
 # Load data
-evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
 inverse_operator = read_inverse_operator(fname_inv)
 src = inverse_operator['src']
 
@@ -48,7 +47,7 @@ mean = stc.extract_label_time_course(label, src, mode='mean')
 mean_flip = stc.extract_label_time_course(label, src, mode='mean_flip')
 pca = stc.extract_label_time_course(label, src, mode='pca_flip')
 
-print "Number of vertices : %d" % len(stc_label.data)
+print("Number of vertices : %d" % len(stc_label.data))
 
 # View source activations
 import matplotlib.pyplot as plt
diff --git a/examples/inverse/plot_lcmv_beamformer.py b/examples/inverse/plot_lcmv_beamformer.py
index 294b7a5..c7cea0d 100644
--- a/examples/inverse/plot_lcmv_beamformer.py
+++ b/examples/inverse/plot_lcmv_beamformer.py
@@ -8,18 +8,18 @@ of source orientation and stores the solutions in stc files for visualisation.
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 import numpy as np
 
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.beamformer import lcmv
 
 data_path = sample.data_path()
@@ -41,8 +41,8 @@ events = mne.read_events(event_fname)
 
 # Set up pick list: EEG + MEG - bad channels (modify to your needs)
 left_temporal_channels = mne.read_selection('Left-temporal')
-picks = pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
-                   exclude='bads', selection=left_temporal_channels)
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                       exclude='bads', selection=left_temporal_channels)
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
diff --git a/examples/inverse/plot_lcmv_beamformer_volume.py b/examples/inverse/plot_lcmv_beamformer_volume.py
index 6378564..c2debc1 100644
--- a/examples/inverse/plot_lcmv_beamformer_volume.py
+++ b/examples/inverse/plot_lcmv_beamformer_volume.py
@@ -9,17 +9,17 @@ Freeview.
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.beamformer import lcmv
 
 
@@ -40,8 +40,8 @@ events = mne.read_events(event_fname)
 
 # Set up pick list: EEG + MEG - bad channels (modify to your needs)
 left_temporal_channels = mne.read_selection('Left-temporal')
-picks = pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
-                   exclude='bads', selection=left_temporal_channels)
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                       exclude='bads', selection=left_temporal_channels)
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
diff --git a/examples/inverse/plot_make_inverse_operator.py b/examples/inverse/plot_make_inverse_operator.py
index eeafd48..4b4339e 100644
--- a/examples/inverse/plot_make_inverse_operator.py
+++ b/examples/inverse/plot_make_inverse_operator.py
@@ -9,16 +9,15 @@ in stc files for visualisation.
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 import mne
 from mne.datasets import sample
-from mne.fiff import Evoked
 from mne.minimum_norm import (make_inverse_operator, apply_inverse,
                               write_inverse_operator)
 
@@ -32,7 +31,7 @@ snr = 3.0
 lambda2 = 1.0 / snr ** 2
 
 # Load data
-evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
 forward_meeg = mne.read_forward_solution(fname_fwd_meeg, surf_ori=True)
 noise_cov = mne.read_cov(fname_cov)
 
@@ -41,7 +40,7 @@ noise_cov = mne.cov.regularize(noise_cov, evoked.info,
                                mag=0.05, grad=0.05, eeg=0.1, proj=True)
 
 # Restrict forward solution as necessary for MEG
-forward_meg = mne.fiff.pick_types_forward(forward_meeg, meg=True, eeg=False)
+forward_meg = mne.pick_types_forward(forward_meeg, meg=True, eeg=False)
 # Alternatively, you can just load a forward solution that is restricted
 forward_eeg = mne.read_forward_solution(fname_fwd_eeg, surf_ori=True)
 
diff --git a/examples/inverse/plot_mixed_norm_L21_inverse.py b/examples/inverse/plot_mixed_norm_L21_inverse.py
index 44ff446..a467ae8 100644
--- a/examples/inverse/plot_mixed_norm_L21_inverse.py
+++ b/examples/inverse/plot_mixed_norm_L21_inverse.py
@@ -9,14 +9,13 @@ Mixed-norm estimates for the M/EEG inverse problem using accelerated
 gradient methods, Physics in Medicine and Biology, 2012
 http://dx.doi.org/10.1088/0031-9155/57/7/1937
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
 from mne.datasets import sample
 from mne.inverse_sparse import mixed_norm
 from mne.minimum_norm import make_inverse_operator, apply_inverse
@@ -26,12 +25,13 @@ data_path = sample.data_path()
 fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
 ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+subjects_dir = data_path + '/subjects'
 
 # Read noise covariance matrix
 cov = mne.read_cov(cov_fname)
 # Handling average file
-setno = 0
-evoked = fiff.read_evoked(ave_fname, setno=setno, baseline=(None, 0))
+condition = 'Left Auditory'
+evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
 evoked.crop(tmin=0, tmax=0.3)
 # Handling forward solution
 forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
@@ -63,4 +63,13 @@ residual.plot(ylim=ylim, proj=True)
 ###############################################################################
 # View in 2D and 3D ("glass" brain like 3D plot)
 plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
-                             opacity=0.1, fig_name="MxNE (cond %s)" % setno)
+                             opacity=0.1, fig_name="MxNE (cond %s)" % condition)
+
+# and on the fsaverage brain after morphing
+stc_fsaverage = stc.morph(subject_from='sample', subject_to='fsaverage',
+                          grade=None, sparse=True, subjects_dir=subjects_dir)
+src_fsaverage_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
+src_fsaverage = mne.read_source_spaces(src_fsaverage_fname)
+
+plot_sparse_source_estimates(src_fsaverage, stc_fsaverage, bgcolor=(1, 1, 1),
+                             opacity=0.1)
diff --git a/examples/inverse/plot_mne_crosstalk_function.py b/examples/inverse/plot_mne_crosstalk_function.py
new file mode 100644
index 0000000..551a404
--- /dev/null
+++ b/examples/inverse/plot_mne_crosstalk_function.py
@@ -0,0 +1,83 @@
+"""
+===================================================================
+Compute cross-talk functions (CTFs) for labels for MNE/dSPM/sLORETA
+===================================================================
+
+CTFs are computed for four labels in the MNE sample data set
+for linear inverse operators (MNE, dSPM, sLORETA).
+CTFs describe the sensitivity of a linear estimator (e.g. for
+one label) to sources across the cortical surface. Sensitivity
+to sources outside the label is undesirable, and referred to as
+"leakage" or "cross-talk".
+"""
+
+# Author: Olaf Hauk <olaf.hauk at mrc-cbu.cam.ac.uk>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import mne
+from mne.datasets import sample
+from mne.minimum_norm import cross_talk_function, read_inverse_operator
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects/'
+fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
+fname_label = [data_path + '/MEG/sample/labels/Aud-rh.label',
+               data_path + '/MEG/sample/labels/Aud-lh.label',
+               data_path + '/MEG/sample/labels/Vis-rh.label',
+               data_path + '/MEG/sample/labels/Vis-lh.label']
+
+# In order to get gain matrix with fixed source orientation,
+# read forward solution with fixed orientations
+forward = mne.read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True)
+
+# read label(s)
+labels = [mne.read_label(ss) for ss in fname_label]
+
+inverse_operator = read_inverse_operator(fname_inv)
+
+# regularisation parameter
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+mode = 'svd'
+n_svd_comp = 1
+
+method = 'MNE'  # can be 'MNE', 'dSPM', or 'sLORETA'
+stc_ctf_mne = cross_talk_function(inverse_operator, forward, labels,
+                                  method=method, lambda2=lambda2,
+                                  signed=False, mode=mode,
+                                  n_svd_comp=n_svd_comp)
+
+method = 'dSPM'
+stc_ctf_dspm = cross_talk_function(inverse_operator, forward, labels,
+                                   method=method, lambda2=lambda2,
+                                   signed=False, mode=mode,
+                                   n_svd_comp=n_svd_comp)
+
+from mayavi import mlab
+fmin = 0.
+time_label = "MNE %d"
+fmax = stc_ctf_mne.data[:, 0].max()
+fmid = fmax / 2.
+brain_mne = stc_ctf_mne.plot(surface='inflated', hemi='rh',
+                             subjects_dir=subjects_dir,
+                             time_label=time_label, fmin=fmin,
+                             fmid=fmid, fmax=fmax,
+                             figure=mlab.figure(size=(500, 500)))
+
+time_label = "dSPM %d"
+fmax = stc_ctf_dspm.data[:, 0].max()
+fmid = fmax / 2.
+brain_dspm = stc_ctf_dspm.plot(surface='inflated', hemi='rh',
+                               subjects_dir=subjects_dir,
+                               time_label=time_label, fmin=fmin,
+                               fmid=fmid, fmax=fmax,
+                               figure=mlab.figure(size=(500, 500)))
+
+# Cross-talk functions for MNE and dSPM (and sLORETA) have the same shapes
+# (they may still differ in overall amplitude).
+# Point-spread functions (PSFs) usually differ significantly.
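
For intuition, CTFs and PSFs are the rows and columns of the resolution
matrix R = K G, where G is the gain (forward) matrix and K the linear
inverse: since the estimate is s_hat = K G s, row i of R describes the
sensitivity of estimator i to every source, and column j the spread of
a point source at j. A toy sketch with a random gain matrix and a
minimum-norm-like inverse (illustrative only, not how
cross_talk_function is implemented internally):

    import numpy as np

    rng = np.random.RandomState(0)
    n_sensors, n_sources = 30, 100
    G = rng.randn(n_sensors, n_sources)  # toy gain matrix
    # regularized minimum-norm-like inverse operator
    K = G.T.dot(np.linalg.inv(G.dot(G.T) + np.eye(n_sensors)))
    R = K.dot(G)                         # resolution matrix

    ctf = R[42, :]  # cross-talk into the estimator for source 42
    psf = R[:, 42]  # point-spread of a source at 42
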
diff --git a/examples/inverse/plot_mne_point_spread_function.py b/examples/inverse/plot_mne_point_spread_function.py
new file mode 100644
index 0000000..78cce34
--- /dev/null
+++ b/examples/inverse/plot_mne_point_spread_function.py
@@ -0,0 +1,102 @@
+"""
+==========================================================
+Compute point-spread functions (PSFs) for MNE/dSPM/sLORETA
+==========================================================
+
+PSFs are computed for four labels in the MNE sample data set
+for linear inverse operators (MNE, dSPM, sLORETA).
+PSFs describe the spread of activation from one label
+across the cortical surface.
+"""
+
+# Authors: Olaf Hauk <olaf.hauk at mrc-cbu.cam.ac.uk>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import mne
+from mne.datasets import sample
+from mne.minimum_norm import read_inverse_operator, point_spread_function
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects/'
+fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+fname_inv_eegmeg = (data_path +
+                    '/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif')
+fname_inv_meg = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_label = [data_path + '/MEG/sample/labels/Aud-rh.label',
+               data_path + '/MEG/sample/labels/Aud-lh.label',
+               data_path + '/MEG/sample/labels/Vis-rh.label',
+               data_path + '/MEG/sample/labels/Vis-lh.label']
+
+
+# read forward solution (sources in surface-based coordinates)
+forward = mne.read_forward_solution(fname_fwd, force_fixed=False,
+                                    surf_ori=True)
+
+# read inverse operators
+inverse_operator_eegmeg = read_inverse_operator(fname_inv_eegmeg)
+inverse_operator_meg = read_inverse_operator(fname_inv_meg)
+
+# read label(s)
+labels = [mne.read_label(ss) for ss in fname_label]
+
+# regularisation parameter
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = 'MNE'  # can be 'MNE' or 'sLORETA'
+mode = 'svd'
+n_svd_comp = 1
+
+stc_psf_eegmeg, _ = point_spread_function(inverse_operator_eegmeg,
+                                          forward, method=method,
+                                          labels=labels,
+                                          lambda2=lambda2,
+                                          pick_ori='normal',
+                                          mode=mode,
+                                          n_svd_comp=n_svd_comp)
+
+stc_psf_meg, _ = point_spread_function(inverse_operator_meg,
+                                       forward, method=method,
+                                       labels=labels,
+                                       lambda2=lambda2,
+                                       pick_ori='normal',
+                                       mode=mode,
+                                       n_svd_comp=n_svd_comp)
+
+# save for viewing in mne_analyze in order of labels in 'labels'
+# last sample is average across PSFs
+# stc_psf_eegmeg.save('psf_eegmeg')
+# stc_psf_meg.save('psf_meg')
+
+from mayavi import mlab
+fmin = 0.
+time_label = "EEGMEG %d"
+fmax = stc_psf_eegmeg.data[:, 0].max()
+fmid = fmax / 2.
+brain_eegmeg = stc_psf_eegmeg.plot(surface='inflated', hemi='rh',
+                                   subjects_dir=subjects_dir,
+                                   time_label=time_label, fmin=fmin,
+                                   fmid=fmid, fmax=fmax,
+                                   figure=mlab.figure(size=(500, 500)))
+
+time_label = "MEG %d"
+fmax = stc_psf_meg.data[:, 0].max()
+fmid = fmax / 2.
+brain_meg = stc_psf_meg.plot(surface='inflated', hemi='rh',
+                             subjects_dir=subjects_dir,
+                             time_label=time_label, fmin=fmin,
+                             fmid=fmid, fmax=fmax,
+                             figure=mlab.figure(size=(500, 500)))
+
+# The PSF is centred around the right auditory cortex label,
+# but clearly extends beyond it.
+# It also contains "sidelobes" or "ghost sources"
+# in middle/superior temporal lobe.
+# For the Aud-RH example, MEG and EEGMEG do not seem to differ a lot,
+# but the addition of EEG still decreases point-spread to distant areas
+# (e.g. to ATL and IFG).
+# The chosen labels are quite far apart from each other, so their PSFs
+# do not overlap (check in mne_analyze).
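
Since `method` above accepts 'MNE', 'dSPM' or 'sLORETA', the comparison drawn
in these comments can be reproduced directly. A minimal sketch reusing only
the variables already defined in this example (the loop itself is the only
addition):

    # Recompute the PSFs for each linear inverse method with otherwise
    # identical settings, e.g. to compare their spatial extent.
    for method in ('MNE', 'dSPM', 'sLORETA'):
        stc_psf, _ = point_spread_function(inverse_operator_eegmeg, forward,
                                           method=method, labels=labels,
                                           lambda2=lambda2, pick_ori='normal',
                                           mode=mode, n_svd_comp=n_svd_comp)
        # stc_psf.save('psf_eegmeg_%s' % method)  # e.g. for mne_analyze
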
diff --git a/examples/inverse/plot_morph_data.py b/examples/inverse/plot_morph_data.py
index af589f2..5e1c496 100644
--- a/examples/inverse/plot_morph_data.py
+++ b/examples/inverse/plot_morph_data.py
@@ -8,12 +8,12 @@ to the anatomy of another subject 'fsaverage'. The output
 is a source estimate defined on the anatomy of 'fsaverage'
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Eric Larson <larson.eric.d at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 import numpy as np
diff --git a/examples/inverse/plot_read_inverse.py b/examples/inverse/plot_read_inverse.py
index dff0af1..8d3eebb 100644
--- a/examples/inverse/plot_read_inverse.py
+++ b/examples/inverse/plot_read_inverse.py
@@ -3,11 +3,11 @@
 Reading an inverse operator and view source space in 3D
 =======================================================
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator
@@ -18,10 +18,10 @@ fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
 
 inv = read_inverse_operator(fname)
 
-print "Method: %s" % inv['methods']
-print "fMRI prior: %s" % inv['fmri_prior']
-print "Number of sources: %s" % inv['nsource']
-print "Number of channels: %s" % inv['nchan']
+print("Method: %s" % inv['methods'])
+print("fMRI prior: %s" % inv['fmri_prior'])
+print("Number of sources: %s" % inv['nsource'])
+print("Number of channels: %s" % inv['nchan'])
 
 ###############################################################################
 # Show result on 3D source space
diff --git a/examples/inverse/plot_read_source_space.py b/examples/inverse/plot_read_source_space.py
index d5027c1..e019f47 100644
--- a/examples/inverse/plot_read_source_space.py
+++ b/examples/inverse/plot_read_source_space.py
@@ -3,11 +3,11 @@
 Reading a source space from a forward operator
 ==============================================
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import os.path as op
 
@@ -15,7 +15,7 @@ import mne
 from mne.datasets import sample
 
 data_path = sample.data_path()
-fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-eeg-oct-6p-fwd.fif')
+fname = op.join(data_path, 'subjects', 'sample', 'bem', 'sample-oct-6-src.fif')
 
 add_geom = True  # include high resolution source space
 src = mne.read_source_spaces(fname, add_geom=add_geom)
diff --git a/examples/inverse/plot_read_stc.py b/examples/inverse/plot_read_stc.py
index 3e483c8..dadd9b1 100644
--- a/examples/inverse/plot_read_stc.py
+++ b/examples/inverse/plot_read_stc.py
@@ -6,11 +6,11 @@ Reading an STC file
 STC files contain activations on the cortex, i.e. source
 reconstructions.
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne.datasets import sample
@@ -21,8 +21,8 @@ fname = data_path + '/MEG/sample/sample_audvis-meg'
 stc = mne.read_source_estimate(fname)
 
 n_vertices, n_samples = stc.data.shape
-print "stc data size: %s (nb of vertices) x %s (nb of samples)" % (
-                                                    n_vertices, n_samples)
+print("stc data size: %s (nb of vertices) x %s (nb of samples)"
+      % (n_vertices, n_samples))
 
 # View source activations
 import matplotlib.pyplot as plt
diff --git a/examples/inverse/plot_tf_dics.py b/examples/inverse/plot_tf_dics.py
index 0b583d0..2446e65 100644
--- a/examples/inverse/plot_tf_dics.py
+++ b/examples/inverse/plot_tf_dics.py
@@ -15,10 +15,10 @@ dynamics of cortical activity. NeuroImage (2008) vol. 40 (4) pp. 1686-1700
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.event import make_fixed_length_events
 from mne.datasets import sample
 from mne.time_frequency import compute_epochs_csd
@@ -43,17 +43,23 @@ raw.info['bads'] = ['MEG 2443']  # 1 bad MEG channel
 # to speed up the example. For a solution based on all MEG channels use
 # meg=True, selection=None and add mag=4e-12 to the reject dictionary.
 left_temporal_channels = mne.read_selection('Left-temporal')
-picks = mne.fiff.pick_types(raw.info, meg='mag', eeg=False, eog=False,
-                            stim=False, exclude='bads',
-                            selection=left_temporal_channels)
+picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
+                       stim=False, exclude='bads',
+                       selection=left_temporal_channels)
 reject = dict(mag=4e-12)
 
+# Setting time windows. Note that tmin and tmax are set so that time-frequency
+# beamforming will be performed for a wider range of time points than will
+# later be displayed on the final spectrogram. This ensures that all time bins
+# displayed represent an average of an equal number of time windows.
+tmin, tmax, tstep = -0.55, 0.75, 0.05  # s
+tmin_plot, tmax_plot = -0.3, 0.5  # s
+
 # Read epochs
-event_id, epoch_tmin, epoch_tmax = 1, -0.3, 0.5
+event_id = 1
 events = mne.read_events(event_fname)
-epochs = mne.Epochs(raw, events, event_id, epoch_tmin, epoch_tmax, proj=True,
-                    picks=picks, baseline=(None, 0), preload=True,
-                    reject=reject)
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
+                    baseline=None, preload=True, reject=reject)
 
 # Read empty room noise raw data
 raw_noise = Raw(noise_fname)
@@ -62,10 +68,9 @@ raw_noise.info['bads'] = ['MEG 2443']  # 1 bad MEG channel
 # Create noise epochs and make sure the number of noise epochs corresponds to
 # the number of data epochs
 events_noise = make_fixed_length_events(raw_noise, event_id)
-epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, epoch_tmin,
-                          epoch_tmax, proj=True, picks=picks,
-                          baseline=(None, 0), preload=True,
-                          reject=reject)
+epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin_plot,
+                          tmax_plot, proj=True, picks=picks,
+                          baseline=None, preload=True, reject=reject)
 # then make sure the number of epochs is the same
 epochs_noise = epochs_noise[:len(epochs.events)]
 
@@ -85,13 +90,6 @@ win_lengths = [0.3, 0.2, 0.15, 0.1]  # s
 # Should be a power of 2 to be faster.
 n_ffts = [256, 128, 128, 128]
 
-# Setting time windows, please note tmin stretches over the baseline, which is
-# selected to be as long as the longest time window. This enables a smooth and
-# accurate localization of activity in time
-tmin = -0.3  # s
-tmax = 0.5  # s
-tstep = 0.05  # s
-
 # Subtract evoked response prior to computation?
 subtract_evoked = False
 
@@ -102,15 +100,20 @@ noise_csds = []
 for freq_bin, win_length, n_fft in zip(freq_bins, win_lengths, n_ffts):
     noise_csd = compute_epochs_csd(epochs_noise, mode='fourier',
                                    fmin=freq_bin[0], fmax=freq_bin[1],
-                                   fsum=True, tmin=tmin,
-                                   tmax=tmin + win_length, n_fft=n_fft)
+                                   fsum=True, tmin=-win_length, tmax=0,
+                                   n_fft=n_fft)
     noise_csds.append(noise_csd)
 
 # Computing DICS solutions for time-frequency windows in a label in source
 # space for faster computation, use label=None for full solution
 stcs = tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
-               freq_bins=freq_bins, subtract_evoked=subtract_evoked, 
+               freq_bins=freq_bins, subtract_evoked=subtract_evoked,
                n_ffts=n_ffts, reg=0.001, label=label)
 
 # Plotting source spectrogram for source with maximum activity
-plot_source_spectrogram(stcs, freq_bins, source_index=None, colorbar=True)
+# Note that tmin and tmax are set to display a time range that is smaller than
+# the one for which beamforming estimates were calculated. This ensures that
+# all time bins shown are a result of smoothing across an identical number of
+# time windows.
+plot_source_spectrogram(stcs, freq_bins, tmin=tmin_plot, tmax=tmax_plot,
+                        source_index=None, colorbar=True)
diff --git a/examples/inverse/plot_tf_lcmv.py b/examples/inverse/plot_tf_lcmv.py
index e65482e..8fcd076 100644
--- a/examples/inverse/plot_tf_lcmv.py
+++ b/examples/inverse/plot_tf_lcmv.py
@@ -15,11 +15,11 @@ dynamics of cortical activity. NeuroImage (2008) vol. 40 (4) pp. 1686-1700
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne import compute_covariance
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.datasets import sample
 from mne.event import make_fixed_length_events
 from mne.beamformer import tf_lcmv
@@ -44,11 +44,19 @@ raw.info['bads'] = ['MEG 2443']  # 1 bad MEG channel
 # to speed up the example. For a solution based on all MEG channels use
 # meg=True, selection=None and add grad=4000e-13 to the reject dictionary.
 left_temporal_channels = mne.read_selection('Left-temporal')
-picks = mne.fiff.pick_types(raw.info, meg='mag', eeg=False, eog=False,
-                            stim=False, exclude='bads',
-                            selection=left_temporal_channels)
+picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
+                       stim=False, exclude='bads',
+                       selection=left_temporal_channels)
 reject = dict(mag=4e-12)
 
+# Setting time limits for reading epochs. Note that tmin and tmax are set so
+# that time-frequency beamforming will be performed for a wider range of time
+# points than will later be displayed on the final spectrogram. This ensures
+# that all time bins displayed represent an average of an equal number of time
+# windows.
+tmin, tmax = -0.55, 0.75  # s
+tmin_plot, tmax_plot = -0.3, 0.5  # s
+
 # Read epochs. Note that preload is set to False to enable tf_lcmv to read the
 # underlying raw object from epochs.raw, which would be set to None during
 # preloading. Filtering is then performed on raw data in tf_lcmv and the epochs
@@ -57,10 +65,10 @@ reject = dict(mag=4e-12)
 # until later. To perform bad epoch rejection based on the reject parameter
 # passed here, run epochs.drop_bad_epochs(). This is done automatically in
 # tf_lcmv to reject bad epochs based on unfiltered data.
-event_id, tmin, tmax = 1, -0.3, 0.5
+event_id = 1
 events = mne.read_events(event_fname)
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
-                    picks=picks, baseline=(None, 0), preload=False,
+                    picks=picks, baseline=None, preload=False,
                     reject=reject)
 
 # Read empty room noise, preload to allow filtering
@@ -72,7 +80,7 @@ events_noise = make_fixed_length_events(raw_noise, event_id, duration=1.)
 # Create an epochs object using preload=True to reject bad epochs based on
 # unfiltered data
 epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin, tmax,
-                          proj=True, picks=picks, baseline=(None, 0),
+                          proj=True, picks=picks, baseline=None,
                           preload=True, reject=reject)
 
 # Make sure the number of noise epochs is the same as data epochs
@@ -103,17 +111,18 @@ data_reg = 0.001
 subtract_evoked = False
 
 # Calculating covariance from empty room noise. To use baseline data as noise
-# substitute raw for raw_noise, epochs for epochs_noise, and 0 for tmax.
-# Note, if using baseline data, the averaged evoked response in the baseline 
-# epoch should be flat.
+# substitute raw for raw_noise, epochs.events for epochs_noise.events, tmin for
+# desired baseline length, and 0 for tmax_plot.
+# Note, if using baseline data, the averaged evoked response in the baseline
+# period should be flat.
 noise_covs = []
 for (l_freq, h_freq) in freq_bins:
     raw_band = raw_noise.copy()
     raw_band.filter(l_freq, h_freq, picks=epochs.picks, method='iir', n_jobs=1)
     epochs_band = mne.Epochs(raw_band, epochs_noise.events, event_id,
-                             tmin=tmin, tmax=tmax, picks=epochs.picks,
-                             proj=True)
-                             
+                             tmin=tmin_plot, tmax=tmax_plot, baseline=None,
+                             picks=epochs.picks, proj=True)
+
     noise_cov = compute_covariance(epochs_band)
     noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=noise_reg,
                                    grad=noise_reg, eeg=noise_reg, proj=True)
@@ -123,8 +132,13 @@ for (l_freq, h_freq) in freq_bins:
 # Computing LCMV solutions for time-frequency windows in a label in source
 # space for faster computation, use label=None for full solution
 stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
-               freq_bins=freq_bins, subtract_evoked=subtract_evoked, 
+               freq_bins=freq_bins, subtract_evoked=subtract_evoked,
                reg=data_reg, label=label)
 
-# Plotting source spectrogram for source with maximum activity
-plot_source_spectrogram(stcs, freq_bins, source_index=None, colorbar=True)
+# Plotting source spectrogram for source with maximum activity.
+# Note that tmin and tmax are set to display a time range that is smaller than
+# the one for which beamforming estimates were calculated. This ensures that
+# all time bins shown are a result of smoothing across an identical number of
+# time windows.
+plot_source_spectrogram(stcs, freq_bins, tmin=tmin_plot, tmax=tmax_plot,
+                        source_index=None, colorbar=True)
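
The covariance comment above describes a baseline-as-noise variant without
showing it. A minimal sketch under that description, reusing this example's
variables (hedged: `raw` must be preloaded to allow filtering, and the
pre-stimulus window from `tmin_plot` to 0 serves as the baseline):

    # Baseline data as noise: the task raw/events replace the empty-room
    # data, and the covariance epochs cover only the pre-stimulus period.
    noise_covs_baseline = []
    for (l_freq, h_freq) in freq_bins:
        raw_band = raw.copy()  # requires Raw(raw_fname, preload=True)
        raw_band.filter(l_freq, h_freq, picks=epochs.picks, method='iir',
                        n_jobs=1)
        epochs_band = mne.Epochs(raw_band, epochs.events, event_id,
                                 tmin=tmin_plot, tmax=0, baseline=None,
                                 picks=epochs.picks, proj=True)
        noise_cov = compute_covariance(epochs_band)
        noise_cov = mne.cov.regularize(noise_cov, epochs_band.info,
                                       mag=noise_reg, grad=noise_reg,
                                       eeg=noise_reg, proj=True)
        noise_covs_baseline.append(noise_cov)
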
diff --git a/examples/inverse/plot_time_frequency_mixed_norm_inverse.py b/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
index 831a20a..d15294d 100644
--- a/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
+++ b/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
@@ -32,14 +32,13 @@ Lecture Notes in Computer Science, 2011, Volume 6801/2011,
 600-611, DOI: 10.1007/978-3-642-22092-0_49
 http://dx.doi.org/10.1007/978-3-642-22092-0_49
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
 from mne.datasets import sample
 from mne.minimum_norm import make_inverse_operator, apply_inverse
 from mne.inverse_sparse import tf_mixed_norm
@@ -54,9 +53,9 @@ cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
 cov = mne.read_cov(cov_fname)
 
 # Handling average file
-setno = 'Left visual'
-evoked = fiff.read_evoked(ave_fname, setno=setno, baseline=(None, 0))
-evoked = fiff.pick.pick_channels_evoked(evoked)
+condition = 'Left visual'
+evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
+evoked = mne.pick_channels_evoked(evoked)
 # We make the window slightly larger than what you'll eventually be interested
 # in ([-0.05, 0.3]) to avoid edge effects.
 evoked.crop(tmin=-0.1, tmax=0.4)
@@ -97,19 +96,19 @@ evoked.crop(tmin=-0.05, tmax=0.3)
 residual.crop(tmin=-0.05, tmax=0.3)
 
 ylim = dict(eeg=[-10, 10], grad=[-200, 250], mag=[-600, 600])
-picks = fiff.pick_types(evoked.info, meg='grad', exclude='bads')
+picks = mne.pick_types(evoked.info, meg='grad', exclude='bads')
 evoked.plot(picks=picks, ylim=ylim, proj=True,
             titles=dict(grad='Evoked Response (grad)'))
 
-picks = fiff.pick_types(residual.info, meg='grad', exclude='bads')
+picks = mne.pick_types(residual.info, meg='grad', exclude='bads')
 residual.plot(picks=picks, ylim=ylim, proj=True,
               titles=dict(grad='Residual (grad)'))
 
 ###############################################################################
 # View in 2D and 3D ("glass" brain like 3D plot)
 plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
-                             opacity=0.1, fig_name="TF-MxNE (cond %s)" % setno,
-                             modes=['sphere'], scale_factors=[1.])
+                             opacity=0.1, fig_name="TF-MxNE (cond %s)"
+                             % condition, modes=['sphere'], scale_factors=[1.])
 
 time_label = 'TF-MxNE time=%0.2f ms'
 brain = stc.plot('sample', 'inflated', 'rh', fmin=10e-9, fmid=15e-9,
diff --git a/examples/plot_bem_contour_mri.py b/examples/plot_bem_contour_mri.py
new file mode 100644
index 0000000..fd5e57f
--- /dev/null
+++ b/examples/plot_bem_contour_mri.py
@@ -0,0 +1,25 @@
+"""
+=====================
+Plotting BEM Contours
+=====================
+
+This example displays the BEM surfaces (inner skull, outer skull,
+outer skin) as yellow contours on top of the T1 MRI anatomical image
+used for segmentation. This is useful for inspecting the quality of the
+BEM segmentations which are required for computing the forward solution.
+"""
+
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+from mne.viz import plot_bem
+from mne.datasets import sample
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+
+plot_bem(subject='sample', subjects_dir=subjects_dir, orientation='axial')
+plot_bem(subject='sample', subjects_dir=subjects_dir, orientation='sagittal')
+plot_bem(subject='sample', subjects_dir=subjects_dir, orientation='coronal')
diff --git a/examples/plot_channel_epochs_image.py b/examples/plot_channel_epochs_image.py
index 61dc87d..8c61ff5 100644
--- a/examples/plot_channel_epochs_image.py
+++ b/examples/plot_channel_epochs_image.py
@@ -17,9 +17,9 @@ responses A. Gramfort, R. Keriven, M. Clerc, 2010,
 Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
 http://hal.inria.fr/inria-00497023
 """
-print __doc__
+print(__doc__)
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -27,7 +27,7 @@ import numpy as np
 import matplotlib.pyplot as plt
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -38,13 +38,13 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 # Set up pick list: EEG + MEG - bad channels (modify to your needs)
 raw.info['bads'] = ['MEG 2443', 'EEG 053']
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
diff --git a/examples/plot_coregistration_transform.py b/examples/plot_coregistration_transform.py
new file mode 100644
index 0000000..2ad05c1
--- /dev/null
+++ b/examples/plot_coregistration_transform.py
@@ -0,0 +1,29 @@
+"""
+=========================================
+Plotting head in helmet from a trans file
+=========================================
+
+In this example, the head is shown in the
+MEG helmet along with the EEG electrodes in the MRI
+coordinate system. This allows assessing the
+MEG <-> MRI coregistration quality.
+
+"""
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+from mne import read_evokeds
+from mne.datasets import sample
+from mne.viz import plot_trans
+
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
+
+condition = 'Left Auditory'
+evoked = read_evokeds(evoked_fname, condition=condition, baseline=(-0.2, 0.0))
+plot_trans(evoked.info, trans_fname=trans_fname, subject='sample',
+           subjects_dir=subjects_dir)
diff --git a/examples/plot_decimate_head_surface.py b/examples/plot_decimate_head_surface.py
index 99d71d4..636985a 100644
--- a/examples/plot_decimate_head_surface.py
+++ b/examples/plot_decimate_head_surface.py
@@ -8,9 +8,9 @@ using a cloud of digitization points for coordinate alignment
 instead of e.g. EEG-cap positions.
 
 """
-print __doc__
+print(__doc__)
 
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
diff --git a/examples/plot_define_target_events.py b/examples/plot_define_target_events.py
index f5027db..59ea00d 100644
--- a/examples/plot_define_target_events.py
+++ b/examples/plot_define_target_events.py
@@ -13,14 +13,14 @@ visualize the evoked responses to both 'quickly-processed'
 and 'slowly-processed' face stimuli.
 
 """
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.event import define_target_events
 from mne.datasets import sample
 data_path = sample.data_path()
@@ -31,7 +31,7 @@ raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 #   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
@@ -39,8 +39,8 @@ include = []  # or stim channels ['STI 014']
 raw.info['bads'] += ['EEG 053']  # bads
 
 # pick MEG channels
-picks = fiff.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
-                        include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
+                       include=include, exclude='bads')
 
 ###############################################################################
 # Find stimulus event followed by quick button presses
@@ -56,12 +56,12 @@ fill_na = 99  # the fill value for misses
 events_, lag = define_target_events(events, reference_id, target_id,
                                     sfreq, tmin, tmax, new_id, fill_na)
 
-print events_  # The 99 indicates missing or too late button presses
+print(events_)  # The 99 indicates missing or too late button presses
 
 # besides the events also the lag between target and reference is returned
 # this could e.g. be used as parametric regressor in subsequent analyses.
 
-print lag[lag != fill_na]  # lag in milliseconds
+print(lag[lag != fill_na])  # lag in milliseconds
 
 # #############################################################################
 # Construct epochs
diff --git a/examples/plot_estimate_covariance_matrix_baseline.py b/examples/plot_estimate_covariance_matrix_baseline.py
index 613fb41..11c5c99 100644
--- a/examples/plot_estimate_covariance_matrix_baseline.py
+++ b/examples/plot_estimate_covariance_matrix_baseline.py
@@ -8,14 +8,14 @@ Then we estimate the noise covariance of prestimulus data,
 a.k.a. baseline.
 
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 
 data_path = sample.data_path()
@@ -23,14 +23,14 @@ fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
-raw = fiff.Raw(fname)
+raw = io.Raw(fname)
 
 ###############################################################################
 # Set parameters
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 #   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
@@ -38,8 +38,8 @@ include = []  # or stim channels ['STI 014']
 raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick EEG channels
-picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
-                                            include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
+                       include=include, exclude='bads')
 # Read epochs, with proj off by default so we can plot either way later
 reject = dict(grad=4000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -47,7 +47,7 @@ epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
 
 # Compute the covariance on baseline
 cov = mne.compute_covariance(epochs, tmin=None, tmax=0)
-print cov
+print(cov)
 
 ###############################################################################
 # Show covariance
diff --git a/examples/plot_estimate_covariance_matrix_raw.py b/examples/plot_estimate_covariance_matrix_raw.py
index bcbfc15..837b93b 100644
--- a/examples/plot_estimate_covariance_matrix_raw.py
+++ b/examples/plot_estimate_covariance_matrix_raw.py
@@ -4,35 +4,35 @@ Estimate covariance matrix from a raw FIF file
 ==============================================
 
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 
 data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 
-raw = fiff.Raw(fname)
+raw = io.Raw(fname)
 
 include = []  # or stim channels ['STI 014']
 raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick EEG channels
-picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
-                                            include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
+                       include=include, exclude='bads')
 # setup rejection
 reject = dict(eeg=80e-6, eog=150e-6)
 
 # Compute the covariance from the raw data
 cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=reject)
-print cov
+print(cov)
 
 ###############################################################################
 # Show covariance
-mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=True)
+fig_cov, fig_svd = mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=True)
 # try setting proj to False to see the effect
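
As the closing comment suggests, the effect of the SSP projectors on the
covariance display can be checked directly, reusing the objects above:

    # Same covariance plot, but without applying the SSP projectors
    mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=False)
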
diff --git a/examples/plot_evoked_delayed_ssp.py b/examples/plot_evoked_delayed_ssp.py
index 597736c..7abf6e8 100644
--- a/examples/plot_evoked_delayed_ssp.py
+++ b/examples/plot_evoked_delayed_ssp.py
@@ -12,16 +12,16 @@ Then we will explore the impact of the particular SSP projectors
 on the evoked data.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -32,12 +32,13 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname, preload=True)
+raw.filter(1, 40, method='iir')
 events = mne.read_events(event_fname)
 
 # pick magnetometer channels
-picks = fiff.pick_types(raw.info, meg='mag', stim=False, eog=True,
-                        include=[], exclude='bads')
+picks = mne.pick_types(raw.info, meg='mag', stim=False, eog=True,
+                       include=[], exclude='bads')
 
 # If we suspend SSP projection at the epochs stage we might reject
 # more epochs than necessary. To deal with this we set proj to `delayed`
@@ -50,7 +51,7 @@ picks = fiff.pick_types(raw.info, meg='mag', stim=False, eog=True,
 # projections at the evoked stage.
 
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), reject=dict(mag=4e-12),
+                    baseline=None, reject=dict(mag=4e-12),
                     proj='delayed')
 
 evoked = epochs.average()  # average epochs and get an Evoked dataset.
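
With proj='delayed' the projectors stay unapplied on the evoked data, so they
can be toggled at plotting time. A minimal sketch (assuming the interactive
projection mode of the evoked plotting API in this release):

    # Toggle the SSP projectors interactively on the evoked response;
    # 'interactive' opens check boxes for each projector.
    evoked.plot(proj='interactive')
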
diff --git a/examples/plot_evoked_topomap.py b/examples/plot_evoked_topomap.py
index b4ade71..7793540 100644
--- a/examples/plot_evoked_topomap.py
+++ b/examples/plot_evoked_topomap.py
@@ -1,35 +1,43 @@
-"""
-========================================
-Plotting topographic maps of evoked data
-========================================
-
-Load evoked data and plot topomaps for selected time points.
-
-"""
-# Author: Christian Brodbeck <christianbrodbeck at nyu.edu>
-#
-# License: BSD (3-clause)
-
-print __doc__
-
-import numpy as np
-import mne
-
-path = mne.datasets.sample.data_path()
-fname = path + '/MEG/sample/sample_audvis-ave.fif'
-
-# load evoked and subtract baseline
-evoked = mne.fiff.read_evoked(fname, 'Left Auditory', baseline=(None, 0))
-
-# plot magnetometer data as topomap at 1 time point : 100ms
-evoked.plot_topomap(0.1, ch_type='mag', size=3, colorbar=False)
-
-# set time instants in seconds (from 50 to 150ms in a step of 10ms)
-times = np.arange(0.05, 0.15, 0.01)
-# If times is set to None only 10 regularly spaced topographies will be shown
-
-# plot magnetometer data as topomaps
-evoked.plot_topomap(times, ch_type='mag')
-
-# plot gradiometer data (plots the RMS for each pair of gradiometers)
-evoked.plot_topomap(times, ch_type='grad')
+"""
+========================================
+Plotting topographic maps of evoked data
+========================================
+
+Load evoked data and plot topomaps for selected time points.
+
+"""
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#          Tal Linzen <linzen at nyu.edu>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import numpy as np
+import matplotlib.pyplot as plt
+from mne.datasets import sample
+from mne import read_evokeds
+
+path = sample.data_path()
+fname = path + '/MEG/sample/sample_audvis-ave.fif'
+
+# load evoked and subtract baseline
+condition = 'Left Auditory'
+evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
+
+# set time instants in seconds (from 50 to 150ms in a step of 10ms)
+times = np.arange(0.05, 0.15, 0.01)
+# If times is set to None, only 10 regularly spaced topographies will be shown
+
+# plot magnetometer data as topomaps
+evoked.plot_topomap(times, ch_type='mag')
+
+# plot gradiometer data (plots the RMS for each pair of gradiometers)
+evoked.plot_topomap(times, ch_type='grad')
+
+# plot magnetometer data as topomap at 1 time point : 100ms
+# and add channel labels and title
+evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
+                    size=6, res=128, title='Auditory response')
+plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
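
As noted in the comment above, passing times=None lets plot_topomap pick 10
regularly spaced instants automatically; for example:

    # Let plot_topomap choose the time points itself
    evoked.plot_topomap(times=None, ch_type='mag')
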
diff --git a/examples/plot_evoked_topomap_delayed_ssp.py b/examples/plot_evoked_topomap_delayed_ssp.py
index 3f80e8d..82745e3 100644
--- a/examples/plot_evoked_topomap_delayed_ssp.py
+++ b/examples/plot_evoked_topomap_delayed_ssp.py
@@ -9,17 +9,17 @@ related to the trade-off between denoising and preserving signal.
 In this example we demonstrate how to use topographic maps for delayed
 SSP application.
 """
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #          Christian Brodbeck <christianbrodbeck at nyu.edu>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -31,7 +31,7 @@ ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 # delete EEG projections (we know it's the last one)
@@ -40,8 +40,8 @@ raw.del_proj(-1)
 [raw.add_proj(p) for p in mne.read_proj(ecg_fname) if 'axial' in p['desc']]
 
 # pick magnetometer channels
-picks = fiff.pick_types(raw.info, meg='mag', stim=False, eog=True,
-                        include=[], exclude='bads')
+picks = mne.pick_types(raw.info, meg='mag', stim=False, eog=True,
+                       include=[], exclude='bads')
 
 # We will make use of the proj `delayed` option to
 # interactively select projections at the evoked stage.
diff --git a/examples/plot_evoked_whitening.py b/examples/plot_evoked_whitening.py
index 201333b..98e4522 100644
--- a/examples/plot_evoked_whitening.py
+++ b/examples/plot_evoked_whitening.py
@@ -10,13 +10,14 @@ of Gaussian whiten noise from which we expect values around
 and less than 2 standard deviations.
 
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
-import mne
+from mne import read_cov, whiten_evoked, pick_types, read_evokeds
+from mne.cov import regularize
 from mne.datasets import sample
 
 data_path = sample.data_path()
@@ -25,23 +26,21 @@ fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
 
 # Reading
-evoked = mne.fiff.Evoked(fname, setno=0, baseline=(None, 0), proj=True)
-noise_cov = mne.read_cov(cov_fname)
+evoked = read_evokeds(fname, condition=0, baseline=(None, 0), proj=True)
+noise_cov = read_cov(cov_fname)
 
 ###############################################################################
 # Show result
 
 # Pick channels to view
-picks = mne.fiff.pick_types(evoked.info, meg=True, eeg=True, exclude='bads')
+picks = pick_types(evoked.info, meg=True, eeg=True, exclude='bads')
 evoked.plot(picks=picks)
 
-noise_cov = mne.cov.regularize(noise_cov, evoked.info,
-                               grad=0.1, mag=0.1, eeg=0.1)
+noise_cov = regularize(noise_cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1)
 
-evoked_white = mne.whiten_evoked(evoked, noise_cov, picks, diag=True)
+evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
 
 # plot the whitened evoked data to see if baseline signals match the
 # assumption of Gaussian white noise from which we expect values around
 # and less than 2 standard deviations.
-import matplotlib.pyplot as plt
 evoked_white.plot(picks=picks, unit=False, hline=[-2, 2])
diff --git a/examples/plot_extract_events_from_raw.py b/examples/plot_extract_events_from_raw.py
new file mode 100644
index 0000000..e4a7a2e
--- /dev/null
+++ b/examples/plot_extract_events_from_raw.py
@@ -0,0 +1,41 @@
+"""
+=========================
+Find events in a raw file
+=========================
+
+Find events from the stimulation/trigger channel in the raw data.
+Then plot them to get an idea of the paradigm.
+"""
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import mne
+from mne.datasets import sample
+from mne.io import Raw
+
+data_path = sample.data_path()
+fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+
+# Reading events
+raw = Raw(fname)
+
+events = mne.find_events(raw, stim_channel='STI 014')
+
+# Writing events
+mne.write_events('events.fif', events)
+
+for ind, before, after in events[:5]:
+    print("At sample %d stim channel went from %d to %d"
+          % (ind, before, after))
+
+# Plot the events to get an idea of the paradigm
+# Specify colors and an event_id dictionary for the legend.
+event_id = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4, 'smiley': 5,
+            'button': 32}
+color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c', 5: 'black', 32: 'blue'}
+
+mne.viz.plot_events(events, raw.info['sfreq'], raw.first_samp, color=color,
+                    event_id=event_id)
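
The events written above can be read back and filtered with plain indexing; a
short sketch reusing the event_id mapping already defined (column 2 of the
events array holds the trigger value, as the loop above shows):

    # Read the saved events back and keep only the left-auditory ones
    events_from_file = mne.read_events('events.fif')
    aud_l = events_from_file[events_from_file[:, 2] == event_id['aud_l']]
    print("Kept %d 'aud_l' events" % len(aud_l))
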
diff --git a/examples/plot_from_raw_to_epochs_to_evoked.py b/examples/plot_from_raw_to_epochs_to_evoked.py
index 599813e..a656bbe 100644
--- a/examples/plot_from_raw_to_epochs_to_evoked.py
+++ b/examples/plot_from_raw_to_epochs_to_evoked.py
@@ -8,17 +8,16 @@ a list of events. The epochs are averaged to produce evoked
 data and then saved to disk.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis A. Engemann <d.engemann at fz-juelich.de>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis A. Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
-from mne.viz import plot_drop_log
 data_path = sample.data_path()
 
 ###############################################################################
@@ -28,7 +27,7 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 #   Plot raw data
@@ -39,8 +38,8 @@ include = []  # or stim channels ['STI 014']
 raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick EEG channels
-picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
-                        include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
+                       include=include, exclude='bads')
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6),
@@ -55,15 +54,25 @@ evoked.save('sample_audvis_eeg-ave.fif')  # save evoked data to disk
 ###############################################################################
 # View evoked response
 times = 1e3 * epochs.times  # time in milliseconds
+
+ch_max_name, latency = evoked.get_peak(mode='neg')
+
 import matplotlib.pyplot as plt
 evoked.plot()
+
 plt.xlim([times[0], times[-1]])
 plt.xlabel('time (ms)')
 plt.ylabel('Potential (uV)')
 plt.title('EEG evoked potential')
+
+plt.axvline(latency * 1e3, color='red',
+            label=ch_max_name, linewidth=2,
+            linestyle='--')
+plt.legend(loc='best')
+
 plt.show()
 
 # Look at channels that caused dropped events, showing that the subject's
 # blinks were likely to blame for most epochs being dropped
 epochs.drop_bad_epochs()
-plot_drop_log(epochs.drop_log, subject='sample')
+epochs.plot_drop_log(subject='sample')
diff --git a/examples/plot_from_raw_to_multiple_epochs_to_evoked.py b/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
index d0aa70c..a72272b 100644
--- a/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
+++ b/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
@@ -8,16 +8,16 @@ a raw file given a list of events. The epochs are averaged to produce
 evoked data and then saved to disk.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Eric Larson <larson.eric.d at gmail.com>
 #          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 from mne.epochs import combine_event_ids
 data_path = sample.data_path()
@@ -31,7 +31,7 @@ tmin = -0.2
 tmax = 0.5
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 #   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
@@ -39,8 +39,8 @@ include = []  # or stim channels ['STI 014']
 raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick EEG channels
-picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
-                        include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
+                       include=include, exclude='bads')
 # Read epochs
 epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
@@ -54,7 +54,7 @@ combine_event_ids(epochs, ['VisL', 'VisR'], {'Visual': 34}, copy=False)
 evokeds = [epochs[cond].average() for cond in ['Auditory', 'Visual']]
 
 # save evoked data to disk
-fiff.write_evoked('sample_auditory_and_visual_eeg-ave.fif', evokeds)
+mne.write_evokeds('sample_auditory_and_visual_eeg-ave.fif', evokeds)
 
 ###############################################################################
 # View evoked response
diff --git a/examples/plot_make_forward.py b/examples/plot_make_forward.py
index 8ac7951..69af985 100644
--- a/examples/plot_make_forward.py
+++ b/examples/plot_make_forward.py
@@ -7,7 +7,7 @@ Create a forward operator and display sensitivity maps
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne.datasets import sample
@@ -27,26 +27,38 @@ fwd = mne.convert_forward_solution(fwd, surf_ori=True)
 fwd = mne.convert_forward_solution(fwd, surf_ori=True)
 leadfield = fwd['sol']['data']
 
-print "Leadfield size : %d x %d" % leadfield.shape
+print("Leadfield size : %d x %d" % leadfield.shape)
 
 grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
 mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
 eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
 
 ###############################################################################
-# Show gain matrix a.k.a. leadfield matrix with sensitivy map
+# Show gain matrix a.k.a. leadfield matrix with sensitivity map
 
 import matplotlib.pyplot as plt
-plt.matshow(leadfield[:, :500])
-plt.xlabel('sources')
-plt.ylabel('sensors')
-plt.title('Lead field matrix (500 dipoles only)')
+picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
+picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
+
+fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
+fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
+for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
+    im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
+                   cmap='RdBu_r')  # colormap belongs on the image itself
+    ax.set_title(ch_type.upper())
+    ax.set_xlabel('sources')
+    ax.set_ylabel('sensors')
+    plt.colorbar(im, ax=ax)
+plt.show()
 
 plt.figure()
 plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
-         bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'])
+         bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
+         color=['c', 'b', 'k'])
 plt.legend()
 plt.title('Normal orientation sensitivity')
+plt.xlabel('sensitivity')
+plt.ylabel('count')
 plt.show()
 
 args = dict(fmin=0.1, fmid=0.5, fmax=0.9, smoothing_steps=7)
diff --git a/examples/plot_meg_eeg_fields_3d.py b/examples/plot_meg_eeg_fields_3d.py
new file mode 100644
index 0000000..e863edd
--- /dev/null
+++ b/examples/plot_meg_eeg_fields_3d.py
@@ -0,0 +1,37 @@
+"""
+======================
+Plot M/EEG field lines
+======================
+
+In this example, M/EEG data are remapped onto the
+MEG helmet (MEG) and subject's head surface (EEG).
+This process can be computationally intensive.
+"""
+
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+
+# License: BSD (3-clause)
+
+print(__doc__)
+
+from mne.datasets import sample
+from mne import make_field_map, read_evokeds
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
+# If trans_fname is set to None then only MEG estimates can be visualized
+
+condition = 'Left Auditory'
+evoked = read_evokeds(evoked_fname, condition=condition, baseline=(-0.2, 0.0))
+
+# Compute the field maps to project MEG and EEG data to MEG helmet
+# and scalp surface
+maps = make_field_map(evoked, trans_fname=trans_fname, subject='sample',
+                      subjects_dir=subjects_dir, n_jobs=1)
+
+# explore several points in time
+[evoked.plot_field(maps, time=time) for time in [0.09, .11]]
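
Per the comment above, setting trans_fname=None restricts the mapping to MEG;
a minimal sketch (maps_meg is just an illustrative variable name):

    # MEG helmet map only -- no MRI transform is needed for this case
    maps_meg = make_field_map(evoked, trans_fname=None)
    evoked.plot_field(maps_meg, time=0.1)
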
diff --git a/examples/plot_read_and_write_raw_data.py b/examples/plot_read_and_write_raw_data.py
index a048422..b75b72b 100644
--- a/examples/plot_read_and_write_raw_data.py
+++ b/examples/plot_read_and_write_raw_data.py
@@ -3,23 +3,23 @@
 Reading and writing raw files
 =============================
 
-In this example we read a raw file. Plot a segment of MEG data
+In this example, we read a raw file, plot a segment of MEG data
 restricted to MEG channels, and save these data to a new
 raw file.
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
-from mne import fiff
+import mne
 from mne.datasets import sample
 data_path = sample.data_path()
 
 fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 
-raw = fiff.Raw(fname)
+raw = mne.io.Raw(fname)
 
 # Set up pick list: MEG + STI 014 - bad channels
 want_meg = True
@@ -28,8 +28,8 @@ want_stim = False
 include = ['STI 014']
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bad channels + 2 more
 
-picks = fiff.pick_types(raw.info, meg=want_meg, eeg=want_eeg, stim=want_stim,
-                        include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg=want_meg, eeg=want_eeg, stim=want_stim,
+                       include=include, exclude='bads')
 
 some_picks = picks[:5]  # take the first 5
 start, stop = raw.time_as_index([0, 15])  # read the first 15s of data
diff --git a/examples/plot_read_bem_surfaces.py b/examples/plot_read_bem_surfaces.py
index 0d946d2..2d1f177 100644
--- a/examples/plot_read_bem_surfaces.py
+++ b/examples/plot_read_bem_surfaces.py
@@ -3,11 +3,11 @@
 Reading BEM surfaces from a forward solution
 ============================================
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne.datasets import sample
@@ -17,7 +17,7 @@ fname = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
 
 surfaces = mne.read_bem_surfaces(fname, add_geom=True)
 
-print "Number of surfaces : %d" % len(surfaces)
+print("Number of surfaces : %d" % len(surfaces))
 
 ###############################################################################
 # Show result
diff --git a/examples/plot_read_epochs.py b/examples/plot_read_epochs.py
index fd64d53..b926a76 100644
--- a/examples/plot_read_epochs.py
+++ b/examples/plot_read_epochs.py
@@ -8,16 +8,15 @@ a list of events. For illustration, we compute the evoked responses
 for both MEG and EEG data by averaging all the epochs.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
-from mne.viz import plot_evoked
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -28,13 +27,13 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 # Set up pick list: EEG + MEG - bad channels (modify to your needs)
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
-picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
@@ -44,4 +43,4 @@ evoked = epochs.average()  # average epochs to get the evoked response
 
 ###############################################################################
 # Show result
-plot_evoked(evoked)
+evoked.plot()
diff --git a/examples/plot_read_evoked.py b/examples/plot_read_evoked.py
index 60e54be..aea48a7 100644
--- a/examples/plot_read_evoked.py
+++ b/examples/plot_read_evoked.py
@@ -4,25 +4,28 @@ Reading and writing an evoked file
 ==================================
 
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
-from mne import fiff
+from mne import read_evokeds
 from mne.datasets import sample
-from mne.viz import plot_evoked
 
 data_path = sample.data_path()
 
 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 
 # Reading
-evoked = fiff.Evoked(fname, setno='Left Auditory',
-                     baseline=(None, 0), proj=True)
+condition = 'Left Auditory'
+evoked = read_evokeds(fname, condition=condition, baseline=(None, 0),
+                      proj=True)
 
 ###############################################################################
-# Show result:
+# Show result as a butterfly plot:
 # By using exclude=[] bad channels are not excluded and are shown in red
-plot_evoked(evoked, exclude=[])
+evoked.plot(exclude=[])
+
+# Show result as a 2D image (x: time, y: channels, color: amplitude)
+evoked.plot_image(exclude=[])
diff --git a/examples/plot_read_forward.py b/examples/plot_read_forward.py
index 3ca2947..38b4613 100644
--- a/examples/plot_read_forward.py
+++ b/examples/plot_read_forward.py
@@ -3,11 +3,11 @@
 Read a forward operator and display sensitivity maps
 ====================================================
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne.datasets import sample
@@ -19,14 +19,14 @@ subjects_dir = data_path + '/subjects'
 fwd = mne.read_forward_solution(fname, surf_ori=True)
 leadfield = fwd['sol']['data']
 
-print "Leadfield size : %d x %d" % leadfield.shape
+print("Leadfield size : %d x %d" % leadfield.shape)
 
 grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
 mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
 eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
 
 ###############################################################################
-# Show gain matrix a.k.a. leadfield matrix with sensitivy map
+# Show gain matrix a.k.a. leadfield matrix with sensitivity map
 
 import matplotlib.pyplot as plt
 plt.matshow(leadfield[:, :500])
diff --git a/examples/plot_read_noise_covariance_matrix.py b/examples/plot_read_noise_covariance_matrix.py
index b583578..284e4f8 100644
--- a/examples/plot_read_noise_covariance_matrix.py
+++ b/examples/plot_read_noise_covariance_matrix.py
@@ -3,11 +3,11 @@
 Reading/Writing a noise covariance matrix
 =========================================
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne.datasets import sample
@@ -16,7 +16,7 @@ data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
 
 cov = mne.Covariance(fname)
-print cov
+print(cov)
 
 ###############################################################################
 # Show covariance
diff --git a/examples/plot_shift_evoked.py b/examples/plot_shift_evoked.py
index 98a5f96..6b8fe5d 100644
--- a/examples/plot_shift_evoked.py
+++ b/examples/plot_shift_evoked.py
@@ -8,11 +8,11 @@ Shifting time-scale in evoked data
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 import mne
-from mne import fiff
+from mne.viz import tight_layout
 from mne.datasets import sample
 
 data_path = sample.data_path()
@@ -20,11 +20,12 @@ data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 
 # Reading evoked data
-evoked = fiff.Evoked(fname, setno='Left Auditory',
-                     baseline=(None, 0), proj=True)
+condition = 'Left Auditory'
+evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0),
+                          proj=True)
 
-picks = fiff.pick_channels(ch_names=evoked.info['ch_names'],
-                           include="MEG 2332", exclude="bad")
+ch_names = evoked.info['ch_names']
+picks = mne.pick_channels(ch_names=ch_names, include="MEG 2332", exclude="bad")
 
 # Create subplots
 f, (ax1, ax2, ax3) = plt.subplots(3)
@@ -43,4 +44,4 @@ evoked.shift_time(0.5, relative=False)
 evoked.plot(exclude=[], picks=picks, axes=ax3,
             titles=dict(grad='Absolute shift: 500 ms'))
 
-mne.viz.tight_layout()
+tight_layout()
diff --git a/examples/plot_simulate_evoked_data.py b/examples/plot_simulate_evoked_data.py
index a0cc632..6fbed34 100644
--- a/examples/plot_simulate_evoked_data.py
+++ b/examples/plot_simulate_evoked_data.py
@@ -5,26 +5,30 @@ Generate simulated evoked data
 
 """
 # Author: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
-#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
+print(__doc__)
+
 import numpy as np
 import matplotlib.pyplot as plt
 
-import mne
-from mne.fiff.pick import pick_types_evoked, pick_types_forward
+from mne import (read_proj, read_forward_solution, read_cov, read_label,
+                 pick_types_evoked, pick_types_forward, pick_types,
+                 read_evokeds)
+from mne.io import Raw
 from mne.datasets import sample
 from mne.time_frequency import iir_filter_raw, morlet
-from mne.viz import plot_evoked, plot_sparse_source_estimates
+from mne.viz import plot_sparse_source_estimates
 from mne.simulation import generate_sparse_stc, generate_evoked
 
 ###############################################################################
 # Load real data as templates
 data_path = sample.data_path()
 
-raw = mne.fiff.Raw(data_path + '/MEG/sample/sample_audvis_raw.fif')
-proj = mne.read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')
+raw = Raw(data_path + '/MEG/sample/sample_audvis_raw.fif')
+proj = read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')
 raw.info['projs'] += proj
 raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels
 
@@ -32,17 +36,18 @@ fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
 ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
 cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
 
-fwd = mne.read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
+fwd = read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
 fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
 
-cov = mne.read_cov(cov_fname)
+cov = read_cov(cov_fname)
 
-evoked_template = mne.fiff.read_evoked(ave_fname, setno=0, baseline=None)
+condition = 'Left Auditory'
+evoked_template = read_evokeds(ave_fname, condition=condition, baseline=None)
 evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
                                     exclude=raw.info['bads'])
 
 label_names = ['Aud-lh', 'Aud-rh']
-labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
+labels = [read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
           for ln in label_names]
 
 ###############################################################################
@@ -68,7 +73,7 @@ stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
 
 ###############################################################################
 # Generate noisy evoked data
-picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads')
+picks = pick_types(raw.info, meg=True, exclude='bads')
 iir_filter = iir_filter_raw(raw, order=5, picks=picks, tmin=60, tmax=180)
 evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
                          tmin=0.0, tmax=0.2, iir_filter=iir_filter)
@@ -81,4 +86,4 @@ plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
 plt.figure()
 plt.psd(evoked.data[0])
 
-plot_evoked(evoked)
+evoked.plot()
diff --git a/examples/plot_ssp_projs_sensitivity_map.py b/examples/plot_ssp_projs_sensitivity_map.py
index c64bc2e..878a15e 100644
--- a/examples/plot_ssp_projs_sensitivity_map.py
+++ b/examples/plot_ssp_projs_sensitivity_map.py
@@ -6,13 +6,13 @@ Sensitivity map of SSP projections
 This example shows the sources that have a forward field
 similar to the first SSP vector correcting for ECG.
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
-import mne
+from mne import read_forward_solution, read_proj, sensitivity_map
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -20,16 +20,15 @@ subjects_dir = data_path + '/subjects'
 fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
 ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
 
-fwd = mne.read_forward_solution(fname, surf_ori=True)
-projs = mne.read_proj(ecg_fname)
+fwd = read_forward_solution(fname, surf_ori=True)
+projs = read_proj(ecg_fname)
 projs = projs[3:][::2]  # take only one projection per channel type
 
 # Compute sensitivity map
-ssp_ecg_map = mne.sensitivity_map(fwd, ch_type='grad', projs=projs,
-                                  mode='angle')
+ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle')
 
 ###############################################################################
-# Show sensitivy map
+# Show sensitivity map
 
 import matplotlib.pyplot as plt
 plt.hist(ssp_ecg_map.data.ravel())
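The chained slice above, projs[3:][::2], first drops the first three
projectors and then keeps every other one of the remainder. A quick
illustration with placeholder integers standing in for projection items:

    items = list(range(9))
    print(items[3:])       # [3, 4, 5, 6, 7, 8]
    print(items[3:][::2])  # [3, 5, 7] -- one of every two remaining items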
diff --git a/examples/plot_ssp_projs_topomaps.py b/examples/plot_ssp_projs_topomaps.py
index cc49cde..c86a666 100644
--- a/examples/plot_ssp_projs_topomaps.py
+++ b/examples/plot_ssp_projs_topomaps.py
@@ -6,26 +6,24 @@ Plot SSP projections topographies
 This example shows how to display topographies of SSP projection vectors.
 The projections used are the ones correcting for ECG artifacts.
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#         Denis A. Engemann <d.engemann at fz-juuelich.de>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis A. Engemann <denis.engemann at gmail.com>
 
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
-import matplotlib.pyplot as plt
-import mne
+from mne import read_proj, find_layout, read_evokeds
 from mne.datasets import sample
+from mne import viz
 data_path = sample.data_path()
 
 ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
 ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 
-evoked = mne.fiff.read_evoked(ave_fname, setno='Left Auditory')
-projs = mne.read_proj(ecg_fname)
+evoked = read_evokeds(ave_fname, condition='Left Auditory')
+projs = read_proj(ecg_fname)
 
-layouts = [mne.find_layout(evoked.info, k) for k in 'meg', 'eeg']
+layouts = [find_layout(evoked.info, k) for k in ('meg', 'eeg')]
 
-plt.figure(figsize=(12, 6))
-mne.viz.plot_projs_topomap(projs, layout=layouts)
-mne.viz.tight_layout(w_pad=0.5)
+viz.plot_projs_topomap(projs, layout=layouts)
diff --git a/examples/plot_topo_channel_epochs_image.py b/examples/plot_topo_channel_epochs_image.py
index f2a5ef5..a08c699 100644
--- a/examples/plot_topo_channel_epochs_image.py
+++ b/examples/plot_topo_channel_epochs_image.py
@@ -9,17 +9,17 @@ potential / field (ERP/ERF) images.
 One sensor topography plot is produced with the evoked field images from
 the selected channels.
 """
-print __doc__
+print(__doc__)
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
 import matplotlib.pyplot as plt
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -30,13 +30,13 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 # Set up pick list: EEG + MEG - bad channels (modify to your needs)
 raw.info['bads'] = ['MEG 2443', 'EEG 053']
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
+                       exclude='bads')
 
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
diff --git a/examples/plot_topo_compare_conditions.py b/examples/plot_topo_compare_conditions.py
index d1c3458..b929d68 100644
--- a/examples/plot_topo_compare_conditions.py
+++ b/examples/plot_topo_compare_conditions.py
@@ -11,17 +11,17 @@ evoked responses.
 
 """
 
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 import mne
 
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.viz import plot_topo
 from mne.datasets import sample
 data_path = sample.data_path()
@@ -46,8 +46,8 @@ include = []  # or stim channels ['STI 014']
 reject = dict(grad=4000e-13, mag=4e-12)
 
 # pick MEG channels
-picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
-                   include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                       include=include, exclude='bads')
 
 # Create epochs including different events
 epochs = mne.Epochs(raw, events, dict(audio_l=1, visual_r=3), tmin, tmax,
diff --git a/examples/plot_topo_customized.py b/examples/plot_topo_customized.py
new file mode 100644
index 0000000..d8c4003
--- /dev/null
+++ b/examples/plot_topo_customized.py
@@ -0,0 +1,62 @@
+"""
+========================================
+Plot custom topographies for MEG sensors
+========================================
+
+This example exposes the `iter_topography` function that makes it
+very easy to generate custom sensor topography plots.
+Here we will plot the power spectrum of each channel on a topographic
+layout.
+
+"""
+
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import numpy as np
+import mne
+
+from mne.viz import iter_topography
+from mne import io
+from mne.time_frequency import compute_raw_psd
+
+import matplotlib.pyplot as plt
+
+from mne.datasets import sample
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+raw = io.Raw(raw_fname, preload=True)
+raw.filter(1, 20)
+
+picks = mne.pick_types(raw.info, meg=True, exclude=[])
+tmin, tmax = 0, 120  # use the first 120s of data
+fmin, fmax = 2, 20  # look at frequencies between 2 and 20Hz
+n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
+psds, freqs = compute_raw_psd(raw, picks=picks, tmin=tmin, tmax=tmax,
+                              fmin=fmin, fmax=fmax)
+psds = 20 * np.log10(psds)  # scale to dB
+
+
+def my_callback(ax, ch_idx):
+    """
+    This block of code is executed once you click on one of the channel axes
+    in the plot. To work with the viz internals, this function should only take
+    two parameters, the axis and the channel or data index.
+    """
+    ax.plot(freqs, psds[ch_idx], color='red')
+    ax.set_xlabel('Frequency (Hz)')
+    ax.set_ylabel('Power (dB)')
+
+for ax, idx in iter_topography(raw.info,
+                               fig_facecolor='white',
+                               axis_facecolor='white',
+                               axis_spinecolor='white',
+                               on_pick=my_callback):
+    ax.plot(psds[idx], color='red')
+
+plt.gcf().suptitle('Power spectral densities')
+plt.show()
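A note on the dB scaling used above: 20 * np.log10 is the amplitude
convention, while 10 * np.log10 is the textbook scaling for power
quantities such as a PSD. A small sketch contrasting the two on
arbitrary values:

    import numpy as np
    p = np.array([1e-24, 1e-22])
    print(10 * np.log10(p))  # power dB: [-240. -220.]
    print(20 * np.log10(p))  # amplitude dB, as in the example: [-480. -440.]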
diff --git a/examples/plot_topography.py b/examples/plot_topography.py
index 5b5f0c5..4bb04cd 100644
--- a/examples/plot_topography.py
+++ b/examples/plot_topography.py
@@ -5,15 +5,15 @@ Plot topographies for MEG sensors
 
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import matplotlib.pyplot as plt
 
-from mne import fiff
+from mne import read_evokeds
 from mne.viz import plot_topo
 from mne.datasets import sample
 data_path = sample.data_path()
@@ -21,7 +21,8 @@ data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 
 # Reading
-evoked = fiff.read_evoked(fname, setno=0, baseline=(None, 0))
+condition = 'Left Auditory'
+evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
 
 ###############################################################################
 # Show topography
diff --git a/examples/preprocessing/plot_eog_artifact_histogram.py b/examples/preprocessing/plot_eog_artifact_histogram.py
new file mode 100644
index 0000000..ea9e25c
--- /dev/null
+++ b/examples/preprocessing/plot_eog_artifact_histogram.py
@@ -0,0 +1,49 @@
+"""
+========================
+Show EOG artifact timing
+========================
+
+Compute the distribution of timing for EOG artifacts.
+
+"""
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import numpy as np
+import matplotlib.pyplot as plt
+import mne
+from mne import io
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname, preload=True)
+events = mne.find_events(raw, 'STI 014')
+eog_event_id = 512
+eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
+raw.add_events(eog_events, 'STI 014')
+
+# Read epochs
+picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False)
+tmin, tmax = -0.2, 0.5
+event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
+epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
+
+# Get the stim channel data
+pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
+data = epochs.get_data()[:, pick_ch, :].astype(int)
+data = np.sum((data & 512) == 512, axis=0)
+
+###############################################################################
+# Plot EOG artifact distribution
+plt.stem(1e3 * epochs.times, data)
+plt.xlabel('Times (ms)')
+plt.ylabel('Blink counts (from %s trials)' % len(epochs))
+plt.show()
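The bitwise test above works because trigger codes on the stim channel
add up: a sample carrying both a stimulus code and the injected EOG code
512 still has the 512 bit set. A minimal sketch with hypothetical sample
values:

    import numpy as np
    samples = np.array([0, 1, 512, 513, 3])  # 513 = stimulus 1 + EOG 512
    print((samples & 512) == 512)            # [False False  True  True False]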
diff --git a/examples/preprocessing/plot_find_ecg_artifacts.py b/examples/preprocessing/plot_find_ecg_artifacts.py
index 47ccf4a..bc3f653 100644
--- a/examples/preprocessing/plot_find_ecg_artifacts.py
+++ b/examples/preprocessing/plot_find_ecg_artifacts.py
@@ -6,16 +6,16 @@ Find ECG artifacts
 Locate QRS component of ECG.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -24,21 +24,21 @@ data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 
 event_id = 999
 ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
                                                      ch_name='MEG 1531')
 
 # Read epochs
-picks = fiff.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
-                        include=['MEG 1531'], exclude='bads')
+picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
+                       include=['MEG 1531'], exclude='bads')
 tmin, tmax = -0.1, 0.1
 epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
                     proj=False)
 data = epochs.get_data()
 
-print "Number of detected ECG artifacts : %d" % len(data)
+print("Number of detected ECG artifacts : %d" % len(data))
 
 ###############################################################################
 # Plot ECG artifacts
diff --git a/examples/preprocessing/plot_find_eog_artifacts.py b/examples/preprocessing/plot_find_eog_artifacts.py
index d59d0fe..e40af5f 100644
--- a/examples/preprocessing/plot_find_eog_artifacts.py
+++ b/examples/preprocessing/plot_find_eog_artifacts.py
@@ -6,16 +6,16 @@ Find EOG artifacts
 Locate peaks of EOG to spot blinks and general EOG artifacts.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 data_path = sample.data_path()
 
@@ -24,19 +24,19 @@ data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 
 event_id = 998
 eog_events = mne.preprocessing.find_eog_events(raw, event_id)
 
 # Read epochs
-picks = fiff.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
+picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
                         exclude='bads')
 tmin, tmax = -0.2, 0.2
 epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
 data = epochs.get_data()
 
-print "Number of detected EOG artifacts : %d" % len(data)
+print("Number of detected EOG artifacts : %d" % len(data))
 
 ###############################################################################
 # Plot EOG artifacts
diff --git a/examples/preprocessing/plot_ica_from_epochs.py b/examples/preprocessing/plot_ica_from_epochs.py
index 1153a5d..ba7c43e 100644
--- a/examples/preprocessing/plot_ica_from_epochs.py
+++ b/examples/preprocessing/plot_ica_from_epochs.py
@@ -3,25 +3,22 @@
 Compute ICA components on epochs
 ================================
 
-ICA is used to decompose raw data in 49 to 50 sources.
-The source matching the ECG is found automatically
-and displayed. Finally, after the cleaned epochs are
-compared to the uncleaned epochs, evoked ICA sources
-are investigated using sensor space ERF plotting
-techniques.
-
+ICA is fit to MEG raw data.
+We assume that the non-stationary EOG artifacts have already been removed.
+The sources matching the ECG are automatically found and displayed.
+Subsequently, artifact detection and rejection quality are assessed.
+Finally, the impact on the evoked ERF is visualized.
 """
-print __doc__
+print(__doc__)
 
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-import matplotlib.pyplot as plt
 import numpy as np
 import mne
-from mne.fiff import Raw
-from mne.preprocessing.ica import ICA
+from mne.io import Raw
+from mne.preprocessing import ICA, create_ecg_epochs
 from mne.datasets import sample
 
 ###############################################################################
@@ -31,109 +28,48 @@ data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 
 raw = Raw(raw_fname, preload=True)
-raw.apply_proj()
-
-picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
-                            ecg=True, stim=False, exclude='bads')
-
-tmin, tmax, event_id = -0.2, 0.5, 1
-baseline = (None, 0)
-reject = None
+raw.filter(1, 30, method='iir')
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, ecg=True,
+                       stim=False, exclude='bads')
 
+# use longer epochs and more of them for greater artifact exposure
 events = mne.find_events(raw, stim_channel='STI 014')
+event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
+reject = dict(eog=250e-6)
+tmin, tmax = -0.5, 0.5
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False, picks=picks,
-                    baseline=baseline, preload=True, reject=reject)
-
-random_state = np.random.RandomState(42)
+                    baseline=(None, 0), preload=True, reject=reject)
 
 ###############################################################################
-# Setup ICA seed decompose data, then access and plot sources.
-# for more background information visit the plot_ica_from_raw.py example
+# 1) Fit ICA model using the FastICA algorithm
 
-# fit sources from epochs or from raw (both works for epochs)
-ica = ICA(n_components=0.90, n_pca_components=64, max_pca_components=100,
-          noise_cov=None, random_state=random_state)
+ica = ICA(n_components=0.95, method='fastica').fit(epochs)
 
-ica.decompose_epochs(epochs, decim=2)
-print ica
+###############################################################################
+# 2) Find ECG Artifacts
 
-# plot spatial sensitivities of a few ICA components
-title = 'Spatial patterns of ICA components (Magnetometers)'
-source_idx = range(35, 50)
-ica.plot_topomap(source_idx, ch_type='mag')
-plt.suptitle(title, fontsize=12)
+# generate ECG epochs to improve detection by correlation
+ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5, picks=picks)
 
+ecg_inds, scores = ica.find_bads_ecg(ecg_epochs)
+ica.plot_scores(scores, exclude=ecg_inds)
 
-###############################################################################
-# Automatically find ECG and EOG component using correlation coefficient.
-
-# As we don't have an ECG channel we use one that correlates a lot with heart
-# beats: 'MEG 1531'. We can directly pass the name to the find_sources method.
-# In our example, the find_sources method returns and array of correlation
-# scores for each ICA source.
-ecg_ch_name = 'MEG 1531'
-ecg_scores = ica.find_sources_epochs(epochs, target=ecg_ch_name,
-                                     score_func='pearsonr')
-
-# get the source most correlated with the ECG.
-ecg_source_idx = np.argsort(np.abs(ecg_scores))[-1]
-
-# get sources as epochs object and inspect some trial
-some_trial = 10
-title = 'Source most similar to ECG'
-ica.plot_sources_epochs(epochs[some_trial], ecg_source_idx, title=title)
-
-# As we have an EOG channel, we can use it to detect the source.
-eog_scores = ica.find_sources_epochs(epochs, target='EOG 061',
-                                     score_func='pearsonr')
-
-# get maximum correlation index for EOG
-eog_source_idx = np.abs(eog_scores).argmax()
-
-# As the subject did not constantly move her eyes, the movement artifacts
-# may remain hidden when plotting single epochs.
-# Plotting the identified source across epochs reveals
-# considerable EOG artifacts.
-title = 'Source most similar to EOG'
-ica.plot_sources_epochs(epochs, eog_source_idx, title=title)
+title = 'Sources related to %s artifacts (red)'
+show_picks = np.abs(scores).argsort()[::-1][:5]
 
-###############################################################################
-# Reject artifact sources and compare results
-
-# Add detected artifact sources to exclusion list
-ica.exclude += [ecg_source_idx, eog_source_idx]
-
-# Restore sensor space data
-epochs_ica = ica.pick_sources_epochs(epochs)
-
-
-# First show unprocessed, then cleaned epochs
-mags = mne.fiff.pick_types(epochs.info, meg='mag', exclude=[])
-fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
-times = epochs.times * 1e3
-scale = 1e15
-titles = ['raw - ', 'cleaned - ']
-ecg_ch = epochs.ch_names.index(ecg_ch_name)
-for e, (ax1, ax2), title in zip([epochs, epochs_ica], axes.T, titles):
-    ax1.plot(times, e.average(mags).data.T * scale, color='k')
-    ax1.set_title(title + 'evoked')
-    ax2.plot(times, e._data[some_trial, ecg_ch].T * scale, color='r')
-    ax2.set_title(title + 'single trial')
-    if title == 'raw':
-        ax1.set_ylabel('data (fT)')
-    else:
-        ax2.set_xlabel('Time (ms)')
+ica.plot_sources(epochs, show_picks, exclude=ecg_inds, title=title % 'ecg')
+ica.plot_components(ecg_inds, title=title % 'ecg', colorbar=True)
 
-###############################################################################
-# Inspect evoked ICA sources
+ica.exclude += ecg_inds[:3]  # by default we expect 3 reliable ECG components
 
-# create ICA Epochs object.
-ica_epochs = ica.sources_as_epochs(epochs)
+###############################################################################
+# 3) Assess component selection and unmixing quality
 
-# don't exclude bad sources by passing an empty list.
-ica_picks = mne.fiff.pick_types(ica_epochs.info, misc=True, exclude=[])
-ica_evoked = ica_epochs.average(ica_picks)
-ica_evoked.plot(titles=dict(misc='ICA sources'))
+# estimate average artifact
+ecg_evoked = ecg_epochs.average()
+ica.plot_sources(ecg_evoked)  # plot ECG sources + selection
+ica.plot_overlay(ecg_evoked)  # plot ECG cleaning
 
-# Tip: use this for epochs constructed around ECG r-peaks to check whether all
-# ECG components were identified.
+# check effect on ERF of interest
+epochs.crop(-.2, None)  # crop to baseline of interest
+ica.plot_overlay(epochs['aud_l'].average())  # plot remaining left auditory ERF
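Once the bad components sit in ica.exclude, the cleaned sensor data can
be reconstructed. A minimal sketch, assuming the ica and epochs objects
from this example and the apply() method quoted in plot_ica_from_raw.py
below:

    epochs_clean = ica.apply(epochs, copy=True)  # drop excluded sources
    evoked_clean = epochs_clean['aud_l'].average()
    evoked_clean.plot()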
diff --git a/examples/preprocessing/plot_ica_from_raw.py b/examples/preprocessing/plot_ica_from_raw.py
index ba9a848..6657c9b 100644
--- a/examples/preprocessing/plot_ica_from_raw.py
+++ b/examples/preprocessing/plot_ica_from_raw.py
@@ -3,29 +3,23 @@
 Compute ICA components on raw data
 ==================================
 
-ICA is used to decompose raw data in 49 to 50 sources.
-The source matching the ECG is found automatically
-and displayed. Subsequently, the cleaned data is compared
-with the uncleaned data. The last section shows how to export
-the sources into a fiff file for further processing and displaying, e.g.
-using mne_browse_raw.
-
+ICA is fit to MEG raw data.
+The sources matching the ECG and EOG are automatically found and displayed.
+Subsequently, artifact detection and rejection quality are assessed.
 """
-print __doc__
+print(__doc__)
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
 import numpy as np
-import matplotlib.pyplot as plt
-
 import mne
-from mne.fiff import Raw
-from mne.preprocessing.ica import ICA
+from mne.io import Raw
+from mne.preprocessing import ICA
+from mne.preprocessing import create_ecg_epochs, create_eog_epochs
 from mne.datasets import sample
-from mne.filter import band_pass_filter
 
 ###############################################################################
 # Setup paths and prepare raw data
@@ -34,166 +28,80 @@ data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 
 raw = Raw(raw_fname, preload=True)
-
-picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
-                            stim=False, exclude='bads')
+raw.filter(1, 45, n_jobs=2)
 
 ###############################################################################
-# Setup ICA seed decompose data, then access and plot sources.
+# 1) Fit ICA model using the FastICA algorithm
 
-# Instead of the actual number of components here we pass a float value
-# between 0 and 1 to select n_components by a percentage of
-# explained variance. Also we decide to use 64 PCA components before mixing
-# back to sensor space. These include the PCA components supplied to ICA plus
-# additional PCA components up to rank 64 of the MEG data.
-# This allows to control the trade-off between denoising and preserving signal.
+# Other available choices are `infomax` or `extended-infomax`
+# We pass a float value between 0 and 1 to select n_components based on the
+# percentage of variance explained by the PCA components.
 
-ica = ICA(n_components=0.90, n_pca_components=None, max_pca_components=100,
-          random_state=0)
+ica = ICA(n_components=0.95, method='fastica')
 
-# 1 minute exposure should be sufficient for artifact detection.
-# However, rejection performance may significantly improve when using
-# the entire data range
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
+                       stim=False, exclude='bads')
 
-# decompose sources for raw data using each third sample.
-ica.decompose_raw(raw, picks=picks, decim=3)
-print ica
+ica.fit(raw, picks=picks, decim=3, reject=dict(mag=4e-12, grad=4000e-13))
 
-# plot reasonable time window for inspection
-start_plot, stop_plot = 100., 103.
-ica.plot_sources_raw(raw, range(30), start=start_plot, stop=stop_plot)
+# maximum number of components to reject
+n_max_ecg, n_max_eog = 3, 1  # here we don't expect horizontal EOG components
 
 ###############################################################################
-# Automatically find the ECG component using correlation with ECG signal.
-
-# First, we create a helper function that iteratively applies the pearson
-# correlation function to sources and returns an array of r values
-# This is to illustrate the way ica.find_sources_raw works. Actually, this is
-# the default score_func.
-
-from scipy.stats import pearsonr
-corr = lambda x, y: np.array([pearsonr(a, y.ravel()) for a in x])[:, 0]
-
-# As we don't have an ECG channel we use one that correlates a lot with heart
-# beats: 'MEG 1531'. To improve detection, we filter the the channel and pass
-# it directly to find sources. The method then returns an array of correlation
-# scores for each ICA source.
+# 2) Identify bad components by analyzing latent sources
 
-ecg_ch_name = 'MEG 1531'
-l_freq, h_freq = 8, 16
-ecg = raw[[raw.ch_names.index(ecg_ch_name)], :][0]
-ecg = band_pass_filter(ecg, raw.info['sfreq'], l_freq, h_freq)
-ecg_scores = ica.find_sources_raw(raw, target=ecg, score_func=corr)
+title = 'Sources related to %s artifacts (red)'
 
-# get maximum correlation index for ECG
-ecg_source_idx = np.abs(ecg_scores).argmax()
-title = 'ICA source matching ECG'
-ica.plot_sources_raw(raw, ecg_source_idx, title=title, stop=3.0)
+# generate ECG epochs and use cross-trial phase statistics for detection
 
-# let us have a look which other components resemble the ECG.
-# We can do this by reordering the plot by our scores using order
-# and generating sort indices for the sources:
+ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5, picks=picks)
 
-ecg_order = np.abs(ecg_scores).argsort()[::-1][:30]  # ascending order
+ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
+ica.plot_scores(scores, exclude=ecg_inds, title=title % 'ecg')
 
-ica.plot_sources_raw(raw, ecg_order, start=start_plot, stop=stop_plot)
+show_picks = np.abs(scores).argsort()[::-1][:5]
 
-# Let's make our ECG component selection more liberal and include sources
-# for which the variance explanation in terms of \{r^2}\ exceeds 5 percent.
-# we will directly extend the ica.exclude list by the result.
+ica.plot_sources(raw, show_picks, exclude=ecg_inds, title=title % 'ecg')
+ica.plot_components(ecg_inds, title=title % 'ecg', colorbar=True)
 
-ica.exclude.extend(np.where(np.abs(ecg_scores) ** 2 > .05)[0])
+ecg_inds = ecg_inds[:n_max_ecg]
+ica.exclude += ecg_inds
 
-###############################################################################
-# Automatically find the EOG component using correlation with EOG signal.
-
-# As we have an EOG channel, we can use it to detect the source.
+# detect EOG by correlation
 
-eog_scores = ica.find_sources_raw(raw, target='EOG 061', score_func=corr)
+eog_inds, scores = ica.find_bads_eog(raw)
+ica.plot_scores(scores, exclude=eog_inds, title=title % 'eog')
 
-# get maximum correlation index for EOG
-eog_source_idx = np.abs(eog_scores).argmax()
+show_picks = np.abs(scores).argsort()[::-1][:5]
 
-# plot the component that correlates most with the EOG
-title = 'ICA source matching EOG'
-ica.plot_sources_raw(raw, eog_source_idx, title=title, stop=3.0)
+ica.plot_sources(raw, show_picks, exclude=eog_inds, title=title % 'eog')
+ica.plot_components(eog_inds, title=title % 'eog', colorbar=True)
 
-# plot spatial sensitivities of EOG and ECG ICA components
-title = 'Spatial patterns of ICA components for ECG+EOG (Magnetometers)'
-source_idx = range(15)
-ica.plot_topomap([ecg_source_idx, eog_source_idx], ch_type='mag')
-plt.suptitle(title, fontsize=12)
+eog_inds = eog_inds[:n_max_eog]
+ica.exclude += eog_inds
 
 ###############################################################################
-# Show MEG data before and after ICA cleaning.
-
-# We now add the eog artifacts to the ica.exclusion list
-ica.exclude += [eog_source_idx]
-
-# Restore sensor space data
-raw_ica = ica.pick_sources_raw(raw, include=None)
-
-start_compare, stop_compare = raw.time_as_index([100, 106])
-
-data, times = raw[picks, start_compare:stop_compare]
-data_clean, _ = raw_ica[picks, start_compare:stop_compare]
-
-plt.figure()
-plt.plot(times, data.T)
-plt.xlabel('time (s)')
-plt.xlim(100, 106)
-plt.ylabel('Raw MEG data (T)')
-y0, y1 = plt.ylim()
-
-plt.figure()
-plt.plot(times, data_clean.T)
-plt.xlabel('time (s)')
-plt.xlim(100, 106)
-plt.ylabel('Denoised MEG data (T)')
-plt.ylim(y0, y1)
-plt.show()
-
-###############################################################################
-# Compare the affected channel before and after ICA cleaning.
-
-affected_idx = raw.ch_names.index(ecg_ch_name)
-
-# plot the component that correlates most with the ECG
-plt.figure()
-plt.plot(times, data[affected_idx], color='k')
-plt.title('Affected channel MEG 1531 before cleaning.')
-y0, y1 = plt.ylim()
-
-# plot the component that correlates most with the ECG
-plt.figure()
-plt.plot(times, data_clean[affected_idx], color='k')
-plt.title('Affected channel MEG 1531 after cleaning.')
-plt.ylim(y0, y1)
-plt.show()
-
-###############################################################################
-# Export ICA as raw for subsequent processing steps in ICA space.
-
-from mne.layouts import make_grid_layout
-
-ica_raw = ica.sources_as_raw(raw, start=100., stop=160., picks=None)
+# 3) Assess component selection and unmixing quality
 
-print ica_raw.ch_names[:5]  # just a few
+# estimate average artifact
+ecg_evoked = ecg_epochs.average()
+ica.plot_sources(ecg_evoked, exclude=ecg_inds)  # plot ECG sources + selection
+ica.plot_overlay(ecg_evoked, exclude=ecg_inds)  # plot ECG cleaning
 
-ica_lout = make_grid_layout(ica_raw.info)
+eog_evoked = create_eog_epochs(raw, tmin=-.5, tmax=.5, picks=picks).average()
+ica.plot_sources(eog_evoked, exclude=eog_inds)  # plot EOG sources + selection
+ica.plot_overlay(eog_evoked, exclude=eog_inds)  # plot EOG cleaning
 
-# Uncomment the following two lines to save sources and layout.
-# ica_raw.save('ica_raw.fif')
-# ica_lout.save(os.path.join(os.environ['HOME'], '.mne/lout/ica.lout'))
+# check that the amplitudes do not change
+ica.plot_overlay(raw)  # EOG artifacts remain
 
 ###############################################################################
-# To save an ICA session you can say:
-# ica.save('my_ica.fif')
+# To save an ICA solution you can say:
+# >>> ica.save('my_ica.fif')
 #
-# You can later restore the session by saying:
+# You can later load the solution by saying:
 # >>> from mne.preprocessing import read_ica
 # >>> read_ica('my_ica.fif')
 #
-# The ICA functionality exposed in this example will then be available at
-# at any later point in time provided the data have the same structure as the
-# data initially supplied to ICA.
+# Apply the solution to Raw, Epochs or Evoked like this:
+# >>> ica.apply(epochs, copy=False)
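A minimal round-trip sketch using only the calls named in the comments
above (save, read_ica, apply); the filename is a placeholder:

    from mne.preprocessing import read_ica
    ica.save('my_ica.fif')              # persist the fitted solution
    ica_loaded = read_ica('my_ica.fif')
    raw_clean = ica_loaded.apply(raw, copy=True)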
diff --git a/examples/read_events.py b/examples/read_events.py
index a088010..8ef1778 100644
--- a/examples/read_events.py
+++ b/examples/read_events.py
@@ -5,11 +5,11 @@ Reading an event file
 
 Read events from a file.
 """
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
 from mne.datasets import sample
@@ -27,5 +27,5 @@ events = mne.read_events(fname, exclude=[4, 32])  # keep all but 4 and 32
 mne.write_events('events.fif', events)
 
 for ind, before, after in events[:5]:
-    print "At sample %d stim channel went from %d to %d" % (
-                                                    ind, before, after)
+    print("At sample %d stim channel went from %d to %d"
+          % (ind, before, after))
diff --git a/examples/realtime/ftclient_rt_average.py b/examples/realtime/ftclient_rt_average.py
new file mode 100644
index 0000000..3465ed7
--- /dev/null
+++ b/examples/realtime/ftclient_rt_average.py
@@ -0,0 +1,90 @@
+"""
+========================================================
+Compute real-time evoked responses with FieldTrip client
+========================================================
+
+This example demonstrates how to connect the MNE real-time
+system to the Fieldtrip buffer using FieldTripClient class.
+
+This example was tested in simulation mode
+
+neuromag2ft --file MNE-sample-data/MEG/sample/sample_audvis_raw.fif
+
+using a modified version of neuromag2ft available at
+
+http://neuro.hut.fi/~mainak/neuromag2ft-2.0.0.zip
+
+to run the FieldTrip buffer. Then running this example acquires the
+data on the client side.
+
+Since the Fieldtrip buffer does not contain all the
+measurement information required by the MNE real-time processing
+pipeline, an info dictionary must be provided to instantiate FieldTripClient.
+Alternatively, the MNE-Python script will try to guess the missing
+measurement info from the Fieldtrip Header object.
+
+Together with RtEpochs, this can be used to compute evoked
+responses using moving averages.
+"""
+
+print(__doc__)
+
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne.viz import plot_events
+from mne.realtime import FieldTripClient, RtEpochs
+
+import matplotlib.pyplot as plt
+
+# select the left-auditory condition
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# user must provide list of bad channels because
+# FieldTrip header object does not provide that
+bads = ['MEG 2443', 'EEG 053']
+
+plt.ion()  # make plot interactive
+_, ax = plt.subplots(2, 1, figsize=(8, 8))  # create subplots
+
+with FieldTripClient(host='localhost', port=1972,
+                     tmax=150, wait_max=10) as rt_client:
+
+    # get measurement info guessed by MNE-Python
+    raw_info = rt_client.get_measurement_info()
+
+    # select gradiometers
+    picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=bads)
+
+    # create the real-time epochs object
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax,
+                         stim_channel='STI 014', picks=picks,
+                         reject=dict(grad=4000e-13, eog=150e-6),
+                         decim=1, isi_max=10.0, proj=None)
+
+    # start the acquisition
+    rt_epochs.start()
+
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        print("Just got epoch %d" % (ii + 1))
+
+        if ii > 0:
+            ev += evoked
+        evoked = ev
+
+        ax[0].cla()  # clear both axes before redrawing
+        ax[1].cla()
+
+        plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'],
+                    first_samp=-rt_client.tmin_samp, axes=ax[0])
+
+        evoked.plot(axes=ax[1])  # plot on second subplot
+        ax[1].set_title('Evoked response for gradiometer channels '
+                        '(event_id = %d)' % event_id)
+
+        plt.pause(0.05)
+        plt.draw()
+
+    plt.close()
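The ev += evoked loop above relies on Evoked addition, which MNE
combines weighted by each object's nave, so evoked always holds the
average of all epochs seen so far. A plain-numpy sketch of the same
running-mean idea, independent of the Evoked class:

    import numpy as np
    running = None
    for n, x in enumerate(np.random.randn(10, 3), start=1):
        running = x if running is None else running + (x - running) / n
    # running now equals the mean of the samples seen so far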
diff --git a/examples/realtime/plot_compute_rt_average.py b/examples/realtime/plot_compute_rt_average.py
index 2f00283..50ba72c 100644
--- a/examples/realtime/plot_compute_rt_average.py
+++ b/examples/realtime/plot_compute_rt_average.py
@@ -11,7 +11,7 @@ Note: The MNE Real-time server (mne_rt_server), which is part of mne-cpp,
 has to be running on the same computer.
 """
 
-print __doc__
+print(__doc__)
 
 # Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Mainak Jas <mainak at neuro.hut.fi>
@@ -26,11 +26,11 @@ from mne.realtime import RtEpochs, MockRtClient
 # Fiff file to simulate the realtime client
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-raw = mne.fiff.Raw(raw_fname, preload=True)
+raw = mne.io.Raw(raw_fname, preload=True)
 
 # select gradiometers
-picks = mne.fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                            stim=True, exclude=raw.info['bads'])
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=True, exclude=raw.info['bads'])
 
 # select the left-auditory condition
 event_id, tmin, tmax = 1, -0.2, 0.5
@@ -48,7 +48,7 @@ rt_epochs.start()
 # send raw buffers
 rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
 for ii, ev in enumerate(rt_epochs.iter_evoked()):
-    print "Just got epoch %d" % (ii + 1)
+    print("Just got epoch %d" % (ii + 1))
     if ii > 0:
         ev += evoked
     evoked = ev
diff --git a/examples/realtime/plot_compute_rt_decoder.py b/examples/realtime/plot_compute_rt_decoder.py
index 9446b3b..61e566d 100644
--- a/examples/realtime/plot_compute_rt_decoder.py
+++ b/examples/realtime/plot_compute_rt_decoder.py
@@ -11,7 +11,7 @@ accuracy is plotted
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import time
 
@@ -25,7 +25,7 @@ import matplotlib.pyplot as plt
 # Fiff file to simulate the realtime client
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-raw = mne.fiff.Raw(raw_fname, preload=True)
+raw = mne.io.Raw(raw_fname, preload=True)
 
 tmin, tmax = -0.2, 0.5
 event_id = dict(aud_l=1, vis_l=3)
@@ -34,8 +34,8 @@ tr_percent = 60  # Training percentage
 min_trials = 10  # minimum trials after which decoding should start
 
 # select gradiometers
-picks = mne.fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                            stim=True, exclude=raw.info['bads'])
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=True, exclude=raw.info['bads'])
 
 # create the mock-client object
 rt_client = MockRtClient(raw)
@@ -72,7 +72,7 @@ concat_classifier = Pipeline([('filter', filt), ('concat', concatenator),
 
 for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
 
-    print "Just got epoch %d" % (ev_num + 1)
+    print("Just got epoch %d" % (ev_num + 1))
 
     if ev_num == 0:
         X = ev.data[None, ...]
diff --git a/examples/realtime/rt_feedback_client.py b/examples/realtime/rt_feedback_client.py
index 192533d..1362f21 100644
--- a/examples/realtime/rt_feedback_client.py
+++ b/examples/realtime/rt_feedback_client.py
@@ -25,7 +25,7 @@ for a real experiment.
 
 """
 
-print __doc__
+print(__doc__)
 
 # Author: Mainak Jas <mainak at neuro.hut.fi>
 #
diff --git a/examples/realtime/rt_feedback_server.py b/examples/realtime/rt_feedback_server.py
index e103add..0a188ea 100644
--- a/examples/realtime/rt_feedback_server.py
+++ b/examples/realtime/rt_feedback_server.py
@@ -25,7 +25,7 @@ for a real experiment.
 
 """
 
-print __doc__
+print(__doc__)
 
 # Author: Mainak Jas <mainak at neuro.hut.fi>
 #
@@ -51,7 +51,7 @@ from sklearn.metrics import confusion_matrix
 # Load fiff file to simulate data
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-raw = mne.fiff.Raw(raw_fname, preload=True)
+raw = mne.io.Raw(raw_fname, preload=True)
 
 # Instantiating stimulation server
 
@@ -59,8 +59,8 @@ raw = mne.fiff.Raw(raw_fname, preload=True)
 with StimServer('localhost', port=4218) as stim_server:
 
     # The channels to be used while decoding
-    picks = mne.fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                                stim=True, exclude=raw.info['bads'])
+    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=raw.info['bads'])
 
     rt_client = MockRtClient(raw)
 
@@ -119,10 +119,10 @@ with StimServer('localhost', port=4218) as stim_server:
 
             # do something if one class is decoded better than the other
             if score_c1[-1] < score_c2[-1]:
-                print "We decoded class RV better than class LV"
+                print("We decoded class RV better than class LV")
                 ev_list.append(3)  # adding more LV to future simulated data
             else:
-                print "We decoded class LV better than class RV"
+                print("We decoded class LV better than class RV")
                 ev_list.append(4)  # adding more RV to future simulated data
 
             # Clear the figure
diff --git a/examples/stats/plot_cluster_1samp_test_time_frequency.py b/examples/stats/plot_cluster_1samp_test_time_frequency.py
index f4e01fa..e779ec2 100644
--- a/examples/stats/plot_cluster_1samp_test_time_frequency.py
+++ b/examples/stats/plot_cluster_1samp_test_time_frequency.py
@@ -16,16 +16,16 @@ The procedure consists in:
   - compute stats to see if ratio deviates from 1.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 
 import mne
-from mne import fiff
+from mne import io
 from mne.time_frequency import single_trial_power
 from mne.stats import permutation_cluster_1samp_test
 from mne.datasets import sample
@@ -39,15 +39,15 @@ tmin = -0.3
 tmax = 0.6
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.find_events(raw, stim_channel='STI 014')
 
 include = []
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=False, include=include, exclude='bads')
 
 # Load condition 1
 event_id = 1
@@ -127,7 +127,7 @@ vmin = -vmax
 plt.imshow(T_obs, cmap=plt.cm.gray,
            extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
            aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
-plt.imshow(T_obs_plot, cmap=plt.cm.jet,
+plt.imshow(T_obs_plot, cmap=plt.cm.RdBu_r,
            extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
            aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
 plt.colorbar()
diff --git a/examples/stats/plot_cluster_methods_tutorial.py b/examples/stats/plot_cluster_methods_tutorial.py
index 308ea00..32876d8 100644
--- a/examples/stats/plot_cluster_methods_tutorial.py
+++ b/examples/stats/plot_cluster_methods_tutorial.py
@@ -50,7 +50,7 @@ a single, broad cluster.
 # Authors: Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 from scipy import stats
diff --git a/examples/stats/plot_cluster_stats_evoked.py b/examples/stats/plot_cluster_stats_evoked.py
index 649ffd8..c8d1156 100644
--- a/examples/stats/plot_cluster_stats_evoked.py
+++ b/examples/stats/plot_cluster_stats_evoked.py
@@ -9,14 +9,14 @@ with cluster level permutation test.
 
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.stats import permutation_cluster_test
 from mne.datasets import sample
 
@@ -30,7 +30,7 @@ tmin = -0.2
 tmax = 0.5
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 channel = 'MEG 1332'  # include only this channel in analysis
@@ -38,8 +38,8 @@ include = [channel]
 
 ###############################################################################
 # Read epochs for the channel of interest
-picks = fiff.pick_types(raw.info, meg=False, eog=True, include=include,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
+                       exclude='bads')
 event_id = 1
 reject = dict(grad=4000e-13, eog=150e-6)
 epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal.py b/examples/stats/plot_cluster_stats_spatio_temporal.py
index bbad9f9..621a7d8 100644
--- a/examples/stats/plot_cluster_stats_spatio_temporal.py
+++ b/examples/stats/plot_cluster_stats_spatio_temporal.py
@@ -10,11 +10,11 @@ permutation test across space and time.
 
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import os.path as op
 import numpy as np
@@ -22,7 +22,7 @@ from numpy.random import randn
 from scipy import stats as stats
 
 import mne
-from mne import (fiff, spatial_tris_connectivity, compute_morph_matrix,
+from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
                  grade_to_tris)
 from mne.epochs import equalize_epoch_counts
 from mne.stats import (spatio_temporal_cluster_1samp_test,
@@ -42,13 +42,13 @@ tmin = -0.2
 tmax = 0.3  # Use a lower tmax to reduce multiple comparisons
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 ###############################################################################
 # Read epochs for all channels, removing a bad one
 raw.info['bads'] += ['MEG 2443']
-picks = fiff.pick_types(raw.info, meg=True, eog=True, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
 event_id = 1  # L auditory
 reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
 epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -97,7 +97,7 @@ tstep = condition1.tstep
 #    permutation test is only p = 1/(2 ** 6) = 0.015, which is large.
 n_vertices_sample, n_times = condition1.data.shape
 n_subjects = 7
-print 'Simulating data for %d subjects.' % n_subjects
+print('Simulating data for %d subjects.' % n_subjects)
 
 #    Let's make sure our results replicate, so set the seed.
 np.random.seed(0)
@@ -118,7 +118,7 @@ n_vertices_fsave = morph_mat.shape[0]
 
 #    We have to change the shape for the dot() to work properly
 X = X.reshape(n_vertices_sample, n_times * n_subjects * 2)
-print 'Morphing data.'
+print('Morphing data.')
 X = morph_mat.dot(X)  # morph_mat is a sparse matrix
 X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)
 
@@ -134,7 +134,7 @@ X = X[:, :, :, 0] - X[:, :, :, 1]  # make paired contrast
 
 #    To use an algorithm optimized for spatio-temporal clustering, we
 #    just pass the spatial connectivity matrix (instead of spatio-temporal)
-print 'Computing connectivity.'
+print('Computing connectivity.')
 connectivity = spatial_tris_connectivity(grade_to_tris(5))
 
 #    Note that X needs to be a multi-dimensional array of shape
@@ -145,7 +145,7 @@ X = np.transpose(X, [2, 1, 0])
 #    Here we set the threshold quite high to reduce computation.
 p_threshold = 0.001
 t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
-print 'Clustering.'
+print('Clustering.')
 T_obs, clusters, cluster_p_values, H0 = clu = \
     spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=2,
                                        threshold=t_threshold)
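For concreteness, the two-tailed threshold computed above for
p_threshold = 0.001 and n_subjects = 7 comes out near 5.96; a standalone
check using only scipy:

    from scipy import stats
    t_threshold = -stats.distributions.t.ppf(0.001 / 2., 7 - 1)
    print(round(t_threshold, 2))  # ~5.96 -- only strong effects cluster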
@@ -156,7 +156,7 @@ good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
 ###############################################################################
 # Visualize the clusters
 
-print 'Visualizing clusters.'
+print('Visualizing clusters.')
 
 #    Now let's build a convenient representation of each cluster, where each
 #    cluster becomes a "time point" in the SourceEstimate
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py b/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
index e7ac096..3cd5765 100644
--- a/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
+++ b/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
@@ -9,11 +9,11 @@ The multiple comparisons problem is addressed with a cluster-level
 permutation test across space and time.
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import os.path as op
 import numpy as np
@@ -40,7 +40,7 @@ n_vertices_fsave, n_times = stc.data.shape
 tstep = stc.tstep
 
 n_subjects1, n_subjects2 = 7, 9
-print 'Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2)
+print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
 
 #    Let's make sure our results replicate, so set the seed.
 np.random.seed(0)
@@ -59,7 +59,7 @@ X2 = np.abs(X2)  # only magnitude
 
 #    To use an algorithm optimized for spatio-temporal clustering, we
 #    just pass the spatial connectivity matrix (instead of spatio-temporal)
-print 'Computing connectivity.'
+print('Computing connectivity.')
 connectivity = spatial_tris_connectivity(grade_to_tris(5))
 
 #    Note that X needs to be a list of multi-dimensional array of shape
@@ -73,7 +73,7 @@ X = [X1, X2]
 p_threshold = 0.0001
 f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                         n_subjects1 - 1, n_subjects2 - 1)
-print 'Clustering.'
+print('Clustering.')
 T_obs, clusters, cluster_p_values, H0 = clu =\
     spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
                                  threshold=f_threshold)
@@ -84,7 +84,7 @@ good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
 ###############################################################################
 # Visualize the clusters
 
-print 'Visualizing clusters.'
+print('Visualizing clusters.')
 
 #    Now let's build a convenient representation of each cluster, where each
 #    cluster becomes a "time point" in the SourceEstimate
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py b/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
index 94fb0b3..00b4b20 100644
--- a/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
+++ b/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
@@ -14,20 +14,20 @@ comparisons problem is addressed with a cluster-level permutation test
 across space and time.
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Eric Larson <larson.eric.d at gmail.com>
-#          Denis Engemannn <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import os.path as op
 import numpy as np
 from numpy.random import randn
 
 import mne
-from mne import (fiff, spatial_tris_connectivity, compute_morph_matrix,
+from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
                  grade_to_tris)
 from mne.stats import (spatio_temporal_cluster_test, f_threshold_twoway_rm,
                        f_twoway_rm, summarize_clusters_stc)
@@ -46,13 +46,13 @@ tmin = -0.2
 tmax = 0.3  # Use a lower tmax to reduce multiple comparisons
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 ###############################################################################
 # Read epochs for all channels, removing a bad one
 raw.info['bads'] += ['MEG 2443']
-picks = fiff.pick_types(raw.info, meg=True, eog=True, exclude='bads')
+picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
 # we'll load all four conditions that make up the 'two ways' of our ANOVA
 
 event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
@@ -101,7 +101,7 @@ tstep = conditions[0].tstep
 # we'll only consider the left hemisphere in this example.
 n_vertices_sample, n_times = conditions[0].lh_data.shape
 n_subjects = 7
-print 'Simulating data for %d subjects.' % n_subjects
+print('Simulating data for %d subjects.' % n_subjects)
 
 #    Let's make sure our results replicate, so set the seed.
 np.random.seed(0)
@@ -122,7 +122,7 @@ n_vertices_fsave = morph_mat.shape[0]
 
 #    We have to change the shape for the dot() to work properly
 X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
-print 'Morphing data.'
+print('Morphing data.')
 X = morph_mat.dot(X)  # morph_mat is a sparse matrix
 X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
 
@@ -169,9 +169,9 @@ def stat_fun(*args):
     # The following expression catches the list input, swaps the first and the
     # second dimension and puts the remaining observations in the third
     # dimension.
-    data = np.swapaxes(np.asarray(args), 1, 0).reshape(n_subjects,
-                                                       n_conditions, n_times *
-                                                       n_vertices_fsave)
+    data = np.squeeze(np.swapaxes(np.array(args), 1, 0))
+    data = data.reshape(n_subjects, n_conditions,  # generalized if buffer used
+                        data.size // (n_subjects * n_conditions))
     return f_twoway_rm(data, factor_levels=factor_levels, effects=effects,
                        return_pvals=return_pvals)[0]
                        #  drop p-values (empty array).
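The reshape gymnastics in stat_fun can be checked in isolation. A sketch
with small hypothetical dimensions (7 subjects, 4 conditions, 10
features per condition):

    import numpy as np
    n_subjects, n_conditions, n_features = 7, 4, 10
    args = [np.random.randn(n_subjects, n_features)
            for _ in range(n_conditions)]
    data = np.swapaxes(np.asarray(args), 1, 0)
    assert data.shape == (n_subjects, n_conditions, n_features)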
@@ -187,17 +187,18 @@ def stat_fun(*args):
 source_space = grade_to_tris(5)
 # as we only have one hemisphere we only need half the connectivity
 lh_source_space = source_space[source_space[:, 0] < 10242]
-print 'Computing connectivity.'
+print('Computing connectivity.')
 connectivity = spatial_tris_connectivity(lh_source_space)
 
 #    Now let's actually do the clustering. Please relax: on a small
 #    notebook with a single thread this will take a couple of minutes ...
-#    To speed things up a bit we will
-pthresh = 0.001
+pthresh = 0.0005
 f_thresh = f_threshold_twoway_rm(n_subjects, factor_levels, effects, pthresh)
+
+#    To speed things up a bit we will ...
 n_permutations = 100  # ... run fewer permutations (reduces sensitivity)
 
-print 'Clustering.'
+print('Clustering.')
 T_obs, clusters, cluster_p_values, H0 = clu = \
     spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
                                  threshold=f_thresh, stat_fun=stat_fun,
@@ -210,7 +211,7 @@ good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
 ###############################################################################
 # Visualize the clusters
 
-print 'Visualizing clusters.'
+print('Visualizing clusters.')
 
 #    Now let's build a convenient representation of each cluster, where each
 #    cluster becomes a "time point" in the SourceEstimate
@@ -261,10 +262,12 @@ for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
     plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
                      alpha=0.5, label='')
 
+ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
 plt.xlabel('Time (ms)')
 plt.ylabel('Activation (F-values)')
 plt.xlim(times[[0, -1]])
-plt.fill_betweenx(np.arange(*plt.ylim()), times[inds_t[0]],
+plt.ylim(ymin, ymax)
+plt.fill_betweenx(np.arange(ymin, ymax), times[inds_t[0]],
                   times[inds_t[-1]], color='orange', alpha=0.3)
 plt.legend()
 plt.title('Interaction between stimulus-modality and location.')
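The reshape inside the new stat_fun is the subtle part of this hunk: f_twoway_rm wants its input laid out as subjects x conditions x observations, while the clustering routine hands the function a list of per-condition arrays. A minimal NumPy sketch with dummy sizes (7 subjects, a 2 x 2 design, 20 space-time points; nothing here touches the sample data):

    import numpy as np
    from mne.stats import f_twoway_rm

    # one (n_subjects, n_observations) array per cell of the 2 x 2 design
    args = [np.random.randn(7, 20) for _ in range(4)]
    data = np.swapaxes(np.asarray(args), 1, 0)  # -> (7, 4, 20)
    data = data.reshape(7, 4, -1)  # subjects x conditions x observations
    fvals = f_twoway_rm(data, factor_levels=[2, 2], effects='A:B')[0]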
diff --git a/examples/stats/plot_cluster_stats_time_frequency.py b/examples/stats/plot_cluster_stats_time_frequency.py
index 812cbde..1deb208 100644
--- a/examples/stats/plot_cluster_stats_time_frequency.py
+++ b/examples/stats/plot_cluster_stats_time_frequency.py
@@ -17,16 +17,16 @@ The procedure consists in:
     between conditions.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 
 import mne
-from mne import fiff
+from mne import io
 from mne.time_frequency import single_trial_power
 from mne.stats import permutation_cluster_test
 from mne.datasets import sample
@@ -41,15 +41,15 @@ tmin = -0.2
 tmax = 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 include = []
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=False, include=include, exclude='bads')
 
 ch_name = raw.info['ch_names'][picks[0]]
 
diff --git a/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py b/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py
index 2a48d71..ba2da24 100644
--- a/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py
+++ b/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py
@@ -18,18 +18,18 @@ performing a permutation clustering test using the ANOVA as
 clustering function. The final results will be compared to
 multiple comparisons using False Discovery Rate correction.
 """
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #          Eric Larson <larson.eric.d at gmail.com>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 
 import mne
-from mne import fiff
+from mne import io
 from mne.time_frequency import single_trial_power
 from mne.stats import f_threshold_twoway_rm, f_twoway_rm, fdr_correction
 from mne.datasets import sample
@@ -44,15 +44,15 @@ tmin = -0.2
 tmax = 0.5
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 include = []
 raw.info['bads'] += ['MEG 2443']  # bads
 
 # picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=False, include=include, exclude='bads')
 
 ch_name = raw.info['ch_names'][picks[0]]
 
@@ -114,7 +114,7 @@ data = np.swapaxes(np.asarray(epochs_power), 1, 0)
 data = data.reshape(n_replications, n_conditions, n_frequencies * n_times)
 
 # so we have replications * conditions * observations:
-print data.shape
+print(data.shape)
 
 # while the iteration scheme used above for assembling the data matrix
 # makes sure the first two dimensions are organized as expected (with A =
diff --git a/examples/stats/plot_fdr_stats_evoked.py b/examples/stats/plot_fdr_stats_evoked.py
index 5484f64..bda24d4 100644
--- a/examples/stats/plot_fdr_stats_evoked.py
+++ b/examples/stats/plot_fdr_stats_evoked.py
@@ -9,16 +9,16 @@ False Discovery Rate (FDR) correction.
 
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 from scipy import stats
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 from mne.stats import bonferroni_correction, fdr_correction
 
@@ -30,7 +30,7 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
 event_id, tmin, tmax = 1, -0.2, 0.5
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)[:30]
 
 channel = 'MEG 1332'  # include only this channel in analysis
@@ -38,8 +38,8 @@ include = [channel]
 
 ###############################################################################
 # Read epochs for the channel of interest
-picks = fiff.pick_types(raw.info, meg=False, eog=True, include=include,
-                        exclude='bads')
+picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
+                       exclude='bads')
 event_id = 1
 reject = dict(grad=4000e-13, eog=150e-6)
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
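Since the point of this example is the contrast between the two corrections, a toy comparison may help; both helpers are exactly the ones the script imports above (synthetic p-values, not the sample data):

    import numpy as np
    from mne.stats import bonferroni_correction, fdr_correction

    rng = np.random.RandomState(42)
    p_vals = rng.uniform(size=100) ** 3  # push a few values toward zero
    reject_bonf, _ = bonferroni_correction(p_vals, alpha=0.05)
    reject_fdr, _ = fdr_correction(p_vals, alpha=0.05, method='indep')
    print(reject_bonf.sum(), reject_fdr.sum())  # FDR usually rejects more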
diff --git a/examples/stats/plot_sensor_permutation_test.py b/examples/stats/plot_sensor_permutation_test.py
index 4597eaa..51614cf 100644
--- a/examples/stats/plot_sensor_permutation_test.py
+++ b/examples/stats/plot_sensor_permutation_test.py
@@ -9,16 +9,16 @@ is performed on MNE sample dataset between 40 and 60 ms.
 
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 
 import mne
-from mne import fiff
+from mne import io
 from mne.stats import permutation_t_test
 from mne.datasets import sample
 
@@ -32,7 +32,7 @@ tmin = -0.2
 tmax = 0.5
 
 #   Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 #   Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
@@ -40,48 +40,36 @@ include = []  # or stim channel ['STI 014']
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # pick MEG Gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
-                        include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+                       include=include, exclude='bads')
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
 data = epochs.get_data()
 times = epochs.times
 
 temporal_mask = np.logical_and(0.04 <= times, times <= 0.06)
-data = np.squeeze(np.mean(data[:, :, temporal_mask], axis=2))
+data = np.mean(data[:, :, temporal_mask], axis=2)
 
 n_permutations = 50000
 T0, p_values, H0 = permutation_t_test(data, n_permutations, n_jobs=2)
 
 significant_sensors = picks[p_values <= 0.05]
-significant_sensors_names = [raw.info['ch_names'][k]
-                             for k in significant_sensors]
+significant_sensors_names = [raw.ch_names[k] for k in significant_sensors]
 
-print "Number of significant sensors : %d" % len(significant_sensors)
-print "Sensors names : %s" % significant_sensors_names
+print("Number of significant sensors : %d" % len(significant_sensors))
+print("Sensors names : %s" % significant_sensors_names)
 
 ###############################################################################
 # View location of significantly active sensors
-import matplotlib.pyplot as plt
 
-# load sensor layout
-layout = mne.find_layout(epochs.info)
+evoked = mne.EvokedArray(-np.log10(p_values)[:, np.newaxis],
+                         epochs.info, tmin=0.)
 
+# Extract a mask and the indices of the significant sensors
-idx_of_sensors = [layout.names.index(name)
-                  for name in significant_sensors_names
-                  if name in layout.names]
-mask_significant_sensors = np.zeros(len(layout.pos), dtype=np.bool)
-mask_significant_sensors[idx_of_sensors] = True
-mask_non_significant_sensors = mask_significant_sensors == False
-
-# plot it
-plt.figure(figsize=(5, 3.5), facecolor='k')
-plt.axis('off')
-plt.scatter(layout.pos[mask_significant_sensors, 0],
-            layout.pos[mask_significant_sensors, 1], s=50, c='r')
-plt.scatter(layout.pos[mask_non_significant_sensors, 0],
-            layout.pos[mask_non_significant_sensors, 1], c='w')
-title = 'Left auditory between 40 and 60 ms'
-plt.figtext(0.03, 0.93, title, color='w', fontsize=18)
-plt.show()
+stats_picks = mne.pick_channels(evoked.ch_names, significant_sensors_names)
+mask = p_values[:, np.newaxis] <= 0.05
+
+evoked.plot_topomap(ch_type='grad', times=[0], scale=1, time_format=None,
+                    cmap='Reds', vmin=0., vmax=np.max,
+                    unit='-log10(p)', format='-%0.1f', mask=mask,
+                    size=3, show_names=lambda x: x[4:] + ' ' * 20)
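For readers new to permutation_t_test, the underlying sign-flip scheme is easy to state for a single sensor; a conceptual sketch on toy data (the real function handles all sensors jointly and controls the family-wise error across them):

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.randn(30) + 0.4  # 30 trials with a true positive shift
    t_obs = x.mean() / (x.std(ddof=1) / np.sqrt(x.size))
    t_perm = np.empty(1000)
    for ii in range(1000):
        xf = x * rng.choice([-1., 1.], size=x.size)  # flip trial signs
        t_perm[ii] = xf.mean() / (xf.std(ddof=1) / np.sqrt(x.size))
    p_val = (np.abs(t_perm) >= abs(t_obs)).mean()  # two-sided p-value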
diff --git a/examples/stats/plot_sensor_regression.py b/examples/stats/plot_sensor_regression.py
new file mode 100644
index 0000000..26abe37
--- /dev/null
+++ b/examples/stats/plot_sensor_regression.py
@@ -0,0 +1,79 @@
+"""
+====================================================================
+Sensor space least squares regression
+====================================================================
+
+Predict single trial activity from a continuous variable.
+A single-trial regression is performed at each sensor and timepoint
+individually, resulting in an Evoked object which contains the
+regression coefficient (beta value) for each combination of sensor
+and timepoint. The example also shows the t statistics and the associated
+p-values.
+
+Note that this example is for educational purposes and that the data used
+here do not contain any significant effect.
+
+(See Hauk et al. (2006). The time course of visual word recognition as
+revealed by linear regression analysis of ERP data. Neuroimage.)
+"""
+# Authors: Tal Linzen <linzen at nyu.edu>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+print(__doc__)
+
+import numpy as np
+
+import mne
+from mne.datasets import sample
+from mne.stats.regression import linear_regression
+
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters and read data
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, aud_r=2)
+
+# Setup for reading the raw data
+raw = mne.io.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False,
+                       eog=False, exclude='bads')
+
+# Reject some epochs based on amplitude
+reject = dict(mag=5e-12)
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=(None, 0), preload=True,
+                    reject=reject)
+
+###############################################################################
+# Run regression
+
+names = ['intercept', 'trial-count']
+
+intercept = np.ones((len(epochs),), dtype=np.float)
+design_matrix = np.column_stack([intercept,  # intercept
+                                 np.linspace(0, 1, len(intercept))])
+
+# also accepts source estimates
+lm = linear_regression(epochs, design_matrix, names)
+
+
+def plot_topomap(x, unit):
+    x.plot_topomap(ch_type='mag', scale=1, size=1.5, vmax=np.max, unit=unit,
+                   times=np.linspace(0.1, 0.2, 5))
+
+trial_count = lm['trial-count']
+
+plot_topomap(trial_count.beta, unit='z (beta)')
+
+plot_topomap(trial_count.t_val, unit='t')
+
+plot_topomap(trial_count.mlog10_p_val, unit='-log10 p')
+
+plot_topomap(trial_count.stderr, unit='z (error)')
diff --git a/examples/time_frequency/plot_compute_raw_data_spectrum.py b/examples/time_frequency/plot_compute_raw_data_spectrum.py
index 5b1fc85..4eeb6a2 100644
--- a/examples/time_frequency/plot_compute_raw_data_spectrum.py
+++ b/examples/time_frequency/plot_compute_raw_data_spectrum.py
@@ -7,17 +7,18 @@ This script shows how to compute the power spectral density (PSD)
 of measurements on a raw dataset. It also shows the effect of applying SSP
 to the data to reduce ECG and EOG artifacts.
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 
-from mne import fiff, read_proj, read_selection
+import mne
+from mne import io, read_proj, read_selection
 from mne.datasets import sample
 
 ###############################################################################
@@ -27,7 +28,7 @@ raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif'
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname, preload=True)
+raw = io.Raw(raw_fname, preload=True)
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # Add SSP projection vectors to reduce EOG and ECG artifacts
@@ -37,7 +38,7 @@ raw.add_proj(projs, remove_existing=True)
 
 tmin, tmax = 0, 60  # use the first 60s of data
 fmin, fmax = 2, 300  # look at frequencies between 2 and 300Hz
-n_fft = 2048  # the FFT size (NFFT). Ideally a power of 2
+n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
 
 plt.ion()
 
@@ -47,7 +48,7 @@ raw.plot_psds(area_mode='range', tmax=10.0)
 # Now let's focus on a smaller subset:
 # Pick MEG magnetometers in the Left-temporal region
 selection = read_selection('Left-temporal')
-picks = fiff.pick_types(raw.info, meg='mag', eeg=False, eog=False,
+picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
                         stim=False, exclude='bads', selection=selection)
 
 # Let's just look at the first few channels for demonstration purposes
diff --git a/examples/time_frequency/plot_compute_source_psd_epochs.py b/examples/time_frequency/plot_compute_source_psd_epochs.py
index 39583f0..f3d2952 100644
--- a/examples/time_frequency/plot_compute_source_psd_epochs.py
+++ b/examples/time_frequency/plot_compute_source_psd_epochs.py
@@ -13,13 +13,13 @@ Discrete Prolate Spheroidal Sequence (DPSS) windows.
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne.io import Raw
 from mne.minimum_norm import read_inverse_operator, compute_source_psd_epochs
 
 
@@ -46,7 +46,7 @@ include = []
 raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick MEG channels
-picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                    include=include, exclude='bads')
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
diff --git a/examples/time_frequency/plot_single_trial_spectra.py b/examples/time_frequency/plot_single_trial_spectra.py
index 0213f9f..e20cded 100644
--- a/examples/time_frequency/plot_single_trial_spectra.py
+++ b/examples/time_frequency/plot_single_trial_spectra.py
@@ -8,17 +8,17 @@ compute average spectra to identify channels and
 frequencies of interest for subsequent TFR analyses.
 """
 
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 from mne.time_frequency import compute_epochs_psd
 ###############################################################################
@@ -28,7 +28,7 @@ raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 tmin, tmax, event_id = -1., 1., 1
@@ -36,8 +36,8 @@ include = []
 raw.info['bads'] += ['MEG 2443']  # bads
 
 # picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=False, include=include, exclude='bads')
 
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, proj=True,
                     baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
@@ -82,3 +82,15 @@ plt.show()
 # The ``plot_time_frequency.py`` example investigates one of the channels
 # around index 140.
 # Finally, also note the power line artifacts across all channels.
+
+# Now let's take a look at the spatial distributions of the lower frequencies.
+# Note: we're 'abusing' the Evoked.plot_topomap method here to display
+# our average power map.
+
+evoked = epochs.average()  # create evoked
+evoked.data = average_psds[:, freq_mask]  # insert our psd data
+evoked.times = freqs  # replace times with frequencies.
+evoked.plot_topomap(ch_type='grad', times=range(5, 12, 2),
+                    scale=1, scale_time=1, time_format='%0.1f Hz',
+                    cmap='Reds', vmin=np.min, vmax=np.max,
+                    unit='dB', format='-%0.1f')
diff --git a/examples/time_frequency/plot_source_label_time_frequency.py b/examples/time_frequency/plot_source_label_time_frequency.py
index b0e6553..6a39259 100644
--- a/examples/time_frequency/plot_source_label_time_frequency.py
+++ b/examples/time_frequency/plot_source_label_time_frequency.py
@@ -11,16 +11,16 @@ when they are computed with and without subtracting the evoked response
 from each epoch. The former results in induced activity only while the
 latter also includes evoked (stimulus-locked) activity.
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, source_induced_power
 
@@ -35,7 +35,7 @@ fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
 tmin, tmax, event_id = -0.2, 0.5, 2
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.find_events(raw, stim_channel='STI 014')
 inverse_operator = read_inverse_operator(fname_inv)
 
@@ -43,7 +43,7 @@ include = []
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # Picks MEG channels
-picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                         stim=False, include=include, exclude='bads')
 reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
 
diff --git a/examples/time_frequency/plot_source_power_spectrum.py b/examples/time_frequency/plot_source_power_spectrum.py
index d761cd1..a998744 100644
--- a/examples/time_frequency/plot_source_power_spectrum.py
+++ b/examples/time_frequency/plot_source_power_spectrum.py
@@ -6,14 +6,14 @@ Compute power spectrum densities of the sources with dSPM
 Returns an STC file containing the PSD (in dB) of each of the sources.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, compute_source_psd
 
@@ -25,23 +25,23 @@ fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
 fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname, verbose=False)
+raw = io.Raw(raw_fname, verbose=False)
 events = mne.find_events(raw, stim_channel='STI 014')
 inverse_operator = read_inverse_operator(fname_inv)
 raw.info['bads'] = ['MEG 2443', 'EEG 053']
 
 # picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                         stim=False, exclude='bads')
 
 tmin, tmax = 0, 120  # use the first 120s of data
 fmin, fmax = 4, 100  # look at frequencies between 4 and 100Hz
-NFFT = 2048  # the FFT size (NFFT). Ideally a power of 2
+n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
 label = mne.read_label(fname_label)
 
 stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
                          tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
-                         pick_ori="normal", NFFT=NFFT, label=label)
+                         pick_ori="normal", n_fft=n_fft, label=label)
 
 stc.save('psd_dSPM')
 
diff --git a/examples/time_frequency/plot_source_space_time_frequency.py b/examples/time_frequency/plot_source_space_time_frequency.py
index 33b6002..86ed0da 100644
--- a/examples/time_frequency/plot_source_space_time_frequency.py
+++ b/examples/time_frequency/plot_source_space_time_frequency.py
@@ -8,14 +8,14 @@ for different bands in the source space. The inverse method
 is linear, based on the dSPM inverse operator.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import mne
-from mne import fiff
+from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, source_band_induced_power
 
@@ -27,7 +27,7 @@ fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
 tmin, tmax, event_id = -0.2, 0.5, 1
 
 # Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
+raw = io.Raw(raw_fname)
 events = mne.find_events(raw, stim_channel='STI 014')
 inverse_operator = read_inverse_operator(fname_inv)
 
@@ -35,7 +35,7 @@ include = []
 raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                         stim=False, include=include, exclude='bads')
 
 # Load condition 1
diff --git a/examples/time_frequency/plot_temporal_whitening.py b/examples/time_frequency/plot_temporal_whitening.py
index 1185955..ca92eec 100644
--- a/examples/time_frequency/plot_temporal_whitening.py
+++ b/examples/time_frequency/plot_temporal_whitening.py
@@ -7,11 +7,11 @@ This script shows how to fit an AR model to data and use it
 to temporally whiten the signals.
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print __doc__
+print(__doc__)
 
 import numpy as np
 from scipy import signal
@@ -25,13 +25,13 @@ data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 proj_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
 
-raw = mne.fiff.Raw(raw_fname)
+raw = mne.io.Raw(raw_fname)
 proj = mne.read_proj(proj_fname)
 raw.info['projs'] += proj
 raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels
 
 # Set up pick list: Gradiometers - bad channels
-picks = mne.fiff.pick_types(raw.info, meg='grad', exclude='bads')
+picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
 
 order = 5  # define model order
 picks = picks[:5]
diff --git a/examples/time_frequency/plot_tfr_topography.py b/examples/time_frequency/plot_tfr_topography.py
deleted file mode 100644
index bf2c75b..0000000
--- a/examples/time_frequency/plot_tfr_topography.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-===================================================================
-Plot time-frequency representations on topographies for MEG sensors
-===================================================================
-
-Both induced power and phase locking values are displayed.
-"""
-print __doc__
-
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
-#
-# License: BSD (3-clause)
-
-import numpy as np
-import matplotlib.pyplot as plt
-import mne
-from mne import fiff
-from mne.time_frequency import induced_power
-from mne.viz import plot_topo_power, plot_topo_phase_lock
-from mne.datasets import sample
-
-data_path = sample.data_path()
-
-raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
-event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
-event_id, tmin, tmax = 1, -0.2, 0.5
-
-# Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
-events = mne.read_events(event_fname)
-
-include = []
-raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
-
-# picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
-
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
-data = epochs.get_data()  # as 3D matrix
-
-layout = mne.find_layout(epochs.info, 'meg')
-
-###############################################################################
-# Calculate power and phase locking value
-
-frequencies = np.arange(7, 30, 3)  # define frequencies of interest
-n_cycles = frequencies / float(7)  # different number of cycle per frequency
-Fs = raw.info['sfreq']  # sampling in Hz
-decim = 3
-power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
-                                  n_cycles=n_cycles, n_jobs=1, use_fft=False,
-                                  decim=decim, zero_mean=True)
-
-###############################################################################
-# Prepare topography plots, set baseline correction parameters
-
-baseline = (None, 0)  # set the baseline for induced power
-mode = 'ratio'  # set mode for baseline rescaling
-
-###############################################################################
-# Show topography of power.
-
-title = 'Induced power - MNE sample data'
-plot_topo_power(epochs, power, frequencies, layout, baseline=baseline,
-                mode=mode, decim=decim, vmin=0., vmax=14, title=title)
-plt.show()
-
-###############################################################################
-# Show topography of phase locking value (PLV)
-
-mode = None  # no baseline rescaling for PLV
-
-title = 'Phase locking value - MNE sample data'
-plot_topo_phase_lock(epochs, phase_lock, frequencies, layout,
-                     baseline=baseline, mode=mode, decim=decim, title=title)
-
-plt.show()
diff --git a/examples/time_frequency/plot_time_frequency.py b/examples/time_frequency/plot_time_frequency.py
deleted file mode 100644
index 98248b1..0000000
--- a/examples/time_frequency/plot_time_frequency.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""
-=========================================================
-Time frequency : Induced power and inter-trial phase-lock
-=========================================================
-
-This script shows how to compute induced power and inter-trial
-phase-lock for a list of epochs read in a raw file given
-a list of events.
-
-"""
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#
-# License: BSD (3-clause)
-
-print __doc__
-
-import numpy as np
-
-import mne
-from mne import fiff
-from mne.time_frequency import induced_power
-from mne.datasets import sample
-
-###############################################################################
-# Set parameters
-data_path = sample.data_path()
-raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
-event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
-event_id, tmin, tmax = 1, -0.2, 0.5
-
-# Setup for reading the raw data
-raw = fiff.Raw(raw_fname)
-events = mne.read_events(event_fname)
-
-include = []
-raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
-
-# picks MEG gradiometers
-picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
-
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
-data = epochs.get_data()  # as 3D matrix
-evoked = epochs.average()  # compute evoked fields
-
-times = 1e3 * epochs.times  # change unit to ms
-evoked_data = evoked.data * 1e13  # change unit to fT / cm
-
-# Take only one channel
-data = data[:, 97:98, :]
-evoked_data = evoked_data[97:98, :]
-
-frequencies = np.arange(7, 30, 3)  # define frequencies of interest
-n_cycles = frequencies / float(7)  # different number of cycle per frequency
-Fs = raw.info['sfreq']  # sampling in Hz
-decim = 3
-power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
-                                  n_cycles=n_cycles, n_jobs=1, use_fft=False,
-                                  decim=decim, zero_mean=True)
-
-# baseline corrections with ratio
-power /= np.mean(power[:, :, times[::decim] < 0], axis=2)[:, :, None]
-
-###############################################################################
-# View time-frequency plots
-import matplotlib.pyplot as plt
-plt.clf()
-plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.63)
-plt.subplot(3, 1, 1)
-plt.plot(times, evoked_data.T)
-plt.title('Evoked response (%s)' % evoked.ch_names[97])
-plt.xlabel('time (ms)')
-plt.ylabel('Magnetic Field (fT/cm)')
-plt.xlim(times[0], times[-1])
-plt.ylim(-150, 300)
-
-plt.subplot(3, 1, 2)
-plt.imshow(20 * np.log10(power[0]),
-           extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
-           aspect='auto', origin='lower')
-plt.xlabel('Time (s)')
-plt.ylabel('Frequency (Hz)')
-plt.title('Induced power (%s)' % evoked.ch_names[97])
-plt.colorbar()
-
-plt.subplot(3, 1, 3)
-plt.imshow(phase_lock[0],
-           extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
-           aspect='auto', origin='lower')
-plt.xlabel('Time (s)')
-plt.ylabel('Frequency (Hz)')
-plt.title('Phase-lock (%s)' % evoked.ch_names[97])
-plt.colorbar()
-plt.show()
diff --git a/examples/time_frequency/plot_time_frequency_sensors.py b/examples/time_frequency/plot_time_frequency_sensors.py
new file mode 100644
index 0000000..39a6431
--- /dev/null
+++ b/examples/time_frequency/plot_time_frequency_sensors.py
@@ -0,0 +1,65 @@
+"""
+==============================================================
+Time-frequency representations on topographies for MEG sensors
+==============================================================
+
+Both average power and intertrial coherence are displayed.
+"""
+print(__doc__)
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import mne
+from mne import io
+from mne.time_frequency import tfr_morlet
+from mne.datasets import somato
+
+###############################################################################
+# Set parameters
+data_path = somato.data_path()
+raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
+event_id, tmin, tmax = 1, -1., 3.
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname)
+baseline = (None, 0)
+events = mne.find_events(raw, stim_channel='STI 014')
+
+# picks MEG gradiometers
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6))
+
+###############################################################################
+# Calculate power and intertrial coherence
+
+freqs = np.arange(6, 30, 3)  # define frequencies of interest
+n_cycles = freqs / 2.  # different number of cycles per frequency
+power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=False,
+                        return_itc=True, decim=3, n_jobs=1)
+
+# Baseline correction can be applied to the power estimates or done in the plots.
+# To illustrate baseline correction in the plots, the next line is commented out:
+# power.apply_baseline(baseline=(-0.5, 0), mode='logratio')
+
+# Inspect power
+power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
+power.plot([82], baseline=(-0.5, 0), mode='logratio')
+
+import matplotlib.pyplot as plt
+fig, axis = plt.subplots(1, 2, figsize=(7, 4))
+power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
+                   baseline=(-0.5, 0), mode='logratio', axes=axis[0],
+                   title='Alpha', vmin=-0.45, vmax=0.45)
+power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
+                   baseline=(-0.5, 0), mode='logratio', axes=axis[1],
+                   title='Beta', vmin=-0.45, vmax=0.45)
+mne.viz.tight_layout()
+
+# Inspect ITC
+itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
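The two quantities the new example plots reduce to a few lines of NumPy for a single channel and frequency; a toy sketch (synthetic data; tfr_morlet itself handles the full wavelet family, FFT convolution and decimation):

    import numpy as np

    sfreq, freq = 200., 10.  # sampling rate and frequency of interest (Hz)
    t = np.arange(-0.5, 0.5, 1. / sfreq)
    sigma = (freq / 2.) / (2. * np.pi * freq)  # width for n_cycles = freq / 2
    wavelet = np.exp(2j * np.pi * freq * t - t ** 2 / (2. * sigma ** 2))
    trials = np.random.randn(20, 600)  # 20 trials of one channel
    coefs = np.array([np.convolve(tr, wavelet, mode='same') for tr in trials])
    power = np.mean(np.abs(coefs) ** 2, axis=0)  # average power
    itc = np.abs(np.mean(coefs / np.abs(coefs), axis=0))  # intertrial coherence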
diff --git a/mne/__init__.py b/mne/__init__.py
index 7b504e0..6196bfd 100644
--- a/mne/__init__.py
+++ b/mne/__init__.py
@@ -1,13 +1,18 @@
 """MNE for MEG and EEG data analysis
 """
 
-__version__ = '0.7.1'
+__version__ = '0.8'
 
 # have to import verbose first since it's needed by many things
 from .utils import (set_log_level, set_log_file, verbose, set_config,
                     get_config, get_config_path, set_cache_dir,
                     set_memmap_min_size)
-
+from .io.pick import (pick_types, pick_channels, pick_types_evoked,
+                      pick_channels_regexp, pick_channels_forward,
+                      pick_types_forward, pick_channels_cov,
+                      pick_channels_evoked, pick_info)
+from .io.base import concatenate_raws, get_chpi_positions
+from .io.meas_info import create_info
 from .cov import (read_cov, write_cov, Covariance,
                   compute_covariance, compute_raw_data_covariance,
                   whiten_evoked)
@@ -17,7 +22,7 @@ from .event import (read_events, write_events, find_events, merge_events,
 from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
                       do_forward_solution, average_forward_solutions,
                       write_forward_solution, make_forward_solution,
-                      convert_forward_solution)
+                      convert_forward_solution, make_field_map)
 from .source_estimate import (read_source_estimate,
                               SourceEstimate, VolSourceEstimate, morph_data,
                               morph_data_precomputed, compute_morph_matrix,
@@ -31,24 +36,31 @@ from .source_estimate import (read_source_estimate,
                               save_stc_as_volume, extract_label_time_course)
 from .surface import (read_bem_surfaces, read_surface, write_bem_surface,
                       write_surface, decimate_surface, read_morph_map,
-                      read_bem_solution)
+                      read_bem_solution, get_head_surf,
+                      get_meg_helmet_surf)
 from .source_space import (read_source_spaces, vertex_to_mni,
                            write_source_spaces, setup_source_space,
                            setup_volume_source_space,
                            add_source_space_distances)
-from .epochs import Epochs, read_epochs
+from .epochs import Epochs, EpochsArray, read_epochs
+from .evoked import (Evoked, EvokedArray, read_evoked, write_evoked,
+                     read_evokeds, write_evokeds)
 from .label import (label_time_courses, read_label, label_sign_flip,
-                    write_label, stc_to_label, grow_labels, Label,
-                    BiHemiLabel, labels_from_parc, parc_from_labels)
+                    write_label, stc_to_label, grow_labels, Label, split_label,
+                    BiHemiLabel, labels_from_parc, parc_from_labels,
+                    read_labels_from_annot, write_labels_to_annot)
 from .misc import parse_config, read_reject_parameters
-from .coreg import (create_default_subject, scale_mri, scale_labels,
+from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
                     scale_source_space)
-from .transforms import transform_coordinates, read_trans, write_trans
+from .transforms import (transform_coordinates, read_trans, write_trans,
+                         transform_surface_to)
 from .proj import (read_proj, write_proj, compute_proj_epochs,
                    compute_proj_evoked, compute_proj_raw, sensitivity_map)
 from .selection import read_selection
 from .dipole import read_dip
 from .layouts.layout import find_layout
+from .channels import (equalize_channels, rename_channels,
+                       read_ch_connectivity)
 
 from . import beamformer
 from . import connectivity
@@ -56,7 +68,9 @@ from . import coreg
 from . import cuda
 from . import datasets
 from . import epochs
-from . import fiff
+from . import externals
+from . import fiff  # XXX : to be deprecated in 0.9
+from . import io
 from . import filter
 from . import gui
 from . import layouts
@@ -65,7 +79,6 @@ from . import mixed_norm
 from . import preprocessing
 from . import simulation
 from . import stats
-from . import tests
 from . import time_frequency
 from . import viz
 from . import decoding
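For downstream scripts, the practical upshot of this __init__.py hunk is the fiff -> io rename plus the promotion of the pick_* helpers to the top level; mne.fiff survives as an alias until 0.9, per the XXX comment above. A before/after sketch (the file name is a stand-in for any FIF recording):

    import mne

    # 0.7 spelling, still working through the alias:
    # raw = mne.fiff.Raw('some_raw.fif')
    # picks = mne.fiff.pick_types(raw.info, meg=True)

    # 0.8 spelling:
    raw = mne.io.Raw('some_raw.fif')
    picks = mne.pick_types(raw.info, meg=True)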
diff --git a/mne/_hdf5.py b/mne/_hdf5.py
new file mode 100644
index 0000000..738ec3e
--- /dev/null
+++ b/mne/_hdf5.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from os import path as op
+
+from .utils import _check_pytables
+from .externals.six import string_types, text_type
+
+
+##############################################################################
+# WRITE
+
+def write_hdf5(fname, data, overwrite=False):
+    """Write python object to HDF5 format using Pytables
+
+    Parameters
+    ----------
+    fname : str
+        Filename to use.
+    data : object
+        Object to write. Can be of any of these types:
+            {ndarray, dict, list, tuple, int, float, str}
+        Note that dict objects must only have ``str`` keys.
+    overwrite : bool
+        If True, overwrite file (if it exists).
+    """
+    tb = _check_pytables()
+    if op.isfile(fname) and not overwrite:
+        raise IOError('file "%s" exists, use overwrite=True to overwrite'
+                      % fname)
+    o_f = tb.open_file if hasattr(tb, 'open_file') else tb.openFile
+    with o_f(fname, mode='w') as fid:
+        if hasattr(fid, 'create_group'):
+            c_g = fid.create_group
+            c_t = fid.create_table
+            c_c_a = fid.create_carray
+        else:
+            c_g = fid.createGroup
+            c_t = fid.createTable
+            c_c_a = fid.createCArray
+        filters = tb.Filters(complib='zlib', complevel=5)
+        write_params = (c_g, c_t, c_c_a, filters)
+        _triage_write('mnepython', data, fid.root, *write_params)
+
+
+def _triage_write(key, value, root, *write_params):
+    tb = _check_pytables()
+    create_group, create_table, create_c_array, filters = write_params
+    if isinstance(value, dict):
+        sub_root = create_group(root, key, 'dict')
+        for key, sub_value in value.items():
+            if not isinstance(key, string_types):
+                raise TypeError('All dict keys must be strings')
+            _triage_write('key_{0}'.format(key), sub_value, sub_root,
+                          *write_params)
+    elif isinstance(value, (list, tuple)):
+        title = 'list' if isinstance(value, list) else 'tuple'
+        sub_root = create_group(root, key, title)
+        for vi, sub_value in enumerate(value):
+            _triage_write('idx_{0}'.format(vi), sub_value, sub_root,
+                          *write_params)
+    elif isinstance(value, type(None)):
+        atom = tb.BoolAtom()
+        s = create_c_array(root, key, atom, (1,), title='None',
+                           filters=filters)
+        s[:] = False
+    elif isinstance(value, (int, float)):
+        if isinstance(value, int):
+            title = 'int'
+        else:  # isinstance(value, float):
+            title = 'float'
+        value = np.atleast_1d(value)
+        atom = tb.Atom.from_dtype(value.dtype)
+        s = create_c_array(root, key, atom, (1,),
+                           title=title, filters=filters)
+        s[:] = value
+    elif isinstance(value, string_types):
+        atom = tb.UInt8Atom()
+        if isinstance(value, text_type):  # unicode
+            value = np.fromstring(value.encode('utf-8'), np.uint8)
+            title = 'unicode'
+        else:
+            value = np.fromstring(value.encode('ASCII'), np.uint8)
+            title = 'ascii'
+        s = create_c_array(root, key, atom, (len(value),), title=title,
+                           filters=filters)
+        s[:] = value
+    elif isinstance(value, np.ndarray):
+        atom = tb.Atom.from_dtype(value.dtype)
+        s = create_c_array(root, key, atom, value.shape,
+                           title='ndarray', filters=filters)
+        s[:] = value
+    else:
+        raise TypeError('unsupported type %s' % type(value))
+
+
+##############################################################################
+# READ
+
+def read_hdf5(fname):
+    """Read python object from HDF5 format using Pytables
+
+    Parameters
+    ----------
+    fname : str
+        File to load.
+
+    Returns
+    -------
+    data : object
+        The loaded data. Can be of any type supported by ``write_hdf5``.
+    """
+    tb = _check_pytables()
+    if not op.isfile(fname):
+        raise IOError('file "%s" not found' % fname)
+    o_f = tb.open_file if hasattr(tb, 'open_file') else tb.openFile
+    with o_f(fname, mode='r') as fid:
+        if not hasattr(fid.root, 'mnepython'):
+            raise TypeError('no mne-python data found')
+        data = _triage_read(fid.root.mnepython)
+    return data
+
+
+def _triage_read(node):
+    tb = _check_pytables()
+    type_str = node._v_title
+    if isinstance(node, tb.Group):
+        if type_str == 'dict':
+            data = dict()
+            for subnode in node:
+                key = subnode._v_name[4:]  # cut off "idx_" or "key_" prefix
+                data[key] = _triage_read(subnode)
+        elif type_str in ['list', 'tuple']:
+            data = list()
+            ii = 0
+            while True:
+                subnode = getattr(node, 'idx_{0}'.format(ii), None)
+                if subnode is None:
+                    break
+                data.append(_triage_read(subnode))
+                ii += 1
+            assert len(data) == ii
+            data = tuple(data) if type_str == 'tuple' else data
+            return data
+        else:
+            raise NotImplementedError('Unknown group type: {0}'
+                                      ''.format(type_str))
+    elif type_str == 'ndarray':
+        data = np.array(node)
+    elif type_str in ('int', 'float'):
+        if type_str == 'int':
+            cast = int
+        else:  # type_str == 'float':
+            cast = float
+        data = cast(np.array(node)[0])
+    elif type_str in ('unicode', 'ascii'):
+        decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
+        cast = text_type if type_str == 'unicode' else str
+        data = cast(np.array(node).tostring().decode(decoder))
+    elif type_str == 'None':
+        data = None
+    else:
+        raise TypeError('Unknown node type: {0}'.format(type_str))
+    return data
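A round-trip sketch for the two new helpers (PyTables must be installed; the file name is arbitrary, and since the module is private the import path may change in later releases):

    import numpy as np
    from mne._hdf5 import write_hdf5, read_hdf5

    obj = dict(version=8, label='sample', values=(1, 2.5, None),
               weights=np.arange(3.))
    write_hdf5('tmp_obj.h5', obj, overwrite=True)
    restored = read_hdf5('tmp_obj.h5')
    assert restored['label'] == 'sample'          # strings round-trip
    assert isinstance(restored['values'], tuple)  # container types preserved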
diff --git a/mne/baseline.py b/mne/baseline.py
index de81f7e..6bd204d 100644
--- a/mne/baseline.py
+++ b/mne/baseline.py
@@ -1,7 +1,7 @@
 """Util function to baseline correct data
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
diff --git a/mne/beamformer/_dics.py b/mne/beamformer/_dics.py
index 39b4b16..b83ae1b 100644
--- a/mne/beamformer/_dics.py
+++ b/mne/beamformer/_dics.py
@@ -12,12 +12,13 @@ import numpy as np
 from scipy import linalg
 
 from ..utils import logger, verbose
-from ..fiff.pick import pick_types
+from ..io.pick import pick_types
 from ..forward import _subject_from_forward
 from ..minimum_norm.inverse import combine_xyz
 from ..source_estimate import SourceEstimate
 from ..time_frequency import CrossSpectralDensity, compute_epochs_csd
 from ._lcmv import _prepare_beamformer_input
+from ..externals import six
 
 
 @verbose
@@ -48,7 +49,7 @@ def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,
         The regularization for the cross-spectral density.
     label : Label | None
         Restricts the solution to a given label.
-    picks : array of int | None
+    picks : array-like of int | None
         Indices (in info) of data channels. If None, MEG and EEG data channels
         (without bad channels) will be used.
     pick_ori : None | 'normal'
@@ -77,7 +78,7 @@ def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,
     n_orient = 3 if is_free_ori else 1
     n_sources = G.shape[1] // n_orient
 
-    for k in xrange(n_sources):
+    for k in range(n_sources):
         Wk = W[n_orient * k: n_orient * k + n_orient]
         Gk = G[:, n_orient * k: n_orient * k + n_orient]
         Ck = np.dot(Wk, Gk)
@@ -191,7 +192,7 @@ def dics(evoked, forward, noise_csd, data_csd, reg=0.01, label=None,
 
     stc = _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg=reg,
                       label=label, pick_ori=pick_ori)
-    return stc.next()
+    return six.advance_iterator(stc)
 
 
 @verbose
@@ -372,7 +373,7 @@ def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
 
         # Compute spatial filters
         W = np.dot(G.T, Cm_inv)
-        for k in xrange(n_sources):
+        for k in range(n_sources):
             Wk = W[n_orient * k: n_orient * k + n_orient]
             Gk = G[:, n_orient * k: n_orient * k + n_orient]
             Ck = np.dot(Wk, Gk)
@@ -492,7 +493,7 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
         raise ValueError('Time step should not be larger than any of the '
                          'window lengths')
     if n_ffts is not None and len(n_ffts) != len(freq_bins):
-        raise ValueError('When specifiying number of FFT samples, one value '
+        raise ValueError('When specifying number of FFT samples, one value '
                          'must be provided per frequency bin')
     if mt_bandwidths is not None and len(mt_bandwidths) != len(freq_bins):
         raise ValueError('When using multitaper mode and specifying '
@@ -541,6 +542,12 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
                             '%d to %d Hz' % (win_tmin * 1e3, win_tmax * 1e3,
                                              freq_bin[0], freq_bin[1]))
 
+                # Counteracts unsafe floating point arithmetic, ensuring all
+                # relevant samples will be taken into account when selecting
+                # data in time windows
+                win_tmin = win_tmin - 1e-10
+                win_tmax = win_tmax + 1e-10
+
                 # Calculating data CSD in current time window
                 data_csd = compute_epochs_csd(epochs, mode=mode,
                                               fmin=freq_bin[0],
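The 1e-10 padding added in this hunk (and mirrored in _lcmv.py below) guards a real failure mode: a window edge assembled from floats can land just below its nominal value and silently drop the last sample. A two-print illustration:

    import numpy as np

    times = np.arange(10) / 10.  # 0.0, 0.1, ..., 0.9
    win_tmax = 0.1 + 0.7         # == 0.7999999999999999, just below 0.8
    print(np.sum(times <= win_tmax))          # 8: the 0.8 sample is dropped
    print(np.sum(times <= win_tmax + 1e-10))  # 9: the padding recovers it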
diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py
index 4a909b4..00a895a 100644
--- a/mne/beamformer/_lcmv.py
+++ b/mne/beamformer/_lcmv.py
@@ -1,7 +1,7 @@
 """Compute Linearly constrained minimum variance (LCMV) beamformer.
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Roman Goj <roman.goj at gmail.com>
 #
 # License: BSD (3-clause)
@@ -11,9 +11,9 @@ import warnings
 import numpy as np
 from scipy import linalg
 
-from ..fiff.constants import FIFF
-from ..fiff.proj import make_projector
-from ..fiff.pick import pick_types, pick_channels_forward, pick_channels_cov
+from ..io.constants import FIFF
+from ..io.proj import make_projector
+from ..io.pick import pick_types, pick_channels_forward, pick_channels_cov
 from ..forward import _subject_from_forward
 from ..minimum_norm.inverse import _get_vertno, combine_xyz
 from ..cov import compute_whitener, compute_covariance
@@ -21,6 +21,7 @@ from ..source_estimate import _make_stc, SourceEstimate
 from ..source_space import label_src_vertno_sel
 from ..utils import logger, verbose
 from .. import Epochs
+from ..externals import six
 
 
 @verbose
@@ -48,7 +49,7 @@ def _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
         The regularization for the whitened data covariance.
     label : Label
         Restricts the LCMV solution to a given label.
-    picks : array of int | None
+    picks : array-like of int | None
         Indices (in info) of data channels. If None, MEG and EEG data channels
         (without bad channels) will be used.
     pick_ori : None | 'normal' | 'max-power'
@@ -290,9 +291,9 @@ def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
     tmin = evoked.times[0]
 
     stc = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
-                      label, pick_ori=pick_ori).next()
+                      label, pick_ori=pick_ori)
 
-    return stc
+    return six.advance_iterator(stc)
 
 
 @verbose
@@ -378,7 +379,7 @@ def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
 
     Parameters
     ----------
-    raw : mne.fiff.Raw
+    raw : mne.io.Raw
         Raw data to invert.
     forward : dict
         Forward operator.
@@ -394,7 +395,7 @@ def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
         Index of first time sample (index not time is seconds).
     stop : int
         Index of first time sample not to include (index not time is seconds).
-    picks : array of int
+    picks : array-like of int
         Channel indices in raw to use for beamforming (if None all channels
         are used except bad channels).
     pick_ori : None | 'normal' | 'max-power'
@@ -432,9 +433,9 @@ def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
     tmin = times[0]
 
     stc = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
-                      label, picks, pick_ori).next()
+                      label, picks, pick_ori)
 
-    return stc
+    return six.advance_iterator(stc)
 
 
 @verbose
@@ -464,7 +465,7 @@ def _lcmv_source_power(info, forward, noise_cov, data_cov, reg=0.01,
         The regularization for the whitened data covariance.
     label : Label | None
         Restricts the solution to a given label.
-    picks : array of int | None
+    picks : array-like of int | None
         Indices (in info) of data channels. If None, MEG and EEG data channels
         (without bad channels) will be used.
     pick_ori : None | 'normal'
@@ -645,8 +646,10 @@ def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
         raw_band = raw.copy()
         raw_band.filter(l_freq, h_freq, picks=raw_picks, method='iir',
                         n_jobs=n_jobs)
+        raw_band.info['highpass'] = l_freq
+        raw_band.info['lowpass'] = h_freq
         epochs_band = Epochs(raw_band, epochs.events, epochs.event_id,
-                             tmin=epochs.tmin, tmax=epochs.tmax,
+                             tmin=epochs.tmin, tmax=epochs.tmax, baseline=None,
                              picks=raw_picks, proj=epochs.proj, preload=True)
         del raw_band
 
@@ -674,6 +677,12 @@ def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
                             '%d to %d Hz' % (win_tmin * 1e3, win_tmax * 1e3,
                                              l_freq, h_freq))
 
+                # Counteracts unsafe floating point arithmetic, ensuring all
+                # relevant samples will be taken into account when selecting
+                # data in time windows
+                win_tmin = win_tmin - 1e-10
+                win_tmax = win_tmax + 1e-10
+
                 # Calculating data covariance from filtered epochs in current
                 # time window
                 data_cov = compute_covariance(epochs_band, tmin=win_tmin,
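The .next() -> six.advance_iterator changes in this module and in _dics.py are the generator half of the same Python 3 migration; the equivalence in two lines (six's helper simply wraps the builtin):

    from mne.externals.six import advance_iterator

    gen = (x ** 2 for x in range(3))
    assert advance_iterator(gen) == 0  # gen.next() on Py2, next(gen) on Py3
    assert next(gen) == 1              # the builtin itself works on both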
diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py
index b76c03e..6eaf4ad 100644
--- a/mne/beamformer/tests/test_dics.py
+++ b/mne/beamformer/tests/test_dics.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import warnings
 import os.path as op
 import copy as cp
@@ -10,6 +11,7 @@ import mne
 from mne.datasets import sample
 from mne.beamformer import dics, dics_epochs, dics_source_power, tf_dics
 from mne.time_frequency import compute_epochs_csd
+from mne.externals.six import advance_iterator
 
 # Note that this is the first test file; this will apply to all subsequent
 # tests in a full nosetest:
@@ -32,7 +34,7 @@ def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
     """
     label = mne.read_label(fname_label)
     events = mne.read_events(fname_event)[:10]
-    raw = mne.fiff.Raw(fname_raw, preload=False)
+    raw = mne.io.Raw(fname_raw, preload=False)
     forward = mne.read_forward_solution(fname_fwd)
     if read_all_forward:
         forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True)
@@ -51,9 +53,9 @@ def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
 
     # Set up pick list: MEG - bad channels
     left_temporal_channels = mne.read_selection('Left-temporal')
-    picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False,
-                                stim=True, eog=True, exclude='bads',
-                                selection=left_temporal_channels)
+    picks = mne.pick_types(raw.info, meg=True, eeg=False,
+                           stim=True, eog=True, exclude='bads',
+                           selection=left_temporal_channels)
 
     # Read epochs
     epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
@@ -126,7 +128,7 @@ def test_dics():
     # Testing returning of generator
     stcs_ = dics_epochs(epochs, forward_fixed, noise_csd, data_csd, reg=0.01,
                         return_generator=True, label=label)
-    assert_array_equal(stcs[0].data, stcs_.next().data)
+    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
 
     # Test whether correct number of trials was returned
     epochs.drop_bad_epochs()
@@ -206,7 +208,7 @@ def test_dics_source_power():
     for freq, data_csd in zip(frequencies, data_csds):
         data_csd.frequencies = [freq]
     noise_csds = data_csds
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
         dics_source_power(epochs.info, forward, noise_csds, data_csds)
     assert len(w) == 1
 
@@ -235,7 +237,7 @@ def test_tf_dics():
                    freq_bins, reg=reg, label=label)
 
     assert_true(len(stcs) == len(freq_bins))
-    print stcs[0].shape
+    print(stcs[0].shape)
     assert_true(stcs[0].shape[1] == 4)
 
     # Manually calculating source power in several time windows to compare
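
The stcs_.next() -> advance_iterator(stcs_) changes above are part of the
Python 3 migration: generators have no .next() method on Python 3. A minimal
sketch of the bundled helper, assuming it matches upstream six, where
advance_iterator is an alias for the next() builtin:

    from mne.externals.six import advance_iterator

    gen = (i * i for i in range(3))
    assert advance_iterator(gen) == 0  # equivalent to next(gen) on Python 2 and 3
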
diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py
index 99825c5..fcb5f6d 100644
--- a/mne/beamformer/tests/test_lcmv.py
+++ b/mne/beamformer/tests/test_lcmv.py
@@ -11,6 +11,7 @@ from mne.datasets import sample
 from mne.beamformer import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv
 from mne.beamformer._lcmv import _lcmv_source_power
 from mne.source_estimate import SourceEstimate, VolSourceEstimate
+from mne.externals.six import advance_iterator
 
 
 data_path = sample.data_path(download=False)
@@ -34,7 +35,7 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
     """
     label = mne.read_label(fname_label)
     events = mne.read_events(fname_event)
-    raw = mne.fiff.Raw(fname_raw, preload=True)
+    raw = mne.io.Raw(fname_raw, preload=True)
     forward = mne.read_forward_solution(fname_fwd)
     if all_forward:
         forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True)
@@ -54,10 +55,9 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
     if epochs:
         # Set up pick list: MEG - bad channels
         left_temporal_channels = mne.read_selection('Left-temporal')
-        picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False,
-                                    stim=True, eog=True, ref_meg=False,
-                                    exclude='bads',
-                                    selection=left_temporal_channels)
+        picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
+                               eog=True, ref_meg=False, exclude='bads',
+                               selection=left_temporal_channels)
 
         # Read epochs
         epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
@@ -74,8 +74,8 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
         info = raw.info
 
     noise_cov = mne.read_cov(fname_cov)
-    noise_cov = mne.cov.regularize(noise_cov, info,
-                                   mag=0.05, grad=0.05, eeg=0.1, proj=True)
+    noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
+                                   eeg=0.1, proj=True)
     if data_cov:
         data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
     else:
@@ -162,7 +162,7 @@ def test_lcmv():
     stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01)
     stcs_ = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01,
                         return_generator=True)
-    assert_array_equal(stcs[0].data, stcs_.next().data)
+    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
 
     epochs.drop_bad_epochs()
     assert_true(len(epochs.events) == len(stcs))
@@ -197,8 +197,8 @@ def test_lcmv_raw():
 
     # use only the left-temporal MEG channels for LCMV
     left_temporal_channels = mne.read_selection('Left-temporal')
-    picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads',
-                                selection=left_temporal_channels)
+    picks = mne.pick_types(raw.info, meg=True, exclude='bads',
+                           selection=left_temporal_channels)
 
     data_cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax)
 
@@ -265,7 +265,7 @@ def test_tf_lcmv():
                         'sample_audvis_filt-0-40_raw.fif')
     label = mne.read_label(fname_label)
     events = mne.read_events(fname_event)
-    raw = mne.fiff.Raw(fname_raw, preload=True)
+    raw = mne.io.Raw(fname_raw, preload=True)
     forward = mne.read_forward_solution(fname_fwd)
 
     event_id, tmin, tmax = 1, -0.2, 0.2
@@ -275,14 +275,13 @@ def test_tf_lcmv():
 
     # Set up pick list: MEG - bad channels
     left_temporal_channels = mne.read_selection('Left-temporal')
-    picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False,
-                                stim=True, eog=True, exclude='bads',
-                                selection=left_temporal_channels)
+    picks = mne.pick_types(raw.info, meg=True, eeg=False,
+                           stim=True, eog=True, exclude='bads',
+                           selection=left_temporal_channels)
 
     # Read epochs
     epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
-                        picks=picks, baseline=(None, 0),
-                        preload=False,
+                        picks=picks, baseline=None, preload=False,
                         reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
     epochs.drop_bad_epochs()
 
@@ -298,8 +297,9 @@ def test_tf_lcmv():
         raw_band = raw.copy()
         raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1, picks=picks)
         epochs_band = mne.Epochs(raw_band, epochs.events, epochs.event_id,
-                                 tmin=tmin, tmax=tmax, proj=True)
-        with warnings.catch_warnings(True):  # not enough samples
+                                 tmin=tmin, tmax=tmax, baseline=None,
+                                 proj=True)
+        with warnings.catch_warnings(record=True):  # not enough samples
             noise_cov = compute_covariance(epochs_band, tmin=tmin, tmax=tmin +
                                            win_length)
         noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=reg,
@@ -311,7 +311,7 @@ def test_tf_lcmv():
         # time windows to compare to tf_lcmv results and test overlapping
         if (l_freq, h_freq) == freq_bins[0]:
             for time_window in time_windows:
-                with warnings.catch_warnings(True):
+                with warnings.catch_warnings(record=True):
                     data_cov = compute_covariance(epochs_band,
                                                   tmin=time_window[0],
                                                   tmax=time_window[1])
@@ -360,7 +360,7 @@ def test_tf_lcmv():
     assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward, noise_covs,
                   tmin, tmax, tstep, win_lengths, freq_bins)
 
-    with warnings.catch_warnings(True):  # not enough samples
+    with warnings.catch_warnings(record=True):  # not enough samples
         # Pass only one epoch to test if subtracting evoked
         # responses yields zeros
         stcs = tf_lcmv(epochs[0], forward, noise_covs, tmin, tmax, tstep,
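
The warnings.catch_warnings(True) -> warnings.catch_warnings(record=True)
fixes in these tests matter because record is a keyword-only argument on
Python 3, so the positional form raises a TypeError there. With record=True
the context manager returns the list of caught warnings for inspection:

    import warnings

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        warnings.warn('not enough samples')  # illustrative message
    assert len(w) == 1
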
diff --git a/mne/channels.py b/mne/channels.py
new file mode 100644
index 0000000..1c525fd
--- /dev/null
+++ b/mne/channels.py
@@ -0,0 +1,343 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Andrew Dykstra <andrew.r.dykstra at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.io import loadmat
+from scipy import sparse
+
+from .externals.six import string_types
+
+from .utils import verbose, logger
+from .io.pick import channel_type, pick_info
+from .io.constants import FIFF
+
+
+def _get_meg_system(info):
+    """Educated guess for the helmet type based on channels"""
+    system = '306m'
+    for ch in info['chs']:
+        if ch['kind'] == FIFF.FIFFV_MEG_CH:
+            coil_type = ch['coil_type'] & 0xFFFF
+            if coil_type == FIFF.FIFFV_COIL_NM_122:
+                system = '122m'
+                break
+            elif coil_type // 1000 == 3:  # All Vectorview coils are 30xx
+                system = '306m'
+                break
+            elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
+                  coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
+                nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
+                               for c in info['chs']])
+                system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
+                system = 'CTF_275'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
+                system = 'KIT'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
+                system = 'BabySQUID'
+                break
+    return system
+
+
+def _contains_ch_type(info, ch_type):
+    """Check whether a certain channel type is in an info object
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement information.
+    ch_type : str
+        The channel type to be checked for.
+
+    Returns
+    -------
+    has_ch_type : bool
+        Whether the channel type is present or not.
+    """
+    if not isinstance(ch_type, string_types):
+        raise ValueError('`ch_type` is of class {actual_class}. It must be '
+                         '`str`'.format(actual_class=type(ch_type)))
+
+    valid_channel_types = ('grad mag eeg stim eog emg ecg ref_meg resp '
+                           'exci ias syst misc').split()
+
+    if ch_type not in valid_channel_types:
+        msg = ('The ch_type passed ({passed}) is not valid. '
+               'It must be {valid}.')
+        raise ValueError(msg.format(passed=ch_type,
+                                    valid=' or '.join(valid_channel_types)))
+    return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]
+
+
+@verbose
+def equalize_channels(candidates, verbose=None):
+    """Equalize channel picks for a collection of MNE-Python objects
+
+    Parameters
+    ----------
+    candidates : list
+        A list of Raw, Epochs, or Evoked instances.
+    verbose : None | bool
+        Whether to be verbose or not.
+
+    Note: This function operates in place.
+    """
+    from .io.base import _BaseRaw
+    from .epochs import Epochs
+    from .evoked import Evoked
+
+    if not all([isinstance(c, (_BaseRaw, Epochs, Evoked))
+                for c in candidates]):
+        valid = ['Raw', 'Epochs', 'Evoked']
+        raise ValueError('candidates must be ' + ' or '.join(valid))
+
+    chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
+    chan_template = candidates[chan_max_idx].ch_names
+    logger.info('Identifying common channels ...')
+    channels = [set(c.ch_names) for c in candidates]
+    common_channels = set(chan_template).intersection(*channels)
+    dropped = list()
+    for c in candidates:
+        drop_them = list(set(c.ch_names) - common_channels)
+        if drop_them:
+            c.drop_channels(drop_them)
+            dropped.extend(drop_them)
+    if dropped:
+        dropped = list(set(dropped))
+        logger.info('Dropped the following channels:\n%s' % dropped)
+    else:
+        logger.info('All channels are common; nothing to do.')
+
+
+class ContainsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+    def __contains__(self, ch_type):
+        """Check channel type membership"""
+        if ch_type == 'meg':
+            has_ch_type = (_contains_ch_type(self.info, 'mag') or
+                           _contains_ch_type(self.info, 'grad'))
+        else:
+            has_ch_type = _contains_ch_type(self.info, ch_type)
+        return has_ch_type
+
+
+class PickDropChannelsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+    def pick_channels(self, ch_names, copy=False):
+        """Pick some channels
+
+        Parameters
+        ----------
+        ch_names : list
+            The list of channels to select.
+        copy : bool
+            If True, returns a new instance; otherwise, modifies in place.
+            Defaults to False.
+        """
+        inst = self.copy() if copy else self
+
+        idx = [inst.ch_names.index(c) for c in ch_names if c in inst.ch_names]
+        inst._pick_drop_channels(idx)
+
+        return inst
+
+    def drop_channels(self, ch_names, copy=False):
+        """Drop some channels
+
+        Parameters
+        ----------
+        ch_names : list
+            The list of channels to remove.
+        copy : bool
+            If True, returns a new instance; otherwise, modifies in place.
+            Defaults to False.
+        """
+        inst = self.copy() if copy else self
+
+        bad_idx = [inst.ch_names.index(c) for c in ch_names
+                   if c in inst.ch_names]
+        idx = np.setdiff1d(np.arange(len(inst.ch_names)), bad_idx)
+        inst._pick_drop_channels(idx)
+
+        return inst
+
+    def _pick_drop_channels(self, idx):
+        # avoid circular imports
+        from .io.base import _BaseRaw
+        from .epochs import Epochs
+        from .evoked import Evoked
+        if isinstance(self, _BaseRaw):
+            if not self.preload:
+                raise RuntimeError('Raw data must be preloaded to drop or pick'
+                                   ' channels')
+
+        inst_has = lambda attr: getattr(self, attr, None) is not None
+
+        if inst_has('picks'):
+            self.picks = self.picks[idx]
+
+        if inst_has('cals'):
+            self.cals = self.cals[idx]
+
+        self.info = pick_info(self.info, idx, copy=False)
+
+        if inst_has('_projector'):
+            self._projector = self._projector[idx][:, idx]
+
+        if isinstance(self, _BaseRaw) and inst_has('_data'):
+            self._data = self._data[idx, :]
+        elif isinstance(self, Epochs) and inst_has('_data'):
+            self._data = self._data[:, idx, :]
+        elif isinstance(self, Evoked):
+            self.data = self.data[idx, :]
+
+
+def rename_channels(info, mapping):
+    """Rename channels and optionally change the sensor type.
+
+    Note: This can only change a channel to one of the following sensor
+    types: eog, emg, ecg, and misc. A channel cannot be changed to eeg.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    mapping : dict
+        A dictionary mapping the old channel name to a new channel name, e.g.
+        {'EEG061': 'EEG161'}. If changing the sensor type, make the new name
+        a tuple with the name (str) and the new channel type (str), e.g.
+        {'EEG061': ('EOG061', 'eog')}.
+    """
+    human2fiff = {'eog': FIFF.FIFFV_EOG_CH,
+                  'emg': FIFF.FIFFV_EMG_CH,
+                  'ecg': FIFF.FIFFV_ECG_CH,
+                  'misc': FIFF.FIFFV_MISC_CH}
+
+    bads, chs = info['bads'], info['chs']
+    ch_names = info['ch_names']
+    new_names, new_kinds, new_bads = list(), list(), list()
+
+    # first check and assemble clean mappings of index and name
+    for ch_name, new_name in mapping.items():
+        if ch_name not in ch_names:
+            raise ValueError("This channel name (%s) doesn't exist in info."
+                             % ch_name)
+
+        c_ind = ch_names.index(ch_name)
+        if not isinstance(new_name, (string_types, tuple)):
+            raise ValueError('Your mapping is not configured properly. '
+                             'Please see the help: mne.rename_channels?')
+
+        elif isinstance(new_name, tuple):  # name and type change
+            new_name, new_type = new_name  # unpack
+            if new_type not in human2fiff:
+                raise ValueError('This function cannot change to this '
+                                 'channel type: %s.' % new_type)
+            new_kinds.append((c_ind, human2fiff[new_type]))
+
+        if new_name in ch_names:
+            raise ValueError('The new name ({new}) already exists. Choose a '
+                             'unique name'.format(new=new_name))
+
+        new_names.append((c_ind, new_name))
+        if ch_name in bads:  # check bads
+            new_bads.append((bads.index(ch_name), new_name))
+
+    # Apply the name and kind changes (uniqueness was checked above).
+    for key, collection in [('ch_name', new_names), ('kind', new_kinds)]:
+        for c_ind, new_name in collection:
+            chs[c_ind][key] = new_name
+    for c_ind, new_name in new_bads:
+        bads[c_ind] = new_name
+
+    # reference magic, please don't change (it does not work with a
+    # local binding)
+    info['ch_names'] = [c['ch_name'] for c in chs]
+
+
+def _recursive_flatten(cell, dtype):
+    """Helper to unpack mat files in Python"""
+    while not isinstance(cell[0], dtype):
+        cell = [c for d in cell for c in d]
+    return cell
+
+
+def read_ch_connectivity(fname, picks=None):
+    """Parse FieldTrip neighbors .mat file
+
+    Parameters
+    ----------
+    fname : str
+        The file name.
+    picks : array-like of int, shape (n_channels,)
+        The indices of the channels to include. Must match the template.
+        Defaults to None.
+
+    Returns
+    -------
+    ch_connectivity : scipy.sparse matrix
+        The connectivity matrix.
+    """
+    nb = loadmat(fname)['neighbours']
+    ch_names = _recursive_flatten(nb['label'], string_types)
+    neighbors = [_recursive_flatten(c, string_types) for c in
+                 nb['neighblabel'].flatten()]
+    assert len(ch_names) == len(neighbors)
+    if picks is not None:
+        if max(picks) >= len(ch_names):
+            raise ValueError('The picks must be compatible with '
+                             'channels. Found a pick ({}) which exceeds '
+                             'the channel range ({})'
+                             .format(max(picks), len(ch_names)))
+    connectivity = ch_neighbor_connectivity(ch_names, neighbors)
+    if picks is not None:
+        # picking before constructing matrix is buggy
+        connectivity = connectivity[picks][:, picks]
+    return connectivity
+
+
+def ch_neighbor_connectivity(ch_names, neighbors):
+    """Compute sensor connectivity matrix
+
+    Parameters
+    ----------
+    ch_names : list of str
+        The channel names.
+    neighbors : list of list
+        A list of lists of channel names. The neighbors to
+        which the channels in ch_names are connected.
+        Must be of the same length as ch_names.
+
+    Returns
+    -------
+    ch_connectivity : scipy.sparse matrix
+        The connectivity matrix.
+    """
+    if len(ch_names) != len(neighbors):
+        raise ValueError('`ch_names` and `neighbors` must '
+                         'have the same length')
+    set_neighbors = set([c for d in neighbors for c in d])
+    rest = set(ch_names) - set_neighbors
+    if len(rest) > 0:
+        raise ValueError('Some channels in ch_names do not appear in any '
+                         'of the neighbors lists')
+
+    for neigh in neighbors:
+        if (not isinstance(neigh, list) or
+           not all(isinstance(c, string_types) for c in neigh)):
+            raise ValueError('`neighbors` must be a list of lists of str')
+
+    ch_connectivity = np.eye(len(ch_names), dtype=bool)
+    for ii, neighs in enumerate(neighbors):
+        ch_connectivity[ii, [ch_names.index(i) for i in neighs]] = True
+
+    ch_connectivity = sparse.csr_matrix(ch_connectivity)
+    return ch_connectivity
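
A minimal usage sketch for the channel utilities introduced above (file and
channel names are hypothetical, and Raw is assumed to gain the mixin methods
as the mixin docstrings indicate; equalize_channels and rename_channels
operate in place):

    import mne
    from mne.channels import equalize_channels, rename_channels

    raw = mne.io.Raw('sample_raw.fif', preload=True)    # hypothetical file
    raw_small = raw.copy().drop_channels(['MEG 0113'])  # hypothetical channel
    equalize_channels([raw, raw_small])  # drops 'MEG 0113' from raw as well
    rename_channels(raw.info, {'EEG 001': ('EOG 001', 'eog')})
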
diff --git a/mne/commands/mne_browse_raw.py b/mne/commands/mne_browse_raw.py
index 81c4dab..b4ccb5c 100755
--- a/mne/commands/mne_browse_raw.py
+++ b/mne/commands/mne_browse_raw.py
@@ -63,7 +63,7 @@ if __name__ == '__main__':
         parser.print_help()
         sys.exit(1)
 
-    raw = mne.fiff.Raw(raw_in, preload=preload)
+    raw = mne.io.Raw(raw_in, preload=preload)
     if len(proj_in) > 0:
         projs = mne.read_proj(proj_in)
         raw.info['projs'] = projs
diff --git a/mne/commands/mne_bti2fiff.py b/mne/commands/mne_bti2fiff.py
index 0295f83..28f983f 100755
--- a/mne/commands/mne_bti2fiff.py
+++ b/mne/commands/mne_bti2fiff.py
@@ -15,9 +15,9 @@ are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't
 appear in the channel names of the raw object.
 """
 
-# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Yuval Harpaz <yuvharpaz at gmail.com>
 #
@@ -26,7 +26,7 @@ appear in the channel names of the raw object.
 
 import sys
 
-from mne.fiff.bti import read_raw_bti
+from mne.io import read_raw_bti
 
 
 if __name__ == '__main__':
diff --git a/mne/commands/mne_clean_eog_ecg.py b/mne/commands/mne_clean_eog_ecg.py
index 9d91595..4e8f54a 100755
--- a/mne/commands/mne_clean_eog_ecg.py
+++ b/mne/commands/mne_clean_eog_ecg.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 """Clean a raw file from EOG and ECG artifacts with PCA (ie SSP)
 """
+from __future__ import print_function
 
 # Authors : Dr Engr. Sheraz Khan,  P.Eng, Ph.D.
 #           Engr. Nandita Shetty,  MS.
@@ -37,7 +38,7 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
         raise Exception("EOG and ECG cannot be both disabled")
 
     # Reading fif File
-    raw_in = mne.fiff.Raw(in_fif_fname)
+    raw_in = mne.io.Raw(in_fif_fname)
 
     if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'):
         prefix = in_fif_fname[:-8]
@@ -55,14 +56,14 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
     if eog_event_fname is None:
         eog_event_fname = prefix + '_eog-eve.fif'
 
-    print 'Implementing ECG and EOG artifact rejection on data'
+    print('Implementing ECG and EOG artifact rejection on data')
 
     if ecg:
         ecg_events, _, _  = mne.preprocessing.find_ecg_events(raw_in)
-        print "Writing ECG events in %s" % ecg_event_fname
+        print("Writing ECG events in %s" % ecg_event_fname)
         mne.write_events(ecg_event_fname, ecg_events)
 
-        print 'Computing ECG projector'
+        print('Computing ECG projector')
 
         command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
                    '--projtmin -0.08 --projtmax 0.08 --saveprojtag _ecg_proj '
@@ -72,14 +73,14 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
         st = os.system(command)
 
         if st != 0:
-            print "Error while running : %s" % command
+            print("Error while running : %s" % command)
 
     if eog:
         eog_events = mne.preprocessing.find_eog_events(raw_in)
-        print "Writing EOG events in %s" % eog_event_fname
+        print("Writing EOG events in %s" % eog_event_fname)
         mne.write_events(eog_event_fname, eog_events)
 
-        print 'Computing EOG projector'
+        print('Computing EOG projector')
 
         command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
                    '--projtmin -0.15 --projtmax 0.15 --saveprojtag _eog_proj '
@@ -87,7 +88,7 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
                    '--projmagrej 4000  --projgradrej 3000' % (in_path,
                    in_fif_fname, eog_event_fname))
 
-        print 'Running : %s' % command
+        print('Running : %s' % command)
 
         st = os.system(command)
         if st != 0:
@@ -95,25 +96,25 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
 
     if out_fif_fname is not None:
         # Applying the ECG EOG projector
-        print 'Applying ECG EOG projector'
+        print('Applying ECG EOG projector')
 
         command = ('mne_process_raw --cd %s --raw %s '
                    '--proj %s --projoff --save %s --filteroff'
                    % (in_path, in_fif_fname, in_fif_fname, out_fif_fname))
         command += ' --proj %s --proj %s' % (ecg_proj_fname, eog_proj_fname)
 
-        print 'Command executed: %s' % command
+        print('Command executed: %s' % command)
 
         st = os.system(command)
 
         if st != 0:
             raise ValueError('Pb while running : %s' % command)
 
-        print 'Done removing artifacts.'
-        print "Cleaned raw data saved in: %s" % out_fif_fname
-        print 'IMPORTANT : Please eye-ball the data !!'
+        print('Done removing artifacts.')
+        print("Cleaned raw data saved in: %s" % out_fif_fname)
+        print('IMPORTANT : Please eye-ball the data !!')
     else:
-        print 'Projection not applied to raw data.'
+        print('Projection not applied to raw data.')
 
 
 if __name__ == '__main__':
diff --git a/mne/commands/mne_compute_proj_ecg.py b/mne/commands/mne_compute_proj_ecg.py
index 3407686..55275b3 100755
--- a/mne/commands/mne_compute_proj_ecg.py
+++ b/mne/commands/mne_compute_proj_ecg.py
@@ -5,10 +5,12 @@ You can do for example:
 
 $ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" --l-freq 1 --h-freq 100 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
 """
+from __future__ import print_function
 
 # Authors : Alexandre Gramfort, Ph.D.
 #           Martin Luessi, Ph.D.
 
+from mne.externals.six import string_types
 import os
 import sys
 import mne
@@ -154,7 +156,7 @@ if __name__ == '__main__':
 
     if bad_fname is not None:
         bads = [w.rstrip().split()[0] for w in open(bad_fname).readlines()]
-        print 'Bad channels read : %s' % bads
+        print('Bad channels read : %s' % bads)
     else:
         bads = []
 
@@ -170,10 +172,10 @@ if __name__ == '__main__':
     else:
         ecg_proj_fname = prefix + '_ecg_proj.fif'
 
-    raw = mne.fiff.Raw(raw_in, preload=preload)
+    raw = mne.io.Raw(raw_in, preload=preload)
 
     if raw_event_fname is not None:
-        raw_event = mne.fiff.Raw(raw_event_fname)
+        raw_event = mne.io.Raw(raw_event_fname)
     else:
         raw_event = raw
 
@@ -191,15 +193,15 @@ if __name__ == '__main__':
         raw_event.close()
 
     if proj_fname is not None:
-        print 'Including SSP projections from : %s' % proj_fname
+        print('Including SSP projections from : %s' % proj_fname)
         # append the ecg projs, so they are last in the list
         projs = mne.read_proj(proj_fname) + projs
 
-    if isinstance(preload, basestring) and os.path.exists(preload):
+    if isinstance(preload, string_types) and os.path.exists(preload):
         os.remove(preload)
 
-    print "Writing ECG projections in %s" % ecg_proj_fname
+    print("Writing ECG projections in %s" % ecg_proj_fname)
     mne.write_proj(ecg_proj_fname, projs)
 
-    print "Writing ECG events in %s" % ecg_event_fname
+    print("Writing ECG events in %s" % ecg_event_fname)
     mne.write_events(ecg_event_fname, events)
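
The basestring -> string_types changes follow the same compatibility pattern:
basestring does not exist on Python 3, and six.string_types is basestring on
Python 2 but str on Python 3, so the check works on both:

    from mne.externals.six import string_types

    preload = 'tmp_preload.fif'  # hypothetical temporary file name
    assert isinstance(preload, string_types)
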
diff --git a/mne/commands/mne_compute_proj_eog.py b/mne/commands/mne_compute_proj_eog.py
index f20c7a2..96f31ca 100755
--- a/mne/commands/mne_compute_proj_eog.py
+++ b/mne/commands/mne_compute_proj_eog.py
@@ -11,10 +11,12 @@ $ mne compute_proj_eog -i sample_audvis_raw.fif --l-freq 1 --h-freq 35 --rej-gra
 
 to exclude ECG artifacts from projection computation.
 """
+from __future__ import print_function
 
 # Authors : Alexandre Gramfort, Ph.D.
 #           Martin Luessi, Ph.D.
 
+from mne.externals.six import string_types
 import os
 import sys
 import mne
@@ -137,7 +139,7 @@ if __name__ == '__main__':
 
     if bad_fname is not None:
         bads = [w.rstrip().split()[0] for w in open(bad_fname).readlines()]
-        print 'Bad channels read : %s' % bads
+        print('Bad channels read : %s' % bads)
     else:
         bads = []
 
@@ -153,10 +155,10 @@ if __name__ == '__main__':
     else:
         eog_proj_fname = prefix + '_eog_proj.fif'
 
-    raw = mne.fiff.Raw(raw_in, preload=preload)
+    raw = mne.io.Raw(raw_in, preload=preload)
 
     if raw_event_fname is not None:
-        raw_event = mne.fiff.Raw(raw_event_fname)
+        raw_event = mne.io.Raw(raw_event_fname)
     else:
         raw_event = raw
 
@@ -176,15 +178,15 @@ if __name__ == '__main__':
         raw_event.close()
 
     if proj_fname is not None:
-        print 'Including SSP projections from : %s' % proj_fname
+        print('Including SSP projections from : %s' % proj_fname)
         # append the eog projs, so they are last in the list
         projs = mne.read_proj(proj_fname) + projs
 
-    if isinstance(preload, basestring) and os.path.exists(preload):
+    if isinstance(preload, string_types) and os.path.exists(preload):
         os.remove(preload)
 
-    print "Writing EOG projections in %s" % eog_proj_fname
+    print("Writing EOG projections in %s" % eog_proj_fname)
     mne.write_proj(eog_proj_fname, projs)
 
-    print "Writing EOG events in %s" % eog_event_fname
+    print("Writing EOG events in %s" % eog_event_fname)
     mne.write_events(eog_event_fname, events)
diff --git a/mne/commands/mne_coreg.py b/mne/commands/mne_coreg.py
new file mode 100644
index 0000000..1adc15e
--- /dev/null
+++ b/mne/commands/mne_coreg.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# Authors: Christian Brodbeck  <christianbrodbeck at nyu.edu>
+
+""" Open the coregistration GUI.
+
+example usage:  $ mne coreg
+
+"""
+
+import os
+import sys
+
+import mne
+
+
+if __name__ == '__main__':
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+    options, args = parser.parse_args()
+
+    os.environ['ETS_TOOLKIT'] = 'qt4'
+    mne.gui.coregistration()
+    sys.exit(0)
diff --git a/mne/commands/mne_flash_bem_model.py b/mne/commands/mne_flash_bem_model.py
index 7a49b8e..595583b 100755
--- a/mne/commands/mne_flash_bem_model.py
+++ b/mne/commands/mne_flash_bem_model.py
@@ -13,6 +13,7 @@ and brain.mgz MRI volumes should be, as usual, in the subject's mri
 directory.
 
 """
+from __future__ import print_function
 
 # Authors:  Rey Rene Ramirez, Ph.D.   e-mail: rrramir at uw.edu
 #           Alexandre Gramfort, Ph.D.
@@ -71,19 +72,19 @@ def make_flash_bem(subject, subjects_dir, flash05, flash30, show=False):
     # flash_dir = os.getcwd()
     if not os.path.exists('parameter_maps'):
         os.mkdir("parameter_maps")
-    print "--- Converting Flash 5"
+    print("--- Converting Flash 5")
     os.system('mri_convert -flip_angle %s -tr 25 %s mef05.mgz' %
                                             (5 * math.pi / 180, flash05))
-    print "--- Converting Flash 30"
+    print("--- Converting Flash 30")
     os.system('mri_convert -flip_angle %s -tr 25 %s mef30.mgz' %
                                             (30 * math.pi / 180, flash30))
-    print "--- Running mne_flash_bem"
+    print("--- Running mne_flash_bem")
     os.system('mne_flash_bem --noconvert')
     os.chdir(os.path.join(subjects_dir, subject, 'bem'))
     if not os.path.exists('flash'):
         os.mkdir("flash")
     os.chdir("flash")
-    print "[done]"
+    print("[done]")
 
     if show:
         fnames = ['outer_skin.surf', 'outer_skull.surf', 'inner_skull.surf']
diff --git a/mne/commands/mne_kit2fiff.py b/mne/commands/mne_kit2fiff.py
index 90eb785..68197ff 100755
--- a/mne/commands/mne_kit2fiff.py
+++ b/mne/commands/mne_kit2fiff.py
@@ -3,13 +3,16 @@
 
 """ Import KIT / NYU data to fif file.
 
-example usage: mne kit2fiff --input input.sqd --output output.fif
+example usage:  $ mne kit2fiff --input input.sqd --output output.fif
+Use without arguments to invoke the GUI:  $ mne kit2fiff
 
 """
 
+import os
 import sys
 
-from mne.fiff.kit import read_raw_kit
+import mne
+from mne.io import read_raw_kit
 
 if __name__ == '__main__':
 
@@ -41,8 +44,9 @@ if __name__ == '__main__':
 
     input_fname = options.input_fname
     if input_fname is None:
-        parser.print_help()
-        sys.exit(1)
+        os.environ['ETS_TOOLKIT'] = 'qt4'
+        mne.gui.kit2fiff()
+        sys.exit(0)
 
     hsp_fname = options.hsp_fname
     elp_fname = options.elp_fname
diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py
index 45f550e..a1a18ff 100755
--- a/mne/commands/mne_make_scalp_surfaces.py
+++ b/mne/commands/mne_make_scalp_surfaces.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
-# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 #          simplified bsd-3 license
@@ -11,10 +11,11 @@ Create high-resolution head surfaces for coordinate alignment.
 
 example usage: mne make_scalp_surfaces --overwrite --subject sample
 """
+from __future__ import print_function
+
 import os
 import os.path as op
 import sys
-from commands import getstatusoutput
 import mne
 
 if __name__ == '__main__':
@@ -44,16 +45,17 @@ if __name__ == '__main__':
     verbose = options.verbose
     force = '--force' if options.force else '--check'
 
+    from mne.commands.utils import get_status_output
     def my_run_cmd(cmd, err_msg):
-        sig, out = getstatusoutput(cmd)
+        sig, out, error = get_status_output(cmd)
         if verbose:
-            print out
+            print(out, error)
         if sig != 0:
-            print err_msg
+            print(err_msg)
             sys.exit(1)
 
     if not 'SUBJECTS_DIR' in env:
-        print 'The environment variable SUBJECTS_DIR should be set'
+        print('The environment variable SUBJECTS_DIR should be set')
         sys.exit(1)
 
     if not op.isabs(env['SUBJECTS_DIR']):
@@ -61,17 +63,17 @@ if __name__ == '__main__':
     subj_dir = env['SUBJECTS_DIR']
 
     if not 'MNE_ROOT' in env:
-        print 'MNE_ROOT environment variable is not set'
+        print('MNE_ROOT environment variable is not set')
         sys.exit(1)
 
     if not 'FREESURFER_HOME' in env:
-        print 'The FreeSurfer environment needs to be set up for this script'
+        print('The FreeSurfer environment needs to be set up for this script')
         sys.exit(1)
 
     subj_path = op.join(subj_dir, subject)
     if not op.exists(subj_path):
-        print ('%s does not exits. Please check your subject directory '
-               'path.' % subj_path)
+        print('%s does not exist. Please check your subject directory '
+              'path.' % subj_path)
         sys.exit(1)
 
     if op.exists(op.join(subj_path, 'mri', 'T1.mgz')):
@@ -79,7 +81,7 @@ if __name__ == '__main__':
     else:
         mri = 'T1'
 
-    print '1. Creating a dense scalp tessellation with mkheadsurf...'
+    print('1. Creating a dense scalp tessellation with mkheadsurf...')
 
     def check_seghead(surf_path=op.join(subj_path, 'surf')):
         for k in ['/lh.seghead', '/lh.smseghead']:
@@ -93,30 +95,30 @@ if __name__ == '__main__':
         cmd = 'mkheadsurf -subjid %s -srcvol %s >/dev/null' % (subject, mri)
         my_run_cmd(cmd, 'mkheadsurf failed')
     else:
-        print '%s/surf/%s already there' % (subj_path, my_seghead)
+        print('%s/surf/%s already there' % (subj_path, my_seghead))
         if not overwrite:
-            print 'Use the --overwrite option to replace exisiting surfaces.'
+            print('Use the --overwrite option to replace existing surfaces.')
             sys.exit()
 
     surf = check_seghead()
     if surf is None:
-        print 'mkheadsurf did not produce the standard output file.'
+        print('mkheadsurf did not produce the standard output file.')
         sys.exit(1)
 
     fif = '{0}/{1}/bem/{1}-head-dense.fif'.format(subj_dir, subject)
-    print '2. Creating %s ...' % fif
+    print('2. Creating %s ...' % fif)
     cmd = 'mne_surf2bem --surf %s --id 4 %s --fif %s' % (surf, force, fif)
     my_run_cmd(cmd, 'Failed to create %s, see above' % fif)
     levels = 'medium', 'sparse'
     for ii, (n_tri, level) in enumerate(zip([30000, 2500], levels), 3):
         my_surf = mne.read_bem_surfaces(fif)[0]
-        print '%i. Creating medium grade tessellation...' % ii
-        print '%i.1 Decimating the dense tessellation...' % ii
+        print('%i. Creating %s tessellation...' % (ii, level))
+        print('%i.1 Decimating the dense tessellation...' % ii)
         points, tris = mne.decimate_surface(points=my_surf['rr'],
                                             triangles=my_surf['tris'],
                                             n_triangles=n_tri)
         out_fif = fif.replace('dense', level)
-        print '%i.2 Creating %s' % (ii, out_fif)
+        print('%i.2 Creating %s' % (ii, out_fif))
         surf_fname = '/tmp/tmp-surf.surf'
         # convert points to meters, make mne_analyze happy
         mne.write_surface(surf_fname, points * 1e3, tris)
diff --git a/mne/commands/mne_report.py b/mne/commands/mne_report.py
new file mode 100644
index 0000000..e646b64
--- /dev/null
+++ b/mne/commands/mne_report.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""Create mne report for a folder
+
+Example usage
+
+mne report -p MNE-sample-data/ -i \
+MNE-sample-data/MEG/sample/sample_audvis-ave.fif -d MNE-sample-data/subjects/ \
+-s sample
+
+"""
+
+from mne.report import Report
+
+
+if __name__ == '__main__':
+
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-p", "--path", dest="path",
+                      help="Path to folder who MNE-Report must be created")
+    parser.add_option("-i", "--info", dest="info_fname",
+                      help="File from which info dictionary is to be read",
+                      metavar="FILE")
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="The subjects directory")
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="The subject name")
+    parser.add_option("-v", "--verbose", dest="verbose",
+                      action='store_true', help="run in verbose mode")
+    parser.add_option("--no-browser", dest="no_browser", action='store_false',
+                      help="Do not open MNE-Report in browser")
+    parser.add_option("--overwrite", dest="overwrite", action='store_false',
+                      help="Overwrite html report if it already exists")
+    parser.add_option("-j", "--jobs", dest="n_jobs", help="Number of jobs to"
+                      " run in parallel")
+
+    options, args = parser.parse_args()
+    path = options.path
+    info_fname = options.info_fname
+    subjects_dir = options.subjects_dir
+    subject = options.subject
+    verbose = options.verbose is not None
+    open_browser = options.no_browser is None
+    overwrite = options.overwrite is not None
+    n_jobs = int(options.n_jobs) if options.n_jobs is not None else 1
+
+    report = Report(info_fname, subjects_dir=subjects_dir, subject=subject,
+                    verbose=verbose)
+    report.parse_folder(path, verbose=verbose, n_jobs=n_jobs)
+    report.save(open_browser=open_browser, overwrite=overwrite)
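
A minimal sketch of the same report workflow from Python rather than from the
command line, mirroring the calls in the script above (paths are
hypothetical):

    from mne.report import Report

    report = Report(info_fname='sample_audvis-ave.fif', subject='sample',
                    subjects_dir='subjects')
    report.parse_folder('MNE-sample-data/')
    report.save(open_browser=False, overwrite=True)
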
diff --git a/mne/commands/mne_surf2bem.py b/mne/commands/mne_surf2bem.py
index 6036f12..07e447b 100755
--- a/mne/commands/mne_surf2bem.py
+++ b/mne/commands/mne_surf2bem.py
@@ -7,7 +7,8 @@ mne surf2bem --surf ${SUBJECTS_DIR}/${SUBJECT}/surf/lh.seghead --fif \
 ${SUBJECTS_DIR}/${SUBJECT}/bem/${SUBJECT}-head.fif --id=4
 
 """
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+from __future__ import print_function
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -34,7 +35,7 @@ if __name__ == '__main__':
         parser.print_help()
         sys.exit(1)
 
-    print "Converting %s to BEM FIF file." % options.surf
+    print("Converting %s to BEM FIF file." % options.surf)
 
     points, tris = mne.read_surface(options.surf)
     points *= 1e-3
diff --git a/mne/commands/utils.py b/mne/commands/utils.py
index 233a586..2e97e03 100644
--- a/mne/commands/utils.py
+++ b/mne/commands/utils.py
@@ -9,11 +9,12 @@
 
 import imp, os, re
 from optparse import OptionParser
+from subprocess import Popen, PIPE
 
 import mne
 
 def get_optparser(cmdpath):
-    """Create OptionParser with cmdsource specific settings (e.g. prog value)
+    """Create OptionParser with cmd source specific settings (e.g. prog value)
     """
     command = os.path.basename(cmdpath)
     if re.match('mne_(.*).py', command):
@@ -37,3 +38,11 @@ def get_optparser(cmdpath):
                           epilog=epilog)
 
     return parser
+
+def get_status_output(cmd):
+    """ Replacement for commands.getstatusoutput which has been deprecated since 2.6
+        Returns the error status, output and error output"""
+    pipe = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
+    output, error = pipe.communicate()
+    status = pipe.returncode
+    return status, output, error
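
A usage sketch for the helper above, assuming a POSIX shell; note that the
captured output is bytes on Python 3, since Popen pipes return bytes:

    status, out, err = get_status_output('echo hello')
    assert status == 0  # out is 'hello\n' on Python 2 and b'hello\n' on Python 3
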
diff --git a/mne/connectivity/effective.py b/mne/connectivity/effective.py
index 247b4fc..ad3e085 100644
--- a/mne/connectivity/effective.py
+++ b/mne/connectivity/effective.py
@@ -1,6 +1,7 @@
 # Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
+from ..externals.six.moves import zip
 import copy
 
 import numpy as np
@@ -127,7 +128,7 @@ def phase_slope_index(data, indices=None, sfreq=2 * np.pi,
     if fmin is None:
         fmin = -np.inf  # set it to -inf, so we can adjust it later
 
-    bands = zip(np.asarray((fmin,)).ravel(), np.asarray((fmax,)).ravel())
+    bands = list(zip(np.asarray((fmin,)).ravel(), np.asarray((fmax,)).ravel()))
     n_bands = len(bands)
 
     freq_dim = -2 if mode == 'cwt_morlet' else -1
@@ -143,8 +144,8 @@ def phase_slope_index(data, indices=None, sfreq=2 * np.pi,
     acc = np.empty(acc_shape, dtype=np.complex128)
 
     freqs = list()
-    idx_fi = [Ellipsis] * cohy.ndim
-    idx_fj = [Ellipsis] * cohy.ndim
+    idx_fi = [slice(None)] * cohy.ndim
+    idx_fj = [slice(None)] * cohy.ndim
     for band_idx, band in enumerate(bands):
         freq_idx = np.where((freqs_ > band[0]) & (freqs_ < band[1]))[0]
         freqs.append(freqs_[freq_idx])
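
The switch from Ellipsis to slice(None) above builds a per-axis index that
can then be narrowed on a single axis; NumPy rejects an index containing more
than one Ellipsis. A minimal sketch with a toy array standing in for cohy:

    import numpy as np

    cohy = np.arange(24).reshape(2, 3, 4)
    idx_fi = [slice(None)] * cohy.ndim
    idx_fi[-1] = 0                    # select frequency index 0 on the last axis
    print(cohy[tuple(idx_fi)].shape)  # (2, 3)
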
diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py
index 25b1145..2b4957c 100644
--- a/mne/connectivity/spectral.py
+++ b/mne/connectivity/spectral.py
@@ -2,6 +2,7 @@
 #
 # License: BSD (3-clause)
 
+from ..externals.six import string_types
 from warnings import warn
 from inspect import getargspec, getmembers
 
@@ -417,7 +418,7 @@ def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
 
     # accumulate connectivity scores
     if mode in ['multitaper', 'fourier']:
-        for i in xrange(0, n_cons, block_size):
+        for i in range(0, n_cons, block_size):
             con_idx = slice(i, i + block_size)
             if mt_adaptive:
                 csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
@@ -433,7 +434,7 @@ def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
                 method.accumulate(con_idx, csd)
     else:
         # cwt_morlet mode
-        for i in xrange(0, n_cons, block_size):
+        for i in range(0, n_cons, block_size):
             con_idx = slice(i, i + block_size)
 
             csd = x_cwt[idx_map[0][con_idx]]\
@@ -651,9 +652,15 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
         the output freqs will be a list with arrays of the frequencies
         that were averaged.
     tmin : float | None
-        Time to start connectivity estimation.
+        Time to start connectivity estimation. Note: when "data" is an array,
+        the first sample is assumed to be at time 0. For other types
+        (Epochs, etc.), the time information contained in the object is used
+        to compute the time indices.
     tmax : float | None
-        Time to end connectivity estimation.
+        Time to end connectivity estimation. Note: when "data" is an array,
+        the first sample is assumed to be at time 0. For other types
+        (Epochs, etc.), the time information contained in the object is used
+        to compute the time indices.
     mt_bandwidth : float | None
         The bandwidth of the multitaper windowing function in Hz.
         Only used in 'multitaper' mode.
@@ -724,7 +731,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
         if m in _CON_METHOD_MAP:
             method = _CON_METHOD_MAP[m]
             con_method_types.append(method)
-        elif isinstance(m, basestring):
+        elif isinstance(m, string_types):
             raise ValueError('%s is not a valid connectivity method' % m)
         else:
             # add custom method
@@ -836,7 +843,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                 freq_mask |= ((freqs_all >= f_lower) & (freqs_all <= f_upper))
 
             # possibly skip frequency points
-            for pos in xrange(fskip):
+            for pos in range(fskip):
                 freq_mask[pos + 1::fskip + 1] = False
 
             # the frequency points where we compute connectivity
@@ -1004,7 +1011,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
             method.compute_con(slice(0, n_cons), n_epochs)
         else:
             # compute scores block-wise to save memory
-            for i in xrange(0, n_cons, block_size):
+            for i in range(0, n_cons, block_size):
                 con_idx = slice(i, i + block_size)
                 psd_xx = psd[idx_map[0][con_idx]]
                 psd_yy = psd[idx_map[1][con_idx]]
@@ -1022,7 +1029,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                                  'be the same as the number of frequencies')
             con_shape = (n_cons, n_bands) + this_con.shape[2:]
             this_con_bands = np.empty(con_shape, dtype=this_con.dtype)
-            for band_idx in xrange(n_bands):
+            for band_idx in range(n_bands):
                 this_con_bands[:, band_idx] =\
                     np.mean(this_con[:, freq_idx_bands[band_idx]], axis=1)
             this_con = this_con_bands
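
The block-wise loops above bound peak memory by processing at most block_size
connections at a time; slices past the end of an array are truncated
automatically, so the final partial block needs no special casing. A minimal
sketch of the pattern:

    import numpy as np

    n_cons, block_size = 2500, 1000  # toy sizes
    scores = np.empty(n_cons)
    for i in range(0, n_cons, block_size):
        con_idx = slice(i, i + block_size)  # last block is shorter, which is fine
        scores[con_idx] = 0.0  # stands in for the per-block computation
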
diff --git a/mne/connectivity/tests/test_spectral.py b/mne/connectivity/tests/test_spectral.py
index 680c27e..3ce45eb 100644
--- a/mne/connectivity/tests/test_spectral.py
+++ b/mne/connectivity/tests/test_spectral.py
@@ -43,7 +43,7 @@ def test_spectral_connectivity():
     times_data = np.linspace(tmin, tmax, n_times)
     # simulate connectivity from 5Hz..15Hz
     fstart, fend = 5.0, 15.0
-    for i in xrange(n_epochs):
+    for i in range(n_epochs):
         data[i, 1, :] = band_pass_filter(data[i, 0, :], sfreq, fstart, fend)
         # add some noise, so the spectrum is not exactly zero
         data[i, 1, :] += 1e-2 * np.random.randn(n_times)
diff --git a/mne/coreg.py b/mne/coreg.py
index 56242bb..ce634e1 100644
--- a/mne/coreg.py
+++ b/mne/coreg.py
@@ -4,7 +4,7 @@
 #
 # License: BSD (3-clause)
 
-from ConfigParser import RawConfigParser
+from .externals.six.moves import configparser
 import fnmatch
 from glob import glob, iglob
 import os
@@ -18,13 +18,16 @@ from scipy.optimize import leastsq
 from scipy.spatial.distance import cdist
 from scipy.linalg import norm
 
-from .fiff.meas_info import read_fiducials, write_fiducials
+from .io.meas_info import read_fiducials, write_fiducials
 from .label import read_label, Label
-from .source_space import read_source_spaces, write_source_spaces
+from .source_space import (add_source_space_distances, read_source_spaces,
+                           write_source_spaces)
 from .surface import (read_surface, write_surface, read_bem_surfaces,
                       write_bem_surface)
 from .transforms import rotation, rotation3d, scaling, translation
 from .utils import get_config, get_subjects_dir, logger, pformat
+from functools import reduce
+from .externals.six.moves import zip
 
 
 # some path templates
@@ -32,14 +35,13 @@ trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
 subject_dirname = os.path.join('{subjects_dir}', '{subject}')
 bem_dirname = os.path.join(subject_dirname, 'bem')
 surf_dirname = os.path.join(subject_dirname, 'surf')
-bem_fname = os.path.join(bem_dirname, "{subject}-{name}-bem.fif")
-fid_fname = os.path.join(bem_dirname, "{subject}-fiducials.fif")
+bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
+head_bem_fname = pformat(bem_fname, name='head')
+fid_fname = pformat(bem_fname, name='fiducials')
 fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
-head_bem_fname = os.path.join(bem_dirname, "{subject}-head.fif")
 src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
 
 
-
 def create_default_subject(mne_root=None, fs_home=None, update=False,
                            subjects_dir=None):
     """Create an average brain subject for subjects without structural MRI
@@ -368,7 +370,7 @@ def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
         return trans
     else:
         err = ("Invalid out parameter: %r. Needs to be 'params' or "
-              "'trans'." % out)
+               "'trans'." % out)
         raise ValueError(err)
 
 
@@ -533,7 +535,6 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
              "To improve performance, install the sklearn module.")
         errfunc = _point_cloud_error
 
-
     # for efficiency, define parameter specific error function
     param_info = (rotate, translate, scale)
     if param_info == (True, False, 0):
@@ -583,7 +584,7 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
         return _trans_from_params(param_info, est)
     else:
         err = ("Invalid out parameter: %r. Needs to be 'params' or "
-              "'trans'." % out)
+               "'trans'." % out)
         raise ValueError(err)
 
 
@@ -663,9 +664,17 @@ def _find_mri_paths(subject='fsaverage', subjects_dir=None):
 
     # BEM files
     paths['bem'] = bem = []
-    bem.append(head_bem_fname)
-    bem_file = pformat(bem_fname, name='inner_skull')
-    bem.append(bem_file)
+    path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
+    if os.path.exists(path):
+        bem.append('head')
+    bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
+                          subject=subject, name='*-bem')
+    re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
+                         name='(.+)')
+    for path in iglob(bem_pattern):
+        match = re.match(re_pattern, path)
+        name = match.group(1)
+        bem.append(name)
 
     # fiducials
     paths['fid'] = [fid_fname]
@@ -678,7 +687,7 @@ def _find_mri_paths(subject='fsaverage', subjects_dir=None):
         dup.append(fname)
 
     # check presence of required files
-    for ftype in ['surf', 'bem', 'fid', 'duplicate']:
+    for ftype in ['surf', 'fid', 'duplicate']:
         for fname in paths[ftype]:
             path = fname.format(subjects_dir=subjects_dir, subject=subject)
             path = os.path.realpath(path)
@@ -720,14 +729,56 @@ def _is_mri_subject(subject, subjects_dir=None):
     if not os.path.exists(fname):
         return False
 
-    fname = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
-                             name='*')
-    if len(glob(fname)) == 0:
-        return False
-
     return True
 
 
+def _mri_subject_has_bem(subject, subjects_dir=None):
+    """Check whether an mri subject has a file matching the bem pattern
+
+    Parameters
+    ----------
+    subject : str
+        Name of the subject.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    Returns
+    -------
+    has_bem_file : bool
+        Whether ``subject`` has a bem file.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
+                               name='*-bem')
+    fnames = glob(pattern)
+    return bool(len(fnames))
+
+
+def read_elp(fname):
+    """Read point coordinates from a text file
+
+    Parameters
+    ----------
+    fname : str
+        Absolute path to laser point file (*.txt).
+
+    Returns
+    -------
+    elp_points : array, shape (n_points, 3)
+        Point coordinates.
+    """
+    pattern = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
+    with open(fname) as fid:
+        elp_points = pattern.findall(fid.read())
+    elp_points = np.array(elp_points, dtype=float)
+    if elp_points.shape[1] != 3:
+        err = ("File %r does not contain 3 columns as required; got shape "
+               "%s." % (fname, elp_points.shape))
+        raise ValueError(err)
+
+    return elp_points
+
+
 def read_mri_cfg(subject, subjects_dir=None):
     """Read information from the cfg file of a scaled MRI brain
 
@@ -752,14 +803,14 @@ def read_mri_cfg(subject, subjects_dir=None):
         raise IOError(err)
 
     logger.info("Reading MRI cfg file %s" % fname)
-    config = RawConfigParser()
+    config = configparser.RawConfigParser()
     config.read(fname)
     n_params = config.getint("MRI Scaling", 'n_params')
     if n_params == 1:
         scale = config.getfloat("MRI Scaling", 'scale')
     elif n_params == 3:
         scale_str = config.get("MRI Scaling", 'scale')
-        scale = np.array(map(float, scale_str.split()))
+        scale = np.array([float(s) for s in scale_str.split()])
     else:
         raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
 
@@ -788,7 +839,7 @@ def _write_mri_config(fname, subject_from, subject_to, scale):
     else:
         n_params = 3
 
-    config = RawConfigParser()
+    config = configparser.RawConfigParser()
     config.add_section("MRI Scaling")
     config.set("MRI Scaling", 'subject_from', subject_from)
     config.set("MRI Scaling", 'subject_to', subject_to)
@@ -796,12 +847,80 @@ def _write_mri_config(fname, subject_from, subject_to, scale):
     if n_params == 1:
         config.set("MRI Scaling", 'scale', str(scale))
     else:
-        config.set("MRI Scaling", 'scale', ' '.join(map(str, scale)))
+        config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
     config.set("MRI Scaling", 'version', '1')
-    with open(fname, 'wb') as fid:
+    with open(fname, 'w') as fid:
         config.write(fid)
 
 
+def _scale_params(subject_to, subject_from, scale, subjects_dir):
+    subjects_dir = get_subjects_dir(subjects_dir, True)
+    if (subject_from is None) != (scale is None):
+        err = ("Need to provide either both subject_from and scale "
+               "parameters, or neither.")
+        raise TypeError(err)
+
+    if subject_from is None:
+        cfg = read_mri_cfg(subject_to, subjects_dir)
+        subject_from = cfg['subject_from']
+        n_params = cfg['n_params']
+        scale = cfg['scale']
+    else:
+        scale = np.asarray(scale)
+        if scale.ndim == 0:
+            n_params = 1
+        elif scale.shape == (3,):
+            n_params = 3
+        else:
+            err = ("Invalid shape for scale parameer. Need scalar or array of "
+                   "length 3. Got %s." % str(scale))
+            raise ValueError(err)
+
+    return subjects_dir, subject_from, n_params, scale
+
+
+def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
+              subjects_dir=None):
+    """Scale a bem file
+
+    Parameters
+    ----------
+    subject_to : str
+        Name of the scaled MRI subject (the destination mri subject).
+    bem_name : str
+        Name of the bem file. For example, to scale
+        ``fsaverage-inner_skull-bem.fif``, the bem_name would be
+        "inner_skull-bem".
+    subject_from : None | str
+        The subject from which to read the source space. If None, subject_from
+        is read from subject_to's config file.
+    scale : None | float | array, shape = (3,)
+        Scaling factor. Has to be specified if subjects_from is specified,
+        otherwise it is read from subject_to's config file.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+    """
+    subjects_dir, subject_from, _, scale = _scale_params(subject_to,
+                                                         subject_from, scale,
+                                                         subjects_dir)
+
+    src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
+                           name=bem_name)
+    dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
+                           name=bem_name)
+
+    if os.path.exists(dst):
+        raise IOError("File alredy exists: %s" % dst)
+
+    surfs = read_bem_surfaces(src)
+    if len(surfs) != 1:
+        err = ("BEM file with more than one surface: %r" % src)
+        raise NotImplementedError(err)
+    surf0 = surfs[0]
+    surf0['rr'] = surf0['rr'] * scale
+    write_bem_surface(dst, surf0)
+
+
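
A minimal usage sketch of the new helper (subject names and paths are
illustrative; the function is assumed to be exposed from mne.coreg):

    from mne.coreg import scale_bem

    # copy fsaverage's inner-skull BEM to a uniformly scaled MRI subject;
    # scale may also be a length-3 array for non-uniform scaling
    scale_bem(subject_to='fsaverage_scaled', bem_name='inner_skull-bem',
              subject_from='fsaverage', scale=0.9,
              subjects_dir='/path/to/subjects_dir')
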
 def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
                  scale=None, subjects_dir=None):
     """Scale labels to match a brain that was previously created by scaling
@@ -916,17 +1035,8 @@ def scale_mri(subject_from, subject_to, scale, overwrite=False,
         write_surface(dest, pts * scale, tri)
 
     # BEM files [in m]
-    for fname in paths['bem']:
-        src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
-        src = os.path.realpath(src)
-        surfs = read_bem_surfaces(src)
-        if len(surfs) != 1:
-            err = ("BEM file with more than one surface: %r" % src)
-            raise NotImplementedError(err)
-        surf0 = surfs[0]
-        surf0['rr'] = surf0['rr'] * scale
-        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
-        write_bem_surface(dest, surf0)
+    for bem_name in paths['bem']:
+        scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
 
     # fiducials [in m]
     for fname in paths['fid']:
@@ -956,7 +1066,7 @@ def scale_mri(subject_from, subject_to, scale, overwrite=False,
 
 
 def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
-                       subjects_dir=None):
+                       subjects_dir=None, n_jobs=1):
     """Scale a source space for an mri created with scale_mri()
 
     Parameters
@@ -977,28 +1087,15 @@ def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
         otherwise it is read from subject_to's config file.
     subjects_dir : None | str
         Override the SUBJECTS_DIR environment variable.
+    n_jobs : int
+        Number of jobs to run in parallel if recomputing distances (only
+        applies if scale is an array of length 3, and will not use more cores
+        than there are source spaces).
     """
-    subjects_dir = get_subjects_dir(subjects_dir, True)
-    if (subject_from is None) != (scale is None):
-        err = ("Need to provide either both subject_from and scale "
-               "parameters, or neither.")
-        raise TypeError(err)
-
-    if subject_from is None:
-        cfg = read_mri_cfg(subject_to, subjects_dir)
-        subject_from = cfg['subject_from']
-        n_params = cfg['n_params']
-        scale = cfg['scale']
-    else:
-        scale = np.asarray(scale)
-        if scale.ndim == 0:
-            n_params = 1
-        elif scale.shape == (3,):
-            n_params = 3
-        else:
-            err = ("Invalid shape for scale parameer. Need scalar or array of "
-                   "length 3. Got %s." % str(scale))
-            raise ValueError(err)
+    subjects_dir, subject_from, n_params, scale = _scale_params(subject_to,
+                                                                subject_from,
+                                                                scale,
+                                                                subjects_dir)
 
     # find the source space file names
     if src_name.isdigit():
@@ -1032,15 +1129,27 @@ def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
     logger.info("scaling source space %s:  %s -> %s", spacing, subject_from,
                 subject_to)
     logger.info("Scale factor: %s", scale)
+    add_dist = False
     for ss in sss:
-        ss['rr'] = ss['rr'] * scale
-        if norm_scale is not None:
-            nn = ss['nn'] * norm_scale
+        ss['subject_his_id'] = subject_to
+        ss['rr'] *= scale
+
+        # distances and patch info
+        if norm_scale is None:
+            if ss['dist'] is not None:
+                ss['dist'] *= scale
+                ss['nearest_dist'] *= scale
+                ss['dist_limit'] *= scale
+        else:
+            nn = ss['nn']
+            nn *= norm_scale
             norm = np.sqrt(np.sum(nn ** 2, 1))
             nn /= norm[:, np.newaxis]
-            ss['nn'] = nn
+            if ss['dist'] is not None:
+                add_dist = True
+
+    if add_dist:
+        logger.info("Recomputing distances, this might take a while")
+        add_source_space_distances(sss, sss[0]['dist_limit'], n_jobs)
 
-            ss['dist'] = None
-            ss['dist_limit'] = None
-            ss['nearest_dist'] = None
     write_source_spaces(dst, sss)
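
A sketch of the updated call, again assuming the function is exposed from
mne.coreg (subject and spacing names are illustrative):

    from mne.coreg import scale_source_space

    # a non-uniform (length-3) scale rescales the surface normals, so the
    # source-space distances are recomputed; n_jobs parallelizes that step
    scale_source_space('fsaverage_scaled', 'oct-6',
                       subject_from='fsaverage', scale=[0.9, 0.8, 0.95],
                       subjects_dir='/path/to/subjects_dir', n_jobs=2)
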
diff --git a/mne/cov.py b/mne/cov.py
index 4209041..2ebbd67 100644
--- a/mne/cov.py
+++ b/mne/cov.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
@@ -11,23 +11,31 @@ import warnings
 import numpy as np
 from scipy import linalg
 
-from . import fiff
-from .utils import logger, verbose
-from .fiff.write import start_file, end_file
-from .fiff.proj import (make_projector, proj_equal, activate_proj,
-                        _has_eeg_average_ref_proj)
-from .fiff import fiff_open
-from .fiff.pick import (pick_types, channel_indices_by_type, pick_channels_cov,
-                        pick_channels)
-from .fiff.constants import FIFF
+from .io.write import start_file, end_file
+from .io.proj import (make_projector, proj_equal, activate_proj,
+                      _has_eeg_average_ref_proj)
+from .io import fiff_open
+from .io.pick import (pick_types, channel_indices_by_type, pick_channels_cov,
+                      pick_channels)
+from .io.constants import FIFF
+from .io.meas_info import read_bad_channels
+from .io.proj import _read_proj, _write_proj
+from .io.tag import find_tag
+from .io.tree import dir_tree_find
+from .io.write import (start_block, end_block, write_int, write_name_list,
+                       write_double, write_float_matrix)
 from .epochs import _is_good
+from .utils import check_fname, logger, verbose
+from .externals.six.moves import zip
 
 
 def _check_covs_algebra(cov1, cov2):
     if cov1.ch_names != cov2.ch_names:
         raise ValueError('Both Covariance do not have the same list of '
                          'channels.')
-    if map(str, cov1['projs']) != map(str, cov2['projs']):
+    projs1 = [str(c) for c in cov1['projs']]
+    projs2 = [str(c) for c in cov2['projs']]
+    if projs1 != projs2:
         raise ValueError('Both Covariance do not have the same list of '
                          'SSP projections.')
 
@@ -55,7 +63,7 @@ class Covariance(dict):
 
         # Reading
         fid, tree, _ = fiff_open(fname)
-        self.update(fiff.read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV))
+        self.update(_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV))
         fid.close()
 
     @property
@@ -72,10 +80,12 @@ class Covariance(dict):
 
     def save(self, fname):
         """save covariance matrix in a FIF file"""
+        check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
+
         fid = start_file(fname)
 
         try:
-            fiff.write_cov(fid, self)
+            _write_cov(fid, self)
         except Exception as inst:
             os.remove(fname)
             raise inst
@@ -153,13 +163,16 @@ def read_cov(fname):
     Parameters
     ----------
     fname : string
-        The name of file containing the covariance matrix.
+        The name of file containing the covariance matrix. It should end with
+        -cov.fif or -cov.fif.gz.
 
     Returns
     -------
     cov : Covariance
         The noise covariance matrix.
     """
+    check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
+
     return Covariance(fname)
 
 
@@ -168,14 +181,14 @@ def read_cov(fname):
 
 def _check_n_samples(n_samples, n_chan):
     """Check to see if there are enough samples for reliable cov calc"""
-    n_samples_min = 10 * (n_chan + 1) / 2
+    n_samples_min = 10 * (n_chan + 1) // 2
     if n_samples <= 0:
         raise ValueError('No samples found to compute the covariance matrix')
     if n_samples < n_samples_min:
         text = ('Too few samples (required : %d got : %d), covariance '
                 'estimate may be unreliable' % (n_samples_min, n_samples))
         warnings.warn(text)
-        logger.warn(text)
+        logger.warning(text)
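
For instance, with 60 channels the floor is 10 * (60 + 1) // 2 == 305
samples; the switch to floor division keeps that threshold an integer under
Python 3's true-division semantics.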
 
 
 @verbose
@@ -216,7 +229,7 @@ def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
         Rejection parameters based on flatness of signal
         Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
         If flat is None then no rejection is done.
-    picks : array of int
+    picks : array-like of int
         Indices of channels to include (if None, all channels
         except bad channels are used).
     verbose : bool, str, int, or None
@@ -232,7 +245,7 @@ def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
     # Convert to samples
     start = 0 if tmin is None else int(floor(tmin * sfreq))
     if tmax is None:
-        stop = raw.last_samp - raw.first_samp
+        stop = int(raw.last_samp - raw.first_samp)
     else:
         stop = int(ceil(tmax * sfreq))
     step = int(ceil(tstep * raw.info['sfreq']))
@@ -347,7 +360,7 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
 
     # check for baseline correction
     for epochs_t in epochs:
-        if epochs_t.baseline is None:
+        if epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5:
             warnings.warn('Epochs are not baseline corrected, covariance '
                           'matrix may be inaccurate')
 
@@ -406,7 +419,7 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
     if keep_sample_mean:
         data /= n_samples_tot
     else:
-        n_samples_epoch = n_samples / n_epochs
+        n_samples_epoch = n_samples // n_epochs
         norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
         for i, mean in enumerate(data_mean):
             data -= 1.0 / n_epochs[i] * np.dot(mean, mean.T)
@@ -437,7 +450,7 @@ def write_cov(fname, cov):
     Parameters
     ----------
     fname : string
-        The name of the file
+        The name of the file. It should end with -cov.fif or -cov.fif.gz.
     cov : Covariance
         The noise covariance matrix
     """
@@ -553,7 +566,7 @@ def prepare_noise_cov(noise_cov, info, ch_names, verbose=None):
     return noise_cov
 
 
-def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude=None,
+def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
                proj=True, verbose=None):
     """Regularize noise covariance matrix
 
@@ -573,8 +586,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude=None,
         Regularization factor for MEG gradiometers.
     eeg : float
         Regularization factor for EEG.
-    exclude : list | None
-        List of channels to mark as bad. If None, bads channels
+    exclude : list | 'bads'
+        List of channels to mark as bad. If 'bads', bads channels
         are extracted from both info['bads'] and cov['bads'].
     proj : bool
         Apply or not projections to keep rank of data.
@@ -587,7 +600,11 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude=None,
         The regularized covariance matrix.
     """
     cov = cp.deepcopy(cov)
+
     if exclude is None:
+        raise ValueError('exclude must be a list of strings or "bads"')
+
+    if exclude == 'bads':
         exclude = info['bads'] + cov['bads']
 
     sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
@@ -667,7 +684,7 @@ def compute_whitener(noise_cov, info, picks=None, verbose=None):
         The noise covariance.
     info : dict
         The measurement info.
-    picks : array of int | None
+    picks : array-like of int | None
         The channels indices to include. If None the data
         channels in info, except bad channels, are used.
     verbose : bool, str, int, or None
@@ -714,7 +731,7 @@ def whiten_evoked(evoked, noise_cov, picks, diag=False):
         The evoked data
     noise_cov : instance of Covariance
         The noise covariance
-    picks : array of ints
+    picks : array-like of int
         The channel indices to whiten
     diag : bool
         If True, whiten using only the diagonal of the covariance
@@ -748,3 +765,140 @@ def whiten_evoked(evoked, noise_cov, picks, diag=False):
     W = np.dot(noise_cov['eigvec'].T, W)
     evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
     return evoked
+
+
+@verbose
+def _read_cov(fid, node, cov_kind, verbose=None):
+    """Read a noise covariance matrix"""
+    #   Find all covariance matrices
+    covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
+    if len(covs) == 0:
+        raise ValueError('No covariance matrices found')
+
+    #   Is any of the covariance matrices a noise covariance
+    for p in range(len(covs)):
+        tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
+
+        if tag is not None and int(tag.data) == cov_kind:
+            this = covs[p]
+
+            #   Find all the necessary data
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
+            if tag is None:
+                raise ValueError('Covariance matrix dimension not found')
+            dim = int(tag.data)
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
+            if tag is None:
+                nfree = -1
+            else:
+                nfree = int(tag.data)
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
+            if tag is None:
+                names = []
+            else:
+                names = tag.data.split(':')
+                if len(names) != dim:
+                    raise ValueError('Number of names does not match '
+                                     'covariance matrix dimension')
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
+            if tag is None:
+                tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
+                if tag is None:
+                    raise ValueError('No covariance matrix data found')
+                else:
+                    #   Diagonal is stored
+                    data = tag.data
+                    diagmat = True
+                    logger.info('    %d x %d diagonal covariance (kind = '
+                                '%d) found.' % (dim, dim, cov_kind))
+
+            else:
+                from scipy import sparse
+                if not sparse.issparse(tag.data):
+                    #   Lower diagonal is stored
+                    vals = tag.data
+                    data = np.zeros((dim, dim))
+                    data[np.tril(np.ones((dim, dim))) > 0] = vals
+                    data = data + data.T
+                    data.flat[::dim + 1] /= 2.0
+                    diagmat = False
+                    logger.info('    %d x %d full covariance (kind = %d) '
+                                'found.' % (dim, dim, cov_kind))
+                else:
+                    diagmat = False
+                    data = tag.data
+                    logger.info('    %d x %d sparse covariance (kind = %d)'
+                                ' found.' % (dim, dim, cov_kind))
+
+            #   Read the possibly precomputed decomposition
+            tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
+            tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
+            if tag1 is not None and tag2 is not None:
+                eig = tag1.data
+                eigvec = tag2.data
+            else:
+                eig = None
+                eigvec = None
+
+            #   Read the projection operator
+            projs = _read_proj(fid, this)
+
+            #   Read the bad channel list
+            bads = read_bad_channels(fid, this)
+
+            #   Put it together
+            cov = dict(kind=cov_kind, diag=diagmat, dim=dim, names=names,
+                       data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
+                       eigvec=eigvec)
+            return cov
+
+    logger.info('    Did not find the desired covariance matrix (kind = %d)'
+                % cov_kind)
+
+    return None
+
+
+def _write_cov(fid, cov):
+    """Write a noise covariance matrix"""
+    start_block(fid, FIFF.FIFFB_MNE_COV)
+
+    #   Dimensions etc.
+    write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
+    write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
+    if cov['nfree'] > 0:
+        write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
+
+    #   Channel names
+    if cov['names'] is not None and len(cov['names']) > 0:
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
+
+    #   Data
+    if cov['diag']:
+        write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
+    else:
+        # Store only lower part of covariance matrix
+        dim = cov['dim']
+        mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0
+        vals = cov['data'][mask].ravel()
+        write_double(fid, FIFF.FIFF_MNE_COV, vals)
+
+    #   Eigenvalues and vectors if present
+    if cov['eig'] is not None and cov['eigvec'] is not None:
+        write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
+        write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
+
+    #   Projection operator
+    if cov['projs'] is not None and len(cov['projs']) > 0:
+        _write_proj(fid, cov['projs'])
+
+    #   Bad channels
+    if cov['bads'] is not None and len(cov['bads']) > 0:
+        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
+        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    #   Done!
+    end_block(fid, FIFF.FIFFB_MNE_COV)
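
The lower-triangle round trip used by _read_cov/_write_cov can be verified
in isolation (a standalone numpy sketch, not part of the patch):

    import numpy as np

    dim = 3
    cov = np.array([[4., 1., 2.],
                    [1., 5., 3.],
                    [2., 3., 6.]])
    # write side: keep only the lower triangle, in row-major order
    vals = cov[np.tril(np.ones((dim, dim))) > 0]
    # read side: scatter back, mirror, then halve the doubled diagonal
    full = np.zeros((dim, dim))
    full[np.tril(np.ones((dim, dim))) > 0] = vals
    full = full + full.T
    full.flat[::dim + 1] /= 2.0
    assert np.allclose(full, cov)
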
diff --git a/mne/cuda.py b/mne/cuda.py
index a299463..b11efad 100644
--- a/mne/cuda.py
+++ b/mne/cuda.py
@@ -50,14 +50,14 @@ def init_cuda():
         import pycuda.gpuarray
         import pycuda.driver
     except ImportError:
-        logger.warn('module pycuda not found, CUDA not enabled')
+        logger.warning('module pycuda not found, CUDA not enabled')
     else:
         try:
             # Initialize CUDA; happens with importing autoinit
             import pycuda.autoinit
         except ImportError:
-            logger.warn('pycuda.autoinit could not be imported, likely '
-                        'a hardware error, CUDA not enabled')
+            logger.warning('pycuda.autoinit could not be imported, likely '
+                           'a hardware error, CUDA not enabled')
         else:
             # Make our multiply inplace kernel
             try:
@@ -84,16 +84,16 @@ def init_cuda():
                 try:
                     from scikits.cuda import fft as cudafft
                 except ImportError:
-                    logger.warn('module scikits.cuda not found, CUDA not '
-                                'enabled')
+                    logger.warning('module scikits.cuda not found, CUDA not '
+                                   'enabled')
                 else:
                     # Make sure we can use 64-bit FFTs
                     try:
                         fft_plan = cudafft.Plan(16, np.float64, np.complex128)
                         del fft_plan
                     except:
-                        logger.warn('Device does not support 64-bit FFTs, '
-                                    'CUDA not enabled')
+                        logger.warning('Device does not support 64-bit FFTs, '
+                                       'CUDA not enabled')
                     else:
                         cuda_capable = True
                         # Figure out limit for CUDA FFT calculations
@@ -279,9 +279,9 @@ def setup_cuda_fft_resample(n_jobs, W, new_len):
             # try setting up for float64
             try:
                 n_fft_x = len(W)
-                cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) / 2 + 1)
+                cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) // 2 + 1)
                 n_fft_y = new_len
-                cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) / 2 + 1)
+                cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) // 2 + 1)
                 fft_plan = cudafft.Plan(n_fft_x, np.float64, np.complex128)
                 ifft_plan = cudafft.Plan(n_fft_y, np.complex128, np.float64)
                 x_fft = gpuarray.zeros(max(cuda_fft_len_x,
@@ -347,12 +347,12 @@ def fft_resample(x, W, new_len, npad, to_remove,
     old_len = len(x)
     if not cuda_dict['use_cuda']:
         N = int(min(new_len, old_len))
-        sl_1 = slice((N + 1) / 2)
+        sl_1 = slice((N + 1) // 2)
         y_fft = np.zeros(new_len, np.complex128)
         x_fft = fft(x).ravel()
         x_fft *= W
         y_fft[sl_1] = x_fft[sl_1]
-        sl_2 = slice(-(N - 1) / 2, None)
+        sl_2 = slice(-(N - 1) // 2, None)
         y_fft[sl_2] = x_fft[sl_2]
         y = np.real(ifft(y_fft, overwrite_x=True)).ravel()
     else:
@@ -369,12 +369,12 @@ def fft_resample(x, W, new_len, npad, to_remove,
         # or taking just the real component...
         if new_len > old_len:
             if old_len % 2 == 0:
-                nyq = int((old_len - (old_len % 2)) / 2)
+                nyq = int((old_len - (old_len % 2)) // 2)
                 cuda_dict['halve_value'](cuda_dict['x_fft'],
                                         slice=slice(nyq, nyq + 1))
         else:
             if new_len % 2 == 0:
-                nyq = int((new_len - (new_len % 2)) / 2)
+                nyq = int((new_len - (new_len % 2)) // 2)
                 cuda_dict['real_value'](cuda_dict['x_fft'],
                                         slice=slice(nyq, nyq + 1))
         cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
diff --git a/mne/data/helmets/122m.fif.gz b/mne/data/helmets/122m.fif.gz
new file mode 100644
index 0000000..79d1773
Binary files /dev/null and b/mne/data/helmets/122m.fif.gz differ
diff --git a/mne/data/helmets/306m.fif.gz b/mne/data/helmets/306m.fif.gz
new file mode 100644
index 0000000..e57e840
Binary files /dev/null and b/mne/data/helmets/306m.fif.gz differ
diff --git a/mne/data/helmets/306m_rt.fif.gz b/mne/data/helmets/306m_rt.fif.gz
new file mode 100644
index 0000000..60d025b
Binary files /dev/null and b/mne/data/helmets/306m_rt.fif.gz differ
diff --git a/mne/data/helmets/BabySQUID.fif.gz b/mne/data/helmets/BabySQUID.fif.gz
new file mode 100644
index 0000000..3269ffb
Binary files /dev/null and b/mne/data/helmets/BabySQUID.fif.gz differ
diff --git a/mne/data/helmets/CTF_275.fif.gz b/mne/data/helmets/CTF_275.fif.gz
new file mode 100644
index 0000000..5656e9a
Binary files /dev/null and b/mne/data/helmets/CTF_275.fif.gz differ
diff --git a/mne/data/helmets/KIT.fif.gz b/mne/data/helmets/KIT.fif.gz
new file mode 100644
index 0000000..b508585
Binary files /dev/null and b/mne/data/helmets/KIT.fif.gz differ
diff --git a/mne/data/helmets/Magnes_2500wh.fif.gz b/mne/data/helmets/Magnes_2500wh.fif.gz
new file mode 100644
index 0000000..27275ae
Binary files /dev/null and b/mne/data/helmets/Magnes_2500wh.fif.gz differ
diff --git a/mne/data/helmets/Magnes_3600wh.fif.gz b/mne/data/helmets/Magnes_3600wh.fif.gz
new file mode 100644
index 0000000..c665595
Binary files /dev/null and b/mne/data/helmets/Magnes_3600wh.fif.gz differ
diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py
index 015d34c..38ea9f3 100644
--- a/mne/datasets/__init__.py
+++ b/mne/datasets/__init__.py
@@ -3,4 +3,6 @@
 
 from . import sample
 from . import megsim
-from . import spm_face
\ No newline at end of file
+from . import spm_face
+from . import eegbci
+from . import somato
diff --git a/mne/datasets/eegbci/__init__.py b/mne/datasets/eegbci/__init__.py
new file mode 100644
index 0000000..4a47873
--- /dev/null
+++ b/mne/datasets/eegbci/__init__.py
@@ -0,0 +1,4 @@
+"""EEG Motor Movement/Imagery Dataset
+"""
+
+from .eegbci import data_path, load_data
diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py
new file mode 100644
index 0000000..56ee632
--- /dev/null
+++ b/mne/datasets/eegbci/eegbci.py
@@ -0,0 +1,203 @@
+# Author: Martin Billinger <martin.billinger at tugraz.at>
+# License: BSD Style.
+
+import os
+from os import path as op
+from ...externals.six import string_types
+from ...utils import (_fetch_file, get_config, set_config,
+                      _url_to_local_path, logger)
+
+if 'raw_input' not in __builtins__:
+    raw_input = input
+
+
+EEGMI_URL = 'http://www.physionet.org/physiobank/database/eegmmidb/'
+
+
+def data_path(url, path=None, force_update=False, update_path=None):
+    """Get path to local copy of EEGMMI dataset URL
+
+    This is a low-level function useful for getting a local copy of a
+    remote EEGBCI dataset.
+
+    Parameters
+    ----------
+    url : str
+        The dataset to use.
+    path : None | str
+        Location of where to look for the EEGBCI data storing location.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_EEGBCI_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the EEGBCI dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MNE-eegbci-data"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+
+    Returns
+    -------
+    path : list of str
+        Local path to the given data file. This path is contained inside a list
+        of length one, for compatibility.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import eegbci
+        >>> url = 'http://www.physionet.org/physiobank/database/eegmmidb/'
+        >>> eegbci.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    This would download the given EEGBCI data file to the 'datasets' folder,
+    and prompt the user to save the 'datasets' path to the mne-python config,
+    if it isn't there already.
+
+    The EEGBCI dataset is documented in the following publication:
+        Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
+        Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
+        (BCI) System. IEEE TBME 51(6):1034-1043
+    The data set is available at PhysioNet:
+        Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
+        Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
+        PhysioToolkit, and PhysioNet: Components of a New Research Resource for
+        Complex Physiologic Signals. Circulation 101(23):e215-e220
+    """
+
+    if path is None:
+        # use an intelligent guess if it's not defined
+        def_path = op.realpath(op.join(op.dirname(__file__), '..', '..',
+                                       '..', 'examples'))
+
+        key = 'MNE_DATASETS_EEGBCI_PATH'
+        # backward compatibility
+        if get_config(key) is None:
+            key = 'MNE_DATA'
+
+        path = get_config(key, def_path)
+
+        # use the same for all datasets
+        if not op.exists(path) or not os.access(path, os.W_OK):
+            try:
+                os.mkdir(path)
+            except OSError:
+                try:
+                    logger.info("Checking for EEGBCI data in '~/mne_data'...")
+                    path = op.join(op.expanduser("~"), "mne_data")
+                    if not op.exists(path):
+                        logger.info("Trying to create "
+                                    "'~/mne_data' in home directory")
+                        os.mkdir(path)
+                except OSError:
+                    raise OSError("User does not have write permissions "
+                                  "at '%s', try giving the path as an argument "
+                                  "to data_path() where user has write "
+                                  "permissions, for ex:data_path"
+                                  "('/home/xyz/me2/')" % (path))
+
+    if not isinstance(path, string_types):
+        raise ValueError('path must be a string or None')
+
+    destination = _url_to_local_path(url, op.join(path, 'MNE-eegbci-data'))
+    destinations = [destination]
+
+    # Fetch the file
+    if not op.isfile(destination) or force_update:
+        if op.isfile(destination):
+            os.remove(destination)
+        if not op.isdir(op.dirname(destination)):
+            os.makedirs(op.dirname(destination))
+        _fetch_file(url, destination, print_destination=False)
+
+    # Offer to update the path
+    path = op.abspath(path)
+    if update_path is None:
+        if get_config(key, '') != path:
+            update_path = True
+            msg = ('Do you want to set the path:\n    %s\nas the default '
+                   'EEGBCI dataset path in the mne-python config ([y]/n)? '
+                   % path)
+            answer = raw_input(msg)
+            if answer.lower() == 'n':
+                update_path = False
+        else:
+            update_path = False
+    if update_path is True:
+        set_config(key, path)
+
+    return destinations
+
+
+def load_data(subject, runs, path=None, force_update=False, update_path=None,
+              base_url=EEGMI_URL):
+    """Get paths to local copy of EEGBCI dataset files
+
+    Parameters
+    ----------
+    subject : int
+        The subject to use. Can be in the range of 1-109 (inclusive).
+    runs : int | list of ints
+        The runs to use. Can be a list or a single number. The runs correspond
+        to the following tasks:
+              run | task
+        ----------+-----------------------------------------
+                1 | Baseline, eyes open
+                2 | Baseline, eyes closed
+         3, 7, 11 | Motor execution: left vs right hand
+         4, 8, 12 | Motor imagery: left vs right hand
+         5, 9, 13 | Motor execution: hands vs feet
+        6, 10, 14 | Motor imagery: hands vs feet
+    path : None | str
+        Location of where to look for the EEGBCI data storing location.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_EEGBCI_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the EEGBCI dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MEGSIM"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+
+    Returns
+    -------
+    paths : list
+        List of local data paths of the given type.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import eegbci
+        >>> eegbci.load_data(1, [4, 10, 14],\
+                             os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    This would download runs 4, 10, and 14 (hand/foot motor imagery) from
+    subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the
+    user to save the 'datasets' path to the mne-python config, if it isn't
+    there already.
+
+    The EEGBCI dataset is documented in the following publication:
+        Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
+        Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
+        (BCI) System. IEEE TBME 51(6):1034-1043
+    The data set is available at PhysioNet:
+        Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
+        Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
+        PhysioToolkit, and PhysioNet: Components of a New Research Resource for
+        Complex Physiologic Signals. Circulation 101(23):e215-e220
+    """
+    if not hasattr(runs, '__iter__'):
+        runs = [runs]
+
+    data_paths = []
+    for r in runs:
+        url = '{u}S{s:03d}/S{s:03d}R{r:02d}.edf'.format(u=base_url,
+                                                        s=subject, r=r)
+        data_paths.extend(data_path(url, path, force_update, update_path))
+
+    return data_paths
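
The URL template above expands as in this doctest-style example:

    >>> base = 'http://www.physionet.org/physiobank/database/eegmmidb/'
    >>> '{u}S{s:03d}/S{s:03d}R{r:02d}.edf'.format(u=base, s=1, r=4)
    'http://www.physionet.org/physiobank/database/eegmmidb/S001/S001R04.edf'
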
diff --git a/mne/datasets/megsim/megsim.py b/mne/datasets/megsim/megsim.py
index f7b86ed..3fa5fd3 100644
--- a/mne/datasets/megsim/megsim.py
+++ b/mne/datasets/megsim/megsim.py
@@ -1,6 +1,7 @@
 # Author: Eric Larson <larson.eric.d at gmail.com>
 # License: BSD Style.
 
+from ...externals.six import string_types
 import os
 from os import path as op
 import zipfile
@@ -63,20 +64,33 @@ def data_path(url, path=None, force_update=False, update_path=None):
 
     if path is None:
         # use an intelligent guess if it's not defined
-        def_path = op.abspath(op.join(op.dirname(__file__), '..', '..',
+        def_path = op.realpath(op.join(op.dirname(__file__), '..', '..',
                                       '..', 'examples'))
-        path = get_config('MNE_DATASETS_MEGSIM_PATH', None)
-        if path is None:
-            path = def_path
-            msg = ('No path entered, defaulting to download MEGSIM data to:\n'
-                   '    %s\nDo you want to continue ([y]/n)? '
-                   % path)
-            answer = raw_input(msg)
-            if answer.lower() == 'n':
-                raise ValueError('Please enter preferred path as '
-                                 'megsim.data_path(url, path)')
-
-    if not isinstance(path, basestring):
+        key = 'MNE_DATASETS_MEGSIM_PATH'
+        if get_config(key) is None:
+            key = 'MNE_DATA'
+        path = get_config(key, def_path)
+
+        # use the same for all datasets
+        if not op.exists(path) or not os.access(path, os.W_OK):
+            try:
+                os.mkdir(path)
+            except OSError:
+                try:
+                    logger.info("Checking for megsim data in '~/mne_data'...")
+                    path = op.join(op.expanduser("~"), "mne_data")
+                    if not op.exists(path):
+                        logger.info("Trying to create "
+                                    "'~/mne_data' in home directory")
+                        os.mkdir(path)
+                except OSError:
+                    raise OSError("User does not have write permissions "
+                                  "at '%s', try giving the path as an argument "
+                                  "to data_path() where user has write "
+                                  "permissions, for ex:data_path"
+                                  "('/home/xyz/me2/')" % (path))
+
+    if not isinstance(path, string_types):
         raise ValueError('path must be a string or None')
 
     destination = _url_to_local_path(url, op.join(path, 'MEGSIM'))
@@ -110,7 +124,7 @@ def data_path(url, path=None, force_update=False, update_path=None):
     # Offer to update the path
     path = op.abspath(path)
     if update_path is None:
-        if get_config('MNE_DATASETS_MEGSIM_PATH', '') != path:
+        if get_config(key, '') != path:
             update_path = True
             msg = ('Do you want to set the path:\n    %s\nas the default '
                    'MEGSIM dataset path in the mne-python config ([y]/n)? '
@@ -121,7 +135,7 @@ def data_path(url, path=None, force_update=False, update_path=None):
         else:
             update_path = False
     if update_path is True:
-        set_config('MNE_DATASETS_MEGSIM_PATH', path)
+        set_config(key, path)
 
     return destinations
 
diff --git a/mne/datasets/sample/sample.py b/mne/datasets/sample/sample.py
index 560dad0..d5a6532 100644
--- a/mne/datasets/sample/sample.py
+++ b/mne/datasets/sample/sample.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD Style.
@@ -6,8 +6,13 @@
 import numpy as np
 
 from ...utils import get_config, verbose
+from ...fixes import partial
 from ..utils import has_dataset, _data_path, _doc
 
+
+has_sample_data = partial(has_dataset, name='sample')
+
+
 @verbose
 def data_path(path=None, force_update=False, update_path=True,
               download=True, verbose=None):
@@ -21,8 +26,10 @@ data_path.__doc__ = _doc.format(name='sample',
 
 # Allow forcing of sample dataset skip (for tests) using:
 # `make test-no-sample`
-has_sample_data = has_dataset('sample')
-skip_sample = get_config('MNE_SKIP_SAMPLE_DATASET_TESTS', 'false') == 'true'
-requires_sample_data = np.testing.dec.skipif(not has_dataset('sample')
-                                             or skip_sample,
+def _skip_sample_data():
+    skip_sample = get_config('MNE_SKIP_SAMPLE_DATASET_TESTS', 'false') == 'true'
+    skip = skip_sample or not has_sample_data()
+    return skip
+
+requires_sample_data = np.testing.dec.skipif(_skip_sample_data,
                                              'Requires sample dataset')
diff --git a/mne/datasets/somato/__init__.py b/mne/datasets/somato/__init__.py
new file mode 100644
index 0000000..bdc4725
--- /dev/null
+++ b/mne/datasets/somato/__init__.py
@@ -0,0 +1,4 @@
+"""Somatosensory dataset
+"""
+
+from .somato import data_path, has_somato_data, requires_somato_data
diff --git a/mne/datasets/somato/somato.py b/mne/datasets/somato/somato.py
new file mode 100644
index 0000000..89b1781
--- /dev/null
+++ b/mne/datasets/somato/somato.py
@@ -0,0 +1,35 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import numpy as np
+
+from ...utils import get_config, verbose
+from ...fixes import partial
+from ..utils import has_dataset, _data_path, _doc
+
+
+has_somato_data = partial(has_dataset, name='somato')
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, verbose=None):
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='somato',
+                      download=download,
+                      verbose=verbose)
+
+data_path.__doc__ = _doc.format(name='somato',
+                                conf='MNE_DATASETS_SOMATO_PATH')
+
+# Allow forcing of somato dataset skip (for tests) using:
+# `make test-no-somato`
+def _skip_somato_data():
+    skip_somato = get_config('MNE_SKIP_SOMATO_DATASET_TESTS', 'false') == 'true'
+    skip = skip_somato or not has_somato_data()
+    return skip
+
+requires_somato_data = np.testing.dec.skipif(_skip_somato_data,
+                                             'Requires somato dataset')
diff --git a/mne/datasets/spm_face/spm_data.py b/mne/datasets/spm_face/spm_data.py
index c744026..7471e70 100644
--- a/mne/datasets/spm_face/spm_data.py
+++ b/mne/datasets/spm_face/spm_data.py
@@ -1,4 +1,4 @@
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD Style.
 
@@ -8,8 +8,10 @@ from ...utils import get_config, verbose
 from ...fixes import partial
 from ..utils import has_dataset, _data_path, _doc
 
+
 has_spm_data = partial(has_dataset, name='spm')
 
+
 @verbose
 def data_path(path=None, force_update=False, update_path=True,
               download=True, verbose=None):
@@ -23,8 +25,10 @@ data_path.__doc__ = _doc.format(name='spm',
 
 # Allow forcing of sample dataset skip (for tests) using:
 # `make test-no-sample`
-skip_spm = get_config('MNE_SKIP_SPM_DATASET_TESTS', 'false') == 'true'
-has_spm_data = has_dataset('spm')
-requires_spm_data = np.testing.dec.skipif(not has_spm_data
-                                          or skip_spm,
+def _skip_spm_sample_data():
+    skip_spm = get_config('MNE_SKIP_SPM_DATASET_TESTS', 'false') == 'true'
+    skip = skip_spm or not has_spm_data()
+    return skip
+
+requires_spm_data = np.testing.dec.skipif(_skip_spm_sample_data,
                                           'Requires spm dataset')
diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
index 1fd8c2b..b876730 100644
--- a/mne/datasets/utils.py
+++ b/mne/datasets/utils.py
@@ -1,9 +1,10 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
-#          Denis Egnemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 # License: BSD Style.
 
+from ..externals.six import string_types
 import os
 import os.path as op
 import shutil
@@ -62,50 +63,70 @@ def _dataset_version(path, name):
 
 
 def _data_path(path=None, force_update=False, update_path=True,
-               download=True, name=None, verbose=None):
+               download=True, name=None, check_version=True, verbose=None):
     """Aux function
     """
     key = {'sample': 'MNE_DATASETS_SAMPLE_PATH',
-           'spm': 'MNE_DATASETS_SPM_FACE_PATH'}[name]
+           'spm': 'MNE_DATASETS_SPM_FACE_PATH',
+           'somato': 'MNE_DATASETS_SOMATO_PATH'}[name]
 
     if path is None:
         # use an intelligent guess if it's not defined
         def_path = op.realpath(op.join(op.dirname(__file__),
                                        '..', '..', 'examples'))
 
+        # backward compatibility
+        if get_config(key) is None:
+            key = 'MNE_DATA'
+
         path = get_config(key, def_path)
-        # use the same for all datasets
-        if not os.path.exists(path):
-            path = def_path
 
-    if not isinstance(path, basestring):
+        # use the same for all datasets
+        if not op.exists(path) or not os.access(path, os.W_OK):
+            try:
+                os.mkdir(path)
+            except OSError:
+                try:
+                    logger.info("Checking for dataset in '~/mne_data'...")
+                    path = op.join(op.expanduser("~"), "mne_data")
+                    if not op.exists(path):
+                        logger.info("Trying to create "
+                                    "'~/mne_data' in home directory")
+                        os.mkdir(path)
+                except OSError:
+                    raise OSError("User does not have write permissions "
+                                  "at '%s', try giving the path as an argument "
+                                  "to data_path() where user has write "
+                                  "permissions, for ex:data_path"
+                                  "('/home/xyz/me2/')" % (path))
+
+    if not isinstance(path, string_types):
         raise ValueError('path must be a string or None')
-
     if name == 'sample':
         archive_name = "MNE-sample-data-processed.tar.gz"
-        url = "ftp://surfer.nmr.mgh.harvard.edu/pub/data/" + archive_name
+        url = "ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/" + archive_name
         folder_name = "MNE-sample-data"
         folder_path = op.join(path, folder_name)
-        rm_archive = False
     elif name == 'spm':
         archive_name = 'MNE-spm-face.tar.bz2'
         url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/' + archive_name
         folder_name = "MNE-spm-face"
         folder_path = op.join(path, folder_name)
-        rm_archive = False
+    elif name == 'somato':
+        archive_name = 'MNE-somato-data.tar.gz'
+        url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/' + archive_name
+        folder_name = "MNE-somato-data"
+        folder_path = op.join(path, folder_name)
     else:
         raise ValueError('Sorry, the dataset "%s" is not known.' % name)
-
+    rm_archive = False
     martinos_path = '/cluster/fusion/sample_data/' + archive_name
     neurospin_path = '/neurospin/tmp/gramfort/' + archive_name
-
     if not op.exists(folder_path) and not download:
         return ''
-
     if not op.exists(folder_path) or force_update:
-        logger.info('Sample data archive %s not found at:\n%s\n'
-                    'It will be downloaded and extracted at this location.'
-                    % (archive_name, folder_path))
+        logger.info('Downloading or reinstalling '
+                    'data archive %s at location %s' % (archive_name, path))
 
         if op.exists(martinos_path):
             archive_name = martinos_path
@@ -114,27 +135,28 @@ def _data_path(path=None, force_update=False, update_path=True,
         else:
             archive_name = op.join(path, archive_name)
             rm_archive = True
+            fetch_archive = True
             if op.exists(archive_name):
-                msg = ('Archive already exists at %r. Overwrite it '
-                       '(y/[n])? ' % archive_name)
+                msg = ('Archive already exists. Overwrite it (y/[n])? ')
                 answer = raw_input(msg)
                 if answer.lower() == 'y':
                     os.remove(archive_name)
                 else:
-                    raise IOError('Archive file already exists at target '
-                                  'location %r.' % archive_name)
+                    fetch_archive = False
 
-            _fetch_file(url, archive_name, print_destination=False)
+            if fetch_archive:
+                _fetch_file(url, archive_name, print_destination=False)
 
         if op.exists(folder_path):
             shutil.rmtree(folder_path)
 
-        logger.info('Decompressiong the archive: ' + archive_name)
+        logger.info('Decompressing the archive: ' + archive_name)
         logger.info('... please be patient, this can take some time')
         for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
             try:
                 tarfile.open(archive_name, 'r:%s' % ext).extractall(path=path)
-            except tarfile.ReadError, err:
+                break
+            except tarfile.ReadError as err:
                 logger.info('%s is %s trying "bz2"' % (archive_name, err))
 
         if rm_archive:
@@ -166,12 +188,12 @@ def _data_path(path=None, force_update=False, update_path=True,
         warn('Could not determine sample dataset version; dataset could\n'
              'be out of date. Please install the "distutils" package.')
     else:  # 0.7 < 0.7.git should be False, therefore strip
-        if LV(data_version) < LV(mne_version.strip('.git')):
+        if check_version and LV(data_version) < LV(mne_version.strip('.git')):
             warn('The {name} dataset (version {current}) is older than '
-                 'the mne-python (version {newest}). If the examples fail, '
-                 'you may need to update the {name} dataset by using'
-                 'force_update=True'.format(name=name, current=data_version,
-                                            newest=mne_version))
+                 'mne-python (version {newest}). If the examples fail, '
+                 'you may need to update the {name} dataset by using '
+                 'mne.datasets.{name}.data_path(force_update=True)'.format(
+                     name=name, current=data_version, newest=mne_version))
 
     return path
 
@@ -179,8 +201,7 @@ def _data_path(path=None, force_update=False, update_path=True,
 def has_dataset(name):
     """Helper for sample dataset presence"""
     endswith = {'sample': 'MNE-sample-data',
-                'spm': 'MNE-spm-face'}[name]
-    if _data_path(download=False, name=name).endswith(endswith):
-        return True
-    else:
-        return False
+                'spm': 'MNE-spm-face',
+                'somato': 'MNE-somato-data'}[name]
+    dp = _data_path(download=False, name=name, check_version=False)
+    return dp.endswith(endswith)
diff --git a/mne/decoding/__init__.py b/mne/decoding/__init__.py
index 0e79ecf..b0cb320 100644
--- a/mne/decoding/__init__.py
+++ b/mne/decoding/__init__.py
@@ -1,4 +1,6 @@
 from .classifier import Scaler, FilterEstimator
 from .classifier import PSDEstimator, ConcatenateChannels
 from .mixin import TransformerMixin
-from .csp import CSP
\ No newline at end of file
+from .csp import CSP
+from .ems import compute_ems
+from .time_gen import time_generalization
diff --git a/mne/decoding/classifier.py b/mne/decoding/classifier.py
index 56b699c..0f97357 100644
--- a/mne/decoding/classifier.py
+++ b/mne/decoding/classifier.py
@@ -1,5 +1,5 @@
 # Authors: Mainak Jas <mainak at neuro.hut.fi>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -7,10 +7,12 @@ import numpy as np
 
 from .mixin import TransformerMixin
 
+from .. import pick_types
 from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
                       band_stop_filter)
 from ..time_frequency import multitaper_psd
-from ..fiff import pick_types
+from ..externals import six
+from ..utils import _check_type_picks
 
 
 class Scaler(TransformerMixin):
@@ -99,7 +101,7 @@ class Scaler(TransformerMixin):
 
         X = np.atleast_3d(epochs_data)
 
-        for key, this_pick in self.picks_list_.iteritems():
+        for key, this_pick in six.iteritems(self.picks_list_):
             if self.with_mean:
                 X[:, this_pick, :] -= self.ch_mean_[key]
             if self.with_std:
@@ -188,11 +190,16 @@ class PSDEstimator(TransformerMixin):
         bandwidth.
     n_jobs : int
         Number of parallel jobs to use (only used if adaptive=True).
+    normalization : str
+        Either "full" or "length" (default). If "full", the PSD will
+        be normalized by the sampling rate as well as the length of
+        the signal (as in nitime).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
     def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
-                 adaptive=False, low_bias=True, n_jobs=1, verbose=None):
+                 adaptive=False, low_bias=True, n_jobs=1, normalization='length',
+                 verbose=None):
         self.sfreq = sfreq
         self.fmin = fmin
         self.fmax = fmax
@@ -201,6 +208,7 @@ class PSDEstimator(TransformerMixin):
         self.low_bias = low_bias
         self.n_jobs = n_jobs
         self.verbose = verbose
+        self.normalization = normalization
 
     def fit(self, epochs_data, y):
         """Compute power spectrum density (PSD) using a multi-taper method
@@ -247,9 +255,12 @@ class PSDEstimator(TransformerMixin):
         n_epochs, n_channels, n_times = epochs_data.shape
         X = epochs_data.reshape(n_epochs * n_channels, n_times)
 
-        psd, _ = multitaper_psd(X, self.sfreq, self.fmin, self.fmax,
-                                self.bandwidth, self.adaptive, self.low_bias,
-                                self.n_jobs, self.verbose)
+        psd, _ = multitaper_psd(x=X, sfreq=self.sfreq, fmin=self.fmin,
+                                fmax=self.fmax, bandwidth=self.bandwidth,
+                                adaptive=self.adaptive, low_bias=self.low_bias,
+                                n_jobs=self.n_jobs,
+                                normalization=self.normalization,
+                                verbose=self.verbose)
 
         _, n_freqs = psd.shape
         psd = psd.reshape(n_epochs, n_channels, n_freqs)
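
A sketch of the new option (the `epochs` object here is an assumed
mne.Epochs instance):

    from mne.decoding import PSDEstimator

    psd = PSDEstimator(sfreq=epochs.info['sfreq'], fmin=8., fmax=30.,
                       normalization='length')
    X = psd.fit_transform(epochs.get_data(), epochs.events[:, -1])
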
@@ -282,7 +293,7 @@ class FilterEstimator(TransformerMixin):
     h_freq : float | None
         High cut-off frequency in Hz. If None the data are only
         high-passed.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices of channels to filter. If None only the data (MEG/EEG)
         channels will be filtered.
     filter_length : str (Default: '10s') | int | None
@@ -316,7 +327,7 @@ class FilterEstimator(TransformerMixin):
         self.info = info
         self.l_freq = l_freq
         self.h_freq = h_freq
-        self.picks = picks
+        self.picks = _check_type_picks(picks)
         self.filter_length = filter_length
         self.l_trans_bandwidth = l_trans_bandwidth
         self.h_trans_bandwidth = h_trans_bandwidth
diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py
index 167bf7d..d84334e 100644
--- a/mne/decoding/csp.py
+++ b/mne/decoding/csp.py
@@ -1,5 +1,5 @@
 # Authors: Romain Trachel <romain.trachel at inria.fr>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -27,6 +27,9 @@ class CSP(TransformerMixin):
         if float, shrinkage covariance is used (0 <= shrinkage <= 1).
         if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('lws') or
                 Oracle Approximating Shrinkage ('oas')
+    log : bool
+        If true, apply log to standardize the features.
+        If false, features are just z-scored.
 
     Attributes
     ----------
@@ -43,9 +46,10 @@ class CSP(TransformerMixin):
     of the abnormal components in the clinical EEG. Electroencephalography
     and Clinical Neurophysiology, 79(6):440--447, December 1991.
     """
-    def __init__(self, n_components=4, reg=None):
+    def __init__(self, n_components=4, reg=None, log=True):
         self.n_components = n_components
         self.reg = reg
+        self.log = log
         self.filters_ = None
         self.patterns_ = None
         self.mean_ = None
@@ -200,6 +204,9 @@ class CSP(TransformerMixin):
 
         # compute features (mean band power)
         X = (X ** 2).mean(axis=-1)
-        X -= self.mean_
-        X /= self.std_
+        if self.log:
+            X = np.log(X)
+        else:
+            X -= self.mean_
+            X /= self.std_
         return X
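
A minimal sketch of the new option (X and y stand for an epochs data array
of shape (n_epochs, n_channels, n_times) and a label vector):

    from mne.decoding import CSP

    csp = CSP(n_components=4, reg=None, log=True)
    # with log=True the mean band power of each CSP component is
    # log-transformed instead of z-scored
    features = csp.fit_transform(X, y)  # shape (n_epochs, 4)
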
diff --git a/mne/decoding/ems.py b/mne/decoding/ems.py
new file mode 100644
index 0000000..7e74b9f
--- /dev/null
+++ b/mne/decoding/ems.py
@@ -0,0 +1,117 @@
+# Author: Denis Engemann <denis.engemann at gmail.com>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from ..utils import logger, verbose
+from ..fixes import Counter
+from ..parallel import parallel_func
+from .. import pick_types, pick_info
+
+
+@verbose
+def compute_ems(epochs, conditions=None, picks=None, verbose=None, n_jobs=1):
+    """Compute event-matched spatial filter on epochs
+
+    This version operates on the entire time course. No time window needs to
+    be specified. The result is a spatial filter at each time point and a
+    corresponding time course. Intuitively, the result gives the similarity
+    between the filter at each time point and the data vector (sensors) at
+    that time point.
+
+    References
+    ----------
+    [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
+        multi-sensor data to a single time course that reveals experimental
+        effects", BMC Neuroscience 2013, 14:122
+
+    Parameters
+    ----------
+    epochs : instance of mne.Epochs
+        The epochs.
+    conditions : list of str | None
+        If a list of strings, strings must match the
+        epochs.event_id's key as well as the number of conditions supported
+        by the objective_function. If None keys in epochs.event_id are used.
+    picks : array-like of int | None
+        Channels to be included. If None only good data channels are used.
+        Defaults to None
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to self.verbose.
+
+    Returns
+    -------
+    surrogate_trials : ndarray, shape (n_trials, n_times)
+        The trial surrogates.
+    mean_spatial_filter : ndarray, shape (n_channels, n_times)
+        The set of spatial filters.
+    conditions : ndarray, shape (n_epochs,)
+        The conditions used. Values correspond to original event ids.
+    """
+    logger.info('...computing surrogate time series. This can take some time')
+    if picks is None:
+        picks = pick_types(epochs.info, meg=True, eeg=True)
+
+    if len(set(Counter(epochs.events[:, 2]).values())) != 1:
+        raise ValueError('This function requires an equal number of epochs '
+                         'in each condition. Please consider using '
+                         '`epochs.equalize_event_counts`')
+
+    if conditions is None:
+        conditions = epochs.event_id.keys()
+        epochs = epochs.copy()
+    else:
+        epochs = epochs[conditions]
+
+    epochs.drop_bad_epochs()
+
+    if len(conditions) != 2:
+        raise ValueError('Currently this function expects exactly 2 '
+                         'conditions but you gave me %i' %
+                         len(conditions))
+
+    ev = epochs.events[:, 2]
+    # sort to avoid order-dependent mappings between conditions and indices
+    conditions = sorted(conditions)
+    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
+
+    info = pick_info(epochs.info, picks)
+    data = epochs.get_data()[:, picks]
+
+    # Scale (z-score) the data by channel type
+    for ch_type in ['mag', 'grad', 'eeg']:
+        if ch_type in epochs:
+            if ch_type == 'eeg':
+                this_picks = pick_types(info, meg=False, eeg=True)
+            else:
+                this_picks = pick_types(info, meg=ch_type, eeg=False)
+            data[:, this_picks] /= np.std(data[:, this_picks])
+
+    from sklearn.cross_validation import LeaveOneOut
+
+    parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
+    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
+                   for train, test in LeaveOneOut(len(data)))
+
+    surrogate_trials, spatial_filter = zip(*out)
+    surrogate_trials = np.array(surrogate_trials)
+    spatial_filter = np.mean(spatial_filter, axis=0)
+
+    return surrogate_trials, spatial_filter, epochs.events[:, 2]
+
+
+def _ems_diff(data0, data1):
+    """default diff objective function"""
+    return np.mean(data0, axis=0) - np.mean(data1, axis=0)
+
+
+def _run_ems(objective_function, data, cond_idx, train, test):
+    d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
+    d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
+    # compute surrogates
+    return np.sum(data[test[0]] * d, axis=0), d
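
A hypothetical end-to-end use of ``compute_ems``, mirroring the tests added below (``raw_fname`` and ``event_name`` are assumed to point at the usual test files):

    import mne
    from mne.decoding import compute_ems

    raw = mne.io.Raw(raw_fname, preload=False)
    events = mne.read_events(event_name)
    picks = mne.pick_types(raw.info, meg=True)
    epochs = mne.Epochs(raw, events, dict(aud_l=1, vis_l=3), -0.2, 0.5,
                        picks=picks, baseline=(None, 0), preload=True)
    # equal trial counts per condition are required (see the check above)
    epochs.equalize_event_counts(epochs.event_id, copy=False)
    surrogates, filters, conditions = compute_ems(epochs)
    # surrogates: (n_trials, n_times); filters: (n_channels, n_times)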
diff --git a/mne/decoding/tests/test_classifier.py b/mne/decoding/tests/test_classifier.py
index c30b171..aaed3fb 100644
--- a/mne/decoding/tests/test_classifier.py
+++ b/mne/decoding/tests/test_classifier.py
@@ -9,7 +9,7 @@ import numpy as np
 from nose.tools import assert_true, assert_raises
 from numpy.testing import assert_array_equal
 
-from mne import fiff, read_events, Epochs
+from mne import io, read_events, Epochs, pick_types
 from mne.decoding.classifier import Scaler, FilterEstimator
 from mne.decoding.classifier import PSDEstimator, ConcatenateChannels
 
@@ -19,7 +19,7 @@ tmin, tmax = -0.2, 0.5
 event_id = dict(aud_l=1, vis_l=3)
 start, stop = 0, 8
 
-data_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_dir, 'test_raw.fif')
 event_name = op.join(data_dir, 'test-eve.fif')
 
@@ -27,9 +27,9 @@ event_name = op.join(data_dir, 'test-eve.fif')
 def test_scaler():
     """Test methods of Scaler
     """
-    raw = fiff.Raw(raw_fname, preload=False)
+    raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')
     picks = picks[1:13:3]
 
@@ -38,7 +38,9 @@ def test_scaler():
     epochs_data = epochs.get_data()
     scaler = Scaler(epochs.info)
     y = epochs.events[:, -1]
-    with warnings.catch_warnings(True):  # np invalid divide value warnings
+
+    # np invalid divide value warnings
+    with warnings.catch_warnings(record=True):
         X = scaler.fit_transform(epochs_data, y)
         assert_true(X.shape == epochs_data.shape)
         X2 = scaler.fit(epochs_data, y).transform(epochs_data)
@@ -53,9 +55,9 @@ def test_scaler():
 def test_filterestimator():
     """Test methods of FilterEstimator
     """
-    raw = fiff.Raw(raw_fname, preload=False)
+    raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -63,7 +65,7 @@ def test_filterestimator():
     epochs_data = epochs.get_data()
     filt = FilterEstimator(epochs.info, 1, 40)
     y = epochs.events[:, -1]
-    with warnings.catch_warnings(True):  # stop freq attenuation warning
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
         X = filt.fit_transform(epochs_data, y)
         assert_true(X.shape == epochs_data.shape)
         assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)
@@ -76,9 +78,9 @@ def test_filterestimator():
 def test_psdestimator():
     """Test methods of PSDEstimator
     """
-    raw = fiff.Raw(raw_fname, preload=False)
+    raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -99,12 +101,12 @@ def test_psdestimator():
 def test_concatenatechannels():
     """Test methods of ConcatenateChannels
     """
-    raw = fiff.Raw(raw_fname, preload=False)
+    raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')
     picks = picks[1:13:3]
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
         epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                         baseline=(None, 0), preload=True)
     epochs_data = epochs.get_data()
diff --git a/mne/decoding/tests/test_csp.py b/mne/decoding/tests/test_csp.py
index 74cb38a..11bcb3d 100644
--- a/mne/decoding/tests/test_csp.py
+++ b/mne/decoding/tests/test_csp.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #         Romain Trachel <romain.trachel at inria.fr>
 #
 # License: BSD (3-clause)
@@ -9,13 +9,13 @@ from nose.tools import assert_true, assert_raises
 import numpy as np
 from numpy.testing import assert_array_almost_equal
 
-from mne import fiff, Epochs, read_events
+from mne import io, Epochs, read_events, pick_types
 from mne.decoding.csp import CSP
 from mne.utils import _TempDir, requires_sklearn
 
 tempdir = _TempDir()
 
-data_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_dir, 'test_raw.fif')
 event_name = op.join(data_dir, 'test-eve.fif')
 
@@ -28,9 +28,9 @@ start, stop = 0, 8  # if stop is too small pca may fail in some cases, but
 def test_csp():
     """Test Common Spatial Patterns algorithm on epochs
     """
-    raw = fiff.Raw(raw_fname, preload=False)
+    raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -64,9 +64,9 @@ def test_csp():
 def test_regularized_csp():
     """Test Common Spatial Patterns algorithm using regularized covariance
     """
-    raw = fiff.Raw(raw_fname, preload=False)
+    raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                             eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
diff --git a/mne/decoding/tests/test_ems.py b/mne/decoding/tests/test_ems.py
new file mode 100644
index 0000000..386de4a
--- /dev/null
+++ b/mne/decoding/tests/test_ems.py
@@ -0,0 +1,58 @@
+# Author: Denis A. Engemann <d.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from nose.tools import assert_equal, assert_raises
+
+from mne import io, Epochs, read_events, pick_types
+from mne.utils import _TempDir, requires_sklearn
+from mne.decoding import compute_ems
+
+tempdir = _TempDir()
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+curdir = op.join(op.dirname(__file__))
+
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+
+
+@requires_sklearn
+def test_ems():
+    """Test event-matched spatial filters"""
+    raw = io.Raw(raw_fname, preload=False)
+
+    # create unequal number of events
+    events = read_events(event_name)
+    events[-2, 2] = 3
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
+    epochs.equalize_event_counts(epochs.event_id, copy=False)
+
+    assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
+    surrogates, filters, conditions = compute_ems(epochs)
+    assert_equal(list(set(conditions)), [1, 3])
+
+    events = read_events(event_name)
+    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
+    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    epochs.equalize_event_counts(epochs.event_id, copy=False)
+
+    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
+
+    assert_raises(ValueError, compute_ems, epochs)
+    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
+    assert_equal(n_expected, len(surrogates))
+    assert_equal(n_expected, len(conditions))
+    assert_equal(list(set(conditions)), [2, 3])
+    raw.close()
diff --git a/mne/decoding/tests/test_time_gen.py b/mne/decoding/tests/test_time_gen.py
new file mode 100644
index 0000000..d84e42b
--- /dev/null
+++ b/mne/decoding/tests/test_time_gen.py
@@ -0,0 +1,44 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import warnings
+import os.path as op
+
+from nose.tools import assert_true
+
+from mne import io, Epochs, read_events, pick_types
+from mne.utils import _TempDir, requires_sklearn
+from mne.decoding import time_generalization
+
+tempdir = _TempDir()
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+
+
+@requires_sklearn
+def test_time_generalization():
+    """Test time generalization decoding
+    """
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    decim = 30
+
+    with warnings.catch_warnings(record=True) as w:
+        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), preload=True, decim=decim)
+
+        epochs_list = [epochs[k] for k in event_id.keys()]
+        scores = time_generalization(epochs_list, cv=2, random_state=42)
+        n_times = len(epochs.times)
+        assert_true(scores.shape == (n_times, n_times))
+        assert_true(scores.max() <= 1.)
+        assert_true(scores.min() >= 0.)
diff --git a/mne/decoding/time_gen.py b/mne/decoding/time_gen.py
new file mode 100644
index 0000000..c17c74d
--- /dev/null
+++ b/mne/decoding/time_gen.py
@@ -0,0 +1,123 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from ..utils import logger, verbose
+from ..parallel import parallel_func
+from ..io.pick import channel_type, pick_types
+
+
+def _time_gen_one_fold(clf, X, y, train, test, scoring):
+    """Aux function of time_generalization"""
+    from sklearn.metrics import SCORERS
+    n_times = X.shape[2]
+    scores = np.zeros((n_times, n_times))
+    scorer = SCORERS[scoring]
+
+    for t_train in range(n_times):
+        X_train = X[train, :, t_train]
+        clf.fit(X_train, y[train])
+        for t_test in range(n_times):
+            X_test = X[test, :, t_test]
+            scores[t_test, t_train] += scorer(clf, X_test, y[test])
+
+    return scores
+
+
+@verbose
+def time_generalization(epochs_list, clf=None, cv=5, scoring="roc_auc",
+                        shuffle=True, random_state=None, n_jobs=1,
+                        verbose=None):
+    """Fit decoder at each time instant and test at all others
+
+    The function returns the cross-validation scores when the train set
+    is from one time instant and the test from all others.
+
+    The decoding will be done using all available data channels, but
+    will only work if a single channel type is available. For example,
+    epochs should contain only gradiometers.
+
+    Parameters
+    ----------
+    epochs_list : list of Epochs
+        The epochs in all the conditions.
+    clf : object | None
+        An object following the scikit-learn estimator API (fit & predict).
+        If None, the classifier will be a linear SVM (C=1.) after
+        feature standardization.
+    cv : integer or cross-validation generator, optional
+        If an integer is passed, it is the number of folds (default 5).
+        Specific cross-validation objects can be passed, see
+        sklearn.cross_validation module for the list of possible objects.
+    scoring : {string, callable, None}, optional, default: "roc_auc"
+        A string (see model evaluation documentation in scikit-learn) or
+        a scorer callable object / function with signature
+        ``scorer(estimator, X, y)``.
+    shuffle : bool
+        If True, shuffle the epochs before splitting them in folds.
+    random_state : None | int
+        The random state used to shuffle the epochs. Ignored if
+        shuffle is False.
+    n_jobs : int
+        Number of jobs to run in parallel. Each fold is fit
+        in parallel.
+
+    Returns
+    -------
+    scores : array, shape (n_times, n_times)
+        The scores averaged across folds. scores[i, j] contains
+        the generalization score when training at time j and testing
+        at time i. The diagonal contains the cross-validation score
+        obtained when training and testing at the same time instant.
+
+    Notes
+    -----
+    The function implements the method used in:
+
+    Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
+    and Stanislas Dehaene, "Two distinct dynamic modes subtend the detection of
+    unexpected sounds", PLOS ONE, 2013
+    """
+    from sklearn.base import clone
+    from sklearn.utils import check_random_state
+    from sklearn.svm import SVC
+    from sklearn.pipeline import Pipeline
+    from sklearn.preprocessing import StandardScaler
+    from sklearn.cross_validation import check_cv
+
+    if clf is None:
+        scaler = StandardScaler()
+        svc = SVC(C=1, kernel='linear')
+        clf = Pipeline([('scaler', scaler), ('svc', svc)])
+
+    info = epochs_list[0].info
+    data_picks = pick_types(info, meg=True, eeg=True, exclude='bads')
+
+    # Make arrays X and y such that :
+    # X is 3d with X.shape[0] is the total number of epochs to classify
+    # y is filled with integers coding for the class to predict
+    # We must have X.shape[0] equal to y.shape[0]
+    X = [e.get_data()[:, data_picks, :] for e in epochs_list]
+    y = [k * np.ones(len(this_X)) for k, this_X in enumerate(X)]
+    X = np.concatenate(X)
+    y = np.concatenate(y)
+
+    cv = check_cv(cv, X, y, classifier=True)
+
+    ch_types = set([channel_type(info, idx) for idx in data_picks])
+    logger.info('Running time generalization on %s epochs using %s.' %
+                (len(X), ch_types.pop()))
+
+    if shuffle:
+        rng = check_random_state(random_state)
+        order = np.argsort(rng.randn(len(X)))
+        X = X[order]
+        y = y[order]
+
+    parallel, p_time_gen, _ = parallel_func(_time_gen_one_fold, n_jobs)
+    scores = parallel(p_time_gen(clone(clf), X, y, train, test, scoring)
+                      for train, test in cv)
+    scores = np.mean(scores, axis=0)
+    return scores
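
A sketch of calling ``time_generalization`` on two conditions, assuming ``raw``, ``events`` and ``picks`` as in the test above; ``decim`` keeps the time grid small so the n_times x n_times fitting stays cheap:

    epochs = mne.Epochs(raw, events, dict(aud_l=1, vis_l=3), -0.2, 0.5,
                        picks=picks, baseline=(None, 0), preload=True,
                        decim=30)
    epochs_list = [epochs[k] for k in epochs.event_id]
    scores = time_generalization(epochs_list, cv=5, n_jobs=1)
    # scores[i, j]: train at time j, test at time i; the diagonal is the
    # ordinary cross-validation score at each instant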
diff --git a/mne/dipole.py b/mne/dipole.py
index 25a5363..faf0c44 100644
--- a/mne/dipole.py
+++ b/mne/dipole.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: Simplified BSD
 
diff --git a/mne/epochs.py b/mne/epochs.py
index d82f20a..bac78b2 100644
--- a/mne/epochs.py
+++ b/mne/epochs.py
@@ -1,43 +1,49 @@
 """Tools for working with epoched data"""
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #          Mainak Jas <mainak at neuro.hut.fi>
 #
 # License: BSD (3-clause)
 
+from .externals.six import string_types
+
 import copy as cp
 import warnings
+import json
 
 import numpy as np
-from copy import deepcopy
-
-from .fiff.write import (start_file, start_block, end_file, end_block,
-                         write_int, write_float_matrix, write_float,
-                         write_id, write_string)
-from .fiff.meas_info import read_meas_info, write_meas_info
-from .fiff.open import fiff_open
-from .fiff.raw import _time_as_index, _index_as_time
-from .fiff.tree import dir_tree_find
-from .fiff.tag import read_tag
-from .fiff import Evoked, FIFF
-from .fiff.pick import (pick_types, channel_indices_by_type, channel_type,
-                        pick_channels)
-from .fiff.proj import setup_proj, ProjMixin
-from .fiff.evoked import aspect_rev
+
+from .io.write import (start_file, start_block, end_file, end_block,
+                       write_int, write_float_matrix, write_float,
+                       write_id, write_string)
+from .io.meas_info import read_meas_info, write_meas_info, _merge_info
+from .io.open import fiff_open
+from .io.tree import dir_tree_find
+from .io.tag import read_tag
+from .io.constants import FIFF
+from .io.pick import (pick_types, channel_indices_by_type, channel_type,
+                      pick_channels, pick_info)
+from .io.proj import setup_proj, ProjMixin
+from .io.base import _BaseRaw, _time_as_index, _index_as_time
+from .evoked import EvokedArray, aspect_rev
 from .baseline import rescale
 from .utils import (check_random_state, _check_pandas_index_arguments,
-                    _check_pandas_installed)
+                    _check_pandas_installed, object_hash)
+from .channels import ContainsMixin, PickDropChannelsMixin
 from .filter import resample, detrend
 from .event import _read_events_fif
 from .fixes import in1d
-from .viz import _mutable_defaults, plot_epochs
-from .utils import logger, verbose
+from .viz import _mutable_defaults, plot_epochs, _drop_log_stats
+from .utils import check_fname, logger, verbose
+from .externals import six
+from .externals.six.moves import zip
+from .utils import deprecated, _check_type_picks
 
 
-class _BaseEpochs(ProjMixin):
+class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
     """Abstract base class for Epochs-type classes
 
     This class provides basic functionality and should never be instantiated
@@ -54,7 +60,7 @@ class _BaseEpochs(ProjMixin):
         if isinstance(event_id, dict):
             if not all([isinstance(v, int) for v in event_id.values()]):
                 raise ValueError('Event IDs must be of type integer')
-            if not all([isinstance(k, basestring) for k in event_id]):
+            if not all([isinstance(k, string_types) for k in event_id]):
                 raise ValueError('Event names must be of type str')
             self.event_id = event_id
         elif isinstance(event_id, list):
@@ -77,6 +83,21 @@ class _BaseEpochs(ProjMixin):
         if not detrend in [None, 0, 1]:
             raise ValueError('detrend must be None, 0, or 1')
 
+        # check that baseline is in available data
+        if baseline is not None:
+            baseline_tmin, baseline_tmax = baseline
+            tstep = 1. / info['sfreq']
+            if baseline_tmin is not None:
+                if baseline_tmin < tmin - tstep:
+                    err = ("Baseline interval (tmin = %s) is outside of epoch "
+                           "data (tmin = %s)" % (baseline_tmin, tmin))
+                    raise ValueError(err)
+            if baseline_tmax is not None:
+                if baseline_tmax > tmax + tstep:
+                    err = ("Baseline interval (tmax = %s) is outside of epoch "
+                           "data (tmax = %s)" % (baseline_tmax, tmax))
+                    raise ValueError(err)
+
         self.tmin = tmin
         self.tmax = tmax
         self.baseline = baseline
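
In effect (illustration, not upstream code): a baseline interval that falls outside the epoch window now fails early instead of silently rescaling with missing data:

    # epoch covers -0.2 .. 0.5 s; a baseline starting at -0.5 s is rejected
    epochs = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
                        baseline=(-0.5, 0))  # raises ValueError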
@@ -87,17 +108,18 @@ class _BaseEpochs(ProjMixin):
         self.decim = decim = int(decim)
         self._bad_dropped = False
         self.drop_log = None
+        self.selection = None
         self.detrend = detrend
 
         # Handle measurement info
         self.info = info
         if picks is None:
-            picks = range(len(self.info['ch_names']))
+            picks = list(range(len(self.info['ch_names'])))
         else:
             self.info['chs'] = [self.info['chs'][k] for k in picks]
             self.info['ch_names'] = [self.info['ch_names'][k] for k in picks]
             self.info['nchan'] = len(picks)
-        self.picks = picks
+        self.picks = _check_type_picks(picks)
 
         if len(picks) == 0:
             raise ValueError("Picks cannot be empty.")
@@ -187,6 +209,33 @@ class _BaseEpochs(ProjMixin):
                             self.reject, self.flat, full_report=True,
                             ignore_chs=self.info['bads'])
 
+    @verbose
+    def _preprocess(self, epoch, verbose=None):
+        """ Aux Function
+        """
+        # Detrend
+        if self.detrend is not None:
+            picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                               ref_meg=False, eog=False, ecg=False,
+                               emg=False, exclude=[])
+            epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
+
+        # Baseline correct
+        picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                           ref_meg=True, eog=True, ecg=True,
+                           emg=True, exclude=[])
+        epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline,
+                               'mean', copy=False, verbose=verbose)
+
+        # handle offset
+        if self._offset is not None:
+            epoch += self._offset
+
+        # Decimate
+        if self.decim > 1:
+            epoch = epoch[:, self._decim_idx]
+        return epoch
+
     def get_data(self):
         """Get all epochs as a 3D array
 
@@ -207,18 +256,11 @@ class _BaseEpochs(ProjMixin):
         self._current = 0
 
         while True:
-            evoked = Evoked(None)
-            evoked.info = cp.deepcopy(self.info)
+            data, event_id = self.next(True)
+            tmin = self.times[0]
+            info = cp.deepcopy(self.info)
 
-            evoked.times = self.times.copy()
-            evoked.nave = 1
-            evoked.first = int(self.times[0] * self.info['sfreq'])
-            evoked.last = evoked.first + len(self.times) - 1
-
-            evoked.data, event_id = self.next(True)
-            evoked.comment = str(event_id)
-
-            yield evoked
+            yield EvokedArray(data, info, tmin, comment=str(event_id))
 
     def subtract_evoked(self, evoked=None):
         """Subtract an evoked response from each epoch
@@ -233,13 +275,13 @@ class _BaseEpochs(ProjMixin):
 
         Parameters
         ----------
-        evoked : instance of mne.fiff.Evoked | None
+        evoked : instance of Evoked | None
             The evoked response to subtract. If None, the evoked response
             is computed from Epochs itself.
 
         Returns
         -------
-        self : instance of mne.Epochs
+        self : instance of Epochs
             The modified instance (instance is also modified inplace).
         """
         logger.info('Subtracting Evoked from Epochs')
@@ -308,12 +350,21 @@ class _BaseEpochs(ProjMixin):
         raise NotImplementedError('next() must be implemented in derived '
                                   'class.')
 
+    def __next__(self, *args, **kwargs):
+        """Wrapper for Py3k"""
+        return self.next(*args, **kwargs)
+
+    def __hash__(self):
+        if not self.preload:
+            raise RuntimeError('Cannot hash epochs unless preloaded')
+        return object_hash(dict(info=self.info, data=self._data))
+
     def average(self, picks=None):
         """Compute average of epochs
 
         Parameters
         ----------
-        picks : None | array of int
+        picks : array-like of int | None
             If None, only MEG and EEG channels are kept;
             otherwise the channel indices in picks are kept.
 
@@ -330,7 +381,7 @@ class _BaseEpochs(ProjMixin):
 
         Parameters
         ----------
-        picks : None | array of int
+        picks : array-like of int | None
             If None, only MEG and EEG channels are kept;
             otherwise the channel indices in picks are kept.
 
@@ -345,10 +396,7 @@ class _BaseEpochs(ProjMixin):
         """Compute the mean or std over epochs and return Evoked"""
 
         _do_std = True if mode == 'stderr' else False
-        evoked = Evoked(None)
-        evoked.info = cp.deepcopy(self.info)
-        # make sure projs are really copied.
-        evoked.info['projs'] = [cp.deepcopy(p) for p in self.info['projs']]
+
         n_channels = len(self.ch_names)
         n_times = len(self.times)
         if self.preload:
@@ -364,7 +412,12 @@ class _BaseEpochs(ProjMixin):
             for e in self:
                 data += e
                 n_events += 1
-            data /= n_events
+
+            if n_events > 0:
+                data /= n_events
+            else:
+                data.fill(np.nan)
+
             # convert to stderr if requested, could do in one pass but do in
             # two (slower) in case there are large numbers
             if _do_std:
@@ -374,36 +427,39 @@ class _BaseEpochs(ProjMixin):
                     data += (e - data_mean) ** 2
                 data = np.sqrt(data / n_events)
 
-        evoked.data = data
-        evoked.times = self.times.copy()
-        evoked.comment = self.name
-        evoked.nave = n_events
-        evoked.first = int(self.times[0] * self.info['sfreq'])
-        evoked.last = evoked.first + len(self.times) - 1
         if not _do_std:
-            evoked._aspect_kind = FIFF.FIFFV_ASPECT_AVERAGE
+            _aspect_kind = FIFF.FIFFV_ASPECT_AVERAGE
         else:
-            evoked._aspect_kind = FIFF.FIFFV_ASPECT_STD_ERR
-            evoked.data /= np.sqrt(evoked.nave)
-        evoked.kind = aspect_rev.get(str(evoked._aspect_kind), 'Unknown')
+            _aspect_kind = FIFF.FIFFV_ASPECT_STD_ERR
+            data /= np.sqrt(n_events)
+        kind = aspect_rev.get(str(_aspect_kind), 'Unknown')
+
+        info = cp.deepcopy(self.info)
+        evoked = EvokedArray(data, info, tmin=self.times[0],
+                             comment=self.name, nave=n_events, kind=kind,
+                             verbose=self.verbose)
+        # XXX: above constructor doesn't recreate the times object precisely
+        evoked.times = self.times.copy()
+        evoked._aspect_kind = _aspect_kind
 
-        # dropping EOG, ECG and STIM channels. Keeping only data
+        # pick channels
         if picks is None:
             picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=True,
                                stim=False, eog=False, ecg=False,
                                emg=False, exclude=[])
-            if len(picks) == 0:
-                raise ValueError('No data channel found when averaging.')
-
-        picks = np.sort(picks)  # make sure channel order does not change
-        evoked.info['chs'] = [evoked.info['chs'][k] for k in picks]
-        evoked.info['ch_names'] = [evoked.info['ch_names'][k]
-                                   for k in picks]
-        evoked.info['nchan'] = len(picks)
-        evoked.data = evoked.data[picks]
+
+        ch_names = [evoked.ch_names[p] for p in picks]
+        evoked.pick_channels(ch_names)
+
+        if len(evoked.info['ch_names']) == 0:
+            raise ValueError('No data channel found when averaging.')
+
         # otherwise the apply_proj will be confused
         evoked.proj = True if self.proj is True else None
-        evoked.verbose = self.verbose
+
+        if evoked.nave < 1:
+            warnings.warn('evoked object is empty (based on less '
+                          'than 1 epoch)', RuntimeWarning)
 
         return evoked
 
@@ -420,10 +476,9 @@ class _BaseEpochs(ProjMixin):
         epoch_idx : array-like | int | None
             The epochs to visualize. If None, the first 20 epochs are shown.
             Defaults to None.
-        picks : array-like | None
+        picks : array-like of int | None
             Channels to be included. If None, only good data channels are used.
             Defaults to None.
-            scalings : dict | None
         scalings : dict | None
             Scale factors for the traces. If None, defaults to:
             `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
@@ -444,8 +499,9 @@ class _BaseEpochs(ProjMixin):
         fig : Instance of matplotlib.figure.Figure
             The figure.
         """
-        plot_epochs(self, epoch_idx=epoch_idx, picks=picks, scalings=scalings,
-                    title_str=title_str, show=show, block=block)
+        return plot_epochs(self, epoch_idx=epoch_idx, picks=picks,
+                           scalings=scalings, title_str=title_str,
+                           show=show, block=block)
 
 
 class Epochs(_BaseEpochs):
@@ -456,7 +512,9 @@ class Epochs(_BaseEpochs):
     raw : Raw object
         An instance of Raw.
     events : array, of shape [n_events, 3]
-        Returned by the read_events function.
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be marked as 'IGNORED' in the drop log.
     event_id : int | list of int | dict | None
         The id of the event to consider. If dict,
         the keys can later be used to access associated events. Example:
@@ -479,7 +537,9 @@ class Epochs(_BaseEpochs):
         and if b is None then b is set to the end of the interval.
         If baseline is equal to (None, None) all the time
         interval is used.
-    picks : None (default) or array of int
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
+    picks : array-like of int | None (default)
         Indices of channels to include (if None, all channels
         are used).
     preload : boolean
@@ -534,6 +594,11 @@ class Epochs(_BaseEpochs):
     add_eeg_ref : bool
         If True, an EEG average reference will be added (unless one
         already exists).
+    on_missing : str
+        What to do if an event id is not found in the recording.
+        Valid values are 'error' | 'warning' | 'ignore'.
+        Default is 'error'. If on_missing is 'warning' it will proceed but
+        warn; if 'ignore' it will proceed silently.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
         Defaults to raw.verbose.
@@ -546,14 +611,23 @@ class Epochs(_BaseEpochs):
         Names of conditions corresponding to event_ids.
     ch_names : list of string
         List of channels' names.
+    selection : array
+        List of indices of selected events (not dropped or ignored etc.). For
+        example, if the original event array had 4 events and the second event
+        has been dropped, this attribute would be np.array([0, 2, 3]).
+    preload : bool
+        Indicates whether epochs are in memory.
     drop_log : list of lists
-        This list (same length as events) contains the channel(s),
-        or the reasons (count equalization, not reaching minimum duration),
-        if any, that caused an event in the original event list to be dropped
-        by drop_bad_epochs(). Caveat. The drop log will only know about the
-        events passed to epochs. If the events represent a selection the
-        drop log can be misaligned with regard to other external logs (e.g.,
-        behavioral responses) that still refer to the complete list of events.
+        A list of the same length as the event array used to initialize the
+        Epochs object. If the i-th original event is still part of the
+        selection, drop_log[i] will be an empty list; otherwise it will be
+        a list of the reasons the event is no longer in the selection, e.g.:
+
+        'IGNORED' if it isn't part of the current subset defined by the user;
+        'NO DATA' or 'TOO SHORT' if epoch didn't contain enough data;
+        names of channels that exceeded the amplitude threshold;
+        'EQUALIZED_COUNT' (see equalize_event_counts);
+        or user-defined reasons (see drop_epochs).
     verbose : bool, str, int, or None
         See above.
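
A hedged sketch of reading the two bookkeeping attributes together (an ``epochs`` instance assumed):

    epochs.drop_bad_epochs()
    print(epochs.selection)   # indices into the original events array
    for idx, reasons in enumerate(epochs.drop_log):
        if reasons and reasons != ['IGNORED']:
            print('original event %d dropped: %s' % (idx, reasons))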
 
@@ -584,9 +658,16 @@ class Epochs(_BaseEpochs):
     def __init__(self, raw, events, event_id, tmin, tmax, baseline=(None, 0),
                  picks=None, name='Unknown', preload=False, reject=None,
                  flat=None, proj=True, decim=1, reject_tmin=None,
-                 reject_tmax=None, detrend=None, add_eeg_ref=True, verbose=None):
+                 reject_tmax=None, detrend=None, add_eeg_ref=True,
+                 on_missing='error', verbose=None):
         if raw is None:
             return
+        elif not isinstance(raw, _BaseRaw):
+            raise ValueError('The first argument to `Epochs` must be `None` '
+                             'or an instance of `mne.io.Raw`')
+        if on_missing not in ['error', 'warning', 'ignore']:
+            raise ValueError('on_missing must be one of: error, '
+                             'warning, ignore. Got: %s' % on_missing)
 
         # prepare for calling the base constructor
 
@@ -621,20 +702,44 @@ class Epochs(_BaseEpochs):
         activate = False if self._check_delayed() else self.proj
         self._projector, self.info = setup_proj(self.info, add_eeg_ref,
                                                 activate=activate)
+
+        for key, val in self.event_id.items():
+            if val not in events[:, 2]:
+                msg = ('No matching events found for %s '
+                       '(event id %i)' % (key, val))
+                if on_missing == 'error':
+                    raise ValueError(msg)
+                elif on_missing == 'warning':
+                    logger.warning(msg)
+                    warnings.warn(msg)
+                else:  # on_missing == 'ignore':
+                    pass
+
         # Select the desired events
-        selected = in1d(events[:, 2], self.event_id.values())
+        values = list(self.event_id.values())
+        selected = in1d(events[:, 2], values)
         self.events = events[selected]
-        if len(self.events) > 1:
+
+        n_events = len(self.events)
+        if n_events > 1:
             if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
                 warnings.warn('The events passed to the Epochs constructor '
                               'are not chronologically ordered.',
                               RuntimeWarning)
-        n_events = len(self.events)
+
         if n_events > 0:
             logger.info('%d matching events found' % n_events)
         else:
             raise ValueError('No desired events found.')
 
+        self.selection = np.where(selected)[0]
+        self.drop_log = []
+        for k in range(len(events)):
+            if events[k, 2] in values:
+                self.drop_log.append([])
+            else:
+                self.drop_log.append(['IGNORED'])
+
         self.preload = preload
         if self.preload:
             self._data = self._get_data_from_disk()
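
Illustration of the new ``on_missing`` behavior (event id 99 assumed absent from ``events``):

    epochs = mne.Epochs(raw, events, dict(rare=99, aud_l=1), -0.2, 0.5,
                        on_missing='warning')  # default 'error' would raise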
@@ -642,19 +747,16 @@ class Epochs(_BaseEpochs):
         else:
             self._data = None
 
+    @deprecated('drop_picks will be removed in v0.9. Use drop_channels.')
     def drop_picks(self, bad_picks):
         """Drop some picks
 
         Allows discarding some channels.
         """
-        self.picks = list(self.picks)
         idx = [k for k, p in enumerate(self.picks) if p not in bad_picks]
-        self.picks = [self.picks[k] for k in idx]
+        self.picks = self.picks[idx]
 
-        # XXX : could maybe be factorized
-        self.info['chs'] = [self.info['chs'][k] for k in idx]
-        self.info['ch_names'] = [self.info['ch_names'][k] for k in idx]
-        self.info['nchan'] = len(idx)
+        self.info = pick_info(self.info, idx, copy=False)
 
         if self._projector is not None:
             self._projector = self._projector[idx][:, idx]
@@ -674,6 +776,63 @@ class Epochs(_BaseEpochs):
         """
         self._get_data_from_disk(out=False)
 
+    def drop_log_stats(self, ignore=['IGNORED']):
+        """Compute the channel stats based on a drop_log from Epochs.
+
+        Parameters
+        ----------
+        ignore : list
+            The drop reasons to ignore.
+
+        Returns
+        -------
+        perc : float
+            Total percentage of epochs dropped.
+        """
+        return _drop_log_stats(self.drop_log, ignore)
+
+    def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
+                      color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
+                      show=True, return_fig=False):
+        """Show the channel stats based on a drop_log from Epochs
+
+        Parameters
+        ----------
+        threshold : float
+            The percentage threshold to use to decide whether or not to
+            plot. Default is zero (always plot).
+        n_max_plot : int
+            Maximum number of channels to show stats for.
+        subject : str
+            The subject name to use in the title of the plot.
+        color : tuple | str
+            Color to use for the bars.
+        width : float
+            Width of the bars.
+        ignore : list
+            The drop reasons to ignore.
+        show : bool
+            Show figure if True.
+        return_fig : bool
+            Return only figure handle if True. This argument will default
+            to True in v0.9 and then be removed.
+
+        Returns
+        -------
+        perc : float
+            Total percentage of epochs dropped.
+        fig : Instance of matplotlib.figure.Figure
+            The figure.
+        """
+        if not self._bad_dropped:
+            print("Bad epochs have not yet been dropped.")
+            return
+
+        from .viz import plot_drop_log
+        return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
+                             color=color, width=width, ignore=ignore,
+                             show=show, return_fig=return_fig)
+
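
Typical use of the two new helpers (sketch; ``return_fig=True`` opts into the future return behavior noted in the docstring):

    epochs.drop_bad_epochs()
    perc = epochs.drop_log_stats()  # percent dropped, 'IGNORED' excluded
    fig = epochs.plot_drop_log(subject='sample', return_fig=True)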
     def _check_delayed(self):
         """ Aux method
         """
@@ -688,25 +847,50 @@ class Epochs(_BaseEpochs):
         return is_delayed
 
     @verbose
-    def drop_epochs(self, indices, verbose=None):
+    def drop_epochs(self, indices, reason='USER', verbose=None):
         """Drop epochs based on indices or boolean mask
 
+        Note that the indices refer to the current set of undropped epochs
+        rather than the complete set of dropped and undropped epochs.
+        They are therefore not necessarily consistent with any external indices
+        (e.g., behavioral logs). To drop epochs based on external criteria,
+        do not use the preload=True flag when constructing an Epochs object,
+        and call this method before calling the drop_bad_epochs method.
+
         Parameters
         ----------
         indices : array of ints or bools
             Set epochs to remove by specifying indices to remove or a boolean
             mask to apply (where True values get removed). Events are
             correspondingly modified.
+        reason : str
+            Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
+            Default: 'USER'.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to raw.verbose.
         """
-        indices = np.asarray(indices)
+        indices = np.atleast_1d(indices)
+
+        if indices.ndim > 1:
+            raise ValueError("indices must be a scalar or a 1-d array")
+
         if indices.dtype == bool:
             indices = np.where(indices)[0]
+
+        out_of_bounds = (indices < 0) | (indices >= len(self.events))
+        if out_of_bounds.any():
+            first = indices[out_of_bounds][0]
+            raise IndexError("Epoch index %d is out of bounds" % first)
+
+        for ii in indices:
+            self.drop_log[self.selection[ii]].append(reason)
+
+        self.selection = np.delete(self.selection, indices)
         self.events = np.delete(self.events, indices, axis=0)
-        if(self.preload):
+        if self.preload:
             self._data = np.delete(self._data, indices, axis=0)
+
         count = len(indices)
         logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
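
Sketch of the new ``reason`` bookkeeping:

    epochs.drop_epochs([0, 1], reason='blink')  # indices into current epochs
    # the reason is recorded at the original event positions
    blinks = [i for i, log in enumerate(epochs.drop_log) if 'blink' in log]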
 
@@ -751,7 +935,7 @@ class Epochs(_BaseEpochs):
         # only preprocess first candidate, to make delayed SSP working
         # we need to postpone the preprocessing since projection comes
         # first.
-        epochs[0] = self._preprocess(epochs[0], verbose)
+        epochs[0] = self._preprocess(epochs[0])
 
         # return a second None if nothing is projected
         if len(epochs) == 1:
@@ -760,28 +944,6 @@ class Epochs(_BaseEpochs):
         return epochs
 
     @verbose
-    def _preprocess(self, epoch, verbose=None):
-        """ Aux Function
-        """
-        if self.detrend is not None:
-            picks = pick_types(self.info, meg=True, eeg=True, stim=False,
-                               ref_meg=False, eog=False, ecg=False,
-                               emg=False, exclude=[])
-            epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
-        # Baseline correct
-        epoch = rescale(epoch, self._raw_times, self.baseline, 'mean',
-                        copy=False, verbose=verbose)
-
-        # handle offset
-        if self._offset is not None:
-            epoch += self._offset
-
-        # Decimate
-        if self.decim > 1:
-            epoch = epoch[:, self._decim_idx]
-        return epoch
-
-    @verbose
     def _get_data_from_disk(self, out=True, verbose=None):
         """Load all data from disk
 
@@ -800,7 +962,7 @@ class Epochs(_BaseEpochs):
             proj = False if self._check_delayed() else self.proj
             if not out:
                 return
-            for ii in xrange(n_events):
+            for ii in range(n_events):
                 # faster to pre-allocate memory here
                 epoch, epoch_raw = self._get_epoch_from_disk(ii, proj=proj)
                 if ii == 0:
@@ -812,9 +974,8 @@ class Epochs(_BaseEpochs):
         else:
             proj = True if self._check_delayed() else self.proj
             good_events = []
-            drop_log = [[] for _ in range(n_events)]
             n_out = 0
-            for idx in xrange(n_events):
+            for idx, sel in zip(range(n_events), self.selection):
                 epoch, epoch_raw = self._get_epoch_from_disk(idx, proj=proj)
                 is_good, offenders = self._is_good_epoch(epoch)
                 if is_good:
@@ -830,9 +991,9 @@ class Epochs(_BaseEpochs):
                         data[n_out] = epoch
                         n_out += 1
                 else:
-                    drop_log[idx] = offenders
+                    self.drop_log[sel] += offenders
 
-            self.drop_log = drop_log
+            self.selection = self.selection[good_events]
             self.events = np.atleast_2d(self.events[good_events])
             self._bad_dropped = True
             logger.info("%d bad epochs dropped"
@@ -946,7 +1107,7 @@ class Epochs(_BaseEpochs):
                 raise StopIteration
             epoch = self._data[self._current]
             if self._check_delayed():
-                epoch = self._preprocess(epoch.copy())
+                epoch = self._preprocess(epoch.copy(), self.verbose)
             self._current += 1
         else:
             proj = True if self._check_delayed() else self.proj
@@ -960,7 +1121,7 @@ class Epochs(_BaseEpochs):
                 is_good, _ = self._is_good_epoch(epoch)
             # If delayed-ssp mode, pass 'virgin' data after rejection decision.
             if self._check_delayed():
-                epoch = self._preprocess(epoch_raw)
+                epoch = self._preprocess(epoch_raw, self.verbose)
 
         if not return_event_id:
             return epoch
@@ -981,7 +1142,7 @@ class Epochs(_BaseEpochs):
         s += ', baseline : %s' % str(self.baseline)
         if len(self.event_id) > 1:
             counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
-                      for k, v in self.event_id.items()]
+                      for k, v in sorted(self.event_id.items())]
             s += ',\n %s' % ', '.join(counts)
 
         return '<Epochs  |  %s>' % s
@@ -995,35 +1156,34 @@ class Epochs(_BaseEpochs):
     def __getitem__(self, key):
         """Return an Epochs object with a subset of epochs
         """
-
         data = self._data
         del self._data
         epochs = self.copy()
         self._data, epochs._data = data, data
 
-        if isinstance(key, basestring):
+        if isinstance(key, string_types):
             key = [key]
 
-        if isinstance(key, list) and isinstance(key[0], basestring):
-            key_match = np.any(np.atleast_2d([epochs._key_match(k)
-                                              for k in key]), axis=0)
-            select = key_match
-            epochs.name = ('-'.join(key) if epochs.name == 'Unknown'
-                           else 'epochs_%s' % '-'.join(key))
+        if isinstance(key, (list, tuple)) and isinstance(key[0], string_types):
+            select = np.any(np.atleast_2d([epochs._key_match(k)
+                                           for k in key]), axis=0)
+            epochs.name = ('+'.join(key) if epochs.name == 'Unknown'
+                           else 'epochs_%s' % '+'.join(key))
         else:
-            key_match = key
             select = key if isinstance(key, slice) else np.atleast_1d(key)
-            if not epochs._bad_dropped:
-                # Only matters if preload is not true, since bad epochs are
-                # dropped on preload; doesn't mater for key lookup, either
-                warnings.warn("Bad epochs have not been dropped, indexing will"
-                              " be inaccurate. Use drop_bad_epochs() or"
-                              " preload=True")
-
-        epochs.events = np.atleast_2d(epochs.events[key_match])
+
+        key_selection = epochs.selection[select]
+        for k in np.setdiff1d(epochs.selection, key_selection):
+            epochs.drop_log[k] = ['IGNORED']
+        epochs.selection = key_selection
+        epochs.events = np.atleast_2d(epochs.events[select])
         if epochs.preload:
             epochs._data = epochs._data[select]
 
+        # update event id to reflect new content of epochs
+        epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
+                               if v in epochs.events[:, 2])
+
         return epochs
 
     def crop(self, tmin=None, tmax=None, copy=False):
@@ -1042,6 +1202,11 @@ class Epochs(_BaseEpochs):
         -------
         epochs : Epochs instance
             The cropped epochs.
+
+        Notes
+        -----
+        Unlike Python slices, MNE time intervals include both their end points;
+        crop(tmin, tmax) returns the interval tmin <= t <= tmax.
         """
         if not self.preload:
             raise RuntimeError('Modifying data of epochs is only supported '
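
For example (sketch), both endpoints are retained:

    cropped = epochs.crop(0., 0.3, copy=True)  # keeps 0 <= t <= 0.3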
@@ -1111,7 +1276,7 @@ class Epochs(_BaseEpochs):
         """Return copy of Epochs instance"""
         raw = self.raw
         del self.raw
-        new = deepcopy(self)
+        new = cp.deepcopy(self)
         self.raw = raw
         new.raw = raw
 
@@ -1123,8 +1288,11 @@ class Epochs(_BaseEpochs):
         Parameters
         ----------
         fname : str
-            The name of the file.
+            The name of the file, which should end with -epo.fif or
+            -epo.fif.gz.
         """
+        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
+
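
Sketch of the naming convention now enforced via ``check_fname`` (assuming it warns on non-conforming names, as elsewhere in mne.utils):

    epochs.save('sample-epo.fif')     # conforms
    epochs.save('sample_epochs.fif')  # triggers a naming-convention warning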
         # Create the file and save the essentials
         fid = start_file(fname)
 
@@ -1175,6 +1343,13 @@ class Epochs(_BaseEpochs):
 
         # undo modifications to data
         data /= decal[np.newaxis, :, np.newaxis]
+
+        write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
+                     json.dumps(self.drop_log))
+
+        write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
+                  self.selection)
+
         end_block(fid, FIFF.FIFFB_EPOCHS)
 
         end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
@@ -1192,7 +1367,7 @@ class Epochs(_BaseEpochs):
 
         Parameters
         ----------
-        picks : None | array of int
+        picks : array-like of int | None
             If None, only MEG and EEG channels are kept;
             otherwise the channel indices in picks are kept.
         index : tuple of str | None
@@ -1222,7 +1397,7 @@ class Epochs(_BaseEpochs):
             index = default_index
 
         if picks is None:
-            picks = range(self.info['nchan'])
+            picks = list(range(self.info['nchan']))
         else:
             if not in1d(picks, np.arange(len(self.ch_names))).all():
                 raise ValueError('At least one picked channel is not present '
@@ -1266,7 +1441,7 @@ class Epochs(_BaseEpochs):
         df = pd.DataFrame(data, columns=col_names)
         [df.insert(i, k, v) for i, (k, v) in enumerate(mindex)]
         if index is not None:
-            with warnings.catch_warnings(True):
+            with warnings.catch_warnings(record=True):
                 if 'time' in index:
                     df['time'] = df['time'].astype(np.int64)
                 df.set_index(index, inplace=True)
@@ -1279,7 +1454,7 @@ class Epochs(_BaseEpochs):
 
         Parameters
         ----------
-        picks : array-like | None
+        picks : array-like of int | None
             Indices for exporting subsets of the epochs channels. If None
             all good channels will be used.
         epochs_idx : slice | array-like | None
@@ -1392,17 +1567,135 @@ class Epochs(_BaseEpochs):
                 key_match = np.logical_or(key_match, epochs._key_match(key))
             eq_inds.append(np.where(key_match)[0])
 
-        event_times = [epochs.events[eq, 0] for eq in eq_inds]
+        event_times = [epochs.events[e, 0] for e in eq_inds]
         indices = _get_drop_indices(event_times, method)
         # need to re-index indices
-        indices = np.concatenate([eq[inds]
-                                  for eq, inds in zip(eq_inds, indices)])
-        epochs = _check_add_drop_log(epochs, indices)
-        epochs.drop_epochs(indices)
+        indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
+        epochs.drop_epochs(indices, reason='EQUALIZED_COUNT')
         # actually remove the indices
         return epochs, indices
 
 
+class EpochsArray(Epochs):
+    """Epochs object from numpy array
+
+    Parameters
+    ----------
+    data : array, shape (n_epochs, n_channels, n_times)
+        The channels' time series for each epoch.
+    info : instance of Info
+        Info dictionary. Consider using ``create_info`` to populate
+        this structure.
+    events : array, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        Every event id given in event_id must be present in the events
+        array; otherwise a ValueError is raised.
+    tmin : float
+        Start time before event.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and a
+        dict is created with string integer names corresponding to the
+        event id integers.
+    reject : dict
+        Epoch rejection parameters based on peak to peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done.
+        Values are float. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # V (EEG channels)
+                          eog=250e-6 # V (EOG channels)
+                          )
+
+    flat : dict
+        Epoch rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If flat is None then no rejection is done.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to None.
+    """
+
+    @verbose
+    def __init__(self, data, info, events, tmin=0, event_id=None,
+                 reject=None, flat=None, reject_tmin=None,
+                 reject_tmax=None, verbose=None):
+
+        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
+        data = np.asanyarray(data, dtype=dtype)
+
+        if data.ndim != 3:
+            raise ValueError('Data must be a 3D array of shape (n_epochs, '
+                             'n_channels, n_samples)')
+
+        if len(info['ch_names']) != np.shape(data)[1]:
+            raise ValueError('Info and data must have same number of '
+                             'channels.')
+
+        self.info = info
+        self._data = data
+        if event_id is None:  # convert to int to make typing-checks happy
+            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
+        self.event_id = event_id
+        self.events = events
+
+        for key, val in self.event_id.items():
+            if val not in events[:, 2]:
+                msg = ('No matching events found for %s '
+                       '(event id %i)' % (key, val))
+                raise ValueError(msg)
+
+        self.proj = None
+        self.baseline = None
+        self.preload = True
+        self.reject = None
+        self.decim = 1
+        self._decim_idx = slice(0, data.shape[-1], self.decim)
+        self.raw = None
+        self.drop_log = [[] for _ in range(len(events))]
+        self._bad_dropped = True
+
+        self.selection = np.arange(len(events))
+        self.picks = None
+        self.times = (np.arange(data.shape[-1], dtype=np.float) /
+                      info['sfreq'] + tmin)
+        self.tmin = self.times[0]
+        self.tmax = self.times[-1]
+        self.verbose = verbose
+        self.name = 'Unknown'
+        self._projector = None
+        self.reject = reject
+        self.flat = flat
+        self.reject_tmin = reject_tmin
+        self.reject_tmax = reject_tmax
+        self._reject_setup()
+        drop_inds = list()
+        if self.reject is not None or self.flat is not None:
+            for i_epoch, epoch in enumerate(self):
+                is_good, chan = self._is_good_epoch(epoch,
+                                                    verbose=self.verbose)
+                if not is_good:
+                    drop_inds.append(i_epoch)
+                    self.drop_log[i_epoch].extend(chan)
+        if drop_inds:
+            select = np.ones(len(events), dtype=np.bool)
+            select[drop_inds] = False
+            self.events = self.events[select]
+            self._data = self._data[select]
+            self.selection = self.selection[select]
+
+
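A minimal sketch of how the new EpochsArray constructor above might be used; the mne.create_info call, the channel names, and the package-level export mne.EpochsArray are illustrative assumptions, not part of this patch::

    import numpy as np
    import mne

    # 5 epochs x 2 channels x 50 samples of fake data
    data = np.random.randn(5, 2, 50)
    info = mne.create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=100.,
                           ch_types=['eeg', 'eeg'])
    # events: (onset sample, previous value, event id)
    events = np.c_[np.arange(5) * 50, np.zeros(5, int), np.ones(5, int)]
    epochs = mne.EpochsArray(data, info, events, tmin=-0.1,
                             event_id=dict(tone=1))
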
 def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
     """Collapse event_ids from an epochs instance into a new event_id
 
@@ -1437,9 +1730,9 @@ def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
     else:
         if not isinstance(new_event_id, dict):
             raise ValueError('new_event_id must be a dict or int')
-        if not len(new_event_id.keys()) == 1:
+        if not len(list(new_event_id.keys())) == 1:
             raise ValueError('new_event_id dict must have one entry')
-    new_event_num = new_event_id.values()[0]
+    new_event_num = list(new_event_id.values())[0]
     if not isinstance(new_event_num, int):
         raise ValueError('new_event_id value must be an integer')
     if new_event_num in epochs.event_id.values():
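For context, a hedged usage sketch of combine_event_ids; the event names and the new id are hypothetical::

    # collapse two conditions into a single new event id
    epochs = mne.epochs.combine_event_ids(epochs, ['aud_l', 'aud_r'],
                                          dict(auditory=12))
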
@@ -1495,8 +1788,7 @@ def equalize_epoch_counts(epochs_list, method='mintime'):
     event_times = [e.events[:, 0] for e in epochs_list]
     indices = _get_drop_indices(event_times, method)
     for e, inds in zip(epochs_list, indices):
-        e = _check_add_drop_log(e, inds)
-        e.drop_epochs(inds)
+        e.drop_epochs(inds, reason='EQUALIZED_COUNT')
 
 
 def _get_drop_indices(event_times, method):
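The reason='EQUALIZED_COUNT' change above means dropped epochs are now recorded in drop_log directly rather than patched in afterwards. A usage sketch, assuming two preloaded Epochs instances epochs_a and epochs_b::

    # trims both instances in place to the same number of epochs,
    # matching event times as closely as possible ('mintime')
    mne.epochs.equalize_epoch_counts([epochs_a, epochs_b],
                                     method='mintime')
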
@@ -1535,8 +1827,8 @@ def _minimize_time_diff(t_shorter, t_longer):
 
 def _area_between_times(t1, t2):
     """Quantify the difference between two timing sets"""
-    x1 = range(len(t1))
-    x2 = range(len(t2))
+    x1 = list(range(len(t1)))
+    x2 = list(range(len(t2)))
     xs = np.concatenate((x1, x2))
     return np.sum(np.abs(np.interp(xs, x1, t1) - np.interp(xs, x2, t2)))
 
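_area_between_times interpolates both timing sets onto the union of their indices and sums the absolute differences. A tiny worked case in plain numpy, mirroring the function body::

    import numpy as np

    t1 = np.array([0., 10., 20.])
    t2 = np.array([0., 12., 24.])
    x = list(range(3))
    xs = np.concatenate((x, x))            # [0, 1, 2, 0, 1, 2]
    area = np.sum(np.abs(np.interp(xs, x, t1) - np.interp(xs, x, t2)))
    # -> 2 * (|10 - 12| + |20 - 24|) = 12.0
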
@@ -1555,7 +1847,7 @@ def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
                         for c in ch_names], dtype=bool)] = False
     for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
         if refl is not None:
-            for key, thresh in refl.iteritems():
+            for key, thresh in six.iteritems(refl):
                 idx = channel_type_idx[key]
                 name = key.upper()
                 if len(idx) > 0:
@@ -1592,7 +1884,7 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
     Parameters
     ----------
     fname : str
-        The name of the file.
+        The name of the file, which should end with -epo.fif or -epo.fif.gz.
     proj : bool | 'delayed'
         Apply SSP projection vectors. If proj is 'delayed' and reject is not
         None the single epochs will be projected before the rejection
@@ -1616,6 +1908,8 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
     epochs : instance of Epochs
         The epochs
     """
+    check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
+
     epochs = Epochs(None, None, None, None, None)
 
     logger.info('Reading %s ...' % fname)
@@ -1645,6 +1939,8 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
     data = None
     bmin, bmax = None, None
     baseline = None
+    selection = None
+    drop_log = []
     for k in range(my_epochs['nent']):
         kind = my_epochs['directory'][k].kind
         pos = my_epochs['directory'][k].pos
@@ -1666,6 +1962,12 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
         elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
             tag = read_tag(fid, pos)
             bmax = float(tag.data)
+        elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
+            tag = read_tag(fid, pos)
+            selection = np.array(tag.data)
+        elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
+            tag = read_tag(fid, pos)
+            drop_log = json.loads(tag.data)
 
     if bmin is not None or bmax is not None:
         baseline = (bmin, bmax)
@@ -1700,6 +2002,7 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
     # Put it all together
     epochs.preload = True
     epochs.raw = None
+    epochs.picks = np.arange(data.shape[1])
     epochs._bad_dropped = True
     epochs.events = events
     epochs._data = data
@@ -1718,7 +2021,14 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
     epochs.event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
                        if mappings is None else mappings)
     epochs.verbose = verbose
-    epochs.drop_log = []
+
+    # In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
+    # (version < 0.8):
+    if selection is None:
+        selection = np.arange(len(epochs))
+
+    epochs.selection = selection
+    epochs.drop_log = drop_log
     fid.close()
 
     return epochs
@@ -1752,18 +2062,100 @@ def bootstrap(epochs, random_state=None):
     return epochs_bootstrap
 
 
-def _check_add_drop_log(epochs, inds):
-    """Aux Function"""
-    new_idx, new_drop_log = 0, []
-    for idx, log in enumerate(epochs.drop_log):
-        if not log:
-            new_idx += 1
-        if new_idx in inds:
-            new_log = ['EQUALIZED_COUNT']
-        elif log:
-            new_log = log
-        else:
-            new_log = []
-        new_drop_log.append(new_log)
-    epochs.drop_log = new_drop_log
+def _check_merge_epochs(epochs_list):
+    """Aux function"""
+    event_ids = set(tuple(epochs.event_id.items()) for epochs in epochs_list)
+    if len(event_ids) == 1:
+        event_id = dict(event_ids.pop())
+    else:
+        raise NotImplementedError("Epochs with unequal values for event_id")
+
+    tmins = set(epochs.tmin for epochs in epochs_list)
+    if len(tmins) == 1:
+        tmin = tmins.pop()
+    else:
+        raise NotImplementedError("Epochs with unequal values for tmin")
+
+    tmaxs = set(epochs.tmax for epochs in epochs_list)
+    if len(tmaxs) == 1:
+        tmax = tmaxs.pop()
+    else:
+        raise NotImplementedError("Epochs with unequal values for tmax")
+
+    baselines = set(epochs.baseline for epochs in epochs_list)
+    if len(baselines) == 1:
+        baseline = baselines.pop()
+    else:
+        raise NotImplementedError("Epochs with unequal values for baseline")
+
+    return event_id, tmin, tmax, baseline
+
+
+@verbose
+def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
+                        verbose=None):
+    """Concatenate channels, info and data from two Epochs objects
+
+    Parameters
+    ----------
+    epochs_list : list of Epochs
+        Epochs objects to concatenate.
+    name : str
+        Comment that describes the resulting Epochs data.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless there is no
+        EEG in the data).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to True if any of the input epochs have verbose=True.
+
+    Returns
+    -------
+    epochs : Epochs
+        Concatenated epochs.
+    """
+    if not np.all([e.preload for e in epochs_list]):
+        raise ValueError('All epochs must be preloaded.')
+
+    info = _merge_info([epochs.info for epochs in epochs_list])
+    data = [epochs.get_data() for epochs in epochs_list]
+    event_id, tmin, tmax, baseline = _check_merge_epochs(epochs_list)
+
+    for d in data:
+        if len(d) != len(data[0]):
+            raise ValueError('all Epochs instances must contain the same '
+                             'number of epochs')
+
+    data = np.concatenate(data, axis=1)
+
+    if len(info['chs']) != data.shape[1]:
+        err = "Data shape does not match channel number in measurement info"
+        raise RuntimeError(err)
+
+    events = epochs_list[0].events.copy()
+    all_same = np.all([events == epochs.events for epochs in epochs_list[1:]],
+                      axis=0)
+    if not np.all(all_same):
+        raise ValueError('Events must be the same.')
+
+    proj = any(e.proj for e in epochs_list) or add_eeg_ref
+
+    if verbose is None:
+        verbose = any(e.verbose for e in epochs_list)
+
+    epochs = epochs_list[0].copy()
+    epochs.info = info
+    epochs.event_id = event_id
+    epochs.tmin = tmin
+    epochs.tmax = tmax
+    epochs.baseline = baseline
+    epochs.picks = None
+    epochs.name = name
+    epochs.verbose = verbose
+    epochs.events = events
+    epochs.preload = True
+    epochs._bad_dropped = True
+    epochs._data = data
+    epochs.proj = proj
+    epochs._projector, epochs.info = setup_proj(epochs.info, add_eeg_ref,
+                                                activate=proj)
     return epochs
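A hedged sketch of the new add_channels_epochs: all inputs must be preloaded and share events, tmin, tmax and baseline, or the checks above raise (epochs_meg and epochs_eeg are hypothetical Epochs instances)::

    merged = mne.epochs.add_channels_epochs([epochs_meg, epochs_eeg],
                                            name='meg+eeg')
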
diff --git a/mne/event.py b/mne/event.py
index 05c4d76..c7d6d48 100644
--- a/mne/event.py
+++ b/mne/event.py
@@ -1,7 +1,7 @@
 """IO with fif files containing events
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
@@ -9,13 +9,13 @@
 import numpy as np
 from os.path import splitext
 
-from .fiff.constants import FIFF
-from .fiff.tree import dir_tree_find
-from .fiff.tag import read_tag
-from .fiff.open import fiff_open
-from .fiff.write import write_int, start_block, start_file, end_block, end_file
-from .fiff.pick import pick_channels
-from .utils import get_config, logger, verbose
+from .utils import check_fname, logger, verbose, _get_stim_channel
+from .io.constants import FIFF
+from .io.tree import dir_tree_find
+from .io.tag import read_tag
+from .io.open import fiff_open
+from .io.write import write_int, start_block, start_file, end_block, end_file
+from .io.pick import pick_channels
 
 
 def pick_events(events, include=None, exclude=None):
@@ -166,10 +166,11 @@ def _read_events_fif(fid, tree):
     else:
         mappings = None
 
-    if mappings is not None:
-        m_ = (m.split(':') for m in mappings.split(';'))
-        mappings = dict((k, int(v)) for k, v in m_)
-    event_list = event_list.reshape(len(event_list) / 3, 3)
+    if mappings is not None:  # deal with ':' in keys
+        m_ = [[s[::-1] for s in m[::-1].split(':', 1)]
+              for m in mappings.split(';')]
+        mappings = dict((k, int(v)) for v, k in m_)
+    event_list = event_list.reshape(len(event_list) // 3, 3)
     return event_list, mappings
 
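The reversed split above makes only the last ':' in each mapping act as the key/id separator, so event names may themselves contain ':'. A standalone illustration of the same parsing::

    mappings = 'auditory:left:1;visual:2'
    m_ = [[s[::-1] for s in m[::-1].split(':', 1)]
          for m in mappings.split(';')]
    print(dict((k, int(v)) for v, k in m_))
    # -> {'auditory:left': 1, 'visual': 2}
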
 
@@ -202,12 +203,21 @@ def read_events(filename, include=None, exclude=None):
     -----
     This function will discard the offset line (i.e., first line with zero
     event number) if it is present in a text file.
+
+    Working with downsampled data: Events that were computed before the data
+    was decimated are no longer valid. Please recompute your events after
+    decimation.
     """
+    check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
+                                     '-eve.lst', '-eve.txt'))
+
     ext = splitext(filename)[1].lower()
     if ext == '.fif' or ext == '.gz':
         fid, tree, _ = fiff_open(filename)
-        event_list, _ = _read_events_fif(fid, tree)
-        fid.close()
+        try:
+            event_list, _ = _read_events_fif(fid, tree)
+        finally:
+            fid.close()
     else:
         #  Have to read this in as float64 then convert because old style
         #  eve/lst files had a second float column that will raise errors
@@ -249,6 +259,9 @@ def write_events(filename, event_list):
     event_list : array, shape (n_events, 3)
         The list of events
     """
+    check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
+                                     '-eve.lst', '-eve.txt'))
+
     ext = splitext(filename)[1].lower()
     if ext == '.fif' or ext == '.gz':
         #   Start writing...
@@ -355,8 +368,8 @@ def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
         raise ValueError('No stim channel found to extract event triggers.')
     data, _ = raw[picks, :]
     if np.any(data < 0):
-        logger.warn('Trigger channel contains negative values. '
-                    'Taking absolute value.')
+        logger.warning('Trigger channel contains negative values. '
+                       'Taking absolute value.')
         data = np.abs(data)  # make sure trig channel is positive
     data = data.astype(np.int)
 
@@ -376,8 +389,8 @@ def _find_events(data, first_samp, verbose=None, output='onset',
         merge = 0
 
     if np.any(data < 0):
-        logger.warn('Trigger channel contains negative values. '
-                    'Taking absolute value.')
+        logger.warning('Trigger channel contains negative values. '
+                       'Taking absolute value.')
         data = np.abs(data)  # make sure trig channel is positive
     data = data.astype(np.int)
 
@@ -432,7 +445,8 @@ def _find_events(data, first_samp, verbose=None, output='onset',
 
 @verbose
 def find_events(raw, stim_channel=None, verbose=None, output='onset',
-                consecutive='increasing', min_duration=0):
+                consecutive='increasing', min_duration=0,
+                shortest_event=2):
     """Find events from raw file
 
     Parameters
@@ -459,6 +473,9 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
     min_duration : float
         The minimum duration of a change in the events channel required
         to consider it as an event (in seconds).
+    shortest_event : int
+        Minimum number of samples an event must last (default is 2). If the
+        duration is less than this an exception will be raised.
 
     Returns
     -------
@@ -540,6 +557,16 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
     events = _find_events(data, raw.first_samp, verbose=verbose, output=output,
                           consecutive=consecutive, min_samples=min_samples)
 
+    # add a safety check for spurious events (e.g. from Neuromag systems)
+    # by counting events separated by fewer than shortest_event samples
+    n_short_events = np.sum(np.diff(events[:, 0]) < shortest_event)
+    if n_short_events > 0:
+        raise ValueError("You have %i events shorter than the "
+                         "shortest_event. These are very unusual and you "
+                         "may want to set min_duration to a larger value, "
+                         "e.g. x / raw.info['sfreq'], where x is one sample "
+                         "shorter than the shortest event length."
+                         % (n_short_events))
+
     return events
 
 
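If the new check fires because of one-sample glitches on the trigger line, the suggested remedy is to require a minimum duration; raw is a hypothetical Raw instance here::

    events = mne.find_events(raw, stim_channel='STI 014',
                             min_duration=2. / raw.info['sfreq'])
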
@@ -686,26 +713,3 @@ def concatenate_events(events, first_samps, last_samps):
         events_out = np.concatenate((events_out, e2), axis=0)
 
     return events_out
-
-
-def _get_stim_channel(stim_channel):
-    """Helper to determine the appropriate stim_channel"""
-    if stim_channel is not None:
-        if not isinstance(stim_channel, list):
-            if not isinstance(stim_channel, basestring):
-                raise ValueError('stim_channel must be a str, list, or None')
-            stim_channel = [stim_channel]
-        if not all([isinstance(s, basestring) for s in stim_channel]):
-            raise ValueError('stim_channel list must contain all strings')
-        return stim_channel
-
-    stim_channel = list()
-    ch_count = 0
-    ch = get_config('MNE_STIM_CHANNEL')
-    while(ch is not None):
-        stim_channel.append(ch)
-        ch_count += 1
-        ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
-    if ch_count == 0:
-        stim_channel = ['STI 014']
-    return stim_channel
diff --git a/mne/fiff/evoked.py b/mne/evoked.py
similarity index 50%
rename from mne/fiff/evoked.py
rename to mne/evoked.py
index e8fa6e4..77a884d 100644
--- a/mne/fiff/evoked.py
+++ b/mne/evoked.py
@@ -1,6 +1,7 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Andrew Dykstra <andrew.r.dykstra at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -8,22 +9,27 @@ from copy import deepcopy
 import numpy as np
 import warnings
 
-from .constants import FIFF
-from .open import fiff_open
-from .tag import read_tag
-from .tree import dir_tree_find
-from .pick import channel_type, pick_types
-from .meas_info import read_meas_info, write_meas_info
-from .proj import ProjMixin
-from ..baseline import rescale
-from ..filter import resample, detrend
-from ..fixes import in1d
-from ..utils import _check_pandas_installed, logger, verbose
-from .write import (start_file, start_block, end_file, end_block,
-                    write_int, write_string, write_float_matrix,
-                    write_id)
-
-from ..viz import plot_evoked, plot_evoked_topomap, _mutable_defaults
+from .baseline import rescale
+from .channels import ContainsMixin, PickDropChannelsMixin
+from .filter import resample, detrend
+from .fixes import in1d
+from .utils import (_check_pandas_installed, check_fname, logger, verbose,
+                    deprecated, object_hash)
+from .viz import plot_evoked, plot_evoked_topomap, _mutable_defaults
+from .viz import plot_evoked_field
+from .viz import plot_evoked_image
+from .externals.six import string_types
+
+from .io.constants import FIFF
+from .io.open import fiff_open
+from .io.tag import read_tag
+from .io.tree import dir_tree_find
+from .io.pick import channel_type, pick_types
+from .io.meas_info import read_meas_info, write_meas_info
+from .io.proj import ProjMixin
+from .io.write import (start_file, start_block, end_file, end_block,
+                       write_int, write_string, write_float_matrix,
+                       write_id)
 
 aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
                'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
@@ -31,7 +37,7 @@ aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
               str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
 
 
-class Evoked(ProjMixin):
+class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
     """Evoked data
 
     Parameters
@@ -39,7 +45,7 @@ class Evoked(ProjMixin):
     fname : string
         Name of evoked/average FIF file to load.
         If None no data is loaded.
-    setno : int, or str
+    condition : int, or str
         Dataset ID number (int) or comment/name (str). Optional if there is
         only one data set in file.
     baseline : tuple or list of length 2, or None
@@ -54,7 +60,7 @@ class Evoked(ProjMixin):
         Apply SSP projection vectors
     kind : str
         Either 'average' or 'standard_error'. The type of data to read.
-        Only used if 'setno' is a str.
+        Only used if 'condition' is a str.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -82,10 +88,17 @@ class Evoked(ProjMixin):
         See above.
     """
     @verbose
-    def __init__(self, fname, setno=None, baseline=None, proj=True,
-                 kind='average', verbose=None):
+    def __init__(self, fname, condition=None, baseline=None, proj=True,
+                 kind='average', verbose=None, setno=None):
+
         if fname is None:
-            return
+            raise ValueError('No evoked filename specified')
+
+        if condition is None and setno is not None:
+            condition = setno
+            msg = ("'setno' will be deprecated in 0.9. Use 'condition' "
+                   "instead.")
+            warnings.warn(msg, DeprecationWarning)
 
         self.verbose = verbose
         logger.info('Reading %s ...' % fname)
@@ -108,43 +121,28 @@ class Evoked(ProjMixin):
             fid.close()
             raise ValueError('Could not find evoked data')
 
-        # convert setno to an integer
-        if setno is None:
-            if len(evoked_node) > 1:
-                try:
-                    _, _, t = _get_entries(fid, evoked_node)
-                except:
-                    t = 'None found, must use integer'
-                else:
-                    fid.close()
-                raise ValueError('%d datasets present, setno parameter '
-                                 'must be set. Candidate setno names:\n%s'
-                                 % (len(evoked_node), t))
-            else:
-                setno = 0
-
         # find string-based entry
-        elif isinstance(setno, basestring):
+        if isinstance(condition, string_types):
             if not kind in aspect_dict.keys():
                 fid.close()
                 raise ValueError('kind must be "average" or '
                                  '"standard_error"')
 
             comments, aspect_kinds, t = _get_entries(fid, evoked_node)
-            goods = np.logical_and(in1d(comments, [setno]),
+            goods = np.logical_and(in1d(comments, [condition]),
                                    in1d(aspect_kinds, [aspect_dict[kind]]))
-            found_setno = np.where(goods)[0]
-            if len(found_setno) != 1:
+            found_cond = np.where(goods)[0]
+            if len(found_cond) != 1:
                 fid.close()
-                raise ValueError('setno "%s" (%s) not found, out of found '
-                                 'datasets:\n  %s' % (setno, kind, t))
-            setno = found_setno[0]
+                raise ValueError('condition "%s" (%s) not found, out of found '
+                                 'datasets:\n  %s' % (condition, kind, t))
+            condition = found_cond[0]
 
-        if setno >= len(evoked_node) or setno < 0:
+        if condition >= len(evoked_node) or condition < 0:
             fid.close()
             raise ValueError('Data set selector out of range')
 
-        my_evoked = evoked_node[setno]
+        my_evoked = evoked_node[condition]
 
         # Identify the aspects
         aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
@@ -292,10 +290,10 @@ class Evoked(ProjMixin):
         fname : string
             Name of the file where to save the data.
         """
-        write_evoked(fname, self)
+        write_evokeds(fname, self)
 
     def __repr__(self):
-        s = "comment : %r" % self.comment
+        s = "comment : '%s'" % self.comment
         s += ", time : [%f, %f]" % (self.times[0], self.times[-1])
         s += ", n_epochs : %d" % self.nave
         s += ", n_channels x n_times : %s x %s" % self.data.shape
@@ -350,13 +348,13 @@ class Evoked(ProjMixin):
     def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
              proj=False, xlim='tight', hline=None, units=None, scalings=None,
              titles=None, axes=None):
-        """Plot evoked data
+        """Plot evoked data as butterfly plots
 
         Note: If bad channels are not excluded they are shown in red.
 
         Parameters
         ----------
-        picks : None | array-like of int
+        picks : array-like of int | None
             The indices of channels to plot. If None show all.
         exclude : list of str | 'bads'
             Channels names to exclude from being shown. If 'bads', the
@@ -390,14 +388,64 @@ class Evoked(ProjMixin):
             the same length as the number of channel types. If instance of
             Axes, there must be only one channel type plotted.
         """
-        plot_evoked(self, picks=picks, exclude=exclude, unit=unit, show=show,
-                    ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
-                    scalings=scalings, titles=titles, axes=axes)
-
-    def plot_topomap(self, times=None, ch_type='mag', layout=None, vmax=None,
-                     cmap='RdBu_r', sensors='k,', colorbar=True, scale=None,
-                     unit=None, res=256, size=1, format="%3.1f", proj=False,
-                     show=True):
+        return plot_evoked(self, picks=picks, exclude=exclude, unit=unit,
+                           show=show, ylim=ylim, proj=proj, xlim=xlim,
+                           hline=hline, units=units, scalings=scalings,
+                           titles=titles, axes=axes)
+
+    def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
+                   clim=None, proj=False, xlim='tight', units=None,
+                   scalings=None, titles=None, axes=None, cmap='RdBu_r'):
+        """Plot evoked data as images
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            The indices of channels to plot. If None show all.
+        exclude : list of str | 'bads'
+            Channels names to exclude from being shown. If 'bads', the
+            bad channels are excluded.
+        unit : bool
+            Scale plot with channel (SI) unit.
+        show : bool
+            Call pyplot.show() at the end or not.
+        clim : dict
+            clim for images. e.g. clim = dict(eeg=[-200e-6, 200e-6])
+            Valid keys are eeg, mag, grad
+        xlim : 'tight' | tuple | None
+            xlim for plots.
+        proj : bool | 'interactive'
+            If true SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        units : dict | None
+            The units of the channel types used for axes labels. If None,
+            defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+        scalings : dict | None
+            The scalings of the channel types to be applied for plotting.
+            If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+        titles : dict | None
+            The titles associated with the channels. If None, defaults to
+            `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of channel types. If instance of
+            Axes, there must be only one channel type plotted.
+        cmap : matplotlib colormap
+            Colormap.
+        """
+        return plot_evoked_image(self, picks=picks, exclude=exclude, unit=unit,
+                                 show=show, clim=clim, proj=proj, xlim=xlim,
+                                 units=units, scalings=scalings,
+                                 titles=titles, axes=axes, cmap=cmap)
+
+    def plot_topomap(self, times=None, ch_type='mag', layout=None, vmin=None,
+                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
+                     scale=None, scale_time=1e3, unit=None, res=64, size=1,
+                     format="%3.1f", time_format='%01d ms', proj=False,
+                     show=True, show_names=False, title=None, mask=None,
+                     mask_params=None, outlines='head', contours=6,
+                     image_interp='bilinear'):
         """Plot topographic maps of specific time points
 
         Parameters
@@ -409,14 +457,23 @@ class Evoked(ProjMixin):
         ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
             The channel type to plot. For 'grad', the gradiometers are collec-
             ted in pairs and the RMS for each pair is plotted.
-        layout : None | str | Layout
-            Layout name or instance specifying sensor positions (does not need
-            to be specified for Neuromag data).
-        vmax : scalar
-            The value specfying the range of the color scale (-vmax to +vmax).
-            If None, the largest absolute value in the data is used.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct
+            layout file is inferred from the data; if no appropriate layout
+            file was found, the layout is automatically generated from the
+            sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range. If
+            None and vmax is also None, -vmax (i.e. symmetric around zero)
+            is used; if vmax is given, np.min(data) is used.
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value of the data is used.
+            If callable, the output equals vmax(data).
         cmap : matplotlib colormap
-            Colormap.
+            Colormap. Defaults to 'RdBu_r'.
         sensors : bool | str
             Add markers for sensor locations to the plot. Accepts matplotlib
             plot format string (e.g., 'r+' for red plusses).
@@ -425,6 +482,8 @@ class Evoked(ProjMixin):
         scale : float | None
             Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
             for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1e3 (ms).
         units : str | None
             The units of the channel types used for colorbar labels. If
             scale is None the unit is automatically determined.
@@ -435,24 +494,84 @@ class Evoked(ProjMixin):
             multiple topomaps at a time).
         format : str
             String format for colorbar values.
+        time_format : str
+            String format for the time annotations. Defaults to "%01d ms".
         proj : bool | 'interactive'
             If true SSP projections are applied before display. If
             'interactive', a check box for reversible selection of SSP
             projection vectors will be shown.
         show : bool
             Call pyplot.show() at the end.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not
+            None, only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals:
+            dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                 linewidth=0, markersize=4)
+        outlines : 'head' | dict | None
+            The outlines to be drawn. If 'head', a head scheme will be drawn.
+            If dict, each key refers to a tuple of x and y positions. The
+            values in 'mask_pos' will serve as image mask. If None,
+            nothing will be drawn. Defaults to 'head'.
+        image_interp : str
+            The image interpolation to be used. All matplotlib options are
+            accepted.
+
+        """
+        return plot_evoked_topomap(self, times=times, ch_type=ch_type,
+                                   layout=layout, vmin=vmin,
+                                   vmax=vmax, cmap=cmap, sensors=sensors,
+                                   colorbar=colorbar, scale=scale,
+                                   scale_time=scale_time,
+                                   unit=unit, res=res, proj=proj, size=size,
+                                   format=format, time_format=time_format,
+                                   show=show, show_names=show_names,
+                                   title=title, mask=mask,
+                                   mask_params=mask_params,
+                                   outlines=outlines, contours=contours,
+                                   image_interp=image_interp)
+
+    def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
+                   n_jobs=1):
+        """Plot MEG/EEG fields on head surface and helmet in 3D
+
+        Parameters
+        ----------
+        surf_maps : list
+            The surface mapping information obtained with make_field_map.
+        time : float | None
+            The time point at which the field map shall be displayed. If None,
+            the average peak latency (across sensor types) is used.
+        time_label : str
+            How to print info about the time instant visualized.
+        n_jobs : int
+            Number of jobs to run in parallel.
+
+        Returns
+        -------
+        fig : instance of mlab.Figure
+            The mayavi figure.
         """
-        plot_evoked_topomap(self, times=times, ch_type=ch_type, layout=layout,
-                            vmax=vmax, cmap=cmap, sensors=sensors,
-                            colorbar=colorbar, scale=scale, unit=unit, res=res,
-                            proj=proj, size=size, format=format)
+        return plot_evoked_field(self, surf_maps, time=time,
+                                 time_label=time_label, n_jobs=n_jobs)
 
     def to_nitime(self, picks=None):
         """Export Evoked object to NiTime
 
         Parameters
         ----------
-        picks : array-like | None
+        picks : array-like of int | None
             Indices of channels to apply. If None, all channels will be
             exported.
 
@@ -473,13 +592,14 @@ class Evoked(ProjMixin):
 
     def as_data_frame(self, picks=None, scale_time=1e3, scalings=None,
                       use_time_index=True, copy=True):
-        """Get the epochs as Pandas DataFrame
+        """Get the Evoked object as a Pandas DataFrame
 
-        Export raw data in tabular structure with MEG channels.
+        Export data in tabular structure: each row corresponds to a time point,
+        and each column to a channel.
 
         Parameters
         ----------
-        picks : None | array of int
+        picks : array-like of int | None
             If None all channels are kept, otherwise the channels indices in
             picks are kept.
         scale_time : float
@@ -502,11 +622,11 @@ class Evoked(ProjMixin):
         pd = _check_pandas_installed()
 
         if picks is None:
-            picks = range(self.info['nchan'])
+            picks = list(range(self.info['nchan']))
         else:
             if not in1d(picks, np.arange(len(self.ch_names))).all():
                 raise ValueError('At least one picked channel is not present '
-                                 'in this eppochs instance.')
+                                 'in this Evoked instance.')
 
         data, times = self.data, self.times
 
@@ -538,7 +658,7 @@ class Evoked(ProjMixin):
         if use_time_index is True:
             if 'time' in df:
                 df['time'] = df['time'].astype(np.int64)
-            with warnings.catch_warnings(True):
+            with warnings.catch_warnings(record=True):
                 df.set_index('time', inplace=True)
 
         return df
@@ -558,7 +678,7 @@ class Evoked(ProjMixin):
             Window to use in resampling. See scipy.signal.resample.
         """
         o_sfreq = self.info['sfreq']
-        self.data = resample(self.data, sfreq, o_sfreq, npad, window)
+        self.data = resample(self.data, sfreq, o_sfreq, npad, -1, window)
         # adjust indirectly affected variables
         self.info['sfreq'] = sfreq
         self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq
@@ -576,7 +696,7 @@ class Evoked(ProjMixin):
         order : int
             Either 0 or 1, the order of the detrending. 0 is a constant
             (DC) detrend, 1 is a linear detrend.
-        picks : None | array of int
+        picks : array-like of int | None
             If None only MEG and EEG channels are detrended.
         """
         if picks is None:
@@ -609,6 +729,142 @@ class Evoked(ProjMixin):
         out.comment = self.comment + " - " + this_evoked.comment
         return out
 
+    def __hash__(self):
+        return object_hash(dict(info=self.info, data=self.data))
+
+    def get_peak(self, ch_type=None, tmin=None, tmax=None, mode='abs',
+                 time_as_index=False):
+        """Get location and latency of peak amplitude
+
+        Parameters
+        ----------
+        ch_type : {'mag', 'grad', 'eeg', 'misc', None}
+            The channel type to use. Defaults to None. If more than one
+            sensor type is present in the data, the channel type has to be
+            explicitly set.
+        tmin : float | None
+            The minimum point in time to be considered for peak getting.
+        tmax : float | None
+            The maximum point in time to be considered for peak getting.
+        mode : {'pos', 'neg', 'abs'}
+            How to deal with the sign of the data. If 'pos' only positive
+            values will be considered. If 'neg' only negative values will
+            be considered. If 'abs' absolute values will be considered.
+            Defaults to 'abs'.
+        time_as_index : bool
+            Whether to return the time index instead of the latency in seconds.
+
+        Returns
+        -------
+        ch_name : str
+            The channel exhibiting the maximum response.
+        latency : float | int
+            The time point of the maximum response, either latency in seconds
+            or index.
+        """
+        supported = ('mag', 'grad', 'eeg', 'misc', 'None')
+
+        data_picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False)
+        types_used = set([channel_type(self.info, idx) for idx in data_picks])
+
+        if str(ch_type) not in supported:
+            raise ValueError('Channel type must be `{supported}`. You gave me '
+                             '`{ch_type}` instead.'
+                             .format(ch_type=ch_type,
+                                     supported='` or `'.join(supported)))
+
+        elif ch_type is not None and ch_type not in types_used:
+            raise ValueError('Channel type `{ch_type}` not found in this '
+                             'evoked object.'
+                             .format(ch_type=ch_type))
+
+        elif len(types_used) > 1 and ch_type is None:
+            raise RuntimeError('More than one sensor type found. `ch_type` '
+                               'must not be `None`, pass a sensor type '
+                               'value instead')
+
+        meg, eeg, misc, picks = False, False, False, None
+
+        if ch_type == 'mag':
+            meg = ch_type
+        elif ch_type == 'grad':
+            meg = ch_type
+        elif ch_type == 'eeg':
+            eeg = True
+        elif ch_type == 'misc':
+            misc = True
+
+        if ch_type is not None:
+            picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc,
+                               ref_meg=False)
+
+        data = self.data if picks is None else self.data[picks]
+        ch_idx, time_idx = _get_peak(data, self.times, tmin,
+                                     tmax, mode)
+
+        return (self.ch_names[ch_idx],
+                time_idx if time_as_index else self.times[time_idx])
+
+
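A short sketch of the new get_peak method; ch_type must be given explicitly when several sensor types are present (evoked is a hypothetical Evoked instance)::

    ch_name, latency = evoked.get_peak(ch_type='mag', tmin=0.05,
                                       tmax=0.15, mode='abs')
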
+class EvokedArray(Evoked):
+    """Evoked object from numpy array
+
+    Parameters
+    ----------
+    data : array of shape (n_channels, n_times)
+        The channels' evoked response.
+    info : instance of Info
+        Info dictionary. Consider using ``create_info`` to populate
+        this structure.
+    tmin : float
+        Start time before event.
+    comment : string
+        Comment on dataset. Can be the condition. Defaults to ''.
+    nave : int
+        Number of averaged epochs. Defaults to 1.
+    kind : str
+        Type of data, either average or standard_error. Defaults to 'average'.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+
+    @verbose
+    def __init__(self, data, info, tmin, comment='', nave=1, kind='average',
+                 verbose=None):
+
+        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
+        data = np.asanyarray(data, dtype=dtype)
+
+        if data.ndim != 2:
+            raise ValueError('Data must be a 2D array of shape (n_channels, '
+                             'n_samples)')
+
+        if len(info['ch_names']) != np.shape(data)[0]:
+            raise ValueError('Info and data must have same number of '
+                             'channels.')
+
+        self.data = data
+
+        # XXX: this should use round and be tested
+        self.first = int(tmin * info['sfreq'])
+        self.last = self.first + np.shape(data)[-1] - 1
+        self.times = np.arange(self.first, self.last + 1, dtype=np.float)
+        self.times /= info['sfreq']
+
+        self.info = info
+        self.nave = nave
+        self.kind = kind
+        self.comment = comment
+        self.proj = None
+        self.picks = None
+        self.verbose = verbose
+        self._projector = None
+        if self.kind == 'average':
+            self._aspect_kind = aspect_dict['average']
+        else:
+            self._aspect_kind = aspect_dict['standard_error']
+
 
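A minimal sketch of constructing an EvokedArray from scratch, assuming the class is re-exported at the package level as mne.EvokedArray (the channel names are illustrative)::

    import numpy as np
    import mne

    info = mne.create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=1000.,
                           ch_types=['eeg', 'eeg'])
    data = np.random.randn(2, 400) * 1e-6  # 2 channels x 400 samples, in V
    evoked = mne.EvokedArray(data, info, tmin=-0.1,
                             comment='simulated', nave=20)
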
 def _get_entries(fid, evoked_node):
     """Helper to get all evoked entries"""
@@ -640,6 +896,15 @@ def _get_entries(fid, evoked_node):
     return comments, aspect_kinds, t
 
 
+def _get_evoked_node(fname):
+    """Helper to get info in evoked file"""
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        _, meas = read_meas_info(fid, tree)
+        evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
+    return evoked_node
+
+
 def merge_evoked(all_evoked):
     """Merge/concat evoked data
 
@@ -677,6 +942,7 @@ def merge_evoked(all_evoked):
     return evoked
 
 
+ at deprecated("'read_evoked' will be removed in v0.9. Use 'read_evokeds.'")
 def read_evoked(fname, setno=None, baseline=None, kind='average', proj=True):
     """Read an evoked dataset
 
@@ -685,17 +951,15 @@ def read_evoked(fname, setno=None, baseline=None, kind='average', proj=True):
     fname : string
         The file name.
     setno : int or str | list of int or str | None
-        The index or list of indices of the evoked dataset to read. FIF
-        file can contain multiple datasets. If None and there is only one
-        dataset in the file, this dataset is loaded.
+        The index or list of indices of the evoked dataset to read. FIF files
+        can contain multiple datasets. If None and there is only one dataset in
+        the file, this dataset is loaded.
     baseline : None (default) or tuple of length 2
-        The time interval to apply baseline correction.
-        If None do not apply it. If baseline is (a, b)
-        the interval is between "a (s)" and "b (s)".
-        If a is None the beginning of the data is used
-        and if b is None then b is set to the end of the interval.
-        If baseline is equal ot (None, None) all the time
-        interval is used.
+        The time interval to apply baseline correction. If None do not apply it.
+        If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a
+        is None the beginning of the data is used and if b is None then b is set
+        to the end of the interval. If baseline is equal to (None, None) all the
+        time interval is used.
     kind : str
         Either 'average' or 'standard_error', the type of data to read.
     proj : bool
@@ -706,13 +970,75 @@ def read_evoked(fname, setno=None, baseline=None, kind='average', proj=True):
     evoked : instance of Evoked or list of Evoked
         The evoked datasets.
     """
-    if isinstance(setno, list):
+    evoked_node = _get_evoked_node(fname)
+    if setno is None and len(evoked_node) > 1:
+        fid, _, _ = fiff_open(fname)
+        try:
+            _, _, t = _get_entries(fid, evoked_node)
+        except Exception:
+            t = 'None found, must use integer'
+        finally:
+            fid.close()
+        raise ValueError('%d datasets present, setno parameter must be set. '
+                         'Candidate setno names:\n%s' % (len(evoked_node), t))
+    elif isinstance(setno, list):
         return [Evoked(fname, s, baseline=baseline, kind=kind, proj=proj)
                 for s in setno]
     else:
+        if setno is None:
+            setno = 0
         return Evoked(fname, setno, baseline=baseline, kind=kind, proj=proj)
 
 
+@verbose
+def read_evokeds(fname, condition=None, baseline=None, kind='average',
+                 proj=True, verbose=None):
+    """Read evoked dataset(s)
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -ave.fif or -ave.fif.gz.
+    condition : int or str | list of int or str | None
+        The index or list of indices of the evoked dataset to read. FIF files
+        can contain multiple datasets. If None, all datasets are returned as a
+        list.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction. If None do not apply it.
+        If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a
+        is None the beginning of the data is used and if b is None then b is set
+        to the end of the interval. If baseline is equal to (None, None) all the
+        time interval is used.
+    kind : str
+        Either 'average' or 'standard_error', the type of data to read.
+    proj : bool
+        If False, available projectors won't be applied to the data.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    evoked : Evoked (if condition is int or str) or list of Evoked (if
+        condition is None or list)
+        The evoked dataset(s).
+    """
+    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
+
+    return_list = True
+    if condition is None:
+        evoked_node = _get_evoked_node(fname)
+        condition = range(len(evoked_node))
+    elif not isinstance(condition, list):
+        condition = [condition]
+        return_list = False
+
+    out = [Evoked(fname, c, baseline=baseline, kind=kind, proj=proj,
+           verbose=verbose) for c in condition]
+
+    return out if return_list else out[0]
+
+
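Usage sketch for the new read_evokeds; the file name and condition label are illustrative, and the '-ave.fif' suffix is now enforced by check_fname::

    # all conditions as a list
    evokeds = mne.read_evokeds('sample-ave.fif')
    # a single condition by name, with baseline correction
    evoked = mne.read_evokeds('sample-ave.fif', condition='Left Auditory',
                              baseline=(None, 0))
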
+ at deprecated("'write_evoked' will be removed in v0.9. Use 'write_evokeds.'")
 def write_evoked(fname, evoked):
     """Write an evoked dataset to a file
 
@@ -720,11 +1046,10 @@ def write_evoked(fname, evoked):
     ----------
     fname : string
         The file name.
-
     evoked : instance of Evoked, or list of Evoked
-        The evoked dataset to save, or a list of evoked datasets to save
-        in one file. Note that the measurement info from the first evoked
-        instance is used, so be sure that information matches.
+        The evoked dataset to save, or a list of evoked datasets to save in one
+        file. Note that the measurement info from the first evoked instance is
+        used, so be sure that information matches.
     """
 
     if not isinstance(evoked, list):
@@ -747,7 +1072,7 @@ def write_evoked(fname, evoked):
         start_block(fid, FIFF.FIFFB_EVOKED)
 
         # Comment is optional
-        if len(e.comment) > 0:
+        if e.comment is not None and len(e.comment) > 0:
             write_string(fid, FIFF.FIFF_COMMENT, e.comment)
 
         # First and last sample
@@ -772,3 +1097,136 @@ def write_evoked(fname, evoked):
     end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
     end_block(fid, FIFF.FIFFB_MEAS)
     end_file(fid)
+
+
+def write_evokeds(fname, evoked):
+    """Write an evoked dataset to a file
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -ave.fif or -ave.fif.gz.
+    evoked : Evoked instance, or list of Evoked instances
+        The evoked dataset, or list of evoked datasets, to save in one file.
+        Note that the measurement info from the first evoked instance is used,
+        so be sure that information matches.
+    """
+    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
+
+    if not isinstance(evoked, list):
+        evoked = [evoked]
+
+    # Create the file and save the essentials
+    with start_file(fname) as fid:
+
+        start_block(fid, FIFF.FIFFB_MEAS)
+        write_id(fid, FIFF.FIFF_BLOCK_ID)
+        if evoked[0].info['meas_id'] is not None:
+            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
+
+        # Write measurement info
+        write_meas_info(fid, evoked[0].info)
+
+        # One or more evoked data sets
+        start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+        for e in evoked:
+            start_block(fid, FIFF.FIFFB_EVOKED)
+
+            # Comment is optional
+            if e.comment is not None and len(e.comment) > 0:
+                write_string(fid, FIFF.FIFF_COMMENT, e.comment)
+
+            # First and last sample
+            write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
+            write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
+
+            # The epoch itself
+            start_block(fid, FIFF.FIFFB_ASPECT)
+
+            write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
+            write_int(fid, FIFF.FIFF_NAVE, e.nave)
+
+            decal = np.zeros((e.info['nchan'], 1))
+            for k in range(e.info['nchan']):
+                decal[k] = 1.0 / (e.info['chs'][k]['cal']
+                                  * e.info['chs'][k].get('scale', 1.0))
+
+            write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
+            end_block(fid, FIFF.FIFFB_ASPECT)
+            end_block(fid, FIFF.FIFFB_EVOKED)
+
+        end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+        end_block(fid, FIFF.FIFFB_MEAS)
+        end_file(fid)
+
+
+def _get_peak(data, times, tmin=None, tmax=None, mode='abs'):
+    """Get feature-index and time of maximum signal from 2D array
+
+    Note. This is a 'getter', not a 'finder'. For non-evoked type
+    data and continuous signals, please use proper peak detection algorithms.
+
+    Parameters
+    ----------
+    data : instance of numpy.ndarray (n_locations, n_times)
+        The data, either evoked in sensor or source space.
+    times : instance of numpy.ndarray (n_times)
+        The times in seconds.
+    tmin : float | None
+        The minimum point in time to be considered for peak getting.
+    tmax : float | None
+        The maximum point in time to be considered for peak getting.
+    mode : {'pos', 'neg', 'abs'}
+        How to deal with the sign of the data. If 'pos' only positive
+        values will be considered. If 'neg' only negative values will
+        be considered. If 'abs' absolute values will be considered.
+        Defaults to 'abs'.
+
+    Returns
+    -------
+    max_loc : int
+        The index of the feature with the maximum value.
+    max_time : int
+        The index of the time point of the maximum response.
+    """
+    modes = ('abs', 'neg', 'pos')
+    if mode not in modes:
+        raise ValueError('The `mode` parameter must be `{modes}`. You gave '
+                         'me `{mode}`'.format(modes='` or `'.join(modes),
+                                              mode=mode))
+
+    if tmin is None:
+        tmin = times[0]
+    if tmax is None:
+        tmax = times[-1]
+
+    if tmin < times.min():
+        raise ValueError('The tmin value is out of bounds. It must be '
+                         'within {0} and {1}'.format(times.min(), times.max()))
+    if tmax > times.max():
+        raise ValueError('The tmax value is out of bounds. It must be '
+                         'within {0} and {1}'.format(times.min(), times.max()))
+    if tmin >= tmax:
+        raise ValueError('The tmin must be smaller than tmax')
+
+    time_win = (times >= tmin) & (times <= tmax)
+    mask = np.ones_like(data).astype(np.bool)
+    mask[:, time_win] = False
+
+    maxfun = np.argmax
+    if mode == 'pos':
+        if not np.any(data > 0):
+            raise ValueError('No positive values encountered. Cannot '
+                             'operate in pos mode.')
+    elif mode == 'neg':
+        if not np.any(data < 0):
+            raise ValueError('No negative values encountered. Cannot '
+                             'operate in neg mode.')
+        maxfun = np.argmin
+
+    masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data,
+                               mask=mask)
+
+    max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape)
+
+    return max_loc, max_time
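The masked-array trick in _get_peak masks everything outside [tmin, tmax] and then takes argmax (or argmin) over what remains. A toy demonstration in plain numpy::

    import numpy as np

    times = np.arange(5) / 10.          # 0.0 .. 0.4 s
    data = np.array([[0., 1., -3., 2., 0.],
                     [0., 0., 4., -1., 0.]])
    win = (times >= 0.1) & (times <= 0.3)
    mask = np.ones_like(data, dtype=bool)
    mask[:, win] = False                # keep only the 0.1-0.3 s window
    flat = np.argmax(np.ma.array(np.abs(data), mask=mask))
    print(np.unravel_index(flat, data.shape))  # -> (1, 2): channel 1, 0.2 s
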
diff --git a/mne/externals/FieldTrip.py b/mne/externals/FieldTrip.py
new file mode 100644
index 0000000..0ac21bc
--- /dev/null
+++ b/mne/externals/FieldTrip.py
@@ -0,0 +1,508 @@
+"""
+FieldTrip buffer (V1) client in pure Python
+
+(C) 2010 S. Klanke
+"""
+
+# We need socket, struct, and numpy
+import socket
+import struct
+import numpy
+
+VERSION = 1
+PUT_HDR = 0x101
+PUT_DAT = 0x102
+PUT_EVT = 0x103
+PUT_OK = 0x104
+PUT_ERR = 0x105
+GET_HDR = 0x201
+GET_DAT = 0x202
+GET_EVT = 0x203
+GET_OK = 0x204
+GET_ERR = 0x205
+FLUSH_HDR = 0x301
+FLUSH_DAT = 0x302
+FLUSH_EVT = 0x303
+FLUSH_OK = 0x304
+FLUSH_ERR = 0x305
+WAIT_DAT = 0x402
+WAIT_OK = 0x404
+WAIT_ERR = 0x405
+
+DATATYPE_CHAR = 0
+DATATYPE_UINT8 = 1
+DATATYPE_UINT16 = 2
+DATATYPE_UINT32 = 3
+DATATYPE_UINT64 = 4
+DATATYPE_INT8 = 5
+DATATYPE_INT16 = 6
+DATATYPE_INT32 = 7
+DATATYPE_INT64 = 8
+DATATYPE_FLOAT32 = 9
+DATATYPE_FLOAT64 = 10
+DATATYPE_UNKNOWN = 0xFFFFFFFF
+
+CHUNK_UNSPECIFIED = 0
+CHUNK_CHANNEL_NAMES = 1
+CHUNK_CHANNEL_FLAGS = 2
+CHUNK_RESOLUTIONS = 3
+CHUNK_ASCII_KEYVAL = 4
+CHUNK_NIFTI1 = 5
+CHUNK_SIEMENS_AP = 6
+CHUNK_CTF_RES4 = 7
+CHUNK_NEUROMAG_FIF = 8
+
+# List for converting FieldTrip datatypes to Numpy datatypes
+numpyType = ['int8', 'uint8', 'uint16', 'uint32', 'uint64',
+             'int8', 'int16', 'int32', 'int64', 'float32', 'float64']
+# Corresponding word sizes
+wordSize = [1, 1, 2, 4, 8, 1, 2, 4, 8, 4, 8]
+# FieldTrip data type as indexed by numpy dtype.num
+# this goes  0 => nothing, 1..4 => int8, uint8, int16, uint16, 7..10 =>
+# int32, uint32, int64, uint64  11..12 => float32, float64
+dataType = [-1, 5, 1, 6, 2, -1, -1, 7, 3, 8, 4, 9, 10]
+
+
+def serialize(A):
+    """
+    Returns Fieldtrip data type and string representation of the given
+    object, if possible.
+    """
+    if isinstance(A, str):
+        return (0, A)
+
+    if isinstance(A, numpy.ndarray):
+        dt = A.dtype
+        if not(dt.isnative) or dt.num < 1 or dt.num >= len(dataType):
+            return (DATATYPE_UNKNOWN, None)
+
+        ft = dataType[dt.num]
+        if ft == -1:
+            return (DATATYPE_UNKNOWN, None)
+
+        if A.flags['C_CONTIGUOUS']:
+            # great, just use the array's buffer interface
+            return (ft, str(A.data))
+
+        # otherwise, we need a copy to C order
+        AC = A.copy('C')
+        return (ft, str(AC.data))
+
+    if isinstance(A, int):
+        return (DATATYPE_INT32, struct.pack('i', A))
+
+    if isinstance(A, float):
+        return (DATATYPE_FLOAT64, struct.pack('d', A))
+
+    return (DATATYPE_UNKNOWN, None)
+
+
+class Chunk:
+
+    def __init__(self):
+        self.type = 0
+        self.size = 0
+        self.buf = ''
+
+
+class Header:
+
+    """Class for storing header information in the FieldTrip buffer format"""
+
+    def __init__(self):
+        self.nChannels = 0
+        self.nSamples = 0
+        self.nEvents = 0
+        self.fSample = 0.0
+        self.dataType = 0
+        self.chunks = {}
+        self.labels = []
+
+    def __str__(self):
+        return ('Channels.: %i\nSamples..: %i\nEvents...: %i\nSampFreq.: '
+                '%f\nDataType.: %s\n'
+                % (self.nChannels, self.nSamples, self.nEvents,
+                   self.fSample, numpyType[self.dataType]))
+
+
+class Event:
+    """Class for storing events in the FieldTrip buffer format"""
+
+    def __init__(self, S=None):
+        if S is None:
+            self.type = ''
+            self.value = ''
+            self.sample = 0
+            self.offset = 0
+            self.duration = 0
+        else:
+            self.deserialize(S)
+
+    def __str__(self):
+        return ('Type.....: %s\nValue....: %s\nSample...: %i\nOffset...: '
+                '%i\nDuration.: %i\n' % (str(self.type), str(self.value),
+                                         self.sample, self.offset,
+                                         self.duration))
+
+    def deserialize(self, buf):
+        bufsize = len(buf)
+        if bufsize < 32:
+            return 0
+
+        (type_type, type_numel, value_type, value_numel, sample,
+         offset, duration, bsiz) = struct.unpack('IIIIIiiI', buf[0:32])
+
+        self.sample = sample
+        self.offset = offset
+        self.duration = duration
+
+        st = type_numel * wordSize[type_type]
+        sv = value_numel * wordSize[value_type]
+
+        if bsiz + 32 > bufsize or st + sv > bsiz:
+            raise IOError(
+                'Invalid event definition -- does not fit in given buffer')
+
+        raw_type = buf[32:32 + st]
+        raw_value = buf[32 + st:32 + st + sv]
+
+        if type_type == 0:
+            self.type = raw_type
+        else:
+            self.type = numpy.ndarray(
+                (type_numel), dtype=numpyType[type_type], buffer=raw_type)
+
+        if value_type == 0:
+            self.value = raw_value
+        else:
+            self.value = numpy.ndarray(
+                (value_numel), dtype=numpyType[value_type], buffer=raw_value)
+
+        return bsiz + 32
+
+    def serialize(self):
+        """
+        Returns the contents of this event as a string, ready to
+        send over the network, or None in case of conversion problems.
+        """
+        type_type, type_buf = serialize(self.type)
+        if type_type == DATATYPE_UNKNOWN:
+            return None
+        type_size = len(type_buf)
+        # integer division keeps numel integral for struct.pack below
+        type_numel = type_size // wordSize[type_type]
+
+        value_type, value_buf = serialize(self.value)
+        if value_type == DATATYPE_UNKNOWN:
+            return None
+        value_size = len(value_buf)
+        value_numel = value_size // wordSize[value_type]
+
+        bufsize = type_size + value_size
+
+        S = struct.pack('IIIIIiiI', type_type, type_numel, value_type,
+                        value_numel, int(self.sample), int(self.offset),
+                        int(self.duration), bufsize)
+        return S + type_buf + value_buf
+
+
+class Client:
+
+    """Class for managing a client connection to a FieldTrip buffer."""
+
+    def __init__(self):
+        self.isConnected = False
+        self.sock = []
+
+    def connect(self, hostname, port=1972):
+        """
+        connect(hostname [, port]) -- make a connection, default port is
+        1972.
+        """
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.connect((hostname, port))
+        self.sock.setblocking(True)
+        self.isConnected = True
+
+    def disconnect(self):
+        """disconnect() -- close a connection."""
+        if self.isConnected:
+            self.sock.close()
+            self.sock = []
+            self.isConnected = False
+
+    def sendRaw(self, request):
+        """Send all bytes of the string 'request' out to socket."""
+        if not(self.isConnected):
+            raise IOError('Not connected to FieldTrip buffer')
+
+        N = len(request)
+        nw = self.sock.send(request)
+        while nw < N:
+            nw += self.sock.send(request[nw:])
+
+    def sendRequest(self, command, payload=None):
+        if payload is None:
+            request = struct.pack('HHI', VERSION, command, 0)
+        else:
+            request = struct.pack(
+                'HHI', VERSION, command, len(payload)) + payload
+        self.sendRaw(request)
+
+    def receiveResponse(self, minBytes=0):
+        """
+        Receive a response from the server and return it as
+        (status, bufsize, payload).
+        """
+
+        resp_hdr = self.sock.recv(8)
+        while len(resp_hdr) < 8:
+            resp_hdr += self.sock.recv(8 - len(resp_hdr))
+
+        (version, command, bufsize) = struct.unpack('HHI', resp_hdr)
+
+        if version != VERSION:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        if bufsize > 0:
+            payload = self.sock.recv(bufsize)
+            while len(payload) < bufsize:
+                payload += self.sock.recv(bufsize - len(payload))
+        else:
+            payload = None
+        return (command, bufsize, payload)
+
+    def getHeader(self):
+        """
+        getHeader() -- grab header information from the buffer and return
+        it as a Header object.
+        """
+
+        self.sendRequest(GET_HDR)
+        (status, bufsize, payload) = self.receiveResponse()
+
+        if status == GET_ERR:
+            return None
+
+        if status != GET_OK:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        if bufsize < 24:
+            self.disconnect()
+            raise IOError('Invalid HEADER packet received (too few bytes) - '
+                          'disconnecting')
+
+        (nchans, nsamp, nevt, fsamp, dtype,
+         bfsiz) = struct.unpack('IIIfII', payload[0:24])
+
+        H = Header()
+        H.nChannels = nchans
+        H.nSamples = nsamp
+        H.nEvents = nevt
+        H.fSample = fsamp
+        H.dataType = dtype
+
+        if bfsiz > 0:
+            offset = 24
+            while offset + 8 < bufsize:
+                (chunk_type, chunk_len) = struct.unpack(
+                    'II', payload[offset:offset + 8])
+                offset += 8
+                if offset + chunk_len > bufsize:
+                    # chunk claims more bytes than remain in the payload
+                    break
+                H.chunks[chunk_type] = payload[offset:offset + chunk_len]
+                offset += chunk_len
+
+            if CHUNK_CHANNEL_NAMES in H.chunks:
+                L = H.chunks[CHUNK_CHANNEL_NAMES].split('\0')
+                numLab = len(L)
+                if numLab >= H.nChannels:
+                    H.labels = L[0:H.nChannels]
+
+        return H
+
+    def putHeader(self, nChannels, fSample, dataType, labels=None,
+                  chunks=None):
+        haveLabels = False
+        extras = ''
+        if not(labels is None):
+            serLabels = ''
+            try:
+                for n in range(0, nChannels):
+                    serLabels += labels[n] + '\0'
+            except:
+                raise ValueError('Channel names (labels), if given, must'
+                                 ' be a list of nChannels strings')
+
+            extras = struct.pack('II', CHUNK_CHANNEL_NAMES,
+                                 len(serLabels)) + serLabels
+            haveLabels = True
+
+        if not(chunks is None):
+            for chunk_type, chunk_data in chunks:
+                if haveLabels and chunk_type == CHUNK_CHANNEL_NAMES:
+                    # ignore channel names chunk in case we got labels
+                    continue
+                extras += struct.pack('II', chunk_type,
+                                      len(chunk_data)) + chunk_data
+
+        sizeChunks = len(extras)
+
+        hdef = struct.pack('IIIfII', nChannels, 0, 0,
+                           fSample, dataType, sizeChunks)
+        request = struct.pack('HHI', VERSION, PUT_HDR,
+                              sizeChunks + len(hdef)) + hdef + extras
+        self.sendRaw(request)
+        (status, bufsize, resp_buf) = self.receiveResponse()
+        if status != PUT_OK:
+            raise IOError('Header could not be written')
+
+    def getData(self, index=None):
+        """
+        getData([indices]) -- retrieve data samples and return them as a
+        Numpy array, samples in rows(!). The 'indices' argument is optional,
+        and if given, must be a tuple or list with inclusive, zero-based
+        start/end indices.
+        """
+
+        if index is None:
+            request = struct.pack('HHI', VERSION, GET_DAT, 0)
+        else:
+            indS = int(index[0])
+            indE = int(index[1])
+            request = struct.pack('HHIII', VERSION, GET_DAT, 8, indS, indE)
+        self.sendRaw(request)
+
+        (status, bufsize, payload) = self.receiveResponse()
+        if status == GET_ERR:
+            return None
+
+        if status != GET_OK:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        if bufsize < 16:
+            self.disconnect()
+            raise IOError('Invalid DATA packet received (too few bytes)')
+
+        (nchans, nsamp, datype, bfsiz) = struct.unpack('IIII', payload[0:16])
+
+        if bfsiz < bufsize - 16 or datype >= len(numpyType):
+            raise IOError('Invalid DATA packet received')
+
+        raw = payload[16:bfsiz + 16]
+        D = numpy.ndarray((nsamp, nchans), dtype=numpyType[datype], buffer=raw)
+
+        return D
+
+    def getEvents(self, index=None):
+        """
+        getEvents([indices]) -- retrieve events and return them as a list
+        of Event objects. The 'indices' argument is optional, and if given,
+        must be a tuple or list with inclusive, zero-based start/end indices.
+        The 'type' and 'value' fields of the event will be converted to strings
+        or Numpy arrays.
+        """
+
+        if index is None:
+            request = struct.pack('HHI', VERSION, GET_EVT, 0)
+        else:
+            indS = int(index[0])
+            indE = int(index[1])
+            request = struct.pack('HHIII', VERSION, GET_EVT, 8, indS, indE)
+        self.sendRaw(request)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+        if status == GET_ERR:
+            return []
+
+        if status != GET_OK:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        offset = 0
+        E = []
+        while 1:
+            e = Event()
+            nextOffset = e.deserialize(resp_buf[offset:])
+            if nextOffset == 0:
+                break
+            E.append(e)
+            offset = offset + nextOffset
+
+        return E
+
+    def putEvents(self, E):
+        """
+        putEvents(E) -- write one or more events, depending on whether an
+        'Event' object or a list of 'Event' objects is given as an
+        argument.
+        """
+        if isinstance(E, Event):
+            buf = E.serialize()
+        else:
+            buf = ''
+            num = 0
+            for e in E:
+                if not(isinstance(e, Event)):
+                    raise ValueError('Element %i in given list is not '
+                                     'an Event' % num)
+                buf = buf + e.serialize()
+                num = num + 1
+
+        self.sendRequest(PUT_EVT, buf)
+        (status, bufsize, resp_buf) = self.receiveResponse()
+
+        if status != PUT_OK:
+            raise IOError('Events could not be written.')
+
+    def putData(self, D):
+        """
+        putData(D) -- writes samples that must be given as a NUMPY array,
+        samples x channels. The type of the samples (D) and the number of
+        channels must match the corresponding quantities in the FieldTrip
+        buffer.
+        """
+
+        if not(isinstance(D, numpy.ndarray)) or len(D.shape) != 2:
+            raise ValueError(
+                'Data must be given as a NUMPY array (samples x channels)')
+
+        nSamp = D.shape[0]
+        nChan = D.shape[1]
+
+        (dataType, dataBuf) = serialize(D)
+
+        dataBufSize = len(dataBuf)
+
+        request = struct.pack('HHI', VERSION, PUT_DAT, 16 + dataBufSize)
+        dataDef = struct.pack('IIII', nChan, nSamp, dataType, dataBufSize)
+        self.sendRaw(request + dataDef + dataBuf)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+        if status != PUT_OK:
+            raise IOError('Samples could not be written.')
+
+    def poll(self):
+
+        request = struct.pack('HHIIII', VERSION, WAIT_DAT, 12, 0, 0, 0)
+        self.sendRaw(request)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+
+        if status != WAIT_OK or bufsize < 8:
+            raise IOError('Polling failed.')
+
+        return struct.unpack('II', resp_buf[0:8])
+
+    def wait(self, nsamples, nevents, timeout):
+        request = struct.pack('HHIIII', VERSION, WAIT_DAT,
+                              12, int(nsamples), int(nevents), int(timeout))
+        self.sendRaw(request)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+
+        if status != WAIT_OK or bufsize < 8:
+            raise IOError('Wait request failed.')
+
+        return struct.unpack('II', resp_buf[0:8])
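+
+# A minimal usage sketch of the Client above (a FieldTrip buffer server
+# is assumed to be listening; hostname and port are placeholders):
+#
+#   client = Client()
+#   client.connect('localhost')        # default port is 1972
+#   hdr = client.getHeader()           # None if no header was put yet
+#   if hdr is not None and hdr.nSamples >= 10:
+#       # inclusive, zero-based sample indices: the last ten samples
+#       dat = client.getData((hdr.nSamples - 10, hdr.nSamples - 1))
+#       evts = client.getEvents()
+#   client.disconnect()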
diff --git a/mne/externals/__init__.py b/mne/externals/__init__.py
new file mode 100644
index 0000000..2dd0d4a
--- /dev/null
+++ b/mne/externals/__init__.py
@@ -0,0 +1,4 @@
+from . import six
+from . import jdcal
+from . import decorator
+from . import tempita
\ No newline at end of file
diff --git a/mne/externals/decorator.py b/mne/externals/decorator.py
new file mode 100644
index 0000000..fa79521
--- /dev/null
+++ b/mne/externals/decorator.py
@@ -0,0 +1,253 @@
+##########################     LICENCE     ###############################
+
+# Copyright (c) 2005-2012, Michele Simionato
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+#   Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+#   Redistributions in bytecode form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+"""
+Decorator module, see http://pypi.python.org/pypi/decorator
+for the documentation.
+"""
+from __future__ import print_function
+
+__version__ = '3.4.0'
+
+__all__ = ["decorator", "FunctionMaker", "contextmanager"]
+
+
+import sys, re, inspect
+if sys.version >= '3':
+    from inspect import getfullargspec
+    def get_init(cls):
+        return cls.__init__
+else:
+    class getfullargspec(object):
+        "A quick and dirty replacement for getfullargspec for Python 2.X"
+        def __init__(self, f):
+            self.args, self.varargs, self.varkw, self.defaults = \
+                inspect.getargspec(f)
+            self.kwonlyargs = []
+            self.kwonlydefaults = None
+        def __iter__(self):
+            yield self.args
+            yield self.varargs
+            yield self.varkw
+            yield self.defaults
+    def get_init(cls):
+        return cls.__init__.__func__
+
+DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
+
+# basic functionality
+class FunctionMaker(object):
+    """
+    An object with the ability to create functions with a given signature.
+    It has attributes name, doc, module, signature, defaults, dict and
+    methods update and make.
+    """
+    def __init__(self, func=None, name=None, signature=None,
+                 defaults=None, doc=None, module=None, funcdict=None):
+        self.shortsignature = signature
+        if func:
+            # func can be a class or a callable, but not an instance method
+            self.name = func.__name__
+            if self.name == '<lambda>': # small hack for lambda functions
+                self.name = '_lambda_'
+            self.doc = func.__doc__
+            self.module = func.__module__
+            if inspect.isfunction(func):
+                argspec = getfullargspec(func)
+                self.annotations = getattr(func, '__annotations__', {})
+                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
+                          'kwonlydefaults'):
+                    setattr(self, a, getattr(argspec, a))
+                for i, arg in enumerate(self.args):
+                    setattr(self, 'arg%d' % i, arg)
+                if sys.version < '3': # easy way
+                    self.shortsignature = self.signature = \
+                        inspect.formatargspec(
+                        formatvalue=lambda val: "", *argspec)[1:-1]
+                else: # Python 3 way
+                    allargs = list(self.args)
+                    allshortargs = list(self.args)
+                    if self.varargs:
+                        allargs.append('*' + self.varargs)
+                        allshortargs.append('*' + self.varargs)
+                    elif self.kwonlyargs:
+                        allargs.append('*') # single star syntax
+                    for a in self.kwonlyargs:
+                        allargs.append('%s=None' % a)
+                        allshortargs.append('%s=%s' % (a, a))
+                    if self.varkw:
+                        allargs.append('**' + self.varkw)
+                        allshortargs.append('**' + self.varkw)
+                    self.signature = ', '.join(allargs)
+                    self.shortsignature = ', '.join(allshortargs)
+                self.dict = func.__dict__.copy()
+        # func=None happens when decorating a caller
+        if name:
+            self.name = name
+        if signature is not None:
+            self.signature = signature
+        if defaults:
+            self.defaults = defaults
+        if doc:
+            self.doc = doc
+        if module:
+            self.module = module
+        if funcdict:
+            self.dict = funcdict
+        # check existence required attributes
+        assert hasattr(self, 'name')
+        if not hasattr(self, 'signature'):
+            raise TypeError('You are decorating a non function: %s' % func)
+
+    def update(self, func, **kw):
+        "Update the signature of func with the data in self"
+        func.__name__ = self.name
+        func.__doc__ = getattr(self, 'doc', None)
+        func.__dict__ = getattr(self, 'dict', {})
+        func.__defaults__ = getattr(self, 'defaults', ())
+        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
+        func.__annotations__ = getattr(self, 'annotations', None)
+        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
+        func.__module__ = getattr(self, 'module', callermodule)
+        func.__dict__.update(kw)
+
+    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
+        "Make a new function from a given template and update the signature"
+        src = src_templ % vars(self) # expand name and signature
+        evaldict = evaldict or {}
+        mo = DEF.match(src)
+        if mo is None:
+            raise SyntaxError('not a valid function template\n%s' % src)
+        name = mo.group(1) # extract the function name
+        names = set([name] + [arg.strip(' *') for arg in
+                             self.shortsignature.split(',')])
+        for n in names:
+            if n in ('_func_', '_call_'):
+                raise NameError('%s is overridden in\n%s' % (n, src))
+        if not src.endswith('\n'): # add a newline just for safety
+            src += '\n' # this is needed in old versions of Python
+        try:
+            code = compile(src, '<string>', 'single')
+            # print >> sys.stderr, 'Compiling %s' % src
+            exec(code, evaldict)
+        except:
+            print('Error in generated code:', file=sys.stderr)
+            print(src, file=sys.stderr)
+            raise
+        func = evaldict[name]
+        if addsource:
+            attrs['__source__'] = src
+        self.update(func, **attrs)
+        return func
+
+    @classmethod
+    def create(cls, obj, body, evaldict, defaults=None,
+               doc=None, module=None, addsource=True, **attrs):
+        """
+        Create a function from the strings name, signature and body.
+        evaldict is the evaluation dictionary. If addsource is true an attribute
+        __source__ is added to the result. The attributes attrs are added,
+        if any.
+        """
+        if isinstance(obj, str): # "name(signature)"
+            name, rest = obj.strip().split('(', 1)
+            signature = rest[:-1]  # strip the trailing right paren
+            func = None
+        else: # a function
+            name = None
+            signature = None
+            func = obj
+        self = cls(func, name, signature, defaults, doc, module)
+        ibody = '\n'.join('    ' + line for line in body.splitlines())
+        return self.make('def %(name)s(%(signature)s):\n' + ibody,
+                        evaldict, addsource, **attrs)
+
+def decorator(caller, func=None):
+    """
+    decorator(caller) converts a caller function into a decorator;
+    decorator(caller, func) decorates a function using a caller.
+    """
+    if func is not None: # returns a decorated function
+        evaldict = func.__globals__.copy()
+        evaldict['_call_'] = caller
+        evaldict['_func_'] = func
+        return FunctionMaker.create(
+            func, "return _call_(_func_, %(shortsignature)s)",
+            evaldict, undecorated=func, __wrapped__=func)
+    else: # returns a decorator
+        if inspect.isclass(caller):
+            name = caller.__name__.lower()
+            callerfunc = get_init(caller)
+            doc = 'decorator(%s) converts functions/generators into ' \
+                'factories of %s objects' % (caller.__name__, caller.__name__)
+            fun = getfullargspec(callerfunc).args[1] # second arg
+        elif inspect.isfunction(caller):
+            name = '_lambda_' if caller.__name__ == '<lambda>' \
+                else caller.__name__
+            callerfunc = caller
+            doc = caller.__doc__
+            fun = getfullargspec(callerfunc).args[0] # first arg
+        else: # assume caller is an object with a __call__ method
+            name = caller.__class__.__name__.lower()
+            callerfunc = caller.__call__.__func__
+            doc = caller.__call__.__doc__
+            fun = getfullargspec(callerfunc).args[1] # second arg
+        evaldict = callerfunc.__globals__.copy()
+        evaldict['_call_'] = caller
+        evaldict['decorator'] = decorator
+        return FunctionMaker.create(
+            '%s(%s)' % (name, fun),
+            'return decorator(_call_, %s)' % fun,
+            evaldict, undecorated=caller, __wrapped__=caller,
+            doc=doc, module=caller.__module__)
+
+######################### contextmanager ########################
+
+def __call__(self, func):
+    'Context manager decorator'
+    return FunctionMaker.create(
+        func, "with _self_: return _func_(%(shortsignature)s)",
+        dict(_self_=self, _func_=func), __wrapped__=func)
+
+try: # Python >= 3.2
+
+    from contextlib import _GeneratorContextManager
+    ContextManager = type(
+        'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
+
+except ImportError: # Python >= 2.5
+
+    from contextlib import GeneratorContextManager
+    def __init__(self, f, *a, **k):
+        return GeneratorContextManager.__init__(self, f(*a, **k))
+    ContextManager = type(
+        'ContextManager', (GeneratorContextManager,),
+        dict(__call__=__call__, __init__=__init__))
+
+contextmanager = decorator(ContextManager)
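+
+# A minimal usage sketch of the signature-preserving pattern above
+# (``trace`` and ``add`` are illustrative names):
+#
+#   @decorator
+#   def trace(f, *args, **kw):
+#       print('calling %s with args %s, %s' % (f.__name__, args, kw))
+#       return f(*args, **kw)
+#
+#   @trace
+#   def add(a, b=0):
+#       return a + b
+#
+#   add(1, b=2)   # prints the call, returns 3; add keeps signature (a, b=0)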
diff --git a/mne/externals/jdcal.py b/mne/externals/jdcal.py
new file mode 100644
index 0000000..1b6105c
--- /dev/null
+++ b/mne/externals/jdcal.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+"""Functions for converting between Julian dates and calendar dates.
+
+A function for converting Gregorian calendar dates to Julian dates, and
+another function for converting Julian calendar dates to Julian dates
+are defined. Two functions for the reverse calculations are also
+defined.
+
+Different regions of the world switched from the Julian to the Gregorian
+calendar on different dates. Having separate functions for the Julian and
+Gregorian calendars allows maximum flexibility in choosing the relevant
+calendar.
+
+All the above functions are "proleptic". This means that they work for
+dates on which the concerned calendar is not valid. For example,
+Gregorian calendar was not used prior to around October 1582.
+
+Julian dates are stored in two floating point numbers (double).  Julian
+dates, and Modified Julian dates, are large numbers. If only one number
+is used, then the precision of the time stored is limited. Using two
+numbers, time can be split in a manner that will allow maximum
+precision. For example, the first number could be the Julian date for
+the beginning of a day and the second number could be the fractional
+day. Calculations that need the latter part can now work with maximum
+precision.
+
+A function to test if a given Gregorian calendar year is a leap year is
+defined.
+
+Zero point of Modified Julian Date (MJD) and the MJD of 2000/1/1
+12:00:00 are also given.
+
+This module is based on the TPM C library, by Jeffery W. Percival. The
+idea for splitting Julian date into two floating point numbers was
+inspired by the IAU SOFA C library.
+
+:author: Prasanth Nair
+:contact: prasanthhn at gmail.com
+:license: BSD (http://www.opensource.org/licenses/bsd-license.php)
+
+NB: Code has been heavily adapted for streamlined use by mne-python devs
+"""
+
+
+import numpy as np
+
+MJD_0 = 2400000
+
+
+def ipart(x):
+    """Return integer part of given number."""
+    return np.modf(x)[1]
+
+
+def jcal2jd(year, month, day):
+    """Julian calendar date to Julian date.
+
+    The input and output are for the proleptic Julian calendar,
+    i.e., no consideration of historical usage of the calendar is
+    made.
+
+    Parameters
+    ----------
+    year : int
+        Year as an integer.
+    month : int
+        Month as an integer.
+    day : int
+        Day as an integer.
+
+    Returns
+    -------
+    jd : float
+        Julian date.
+    """
+    year = int(year)
+    month = int(month)
+    day = int(day)
+
+    jd = 367 * year
+    x = ipart((month - 9) / 7.0)
+    jd -= ipart((7 * (year + 5001 + x)) / 4.0)
+    jd += ipart((275 * month) / 9.0)
+    jd += day
+    jd += 1729777
+    return jd
+
+
+def jd2jcal(jd):
+    """Julian calendar date for the given Julian date.
+
+    The input and output are for the proleptic Julian calendar,
+    i.e., no consideration of historical usage of the calendar is
+    made.
+
+    Parameters
+    ----------
+    jd : int or float
+        The Julian date.
+
+    Returns
+    -------
+    y, m, d: int, int, int
+        Three element tuple containing year, month, day.
+    """
+    j = jd + 1402
+    k = ipart((j - 1) / 1461.0)
+    l = j - (1461.0 * k)
+    n = ipart((l - 1) / 365.0) - ipart(l / 1461.0)
+    i = l - (365.0 * n) + 30.0
+    j = ipart((80.0 * i) / 2447.0)
+    day = i - ipart((2447.0 * j) / 80.0)
+    i = ipart(j / 11.0)
+    month = j + 2 - (12.0 * i)
+    year = (4 * k) + n + i - 4716.0
+    return int(year), int(month), int(day)
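+
+# A worked round trip using the functions above (proleptic Julian
+# calendar; Julian 2000-01-01 is 13 days after Gregorian 2000-01-01):
+#
+#   jd = jcal2jd(2000, 1, 1)   # -> 2451558.0
+#   jd2jcal(jd)                # -> (2000, 1, 1)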
diff --git a/mne/externals/six.py b/mne/externals/six.py
new file mode 100644
index 0000000..b3595a4
--- /dev/null
+++ b/mne/externals/six.py
@@ -0,0 +1,577 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2013 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin at python.org>"
+__version__ = "1.4.1"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)
+        # This is a bit ugly, but it avoids running this again.
+        delattr(tp, self.name)
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+
+class _MovedItems(types.ModuleType):
+    """Lazy loading of moved objects"""
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("winreg", "_winreg"),
+]
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+del attr
+
+moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
+
+
+
+class Module_six_moves_urllib_parse(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
+sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
+sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
+sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
+sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser")
+sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    parse = sys.modules[__name__ + ".moves.urllib_parse"]
+    error = sys.modules[__name__ + ".moves.urllib_error"]
+    request = sys.modules[__name__ + ".moves.urllib_request"]
+    response = sys.modules[__name__ + ".moves.urllib_response"]
+    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
+
+
+sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+
+    _iterkeys = "keys"
+    _itervalues = "values"
+    _iteritems = "items"
+    _iterlists = "lists"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+    _iterkeys = "iterkeys"
+    _itervalues = "itervalues"
+    _iteritems = "iteritems"
+    _iterlists = "iterlists"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+def iterkeys(d, **kw):
+    """Return an iterator over the keys of a dictionary."""
+    return iter(getattr(d, _iterkeys)(**kw))
+
+def itervalues(d, **kw):
+    """Return an iterator over the values of a dictionary."""
+    return iter(getattr(d, _itervalues)(**kw))
+
+def iteritems(d, **kw):
+    """Return an iterator over the (key, value) pairs of a dictionary."""
+    return iter(getattr(d, _iteritems)(**kw))
+
+def iterlists(d, **kw):
+    """Return an iterator over the (key, [values]) pairs of a dictionary."""
+    return iter(getattr(d, _iterlists)(**kw))
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+    def u(s):
+        return s
+    unichr = chr
+    if sys.version_info[1] <= 1:
+        def int2byte(i):
+            return bytes((i,))
+    else:
+        # This is about 2x faster than the implementation above on 3.2+
+        int2byte = operator.methodcaller("to_bytes", 1, "big")
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+else:
+    def b(s):
+        return s
+    def u(s):
+        return unicode(s, "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+    def byte2int(bs):
+        return ord(bs[0])
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    def iterbytes(buf):
+        return (ord(byte) for byte in buf)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+if PY3:
+    import builtins
+    exec_ = getattr(builtins, "exec")
+
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+
+    print_ = getattr(builtins, "print")
+    del builtins
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+    def print_(*args, **kwargs):
+        """The new-style print function."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+
+_add_doc(reraise, """Reraise an exception.""")
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    return meta("NewBase", bases, {})
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        for slots_var in orig_vars.get('__slots__', ()):
+            orig_vars.pop(slots_var)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
\ No newline at end of file
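+
+# A minimal sketch of the helpers above; the same code runs on Python 2
+# and Python 3 (``Base`` is an illustrative name):
+#
+#   from mne.externals import six
+#   from mne.externals.six.moves import range, zip   # xrange/izip on Py2
+#
+#   isinstance('abc', six.string_types)        # True on 2 and 3
+#   for key, val in six.iteritems({'a': 1}):   # iteritems() on 2, items() on 3
+#       six.print_(key, val)
+#
+#   @six.add_metaclass(type)                   # attach a metaclass portably
+#   class Base(object):
+#       pass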
diff --git a/mne/externals/tempita/__init__.py b/mne/externals/tempita/__init__.py
new file mode 100644
index 0000000..bdf8457
--- /dev/null
+++ b/mne/externals/tempita/__init__.py
@@ -0,0 +1,1303 @@
+"""
+A small templating language
+
+This implements a small templating language.  This language implements
+if/elif/else, for/continue/break, expressions, and blocks of Python
+code.  The syntax is::
+
+  {{any expression (function calls etc)}}
+  {{any expression | filter}}
+  {{for x in y}}...{{endfor}}
+  {{if x}}x{{elif y}}y{{else}}z{{endif}}
+  {{py:x=1}}
+  {{py:
+  def foo(bar):
+      return 'baz'
+  }}
+  {{default var = default_value}}
+  {{# comment}}
+
+You use this with the ``Template`` class or the ``sub`` shortcut.
+The ``Template`` class takes the template string and the name of
+the template (for errors) and a default namespace.  Then (like
+``string.Template``) you can call the ``tmpl.substitute(**kw)``
+method to make a substitution (or ``tmpl.substitute(a_dict)``).
+
+``sub(content, **kw)`` substitutes the template immediately.  You
+can use ``__name='tmpl.html'`` to set the name of the template.
+
+If there are syntax errors ``TemplateError`` will be raised.
+"""
+
+import warnings
+import re
+import sys
+import cgi
+from ..six.moves.urllib.parse import quote as url_quote
+import os
+import tokenize
+from ..six.moves import cStringIO as StringIO
+from ._looper import looper
+from .compat3 import PY3, bytes, basestring_, next, is_unicode, coerce_text
+
+__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
+           'sub_html', 'html', 'bunch']
+
+in_re = re.compile(r'\s+in\s+')
+var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
+
+
+class TemplateError(Exception):
+    """Exception raised while parsing a template
+    """
+
+    def __init__(self, message, position, name=None):
+        Exception.__init__(self, message)
+        self.position = position
+        self.name = name
+
+    def __str__(self):
+        msg = ' '.join(self.args)
+        if self.position:
+            msg = '%s at line %s column %s' % (
+                msg, self.position[0], self.position[1])
+        if self.name:
+            msg += ' in %s' % self.name
+        return msg
+
+
+class _TemplateContinue(Exception):
+    pass
+
+
+class _TemplateBreak(Exception):
+    pass
+
+
+def get_file_template(name, from_template):
+    path = os.path.join(os.path.dirname(from_template.name), name)
+    return from_template.__class__.from_filename(
+        path, namespace=from_template.namespace,
+        get_template=from_template.get_template)
+
+
+class Template(object):
+
+    default_namespace = {
+        'start_braces': '{{',
+        'end_braces': '}}',
+        'looper': looper,
+    }
+
+    default_encoding = 'utf8'
+    default_inherit = None
+
+    def __init__(self, content, name=None, namespace=None, stacklevel=None,
+                 get_template=None, default_inherit=None, line_offset=0,
+                 delimeters=None):
+        self.content = content
+
+        # set delimeters
+        if delimeters is None:
+            delimeters = (self.default_namespace['start_braces'],
+                          self.default_namespace['end_braces'])
+        else:
+            assert len(delimeters) == 2 and all(
+                [isinstance(delimeter, basestring_)
+                    for delimeter in delimeters])
+            self.default_namespace = self.__class__.default_namespace.copy()
+            self.default_namespace['start_braces'] = delimeters[0]
+            self.default_namespace['end_braces'] = delimeters[1]
+        self.delimeters = delimeters
+
+        self._unicode = is_unicode(content)
+        if name is None and stacklevel is not None:
+            try:
+                caller = sys._getframe(stacklevel)
+            except ValueError:
+                pass
+            else:
+                globals = caller.f_globals
+                lineno = caller.f_lineno
+                if '__file__' in globals:
+                    name = globals['__file__']
+                    if name.endswith('.pyc') or name.endswith('.pyo'):
+                        name = name[:-1]
+                elif '__name__' in globals:
+                    name = globals['__name__']
+                else:
+                    name = '<string>'
+                if lineno:
+                    name += ':%s' % lineno
+        self.name = name
+        self._parsed = parse(
+            content, name=name, line_offset=line_offset,
+            delimeters=self.delimeters)
+        if namespace is None:
+            namespace = {}
+        self.namespace = namespace
+        self.get_template = get_template
+        if default_inherit is not None:
+            self.default_inherit = default_inherit
+
+    def from_filename(cls, filename, namespace=None, encoding=None,
+                      default_inherit=None, get_template=get_file_template):
+        f = open(filename, 'rb')
+        c = f.read()
+        f.close()
+        if encoding:
+            c = c.decode(encoding)
+        return cls(content=c, name=filename, namespace=namespace,
+                   default_inherit=default_inherit, get_template=get_template)
+
+    from_filename = classmethod(from_filename)
+
+    def __repr__(self):
+        return '<%s %s name=%r>' % (
+            self.__class__.__name__,
+            hex(id(self))[2:], self.name)
+
+    def substitute(self, *args, **kw):
+        if args:
+            if kw:
+                raise TypeError(
+                    "You can only give positional *or* keyword arguments")
+            if len(args) > 1:
+                raise TypeError(
+                    "You can only give one positional argument")
+            if not hasattr(args[0], 'items'):
+                raise TypeError(
+                    ("If you pass in a single argument, you must pass in a ",
+                     "dict-like object (with a .items() method); you gave %r")
+                    % (args[0],))
+            kw = args[0]
+        ns = kw
+        ns['__template_name__'] = self.name
+        if self.namespace:
+            ns.update(self.namespace)
+        result, defs, inherit = self._interpret(ns)
+        if not inherit:
+            inherit = self.default_inherit
+        if inherit:
+            result = self._interpret_inherit(result, defs, inherit, ns)
+        return result
+
+    def _interpret(self, ns):
+        # __traceback_hide__ = True
+        parts = []
+        defs = {}
+        self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
+        if '__inherit__' in defs:
+            inherit = defs.pop('__inherit__')
+        else:
+            inherit = None
+        return ''.join(parts), defs, inherit
+
+    def _interpret_inherit(self, body, defs, inherit_template, ns):
+        # __traceback_hide__ = True
+        if not self.get_template:
+            raise TemplateError(
+                'You cannot use inheritance without passing in get_template',
+                position=None, name=self.name)
+        templ = self.get_template(inherit_template, self)
+        self_ = TemplateObject(self.name)
+        for name, value in defs.items():
+            setattr(self_, name, value)
+        self_.body = body
+        ns = ns.copy()
+        ns['self'] = self_
+        return templ.substitute(ns)
+
+    def _interpret_codes(self, codes, ns, out, defs):
+        # __traceback_hide__ = True
+        for item in codes:
+            if isinstance(item, basestring_):
+                out.append(item)
+            else:
+                self._interpret_code(item, ns, out, defs)
+
+    def _interpret_code(self, code, ns, out, defs):
+        # __traceback_hide__ = True
+        name, pos = code[0], code[1]
+        if name == 'py':
+            self._exec(code[2], ns, pos)
+        elif name == 'continue':
+            raise _TemplateContinue()
+        elif name == 'break':
+            raise _TemplateBreak()
+        elif name == 'for':
+            vars, expr, content = code[2], code[3], code[4]
+            expr = self._eval(expr, ns, pos)
+            self._interpret_for(vars, expr, content, ns, out, defs)
+        elif name == 'cond':
+            parts = code[2:]
+            self._interpret_if(parts, ns, out, defs)
+        elif name == 'expr':
+            parts = code[2].split('|')
+            base = self._eval(parts[0], ns, pos)
+            for part in parts[1:]:
+                func = self._eval(part, ns, pos)
+                base = func(base)
+            out.append(self._repr(base, pos))
+        elif name == 'default':
+            var, expr = code[2], code[3]
+            if var not in ns:
+                result = self._eval(expr, ns, pos)
+                ns[var] = result
+        elif name == 'inherit':
+            expr = code[2]
+            value = self._eval(expr, ns, pos)
+            defs['__inherit__'] = value
+        elif name == 'def':
+            name = code[2]
+            signature = code[3]
+            parts = code[4]
+            ns[name] = defs[name] = TemplateDef(
+                self, name, signature, body=parts, ns=ns, pos=pos)
+        elif name == 'comment':
+            return
+        else:
+            assert 0, "Unknown code: %r" % name
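+
+    # Shape reference, inferred from the parse() doctests further down:
+    # code tuples look like ('py', pos, source),
+    # ('for', pos, vars, expr, content), ('cond', pos, *branches),
+    # ('expr', pos, expression), ('default', pos, var, expr),
+    # ('inherit', pos, expr), ('def', pos, name, signature, body) and
+    # ('comment', pos, text).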
+
+    def _interpret_for(self, vars, expr, content, ns, out, defs):
+        # __traceback_hide__ = True
+        for item in expr:
+            if len(vars) == 1:
+                ns[vars[0]] = item
+            else:
+                if len(vars) != len(item):
+                    raise ValueError(
+                        'Need %i items to unpack (got %i items)'
+                        % (len(vars), len(item)))
+                for name, value in zip(vars, item):
+                    ns[name] = value
+            try:
+                self._interpret_codes(content, ns, out, defs)
+            except _TemplateContinue:
+                continue
+            except _TemplateBreak:
+                break
+
+    def _interpret_if(self, parts, ns, out, defs):
+        # __traceback_hide__ = True
+        # @@: a malformed if/else/else sequence is not rejected here
+        for part in parts:
+            assert not isinstance(part, basestring_)
+            name, pos = part[0], part[1]
+            if name == 'else':
+                result = True
+            else:
+                result = self._eval(part[2], ns, pos)
+            if result:
+                self._interpret_codes(part[3], ns, out, defs)
+                break
+
+    def _eval(self, code, ns, pos):
+        # __traceback_hide__ = True
+        try:
+            try:
+                value = eval(code, self.default_namespace, ns)
+            except SyntaxError as e:
+                raise SyntaxError(
+                    'invalid syntax in expression: %s' % code)
+            return value
+        except:
+            exc_info = sys.exc_info()
+            e = exc_info[1]
+            if getattr(e, 'args', None):
+                arg0 = e.args[0]
+            else:
+                arg0 = coerce_text(e)
+            e.args = (self._add_line_info(arg0, pos),)
+            if PY3:
+                raise e.with_traceback(exc_info[2])
+            raise e
+
+    def _exec(self, code, ns, pos):
+        # __traceback_hide__ = True
+        try:
+            exec(code, self.default_namespace, ns)
+        except:
+            exc_info = sys.exc_info()
+            e = exc_info[1]
+            if e.args:
+                e.args = (self._add_line_info(e.args[0], pos),)
+            else:
+                e.args = (self._add_line_info(None, pos),)
+        if PY3:
+            raise e.with_traceback(exc_info[2])
+        raise e
+
+    def _repr(self, value, pos):
+        # __traceback_hide__ = True
+        try:
+            if value is None:
+                return ''
+            if self._unicode:
+                try:
+                    value = str(value)
+                    if not is_unicode(value):
+                        value = value.decode('utf-8')
+                except UnicodeDecodeError:
+                    value = bytes(value)
+            else:
+                if not isinstance(value, basestring_):
+                    value = coerce_text(value)
+                if (is_unicode(value) and self.default_encoding):
+                    value = value.encode(self.default_encoding)
+        except:
+            exc_info = sys.exc_info()
+            e = exc_info[1]
+            e.args = (self._add_line_info(e.args[0], pos),)
+            if PY3:
+                raise e.with_traceback(exc_info[2])
+            raise e
+        else:
+            if self._unicode and isinstance(value, bytes):
+                if not self.default_encoding:
+                    raise ValueError(
+                        'Cannot decode bytes value %r into unicode '
+                        '(no default_encoding provided)' % value)
+                try:
+                    value = value.decode(self.default_encoding)
+                except UnicodeDecodeError as e:
+                    raise UnicodeDecodeError(
+                        e.encoding,
+                        e.object,
+                        e.start,
+                        e.end,
+                        e.reason + ' in string %r' % value)
+            elif not self._unicode and is_unicode(value):
+                if not self.default_encoding:
+                    raise ValueError(
+                        'Cannot encode unicode value %r into bytes '
+                        '(no default_encoding provided)' % value)
+                value = value.encode(self.default_encoding)
+            return value
+
+    def _add_line_info(self, msg, pos):
+        msg = "%s at line %s column %s" % (
+            msg, pos[0], pos[1])
+        if self.name:
+            msg += " in file %s" % self.name
+        return msg
+
+
+def sub(content, delimeters=None, **kw):
+    name = kw.get('__name')
+    tmpl = Template(content, name=name, delimeters=delimeters)
+    return tmpl.substitute(kw)
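+
+# Convenience sketch: sub() builds a one-off Template and substitutes in a
+# single call (default {{ }} delimeters assumed):
+#     >>> sub('Hello {{name}}!', name='world')
+#     'Hello world!'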
+
+
+def paste_script_template_renderer(content, vars, filename=None):
+    tmpl = Template(content, name=filename)
+    return tmpl.substitute(vars)
+
+
+class bunch(dict):
+
+    def __init__(self, **kw):
+        for name, value in kw.items():
+            setattr(self, name, value)
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __getitem__(self, key):
+        if 'default' in self:
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return dict.__getitem__(self, 'default')
+        else:
+            return dict.__getitem__(self, key)
+
+    def __repr__(self):
+        items = sorted(self.items())
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            ' '.join(['%s=%r' % (k, v) for k, v in items]))
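+
+# Illustrative sketch of the 'default' fallback (hypothetical values):
+#     >>> b = bunch(default=1, a=2)
+#     >>> b.a, b['missing']
+#     (2, 1)
+# Without a 'default' key, missing keys raise KeyError as usual.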
+
+############################################################
+## HTML Templating
+############################################################
+
+
+class html(object):
+
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return self.value
+
+    def __html__(self):
+        return self.value
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__, self.value)
+
+
+def html_quote(value, force=True):
+    if not force and hasattr(value, '__html__'):
+        return value.__html__()
+    if value is None:
+        return ''
+    if not isinstance(value, basestring_):
+        value = coerce_text(value)
+    if sys.version >= "3" and isinstance(value, bytes):
+        value = cgi.escape(value.decode('latin1'), 1)
+        value = value.encode('latin1')
+    else:
+        with warnings.catch_warnings(record=True):  # annoying
+            value = cgi.escape(value, 1)
+    if sys.version < "3":
+        if is_unicode(value):
+            value = value.encode('ascii', 'xmlcharrefreplace')
+    return value
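+
+# Quoting sketch: ampersands, angle brackets and double quotes are escaped
+# (via cgi.escape with quote=1):
+#     >>> html_quote('<b>"x" & y</b>')
+#     '&lt;b&gt;&quot;x&quot; &amp; y&lt;/b&gt;'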
+
+
+def url(v):
+    v = coerce_text(v)
+    if is_unicode(v):
+        v = v.encode('utf8')
+    return url_quote(v)
+
+
+def attr(**kw):
+    kw = sorted(kw.items())
+    parts = []
+    for name, value in kw:
+        if value is None:
+            continue
+        if name.endswith('_'):
+            name = name[:-1]
+        parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
+    return html(' '.join(parts))
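+
+# Attribute-building sketch: None values are dropped and a trailing
+# underscore lets you spell reserved words such as class:
+#     >>> attr(href='/x', class_='big', title=None)
+#     <html 'class="big" href="/x"'>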
+
+
+class HTMLTemplate(Template):
+
+    default_namespace = Template.default_namespace.copy()
+    default_namespace.update(dict(
+        html=html,
+        attr=attr,
+        url=url,
+        html_quote=html_quote))
+
+    def _repr(self, value, pos):
+        if hasattr(value, '__html__'):
+            value = value.__html__()
+            quote = False
+        else:
+            quote = True
+        plain = Template._repr(self, value, pos)
+        if quote:
+            return html_quote(plain)
+        else:
+            return plain
+
+
+def sub_html(content, **kw):
+    name = kw.get('__name')
+    tmpl = HTMLTemplate(content, name=name)
+    return tmpl.substitute(kw)
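+
+# HTML-quoting sketch: plain values are escaped on substitution, while
+# values wrapped in html(...) pass through verbatim:
+#     >>> sub_html('<p>{{body}}</p>', body='<script>')
+#     '<p>&lt;script&gt;</p>'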
+
+
+class TemplateDef(object):
+    def __init__(self, template, func_name, func_signature,
+                 body, ns, pos, bound_self=None):
+        self._template = template
+        self._func_name = func_name
+        self._func_signature = func_signature
+        self._body = body
+        self._ns = ns
+        self._pos = pos
+        self._bound_self = bound_self
+
+    def __repr__(self):
+        return '<mne.externals.tempita function %s(%s) at %s:%s>' % (
+            self._func_name, self._func_signature,
+            self._template.name, self._pos)
+
+    def __str__(self):
+        return self()
+
+    def __call__(self, *args, **kw):
+        values = self._parse_signature(args, kw)
+        ns = self._ns.copy()
+        ns.update(values)
+        if self._bound_self is not None:
+            ns['self'] = self._bound_self
+        out = []
+        subdefs = {}
+        self._template._interpret_codes(self._body, ns, out, subdefs)
+        return ''.join(out)
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        return self.__class__(
+            self._template, self._func_name, self._func_signature,
+            self._body, self._ns, self._pos, bound_self=obj)
+
+    def _parse_signature(self, args, kw):
+        values = {}
+        sig_args, var_args, var_kw, defaults = self._func_signature
+        extra_kw = {}
+        for name, value in kw.items():
+            if not var_kw and name not in sig_args:
+                raise TypeError(
+                    'Unexpected argument %s' % name)
+            if name in sig_args:
+                values[name] = value
+            else:
+                extra_kw[name] = value
+        args = list(args)
+        sig_args = list(sig_args)
+        while args:
+            while sig_args and sig_args[0] in values:
+                sig_args.pop(0)
+            if sig_args:
+                name = sig_args.pop(0)
+                values[name] = args.pop(0)
+            elif var_args:
+                values[var_args] = tuple(args)
+                break
+            else:
+                raise TypeError(
+                    'Extra positional arguments: %s'
+                    % ', '.join(repr(v) for v in args))
+        for name, value_expr in defaults.items():
+            if name not in values:
+                values[name] = self._template._eval(
+                    value_expr, self._ns, self._pos)
+        for name in sig_args:
+            if name not in values:
+                raise TypeError(
+                    'Missing argument: %s' % name)
+        if var_kw:
+            values[var_kw] = extra_kw
+        return values
+
+
+class TemplateObject(object):
+
+    def __init__(self, name):
+        self.__name = name
+        self.get = TemplateObjectGetter(self)
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, self.__name)
+
+
+class TemplateObjectGetter(object):
+
+    def __init__(self, template_obj):
+        self.__template_obj = template_obj
+
+    def __getattr__(self, attr):
+        return getattr(self.__template_obj, attr, Empty)
+
+    def __repr__(self):
+        return '<%s around %r>' % (
+            self.__class__.__name__, self.__template_obj)
+
+
+class _Empty(object):
+    def __call__(self, *args, **kw):
+        return self
+
+    def __str__(self):
+        return ''
+
+    def __repr__(self):
+        return 'Empty'
+
+    def __unicode__(self):
+        if PY3:
+            return str('')
+        else:
+            return unicode('')
+
+    def __iter__(self):
+        return iter(())
+
+    def __bool__(self):
+        return False
+
+    if sys.version < "3":
+        __nonzero__ = __bool__
+
+Empty = _Empty()
+del _Empty
+
+############################################################
+## Lexing and Parsing
+############################################################
+
+
+def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
+    if delimeters is None:
+        delimeters = (Template.default_namespace['start_braces'],
+                      Template.default_namespace['end_braces'])
+    in_expr = False
+    chunks = []
+    last = 0
+    last_pos = (line_offset + 1, 1)
+    token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]),
+                                      re.escape(delimeters[1])))
+    for match in token_re.finditer(s):
+        expr = match.group(0)
+        pos = find_position(s, match.end(), last, last_pos)
+        if expr == delimeters[0] and in_expr:
+            raise TemplateError('%s inside expression' % delimeters[0],
+                                position=pos,
+                                name=name)
+        elif expr == delimeters[1] and not in_expr:
+            raise TemplateError('%s outside expression' % delimeters[1],
+                                position=pos,
+                                name=name)
+        if expr == delimeters[0]:
+            part = s[last:match.start()]
+            if part:
+                chunks.append(part)
+            in_expr = True
+        else:
+            chunks.append((s[last:match.start()], last_pos))
+            in_expr = False
+        last = match.end()
+        last_pos = pos
+    if in_expr:
+        raise TemplateError('No %s to finish last expression' % delimeters[1],
+                            name=name, position=last_pos)
+    part = s[last:]
+    if part:
+        chunks.append(part)
+    if trim_whitespace:
+        chunks = trim_lex(chunks)
+    return chunks
+
+lex.__doc__ = """
+Lex a string into chunks:
+
+    >>> lex('hey')
+    ['hey']
+    >>> lex('hey {{you}}')
+    ['hey ', ('you', (1, 7))]
+    >>> lex('hey {{')
+    Traceback (most recent call last):
+        ...
+    mne.externals.tempita.TemplateError: No }} to finish last expression at line 1 column 7
+    >>> lex('hey }}')
+    Traceback (most recent call last):
+        ...
+    mne.externals.tempita.TemplateError: }} outside expression at line 1 column 7
+    >>> lex('hey {{ {{')
+    Traceback (most recent call last):
+        ...
+    mne.externals.tempita.TemplateError: {{ inside expression at line 1 column 10
+
+""" if PY3 else """
+Lex a string into chunks:
+
+    >>> lex('hey')
+    ['hey']
+    >>> lex('hey {{you}}')
+    ['hey ', ('you', (1, 7))]
+    >>> lex('hey {{')
+    Traceback (most recent call last):
+        ...
+    TemplateError: No }} to finish last expression at line 1 column 7
+    >>> lex('hey }}')
+    Traceback (most recent call last):
+        ...
+    TemplateError: }} outside expression at line 1 column 7
+    >>> lex('hey {{ {{')
+    Traceback (most recent call last):
+        ...
+    TemplateError: {{ inside expression at line 1 column 10
+
+"""
+
+statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
+single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
+trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
+lead_whitespace_re = re.compile(r'^[\t ]*\n')
+
+
+def trim_lex(tokens):
+    last_trim = None
+    for i in range(len(tokens)):
+        current = tokens[i]
+        if isinstance(tokens[i], basestring_):
+            # we don't trim this
+            continue
+        item = current[0]
+        if not statement_re.search(item) and item not in single_statements:
+            continue
+        if not i:
+            prev = ''
+        else:
+            prev = tokens[i - 1]
+        if i + 1 >= len(tokens):
+            next_chunk = ''
+        else:
+            next_chunk = tokens[i + 1]
+        if (not isinstance(next_chunk, basestring_)
+                or not isinstance(prev, basestring_)):
+            continue
+        prev_ok = not prev or trail_whitespace_re.search(prev)
+        if i == 1 and not prev.strip():
+            prev_ok = True
+        if last_trim is not None and last_trim + 2 == i and not prev.strip():
+            prev_ok = 'last'
+        if (prev_ok
+            and (not next_chunk or lead_whitespace_re.search(next_chunk)
+                 or (i == len(tokens) - 2 and not next_chunk.strip()))):
+            if prev:
+                if ((i == 1 and not prev.strip()) or prev_ok == 'last'):
+                    tokens[i - 1] = ''
+                else:
+                    m = trail_whitespace_re.search(prev)
+                    # +1 to leave the leading \n on:
+                    prev = prev[:m.start() + 1]
+                    tokens[i - 1] = prev
+            if next_chunk:
+                last_trim = i
+                if i == len(tokens) - 2 and not next_chunk.strip():
+                    tokens[i + 1] = ''
+                else:
+                    m = lead_whitespace_re.search(next_chunk)
+                    next_chunk = next_chunk[m.end():]
+                    tokens[i + 1] = next_chunk
+    return tokens
+
+trim_lex.__doc__ = r"""
+    Takes a lexed set of tokens, and removes whitespace when there is
+    a directive on a line by itself:
+
+       >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
+       >>> tokens
+       [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
+       >>> trim_lex(tokens)
+       [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
+    """ if PY3 else r"""
+    Takes a lexed set of tokens, and removes whitespace when there is
+    a directive on a line by itself:
+
+       >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
+       >>> tokens
+       [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
+       >>> trim_lex(tokens)
+       [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
+    """
+
+
+def find_position(string, index, last_index, last_pos):
+    """
+    Given a string and index, return (line, column)
+    """
+    lines = string.count('\n', last_index, index)
+    if lines > 0:
+        column = index - string.rfind('\n', last_index, index)
+    else:
+        column = last_pos[1] + (index - last_index)
+    return (last_pos[0] + lines, column)
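+
+# Position sketch: in 'ab\ncd', character index 4 (the 'd') maps to the
+# 1-based (line, column) pair (2, 2):
+#     >>> find_position('ab\ncd', 4, 0, (1, 1))
+#     (2, 2)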
+
+
+def parse(s, name=None, line_offset=0, delimeters=None):
+
+    if delimeters is None:
+        delimeters = (Template.default_namespace['start_braces'],
+                      Template.default_namespace['end_braces'])
+    tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters)
+    result = []
+    while tokens:
+        next_chunk, tokens = parse_expr(tokens, name)
+        result.append(next_chunk)
+    return result
+
+parse.__doc__ = r"""
+    Parses a string into a kind of AST
+
+        >>> parse('{{x}}')
+        [('expr', (1, 3), 'x')]
+        >>> parse('foo')
+        ['foo']
+        >>> parse('{{if x}}test{{endif}}')
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
+        >>> parse(
+        ...    'series->{{for x in y}}x={{x}}{{endfor}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        ['series->',
+            ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
+        >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
+        [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
+        >>> parse('{{py:x=1}}')
+        [('py', (1, 3), 'x=1')]
+        >>> parse(
+        ...    '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
+            ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
+
+    Some exceptions::
+
+        >>> parse('{{continue}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: continue outside of for loop at line 1 column 3
+        >>> parse('{{if x}}foo')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: No {{endif}} at line 1 column 3
+        >>> parse('{{else}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: else outside of an if block at line 1 column 3
+        >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: Unexpected endif at line 1 column 25
+        >>> parse('{{if}}{{endif}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: if with no expression at line 1 column 3
+        >>> parse('{{for x y}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
+        >>> parse('{{py:x=1\ny=2}}')  #doctest: +NORMALIZE_WHITESPACE
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: Multi-line py blocks must start
+            with a newline at line 1 column 3
+    """ if PY3 else r"""
+    Parses a string into a kind of AST
+
+        >>> parse('{{x}}')
+        [('expr', (1, 3), 'x')]
+        >>> parse('foo')
+        ['foo']
+        >>> parse('{{if x}}test{{endif}}')
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
+        >>> parse(
+        ...    'series->{{for x in y}}x={{x}}{{endfor}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        ['series->',
+            ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
+        >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
+        [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
+        >>> parse('{{py:x=1}}')
+        [('py', (1, 3), 'x=1')]
+        >>> parse(
+        ...    '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
+            ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
+
+    Some exceptions::
+
+        >>> parse('{{continue}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: continue outside of for loop at line 1 column 3
+        >>> parse('{{if x}}foo')
+        Traceback (most recent call last):
+            ...
+        TemplateError: No {{endif}} at line 1 column 3
+        >>> parse('{{else}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: else outside of an if block at line 1 column 3
+        >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: Unexpected endif at line 1 column 25
+        >>> parse('{{if}}{{endif}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: if with no expression at line 1 column 3
+        >>> parse('{{for x y}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
+        >>> parse('{{py:x=1\ny=2}}')  #doctest: +NORMALIZE_WHITESPACE
+        Traceback (most recent call last):
+            ...
+        TemplateError: Multi-line py blocks must start
+            with a newline at line 1 column 3
+    """
+
+
+def parse_expr(tokens, name, context=()):
+    if isinstance(tokens[0], basestring_):
+        return tokens[0], tokens[1:]
+    expr, pos = tokens[0]
+    expr = expr.strip()
+    if expr.startswith('py:'):
+        expr = expr[3:].lstrip(' \t')
+        if expr.startswith('\n') or expr.startswith('\r'):
+            expr = expr.lstrip('\r\n')
+            if '\r' in expr:
+                expr = expr.replace('\r\n', '\n')
+                expr = expr.replace('\r', '')
+            expr += '\n'
+        else:
+            if '\n' in expr:
+                raise TemplateError(
+                    'Multi-line py blocks must start with a newline',
+                    position=pos, name=name)
+        return ('py', pos, expr), tokens[1:]
+    elif expr in ('continue', 'break'):
+        if 'for' not in context:
+            raise TemplateError(
+                '%s outside of for loop' % expr,
+                position=pos, name=name)
+        return (expr, pos), tokens[1:]
+    elif expr.startswith('if '):
+        return parse_cond(tokens, name, context)
+    elif (expr.startswith('elif ')
+          or expr == 'else'):
+        raise TemplateError(
+            '%s outside of an if block' % expr.split()[0],
+            position=pos, name=name)
+    elif expr in ('if', 'elif', 'for'):
+        raise TemplateError(
+            '%s with no expression' % expr,
+            position=pos, name=name)
+    elif expr in ('endif', 'endfor', 'enddef'):
+        raise TemplateError(
+            'Unexpected %s' % expr,
+            position=pos, name=name)
+    elif expr.startswith('for '):
+        return parse_for(tokens, name, context)
+    elif expr.startswith('default '):
+        return parse_default(tokens, name, context)
+    elif expr.startswith('inherit '):
+        return parse_inherit(tokens, name, context)
+    elif expr.startswith('def '):
+        return parse_def(tokens, name, context)
+    elif expr.startswith('#'):
+        return ('comment', pos, tokens[0][0]), tokens[1:]
+    return ('expr', pos, tokens[0][0]), tokens[1:]
+
+
+def parse_cond(tokens, name, context):
+    start = tokens[0][1]
+    pieces = []
+    context = context + ('if',)
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'Missing {{endif}}',
+                position=start, name=name)
+        if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endif'):
+            return ('cond', start) + tuple(pieces), tokens[1:]
+        next_chunk, tokens = parse_one_cond(tokens, name, context)
+        pieces.append(next_chunk)
+
+
+def parse_one_cond(tokens, name, context):
+    (first, pos), tokens = tokens[0], tokens[1:]
+    content = []
+    if first.endswith(':'):
+        first = first[:-1]
+    if first.startswith('if '):
+        part = ('if', pos, first[3:].lstrip(), content)
+    elif first.startswith('elif '):
+        part = ('elif', pos, first[5:].lstrip(), content)
+    elif first == 'else':
+        part = ('else', pos, None, content)
+    else:
+        assert 0, "Unexpected token %r at %s" % (first, pos)
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'No {{endif}}',
+                position=pos, name=name)
+        if (isinstance(tokens[0], tuple)
+            and (tokens[0][0] == 'endif'
+                 or tokens[0][0].startswith('elif ')
+                 or tokens[0][0] == 'else')):
+            return part, tokens
+        next_chunk, tokens = parse_expr(tokens, name, context)
+        content.append(next_chunk)
+
+
+def parse_for(tokens, name, context):
+    first, pos = tokens[0]
+    tokens = tokens[1:]
+    context = ('for',) + context
+    content = []
+    assert first.startswith('for ')
+    if first.endswith(':'):
+        first = first[:-1]
+    first = first[3:].strip()
+    match = in_re.search(first)
+    if not match:
+        raise TemplateError(
+            'Bad for (no "in") in %r' % first,
+            position=pos, name=name)
+    vars = first[:match.start()]
+    if '(' in vars:
+        raise TemplateError(
+            'You cannot have () in the variable section of a for loop (%r)'
+            % vars, position=pos, name=name)
+    vars = tuple([
+        v.strip() for v in first[:match.start()].split(',')
+        if v.strip()])
+    expr = first[match.end():]
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'No {{endfor}}',
+                position=pos, name=name)
+        if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endfor'):
+            return ('for', pos, vars, expr, content), tokens[1:]
+        next_chunk, tokens = parse_expr(tokens, name, context)
+        content.append(next_chunk)
+
+
+def parse_default(tokens, name, context):
+    first, pos = tokens[0]
+    assert first.startswith('default ')
+    first = first.split(None, 1)[1]
+    parts = first.split('=', 1)
+    if len(parts) == 1:
+        raise TemplateError(
+            "Expression must be {{default var=value}}; no = found in %r" %
+            first, position=pos, name=name)
+    var = parts[0].strip()
+    if ',' in var:
+        raise TemplateError(
+            "{{default x, y = ...}} is not supported",
+            position=pos, name=name)
+    if not var_re.search(var):
+        raise TemplateError(
+            "Not a valid variable name for {{default}}: %r"
+            % var, position=pos, name=name)
+    expr = parts[1].strip()
+    return ('default', pos, var, expr), tokens[1:]
+
+
+def parse_inherit(tokens, name, context):
+    first, pos = tokens[0]
+    assert first.startswith('inherit ')
+    expr = first.split(None, 1)[1]
+    return ('inherit', pos, expr), tokens[1:]
+
+
+def parse_def(tokens, name, context):
+    first, start = tokens[0]
+    tokens = tokens[1:]
+    assert first.startswith('def ')
+    first = first.split(None, 1)[1]
+    if first.endswith(':'):
+        first = first[:-1]
+    if '(' not in first:
+        func_name = first
+        sig = ((), None, None, {})
+    elif not first.endswith(')'):
+        raise TemplateError("Function definition doesn't end with ): %s" %
+                            first, position=start, name=name)
+    else:
+        first = first[:-1]
+        func_name, sig_text = first.split('(', 1)
+        sig = parse_signature(sig_text, name, start)
+    context = context + ('def',)
+    content = []
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'Missing {{enddef}}',
+                position=start, name=name)
+        if (isinstance(tokens[0], tuple) and tokens[0][0] == 'enddef'):
+            return ('def', start, func_name, sig, content), tokens[1:]
+        next_chunk, tokens = parse_expr(tokens, name, context)
+        content.append(next_chunk)
+
+
+def parse_signature(sig_text, name, pos):
+    tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
+    sig_args = []
+    var_arg = None
+    var_kw = None
+    defaults = {}
+
+    def get_token(pos=False):
+        try:
+            tok_type, tok_string, (srow, scol), (erow, ecol), line = next(
+                tokens)
+        except StopIteration:
+            if pos:
+                return tokenize.ENDMARKER, '', (0, 0), (0, 0)
+            return tokenize.ENDMARKER, ''
+        if pos:
+            return tok_type, tok_string, (srow, scol), (erow, ecol)
+        else:
+            return tok_type, tok_string
+    while 1:
+        var_arg_type = None
+        tok_type, tok_string = get_token()
+        if tok_type == tokenize.ENDMARKER:
+            break
+        if tok_type == tokenize.OP and (
+                tok_string == '*' or tok_string == '**'):
+            var_arg_type = tok_string
+            tok_type, tok_string = get_token()
+        if tok_type != tokenize.NAME:
+            raise TemplateError('Invalid signature: (%s)' % sig_text,
+                                position=pos, name=name)
+        var_name = tok_string
+        tok_type, tok_string = get_token()
+        if tok_type == tokenize.ENDMARKER or (
+                tok_type == tokenize.OP and tok_string == ','):
+            if var_arg_type == '*':
+                var_arg = var_name
+            elif var_arg_type == '**':
+                var_kw = var_name
+            else:
+                sig_args.append(var_name)
+            if tok_type == tokenize.ENDMARKER:
+                break
+            continue
+        if var_arg_type is not None:
+            raise TemplateError('Invalid signature: (%s)' % sig_text,
+                                position=pos, name=name)
+        if tok_type == tokenize.OP and tok_string == '=':
+            nest_type = None
+            unnest_type = None
+            nest_count = 0
+            start_pos = end_pos = None
+            parts = []
+            while 1:
+                tok_type, tok_string, s, e = get_token(True)
+                if start_pos is None:
+                    start_pos = s
+                end_pos = e
+                if tok_type == tokenize.ENDMARKER and nest_count:
+                    raise TemplateError('Invalid signature: (%s)' % sig_text,
+                                        position=pos, name=name)
+                if (not nest_count and
+                    (tok_type == tokenize.ENDMARKER or
+                        (tok_type == tokenize.OP and tok_string == ','))):
+                    default_expr = isolate_expression(
+                        sig_text, start_pos, end_pos)
+                    defaults[var_name] = default_expr
+                    sig_args.append(var_name)
+                    break
+                parts.append((tok_type, tok_string))
+                if nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string == nest_type:
+                    nest_count += 1
+                elif nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string == unnest_type:
+                    nest_count -= 1
+                    if not nest_count:
+                        nest_type = unnest_type = None
+                elif not nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string in ('(', '[', '{'):
+                    nest_type = tok_string
+                    nest_count = 1
+                    unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
+    return sig_args, var_arg, var_kw, defaults
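+
+# Return-shape sketch (illustrative): for a signature such as 'a, b=1'
+# this yields roughly (['a', 'b'], None, None, {'b': ...}), i.e. the
+# positional names, the *args name, the **kw name, and default
+# expressions kept as strings for lazy per-call evaluation.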
+
+
+def isolate_expression(string, start_pos, end_pos):
+    srow, scol = start_pos
+    srow -= 1
+    erow, ecol = end_pos
+    erow -= 1
+    lines = string.splitlines(True)
+    if srow == erow:
+        return lines[srow][scol:ecol]
+    parts = [lines[srow][scol:]]
+    parts.extend(lines[srow + 1:erow])
+    if erow < len(lines):
+        # It'll sometimes give (end_row_past_finish, 0)
+        parts.append(lines[erow][:ecol])
+    return ''.join(parts)
+
+_fill_command_usage = """\
+%prog [OPTIONS] TEMPLATE arg=value
+
+Use py:arg=value to set a Python value; otherwise all values are
+strings.
+"""
+
+
+def fill_command(args=None):
+    import sys
+    import optparse
+    import pkg_resources
+    import os
+    if args is None:
+        args = sys.argv[1:]
+    dist = pkg_resources.get_distribution('Paste')
+    parser = optparse.OptionParser(
+        version=coerce_text(dist),
+        usage=_fill_command_usage)
+    parser.add_option(
+        '-o', '--output',
+        dest='output',
+        metavar="FILENAME",
+        help="File to write output to (default stdout)")
+    parser.add_option(
+        '--html',
+        dest='use_html',
+        action='store_true',
+        help="Use HTML style filling (including automatic HTML quoting)")
+    parser.add_option(
+        '--env',
+        dest='use_env',
+        action='store_true',
+        help="Put the environment in as top-level variables")
+    options, args = parser.parse_args(args)
+    if len(args) < 1:
+        print('You must give a template filename')
+        sys.exit(2)
+    template_name = args[0]
+    args = args[1:]
+    vars = {}
+    if options.use_env:
+        vars.update(os.environ)
+    for value in args:
+        if '=' not in value:
+            print('Bad argument: %r' % value)
+            sys.exit(2)
+        name, value = value.split('=', 1)
+        if name.startswith('py:'):
+            name = name[3:]
+            value = eval(value)
+        vars[name] = value
+    if template_name == '-':
+        template_content = sys.stdin.read()
+        template_name = '<stdin>'
+    else:
+        with open(template_name, 'rb') as f:
+            template_content = f.read()
+    if options.use_html:
+        TemplateClass = HTMLTemplate
+    else:
+        TemplateClass = Template
+    template = TemplateClass(template_content, name=template_name)
+    result = template.substitute(vars)
+    if options.output:
+        with open(options.output, 'wb') as f:
+            f.write(result)
+    else:
+        sys.stdout.write(result)
+
+if __name__ == '__main__':
+    fill_command()
diff --git a/mne/externals/tempita/_looper.py b/mne/externals/tempita/_looper.py
new file mode 100644
index 0000000..4413a5b
--- /dev/null
+++ b/mne/externals/tempita/_looper.py
@@ -0,0 +1,163 @@
+"""
+Helper for looping over sequences, particularly in templates.
+
+Often in a template loop it's handy to know which item comes next or
+came before, and whether this is the first or last item in the
+sequence.  These details are awkward to track in a plain Python loop,
+but the looper gives you that context.  Use like::
+
+    >>> for loop, item in looper(['a', 'b', 'c']):
+    ...     print('%s %s' % (loop.number, item))
+    ...     if not loop.last:
+    ...         print('---')
+    1 a
+    ---
+    2 b
+    ---
+    3 c
+
+"""
+
+import sys
+from .compat3 import basestring_
+
+__all__ = ['looper']
+
+
+class looper(object):
+    """
+    Helper for looping (particularly in templates)
+
+    Use this like::
+
+        for loop, item in looper(seq):
+            if loop.first:
+                ...
+    """
+
+    def __init__(self, seq):
+        self.seq = seq
+
+    def __iter__(self):
+        return looper_iter(self.seq)
+
+    def __repr__(self):
+        return '<%s for %r>' % (
+            self.__class__.__name__, self.seq)
+
+
+class looper_iter(object):
+
+    def __init__(self, seq):
+        self.seq = list(seq)
+        self.pos = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.pos >= len(self.seq):
+            raise StopIteration
+        result = loop_pos(self.seq, self.pos), self.seq[self.pos]
+        self.pos += 1
+        return result
+
+    if sys.version < "3":
+        next = __next__
+
+
+class loop_pos(object):
+
+    def __init__(self, seq, pos):
+        self.seq = seq
+        self.pos = pos
+
+    def __repr__(self):
+        return '<loop pos=%r at %r>' % (
+            self.seq[self.pos], self.pos)
+
+    def index(self):
+        return self.pos
+    index = property(index)
+
+    def number(self):
+        return self.pos + 1
+    number = property(number)
+
+    def item(self):
+        return self.seq[self.pos]
+    item = property(item)
+
+    def __next__(self):
+        try:
+            return self.seq[self.pos + 1]
+        except IndexError:
+            return None
+    __next__ = property(__next__)
+
+    if sys.version < "3":
+        next = __next__
+
+    def previous(self):
+        if self.pos == 0:
+            return None
+        return self.seq[self.pos - 1]
+    previous = property(previous)
+
+    def odd(self):
+        return not self.pos % 2
+    odd = property(odd)
+
+    def even(self):
+        return self.pos % 2
+    even = property(even)
+
+    def first(self):
+        return self.pos == 0
+    first = property(first)
+
+    def last(self):
+        return self.pos == len(self.seq) - 1
+    last = property(last)
+
+    def length(self):
+        return len(self.seq)
+    length = property(length)
+
+    def first_group(self, getter=None):
+        """
+        Returns true if this item is the start of a new group,
+        where groups mean that some attribute has changed.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.first:
+            return True
+        return self._compare_group(self.item, self.previous, getter)
+
+    def last_group(self, getter=None):
+        """
+        Returns true if this item is the end of a group,
+        where groups mean that some attribute has changed.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.last:
+            return True
+        return self._compare_group(self.item, self.__next__, getter)
+
+    def _compare_group(self, item, other, getter):
+        if getter is None:
+            return item != other
+        elif (isinstance(getter, basestring_)
+              and getter.startswith('.')):
+            getter = getter[1:]
+            if getter.endswith('()'):
+                getter = getter[:-2]
+                return getattr(item, getter)() != getattr(other, getter)()
+            else:
+                return getattr(item, getter) != getattr(other, getter)
+        elif hasattr(getter, '__call__'):
+            return getter(item) != getter(other)
+        else:
+            return item[getter] != other[getter]
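+
+# Grouping sketch (hypothetical rows): with
+# rows = [{'k': 1}, {'k': 1}, {'k': 2}], loop.first_group('k') is True on
+# the first row and again where the 'k' value changes, and
+# loop.last_group('k') mirrors this at the end of each group.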
diff --git a/mne/externals/tempita/compat3.py b/mne/externals/tempita/compat3.py
new file mode 100644
index 0000000..d49412b
--- /dev/null
+++ b/mne/externals/tempita/compat3.py
@@ -0,0 +1,45 @@
+import sys
+
+__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode']
+
+PY3 = sys.version_info[0] == 3
+
+if sys.version_info[0] < 3:
+    b = bytes = str
+    basestring_ = basestring
+else:
+
+    def b(s):
+        if isinstance(s, str):
+            return s.encode('latin1')
+        return bytes(s)
+    basestring_ = (bytes, str)
+    bytes = bytes
+text = str
+
+if sys.version_info[0] < 3:
+
+    def next(obj):
+        return obj.next()
+else:
+    next = next
+
+
+def is_unicode(obj):
+    if sys.version_info[0] < 3:
+        return isinstance(obj, unicode)
+    else:
+        return isinstance(obj, str)
+
+
+def coerce_text(v):
+    if not isinstance(v, basestring_):
+        if sys.version_info[0] < 3:
+            attr = '__unicode__'
+        else:
+            attr = '__str__'
+        if hasattr(v, attr):
+            if sys.version_info[0] < 3:
+                return unicode(v)
+            return str(v)
+        else:
+            return bytes(v)
+    return v
diff --git a/mne/fiff/__init__.py b/mne/fiff/__init__.py
index 1b5af68..618dfe1 100644
--- a/mne/fiff/__init__.py
+++ b/mne/fiff/__init__.py
@@ -1,23 +1,79 @@
 """FIF module for IO with .fif files"""
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
-from .constants import FIFF
-from .open import fiff_open, show_fiff
-from .evoked import Evoked, read_evoked, write_evoked
-from .meas_info import read_fiducials, write_fiducials, read_info
-from .raw import (Raw, start_writing_raw, write_raw_buffer,
-                  finish_writing_raw, concatenate_raws, get_chpi_positions,
-                  set_eeg_reference)
-from .pick import (pick_types, pick_channels, pick_types_evoked,
-                   pick_channels_regexp, pick_channels_forward,
-                   pick_types_forward, pick_channels_cov,
-                   pick_channels_evoked, pick_info, _has_kit_refs)
-
-from .proj import proj_equal, make_eeg_average_ref_proj
-from .cov import read_cov, write_cov
-from . import bti
-from . import kit
+from ..utils import deprecated
+
+from ..io.open import fiff_open, show_fiff, _fiff_get_fid
+from ..evoked import (Evoked, read_evoked, write_evoked, read_evokeds,
+                      write_evokeds)
+from ..io.meas_info import read_fiducials, write_fiducials, read_info, write_info
+from ..io.pick import (pick_types, pick_channels, pick_types_evoked,
+                       pick_channels_regexp, pick_channels_forward,
+                       pick_types_forward, pick_channels_cov,
+                       pick_channels_evoked, pick_info, _has_kit_refs)
+
+from ..io.proj import proj_equal, make_eeg_average_ref_proj
+from ..cov import _read_cov, _write_cov
+from ..io import array
+from ..io import base
+from ..io import brainvision
+from ..io import bti
+from ..io import edf
+from ..io import egi
+from ..io import fiff
+from ..io import kit
+
+# for backward compatibility
+from ..io.fiff import RawFIFF
+from ..io.fiff import RawFIFF as Raw
+from ..io.base import concatenate_raws, get_chpi_positions, set_eeg_reference
+
+def _deprecate_io(obj, name):
+    return deprecated('Use mne.io.%s as mne.fiff.%s is deprecated and will be '
+                      'removed in v0.9.' % (name, name))(obj)
+
+def _deprecate_mne(obj, name):
+    return deprecated('Use mne.%s as mne.fiff.%s is deprecated and will be '
+                      'removed in v0.9.' % (name, name))(obj)
+
+
+# our decorator overwrites the class, so we need to wrap :(
+class Evoked(Evoked):
+    pass
+
+
+class Raw(Raw):
+    pass
+
+
+Evoked = _deprecate_io(Evoked, 'Evoked')
+Raw = _deprecate_io(Raw, 'Raw')
+read_evoked = _deprecate_io(read_evoked, 'read_evoked')
+read_evokeds = _deprecate_io(read_evokeds, 'read_evokeds')
+write_evoked = _deprecate_io(write_evoked, 'write_evoked')
+write_evokeds = _deprecate_io(write_evokeds, 'write_evokeds')
+read_fiducials = _deprecate_io(read_fiducials, 'read_fiducials')
+write_fiducials = _deprecate_io(write_fiducials, 'write_fiducials')
+read_info = _deprecate_io(read_info, 'read_info')
+write_info = _deprecate_io(write_info, 'write_info')
+proj_equal = _deprecate_io(proj_equal, 'proj_equal')
+make_eeg_average_ref_proj = _deprecate_io(make_eeg_average_ref_proj, 'make_eeg_average_ref_proj')
+read_cov = _deprecate_io(_read_cov, 'read_cov')
+write_cov = _deprecate_io(_write_cov, 'write_cov')
+concatenate_raws = _deprecate_io(concatenate_raws, 'concatenate_raws')
+get_chpi_positions = _deprecate_io(get_chpi_positions, 'get_chpi_positions')
+set_eeg_reference = _deprecate_io(set_eeg_reference, 'set_eeg_reference')
+
+pick_types = _deprecate_mne(pick_types, 'pick_types')
+pick_channels = _deprecate_mne(pick_channels, 'pick_channels')
+pick_types_evoked = _deprecate_mne(pick_types_evoked, 'pick_types_evoked')
+pick_channels_regexp = _deprecate_mne(pick_channels_regexp, 'pick_channels_regexp')
+pick_channels_forward = _deprecate_mne(pick_channels_forward, 'pick_channels_forward')
+pick_types_forward = _deprecate_mne(pick_types_forward, 'pick_types_forward')
+pick_channels_cov = _deprecate_mne(pick_channels_cov, 'pick_channels_cov')
+pick_channels_evoked = _deprecate_mne(pick_channels_evoked, 'pick_channels_evoked')
+pick_info = _deprecate_mne(pick_info, 'pick_info')
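+
+# Net effect (illustrative): ``from mne.fiff import Raw`` still works in
+# 0.8 but emits a DeprecationWarning pointing at ``mne.io.Raw``; all of
+# these shims are slated for removal in v0.9.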
diff --git a/mne/fiff/brainvision/brainvision.py b/mne/fiff/brainvision/brainvision.py
deleted file mode 100644
index aa78d0c..0000000
--- a/mne/fiff/brainvision/brainvision.py
+++ /dev/null
@@ -1,529 +0,0 @@
-"""Conversion tool from Brain Vision EEG to FIF
-
-"""
-
-# Author: Teon Brooks <teon at nyu.edu>
-#
-# License: BSD (3-clause)
-
-import os
-import time
-import re
-import warnings
-from StringIO import StringIO
-from ConfigParser import SafeConfigParser
-
-import numpy as np
-
-from ...fiff import pick_types
-from ...transforms import als_ras_trans, apply_trans
-from ...utils import verbose, logger
-from ..raw import Raw
-from ..meas_info import Info
-from ..constants import FIFF
-from ...coreg import get_ras_to_neuromag_trans
-
-
-class RawBrainVision(Raw):
-    """Raw object from Brain Vision eeg file
-
-    Parameters
-    ----------
-    vdhr_fname : str
-        Path to the EEG header file.
-
-    elp_fname : str | None
-        Path to the elp file containing electrode positions.
-        If None, sensor locations are (0,0,0).
-
-    ch_names : list | None
-        A list of channel names in order of collection of electrode position
-        digitization.
-
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are not read until save.
-
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    See Also
-    --------
-    mne.fiff.Raw : Documentation of attribute and methods.
-    """
-    @verbose
-    def __init__(self, vhdr_fname, elp_fname=None, ch_names=None,
-                 preload=False, verbose=None):
-        logger.info('Extracting eeg Parameters from %s...' % vhdr_fname)
-        vhdr_fname = os.path.abspath(vhdr_fname)
-        self.info, self._eeg_info = _get_eeg_info(vhdr_fname, elp_fname,
-                                                  ch_names)
-        logger.info('Creating Raw.info structure...')
-
-        # Raw attributes
-        self.verbose = verbose
-        self._preloaded = False
-        self.fids = list()
-        self._projector = None
-        self.comp = None  # no compensation for EEG
-        self.proj = False
-        self.first_samp = 0
-        f = open(self.info['file_id'])
-        f.seek(0, os.SEEK_END)
-        n_samples = f.tell()
-        dtype = int(self._eeg_info['dtype'][-1])
-        n_chan = self.info['nchan']
-        self.last_samp = (n_samples / (dtype * (n_chan - 1))) - 1
-
-        if preload:
-            self._preloaded = preload
-            logger.info('Reading raw data from %s...' % vhdr_fname)
-            self._data, _ = self._read_segment()
-            assert len(self._data) == self.info['nchan']
-
-            # Add time info
-            self._times = np.arange(self.first_samp, self.last_samp + 1,
-                                    dtype=np.float64)
-            self._times /= self.info['sfreq']
-            logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
-                        % (self.first_samp, self.last_samp,
-                           float(self.first_samp) / self.info['sfreq'],
-                           float(self.last_samp) / self.info['sfreq']))
-        logger.info('Ready.')
-
-    def __repr__(self):
-        n_chan = self.info['nchan']
-        data_range = self.last_samp - self.first_samp + 1
-        s = ('%r' % os.path.basename(self.info['file_id']),
-             "n_channels x n_times : %s x %s" % (n_chan, data_range))
-        return "<RawEEG  |  %s>" % ', '.join(s)
-
-    def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
-                      projector=None):
-        """Read a chunk of raw data
-
-        Parameters
-        ----------
-        start : int, (optional)
-            first sample to include (first is 0). If omitted, defaults to the
-            first sample in data.
-
-        stop : int, (optional)
-            First sample to not include.
-            If omitted, data is included to the end.
-
-        sel : array, optional
-            Indices of channels to select.
-
-        projector : array
-            SSP operator to apply to the data.
-
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
-
-        Returns
-        -------
-        data : array, [channels x samples]
-           the data matrix (channels x samples).
-
-        times : array, [samples]
-            returns the time values corresponding to the samples.
-        """
-        if sel is None:
-            sel = range(self.info['nchan'])
-        elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
-            return (666, 666)
-        if projector is not None:
-            raise NotImplementedError('Currently does not handle projections.')
-        if stop is None:
-            stop = self.last_samp + 1
-        elif stop > self.last_samp + 1:
-            stop = self.last_samp + 1
-
-        #  Initial checks
-        start = int(start)
-        stop = int(stop)
-
-        eeg_info = self._eeg_info
-        sfreq = self.info['sfreq']
-        n_chan = self.info['nchan']
-        cals = np.array([chan_info['cal'] for chan_info in self.info['chs']])
-        mults = np.array([chan_info['unit_mul'] for chan_info
-                          in self.info['chs']])
-        picks = pick_types(self.info, meg=False, eeg=True, exclude=[])
-        n_eeg = picks.size
-        cals = np.atleast_2d(cals[picks])
-        mults = np.atleast_2d(mults[picks])
-
-        if start >= stop:
-            raise ValueError('No data in this range')
-
-        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
-                    (start, stop - 1, start / float(sfreq),
-                     (stop - 1) / float(sfreq)))
-
-        with open(self.info['file_id'], 'rb') as f:
-            buffer_size = (stop - start)
-            pointer = start * n_chan
-            f.seek(pointer)
-            # extract data
-            data = np.fromfile(f, dtype=eeg_info['dtype'],
-                               count=buffer_size * n_eeg)
-        if eeg_info['data_orientation'] == 'MULTIPLEXED':
-            data = data.reshape((n_eeg, -1), order='F')
-        elif eeg_info['data_orientation'] == 'VECTORIZED':
-            data = data.reshape((n_eeg, -1), order='C')
-
-        gains = cals * mults
-        data = data * gains.T
-
-        stim_channel = np.zeros(data.shape[1])
-        evts = _read_vmrk(eeg_info['marker_id'])
-        if evts is not None:
-            stim_channel[:evts.size] = evts
-        stim_channel = stim_channel[start:stop]
-
-        data = np.vstack((data, stim_channel))
-        data = data[sel]
-
-        logger.info('[done]')
-        times = np.arange(start, stop, dtype=float) / sfreq
-
-        return data, times
-
-
-def _read_vmrk(vmrk_fname):
-    """Extracts the event markers for vmrk file
-
-    Parameters
-    ----------
-    vmrk_fname : str
-        vmrk file to be read.
-
-    Returns
-    -------
-    stim_channel : array
-        An array containing the whole recording's event marking
-    """
-    # read vmrk file
-    with open(vmrk_fname) as fid:
-        txt = fid.read()
-
-    start_tag = 'Brain Vision Data Exchange Marker File, Version 1.0'
-    if not txt.startswith(start_tag):
-        raise ValueError("vmrk file should start with %r" % start_tag)
-
-    # extract Marker Infos block
-    m = re.search("\[Marker Infos\]", txt)
-    if not m:
-        return np.zeros(0)
-    mk_txt = txt[m.end():]
-    m = re.search("\[.*\]", mk_txt)
-    if m:
-        mk_txt = mk_txt[:m.start()]
-
-    # extract event information
-    items = re.findall("^Mk\d+=(.*)", mk_txt, re.MULTILINE)
-    events = []
-    for info in items:
-        mtype, mdesc, offset, duration = info.split(',')[:4]
-        if mtype == 'Stimulus':
-            trigger = int(re.findall('S\s?(\d+)', mdesc)[0])
-            offset, duration = int(offset), int(duration)
-            events.append((trigger, offset, offset + duration))
-    if events:
-        stim_channel = np.zeros(events[-1][2])
-        for event in events:
-            stim_channel[event[1]:event[2]] = trigger
-    else:
-        stim_channel = None
-
-    return stim_channel
-
-
-def _get_elp_locs(elp_fname, ch_names):
-    """Read a Polhemus ascii file
-
-    Parameters
-    ----------
-    elp_fname : str
-        Path to head shape file acquired from Polhemus system and saved in
-        ascii format.
-
-    ch_names : list
-        A list in order of EEG electrodes found in the Polhemus digitizer file.
-
-
-    Returns
-    -------
-    ch_locs : ndarray, shape = (n_points, 3)
-        Electrode points in Neuromag space.
-    """
-    pattern = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
-    with open(elp_fname) as fid:
-        elp = pattern.findall(fid.read())
-    elp = np.array(elp, dtype=float)
-    elp = apply_trans(als_ras_trans, elp)
-    nasion, lpa, rpa = elp[:3]
-    trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
-    elp = apply_trans(trans, elp[8:])
-    ch_locs = dict(zip(ch_names, elp))
-    fid = nasion, lpa, rpa
-
-    return fid, ch_locs
-
-
-def _get_eeg_info(vhdr_fname, elp_fname=None, ch_names=None, preload=False):
-    """Extracts all the information from the header file.
-
-    Parameters
-    ----------
-    vhdr_fname : str
-        Raw EEG header to be read.
-
-    elp_fname : str | None
-        Path to the elp file containing electrode positions.
-        If None, sensor locations are (0,0,0).
-
-    ch_names : list | None
-        A list of channel names in order of collection of electrode position
-        digitization.
-
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are read from disk as needed (e.g. when indexing
-        or saving).
-
-    Returns
-    -------
-    info : instance of Info
-        The measurement info.
-
-    eeg_info : dict
-        A dict containing Brain Vision specific parameters.
-    """
-
-    info = Info()
-    # Some keys to be consistent with FIF measurement info
-    info['meas_id'] = None
-    info['projs'] = []
-    info['comps'] = []
-    info['bads'] = []
-    info['acq_pars'], info['acq_stim'] = None, None
-    info['filename'] = vhdr_fname
-    info['ctf_head_t'] = None
-    info['dev_ctf_t'] = []
-    info['filenames'] = []
-    info['dig'] = None
-    info['dev_head_t'] = None
-    info['proj_id'] = None
-    info['proj_name'] = None
-    info['experimenter'] = None
-    info['description'] = None
-    info['buffer_size_sec'] = 10.
-    info['orig_blocks'] = None
-    info['orig_fid_str'] = None
-
-    eeg_info = {}
-
-    with open(vhdr_fname, 'rb') as f:
-        # extract the first section to resemble a cfg
-        assert (f.readline().strip() ==
-                'Brain Vision Data Exchange Header File Version 1.0')
-        settings = f.read()
-
-    params, settings = settings.split('[Comment]')
-    cfg = SafeConfigParser()
-    cfg.readfp(StringIO(params))
-
-    # get sampling info
-    # Sampling interval is given in microsec
-    sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
-    sfreq = int(sfreq)
-    n_chan = cfg.getint('Common Infos', 'NumberOfChannels')
-
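-    # For reference, the [Common Infos] section being parsed looks roughly
-    # like this (a sketch of the Brain Vision vhdr format):
-    #
-    #     [Common Infos]
-    #     DataFile=test.eeg
-    #     MarkerFile=test.vmrk
-    #     DataFormat=BINARY
-    #     DataOrientation=MULTIPLEXED
-    #     NumberOfChannels=32
-    #     SamplingInterval=2000
-    #
-    # e.g. SamplingInterval=2000 (microseconds) gives
-    # sfreq = 1e6 / 2000 = 500 Hz.
-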
-    # check binary format
-    assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
-    eeg_info['data_orientation'] = cfg.get('Common Infos', 'DataOrientation')
-    if not (eeg_info['data_orientation'] == 'MULTIPLEXED' or
-            eeg_info['data_orientation'] == 'VECTORIZED'):
-        raise NotImplementedError('Data Orientation %s is not supported'
-                                  % eeg_info['data_orientation'])
-
-    binary_format = cfg.get('Binary Infos', 'BinaryFormat')
-    if binary_format == 'INT_16':
-        eeg_info['dtype'] = '<i2'
-    elif binary_format == 'INT_32':
-        eeg_info['dtype'] = '<i4'
-    elif binary_format == 'IEEE_FLOAT_32':
-        eeg_info['dtype'] = '<f4'
-    else:
-        raise NotImplementedError('Datatype %s is not supported'
-                                  % binary_format)
-
-    # load channel labels
-    ch_names = ['UNKNOWN'] * n_chan
-    cals = np.ones(n_chan) * np.nan
-    units = []
-    for chan, props in cfg.items('Channel Infos'):
-        n = int(re.findall(r'ch(\d+)', chan)[0])
-        name, _, resolution, unit = props.split(',')[:4]
-        ch_names[n - 1] = name
-        cals[n - 1] = resolution
-        if unit == '\xc2\xb5V':  # 'µV' encoded as UTF-8 bytes
-            units.append(1e-6)
-        elif unit == 'V':
-            units.append(0)
-        else:
-            units.append(unit)
-
-    # Attempt to extract filtering info from the header. If not found, both
-    # are set to None.
-    settings = settings.splitlines()
-    idx = None
-    if 'Channels' in settings:
-        idx = settings.index('Channels')
-        settings = settings[idx + 1:]
-        for idx, setting in enumerate(settings):
-            if re.match(r'#\s+Name', setting):
-                break
-            else:
-                idx = None
-    if idx is not None:
-        lowpass = []
-        highpass = []
-        for i, ch in enumerate(ch_names, 1):
-            line = settings[idx + i].split()
-            assert ch in line
-            highpass.append(line[5])
-            lowpass.append(line[6])
-        if len(highpass) == 0:
-            info['highpass'] = None
-        elif all(highpass):
-            if highpass[0] == 'NaN':
-                info['highpass'] = None
-            elif highpass[0] == 'DC':
-                info['highpass'] = 0
-            else:
-                info['highpass'] = int(highpass[0])
-        else:
-            info['highpass'] = np.min(highpass)
-            warnings.warn('Channels contain different highpass filters. '
-                          'The lowest filter setting will be stored.')
-        if len(lowpass) == 0:
-            info['lowpass'] = None
-        elif all(lowpass):
-            if lowpass[0] == 'NaN':
-                info['lowpass'] = None
-            else:
-                info['lowpass'] = int(lowpass[0])
-        else:
-            info['lowpass'] = np.min(lowpass)
-            warnings.warn('Channels contain different lowpass filters. '
-                          'The lowest filter setting will be stored.')
-    else:
-        info['highpass'] = None
-        info['lowpass'] = None
-
-    # locate EEG and marker files
-    path = os.path.dirname(vhdr_fname)
-    info['file_id'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
-    eeg_info['marker_id'] = os.path.join(path, cfg.get('Common Infos',
-                                                       'MarkerFile'))
-    info['meas_date'] = int(time.time())
-
-    # Create a list of dicts of EEG channels for raw.info
-    logger.info('Setting channel info structure...')
-    info['chs'] = []
-    info['nchan'] = n_chan
-    info['ch_names'] = ch_names
-    info['sfreq'] = sfreq
-    if elp_fname and ch_names:
-        fid, ch_locs = _get_elp_locs(elp_fname, ch_names)
-        nasion, lpa, rpa = fid
-        info['dig'] = [{'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
-                        'kind': FIFF.FIFFV_POINT_CARDINAL,
-                        'coord_frame':  FIFF.FIFFV_COORD_HEAD},
-                       {'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
-                        'kind': FIFF.FIFFV_POINT_CARDINAL,
-                        'coord_frame': FIFF.FIFFV_COORD_HEAD},
-                       {'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
-                        'kind': FIFF.FIFFV_POINT_CARDINAL,
-                        'coord_frame': FIFF.FIFFV_COORD_HEAD}]
-    else:
-        ch_locs = None
-
-    for idx, ch_info in enumerate(zip(ch_names, cals, units), 1):
-        ch_name, cal, unit_mul = ch_info
-        chan_info = {}
-        chan_info['ch_name'] = ch_name
-        chan_info['kind'] = FIFF.FIFFV_EEG_CH
-        chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
-        chan_info['logno'] = idx
-        chan_info['scanno'] = idx
-        chan_info['cal'] = cal
-        chan_info['range'] = 1.
-        chan_info['unit_mul'] = unit_mul
-        chan_info['unit'] = FIFF.FIFF_UNIT_V
-        chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
-        if ch_locs and ch_name in ch_locs:
-            chan_info['eeg_loc'] = ch_locs[ch_name]
-        else:
-            chan_info['eeg_loc'] = np.zeros(3)
-        chan_info['loc'] = np.zeros(12)
-        chan_info['loc'][:3] = chan_info['eeg_loc']
-        info['chs'].append(chan_info)
-
-    # for stim channel
-    stim_channel = _read_vmrk(eeg_info['marker_id'])
-    if stim_channel is not None:
-        chan_info = {}
-        chan_info['ch_name'] = 'STI 014'
-        chan_info['kind'] = FIFF.FIFFV_STIM_CH
-        chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
-        chan_info['logno'] = idx + 1
-        chan_info['scanno'] = idx + 1
-        chan_info['cal'] = 1
-        chan_info['range'] = 1
-        chan_info['unit_mul'] = 0
-        chan_info['unit'] = FIFF.FIFF_UNIT_NONE
-        chan_info['eeg_loc'] = np.zeros(3)
-        chan_info['loc'] = np.zeros(12)
-        info['nchan'] = n_chan + 1
-        info['ch_names'].append(chan_info['ch_name'])
-        info['chs'].append(chan_info)
-
-    return info, eeg_info
-
-
-def read_raw_brainvision(vhdr_fname, elp_fname=None, ch_names=None,
-                         preload=False, verbose=None):
-    """Reader for Brain Vision EEG file
-
-    Parameters
-    ----------
-    vhdr_fname : str
-        Path to the EEG header file.
-
-    elp_fname : str | None
-        Path to the elp file containing electrode positions.
-        If None, sensor locations are (0,0,0).
-
-    ch_names : list | None
-        A list of channel names in order of collection of electrode position
-        digitization.
-
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are read from disk as needed (e.g. when indexing
-        or saving).
-
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    See Also
-    --------
-    mne.fiff.Raw : Documentation of attributes and methods.
-    """
-    return RawBrainVision(vhdr_fname=vhdr_fname, elp_fname=elp_fname,
-                          ch_names=ch_names, preload=preload, verbose=verbose)
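-
-
-# A minimal usage sketch (hypothetical file names; the companion .eeg and
-# .vmrk files are located via the header):
-#
-#     >>> raw = read_raw_brainvision('subject01.vhdr', preload=True)
-#     >>> data, times = raw[:2, :1000]  # first two channels, 1000 samples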
diff --git a/mne/fiff/brainvision/tests/test_brainvision.py b/mne/fiff/brainvision/tests/test_brainvision.py
deleted file mode 100644
index 320ee3a..0000000
--- a/mne/fiff/brainvision/tests/test_brainvision.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""Data Equivalence Tests"""
-
-# Author: Teon Brooks <teon at nyu.edu>
-#
-# License: BSD (3-clause)
-
-import os.path as op
-import inspect
-
-from nose.tools import assert_equal
-from numpy.testing import assert_array_almost_equal, assert_array_equal
-
-from mne.utils import _TempDir
-from mne.fiff import Raw, pick_types
-from mne.fiff.brainvision import read_raw_brainvision
-
-FILE = inspect.getfile(inspect.currentframe())
-data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
-vhdr_path = op.join(data_dir, 'test.vhdr')
-elp_path = op.join(data_dir, 'test_elp.txt')
-eeg_bin = op.join(data_dir, 'test_bin_raw.fif')
-ch_names = ['FP1', 'VEOGt', 'F7', 'GND', 'F8',
-            'FC5', 'F3', 'FZ', 'F4', 'FC6',
-            'FC1', 'FCZ', 'FC2', 'CP5', 'C3',
-            'CZ', 'C4', 'CP6', 'CP1', 'CPZ',
-            'CP2', 'P7', 'P3', 'PZ', 'P4',
-            'P8', 'O1', 'POZ', 'O2', 'A1',
-            'A2', 'HEOGL', 'HEOGR', 'VEOGb']
-
-tempdir = _TempDir()
-
-
-def test_brainvision_data():
-    """Test reading raw Brain Vision files
-    """
-    raw_py = read_raw_brainvision(vhdr_path, elp_fname=elp_path,
-                                  ch_names=ch_names, preload=True)
-    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
-    data_py, times_py = raw_py[picks]
-
-    print raw_py  # to test repr
-    print raw_py.info  # to test Info repr
-
-    # this fif was generated using MNE-C
-    raw_bin = Raw(eeg_bin, preload=True)
-    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
-    data_bin, times_bin = raw_bin[picks]
-
-    assert_array_almost_equal(data_py, data_bin)
-    assert_array_almost_equal(times_py, times_bin)
-
-
-def test_read_segment():
-    """Test writing raw eeg files when preload is False
-    """
-    raw1 = read_raw_brainvision(vhdr_path, preload=False)
-    raw1_file = op.join(tempdir, 'raw1.fif')
-    raw1.save(raw1_file, overwrite=True)
-    raw11 = Raw(raw1_file, preload=True)
-    data1, times1 = raw1[:, :]
-    data11, times11 = raw11[:, :]
-    assert_array_almost_equal(data1, data11, 8)
-    assert_array_almost_equal(times1, times11)
-    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
-
-    raw2 = read_raw_brainvision(vhdr_path, preload=True)
-    raw2_file = op.join(tempdir, 'raw2.fif')
-    raw2.save(raw2_file, overwrite=True)
-    data2, times2 = raw2[:, :]
-    assert_array_equal(data1, data2)
-    assert_array_equal(times1, times2)
-
-    raw1 = Raw(raw1_file, preload=True)
-    raw2 = Raw(raw2_file, preload=True)
-    assert_array_equal(raw1._data, raw2._data)
diff --git a/mne/fiff/bti/__init__.py b/mne/fiff/bti/__init__.py
deleted file mode 100644
index 7e01861..0000000
--- a/mne/fiff/bti/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Bti module for conversion to FIF"""
-
-# Author: Denis A. Engemann <d.engemann at fz-juelich.de>
-
-from .raw import read_raw_bti
diff --git a/mne/fiff/channels.py b/mne/fiff/channels.py
deleted file mode 100644
index 4c1e11e..0000000
--- a/mne/fiff/channels.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#
-# License: BSD (3-clause)
-
-from .tree import dir_tree_find
-from .tag import find_tag
-from .constants import FIFF
-
-
-def read_bad_channels(fid, node):
-    """Read bad channels
-
-    Parameters
-    ----------
-    fid : file
-        The file descriptor.
-
-    node : dict
-        The node of the FIF tree that contains info on the bad channels.
-
-    Returns
-    -------
-    bads : list
-        A list of bad channel names.
-    """
-    nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)
-
-    bads = []
-    if len(nodes) > 0:
-        for node in nodes:
-            tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)
-            if tag is not None and tag.data is not None:
-                bads = tag.data.split(':')
-    return bads
diff --git a/mne/fiff/cov.py b/mne/fiff/cov.py
deleted file mode 100644
index e99aaae..0000000
--- a/mne/fiff/cov.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#
-# License: BSD (3-clause)
-
-import numpy as np
-
-from .constants import FIFF
-from .write import (start_block, end_block, write_int, write_name_list,
-                    write_double, write_float_matrix)
-from .tag import find_tag
-from .tree import dir_tree_find
-from .proj import read_proj, write_proj
-from .channels import read_bad_channels
-from ..utils import logger, verbose
-
-
-@verbose
-def read_cov(fid, node, cov_kind, verbose=None):
-    """Read a noise covariance matrix
-
-    This is a low-level function. Consider using `mne.cov.read_cov()`
-    for most user-level purposes.
-
-    Parameters
-    ----------
-    fid : file
-        The file descriptor.
-    node : dict
-        The node in the FIF tree.
-    cov_kind : int
-        The type of covariance. This is typically
-        FIFF.FIFFV_MNE_NOISE_COV, although it could also be
-        FIFF.FIFFV_MNE_SOURCE_COV or FIFF.FIFFV_MNE_FMRI_PRIOR_COV.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    Returns
-    -------
-    cov : dict | None
-        The noise covariance matrix, or None if no covariance of kind
-        ``cov_kind`` was found.
-    """
-    #   Find all covariance matrices
-    covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
-    if len(covs) == 0:
-        raise ValueError('No covariance matrices found')
-
-    #   Is any of the covariance matrices a noise covariance
-    for p in range(len(covs)):
-        tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
-
-        if tag is not None and int(tag.data) == cov_kind:
-            this = covs[p]
-
-            #   Find all the necessary data
-            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
-            if tag is None:
-                raise ValueError('Covariance matrix dimension not found')
-            dim = int(tag.data)
-
-            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
-            if tag is None:
-                nfree = -1
-            else:
-                nfree = int(tag.data)
-
-            tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
-            if tag is None:
-                names = []
-            else:
-                names = tag.data.split(':')
-                if len(names) != dim:
-                    raise ValueError('Number of names does not match '
-                                     'covariance matrix dimension')
-
-            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
-            if tag is None:
-                tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
-                if tag is None:
-                    raise ValueError('No covariance matrix data found')
-                else:
-                    #   Diagonal is stored
-                    data = tag.data
-                    diagmat = True
-                    logger.info('    %d x %d diagonal covariance (kind = '
-                                '%d) found.' % (dim, dim, cov_kind))
-
-            else:
-                from scipy import sparse
-                if not sparse.issparse(tag.data):
-                    #   Lower diagonal is stored
-                    vals = tag.data
-                    data = np.zeros((dim, dim))
-                    data[np.tril(np.ones((dim, dim))) > 0] = vals
-                    data = data + data.T
-                    data.flat[::dim + 1] /= 2.0
-                    diagmat = False
-                    logger.info('    %d x %d full covariance (kind = %d) '
-                                'found.' % (dim, dim, cov_kind))
-                else:
-                    diagmat = False
-                    data = tag.data
-                    logger.info('    %d x %d sparse covariance (kind = %d)'
-                                ' found.' % (dim, dim, cov_kind))
-
-            #   Read the possibly precomputed decomposition
-            tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
-            tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
-            if tag1 is not None and tag2 is not None:
-                eig = tag1.data
-                eigvec = tag2.data
-            else:
-                eig = None
-                eigvec = None
-
-            #   Read the projection operator
-            projs = read_proj(fid, this)
-
-            #   Read the bad channel list
-            bads = read_bad_channels(fid, this)
-
-            #   Put it together
-            cov = dict(kind=cov_kind, diag=diagmat, dim=dim, names=names,
-                       data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
-                       eigvec=eigvec)
-            return cov
-
-    logger.info('    Did not find the desired covariance matrix (kind = %d)'
-                % cov_kind)
-
-    return None
-
-
-def write_cov(fid, cov):
-    """Write a noise covariance matrix
-
-    Parameters
-    ----------
-    fid : file
-        The file descriptor.
-    cov : dict
-        The noise covariance matrix to write.
-    """
-    start_block(fid, FIFF.FIFFB_MNE_COV)
-
-    #   Dimensions etc.
-    write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
-    write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
-    if cov['nfree'] > 0:
-        write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
-
-    #   Channel names
-    if cov['names'] is not None and len(cov['names']) > 0:
-        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
-
-    #   Data
-    if cov['diag']:
-        write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
-    else:
-        # Store only lower part of covariance matrix
-        dim = cov['dim']
-        mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0
-        vals = cov['data'][mask].ravel()
-        write_double(fid, FIFF.FIFF_MNE_COV, vals)
-
-    #   Eigenvalues and vectors if present
-    if cov['eig'] is not None and cov['eigvec'] is not None:
-        write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
-        write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
-
-    #   Projection operator
-    if cov['projs'] is not None and len(cov['projs']) > 0:
-        write_proj(fid, cov['projs'])
-
-    #   Bad channels
-    if cov['bads'] is not None and len(cov['bads']) > 0:
-        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
-        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
-        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
-
-    #   Done!
-    end_block(fid, FIFF.FIFFB_MNE_COV)
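-
-
-# The lower-triangle packing above round-trips with the unpacking in
-# read_cov. A small numpy sketch (4 x 4, made-up symmetric values):
-#
-#     >>> dim = 4
-#     >>> cov_full = np.arange(dim * dim, dtype=float).reshape(dim, dim)
-#     >>> cov_full = (cov_full + cov_full.T) / 2.       # symmetrize
-#     >>> mask = np.tril(np.ones((dim, dim), dtype=bool))
-#     >>> vals = cov_full[mask].ravel()                 # what write_cov stores
-#     >>> data = np.zeros((dim, dim))
-#     >>> data[mask] = vals                             # what read_cov does
-#     >>> data = data + data.T
-#     >>> data.flat[::dim + 1] /= 2.                    # diagonal was doubled
-#     >>> np.allclose(data, cov_full)
-#     True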
diff --git a/mne/fiff/edf/tests/test_edf.py b/mne/fiff/edf/tests/test_edf.py
deleted file mode 100644
index 7dee947..0000000
--- a/mne/fiff/edf/tests/test_edf.py
+++ /dev/null
@@ -1,92 +0,0 @@
-"""Data Equivalence Tests"""
-
-# Author: Teon Brooks <teon at nyu.edu>
-#
-# License: BSD (3-clause)
-
-import os.path as op
-import inspect
-
-from nose.tools import assert_equal, assert_true
-from numpy.testing import assert_array_almost_equal, assert_array_equal
-from scipy import io
-
-from mne.utils import _TempDir
-from mne.fiff import Raw, pick_types
-from mne.fiff.edf import read_raw_edf
-
-FILE = inspect.getfile(inspect.currentframe())
-data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
-hpts_path = op.join(data_dir, 'biosemi.hpts')
-bdf_path = op.join(data_dir, 'test.bdf')
-edf_path = op.join(data_dir, 'test.edf')
-bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
-edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
-
-tempdir = _TempDir()
-
-
-def test_bdf_data():
-    """Test reading raw bdf files
-    """
-    raw_py = read_raw_edf(bdf_path, hpts=hpts_path, preload=True)
-    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
-    data_py, _ = raw_py[picks]
-
-    print raw_py  # to test repr
-    print raw_py.info  # to test Info repr
-
-    # this .mat was generated using the EEG Lab Biosemi Reader
-    raw_eeglab = io.loadmat(bdf_eeglab_path)
-    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
-    data_eeglab = raw_eeglab[picks]
-
-    assert_array_almost_equal(data_py, data_eeglab)
-
-    # Manually checking that float coordinates are imported
-    assert_true((raw_py.info['chs'][0]['eeg_loc']).any())
-    assert_true((raw_py.info['chs'][25]['eeg_loc']).any())
-    assert_true((raw_py.info['chs'][63]['eeg_loc']).any())
-
-
-def test_edf_data():
-    """Test reading raw edf files
-    """
-    raw_py = read_raw_edf(edf_path, preload=True)
-    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
-    data_py, _ = raw_py[picks]
-
-    print raw_py  # to test repr
-    print raw_py.info  # to test Info repr
-
-    # this .mat was generated using the EEG Lab Biosemi Reader
-    raw_eeglab = io.loadmat(edf_eeglab_path)
-    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
-    data_eeglab = raw_eeglab[picks]
-
-    assert_array_almost_equal(data_py, data_eeglab)
-
-
-def test_read_segment():
-    """Test writing raw edf files when preload is False
-    """
-    raw1 = read_raw_edf(bdf_path, hpts=hpts_path, preload=False)
-    raw1_file = op.join(tempdir, 'raw1.fif')
-    raw1.save(raw1_file, overwrite=True)
-    raw11 = Raw(raw1_file, preload=True)
-    data1, times1 = raw1[:, :]
-    data11, times11 = raw11[:, :]
-    assert_array_almost_equal(data1, data11, 8)
-    assert_array_almost_equal(times1, times11)
-    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
-
-    raw2 = read_raw_edf(bdf_path, hpts=hpts_path, preload=True)
-    raw2_file = op.join(tempdir, 'raw2.fif')
-    raw2.save(raw2_file, overwrite=True)
-    data2, times2 = raw2[:, :]
-    assert_array_equal(data1, data2)
-    assert_array_equal(times1, times2)
-
-    raw1 = Raw(raw1_file, preload=True)
-    raw2 = Raw(raw2_file, preload=True)
-    assert_array_equal(raw1._data, raw2._data)
diff --git a/mne/fiff/tests/test_evoked.py b/mne/fiff/tests/test_evoked.py
deleted file mode 100644
index 3f18c03..0000000
--- a/mne/fiff/tests/test_evoked.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#         Denis Engemann <d.engemann at fz-juelich.de>
-#
-# License: BSD (3-clause)
-
-import os.path as op
-from copy import deepcopy
-
-import numpy as np
-from numpy.testing import (assert_array_almost_equal, assert_equal,
-                           assert_array_equal, assert_allclose)
-from nose.tools import assert_true, assert_raises
-
-from mne.fiff import read_evoked, write_evoked, pick_types
-from mne.utils import _TempDir, requires_pandas, requires_nitime
-
-fname = op.join(op.dirname(__file__), 'data', 'test-ave.fif')
-fname_gz = op.join(op.dirname(__file__), 'data', 'test-ave.fif.gz')
-
-tempdir = _TempDir()
-
-
-def test_io_evoked():
-    """Test IO for evoked data (fif + gz) with integer and str args
-    """
-    ave = read_evoked(fname, 0)
-
-    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
-    ave2 = read_evoked(op.join(tempdir, 'evoked.fif'))
-
-    # This is not assert_array_equal due to Windows rounding
-    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
-    assert_array_almost_equal(ave.times, ave2.times)
-    assert_equal(ave.nave, ave2.nave)
-    assert_equal(ave._aspect_kind, ave2._aspect_kind)
-    assert_equal(ave.kind, ave2.kind)
-    assert_equal(ave.last, ave2.last)
-    assert_equal(ave.first, ave2.first)
-
-    # test compressed i/o
-    ave2 = read_evoked(fname_gz, 0)
-    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
-
-    # test str access
-    setno = 'Left Auditory'
-    assert_raises(ValueError, read_evoked, fname, setno, kind='stderr')
-    assert_raises(ValueError, read_evoked, fname, setno, kind='standard_error')
-    ave3 = read_evoked(fname, setno)
-    assert_array_almost_equal(ave.data, ave3.data, 19)
-
-
-def test_shift_time_evoked():
-    """ Test for shifting of time scale
-    """
-    # Shift backward
-    ave = read_evoked(fname, 0)
-    ave.shift_time(-0.1, relative=True)
-    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
-
-    # Shift forward twice the amount
-    ave_bshift = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
-    ave_bshift.shift_time(0.2, relative=True)
-    write_evoked(op.join(tempdir, 'evoked.fif'), ave_bshift)
-
-    # Shift backward again
-    ave_fshift = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
-    ave_fshift.shift_time(-0.1, relative=True)
-    write_evoked(op.join(tempdir, 'evoked.fif'), ave_fshift)
-
-    ave_normal = read_evoked(fname, 0)
-    ave_relative = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
-
-    assert_true(np.allclose(ave_normal.data, ave_relative.data,
-                            atol=1e-16, rtol=1e-3))
-    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
-
-    assert_equal(ave_normal.last, ave_relative.last)
-    assert_equal(ave_normal.first, ave_relative.first)
-
-    # Absolute time shift
-    ave = read_evoked(fname, 0)
-    ave.shift_time(-0.3, relative=False)
-    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
-
-    ave_absolute = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
-
-    assert_true(np.allclose(ave_normal.data, ave_absolute.data,
-                            atol=1e-16, rtol=1e-3))
-    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
-
-
-def test_evoked_resample():
-    """Test for resampling of evoked data
-    """
-    # upsample, write it out, read it in
-    ave = read_evoked(fname, 0)
-    sfreq_normal = ave.info['sfreq']
-    ave.resample(2 * sfreq_normal)
-    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
-    ave_up = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
-
-    # compare it to the original
-    ave_normal = read_evoked(fname, 0)
-
-    # and compare the original to the downsampled upsampled version
-    ave_new = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
-    ave_new.resample(sfreq_normal)
-
-    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
-    assert_array_almost_equal(ave_normal.times, ave_new.times)
-    assert_equal(ave_normal.nave, ave_new.nave)
-    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
-    assert_equal(ave_normal.kind, ave_new.kind)
-    assert_equal(ave_normal.last, ave_new.last)
-    assert_equal(ave_normal.first, ave_new.first)
-
-    # for the above to work, the upsampling must have worked correctly, but
-    # we'll add a couple of extra checks anyway
-    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
-    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
-
-
-def test_evoked_detrend():
-    """Test for detrending evoked data
-    """
-    ave = read_evoked(fname, 0)
-    ave_normal = read_evoked(fname, 0)
-    ave.detrend(0)
-    ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
-    picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
-    assert_true(np.allclose(ave.data[picks], ave_normal.data[picks],
-                            rtol=1e-8, atol=1e-16))
-
-
-def test_io_multi_evoked():
-    """Test IO for multiple evoked datasets
-    """
-    aves = read_evoked(fname, [0, 1, 2, 3])
-    write_evoked(op.join(tempdir, 'evoked.fif'), aves)
-    aves2 = read_evoked(op.join(tempdir, 'evoked.fif'), [0, 1, 2, 3])
-    types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
-    aves3 = read_evoked(op.join(tempdir, 'evoked.fif'), types)
-    for aves_new in [aves2, aves3]:
-        for [ave, ave_new] in zip(aves, aves_new):
-            assert_array_almost_equal(ave.data, ave_new.data)
-            assert_array_almost_equal(ave.times, ave_new.times)
-            assert_equal(ave.nave, ave_new.nave)
-            assert_equal(ave.kind, ave_new.kind)
-            assert_equal(ave._aspect_kind, ave_new._aspect_kind)
-            assert_equal(ave.last, ave_new.last)
-            assert_equal(ave.first, ave_new.first)
-    # this should throw an error since there are multiple datasets
-    assert_raises(ValueError, read_evoked, fname)
-
-
-@requires_nitime
-def test_evoked_to_nitime():
-    """ Test to_nitime """
-    aves = read_evoked(fname, [0, 1, 2, 3])
-    evoked_ts = aves[0].to_nitime()
-    assert_equal(evoked_ts.data, aves[0].data)
-
-    picks2 = [1, 2]
-    aves = read_evoked(fname, [0, 1, 2, 3])
-    evoked_ts = aves[0].to_nitime(picks=picks2)
-    assert_equal(evoked_ts.data, aves[0].data[picks2])
-
-
-@requires_pandas
-def test_as_data_frame():
-    """Test evoked Pandas exporter"""
-    ave = read_evoked(fname, [0])[0]
-    assert_raises(ValueError, ave.as_data_frame, picks=np.arange(400))
-    df = ave.as_data_frame()
-    assert_true((df.columns == ave.ch_names).all())
-    df = ave.as_data_frame(use_time_index=False)
-    assert_true('time' in df.columns)
-    assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
-    assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
-
-
-def test_evoked_proj():
-    """Test SSP proj operations
-    """
-    for proj in [True, False]:
-        ave = read_evoked(fname, setno=0, proj=proj)
-        assert_true(all(p['active'] == proj for p in ave.info['projs']))
-
-        # test adding / deleting proj
-        if proj:
-            assert_raises(ValueError, ave.add_proj, [],
-                          {'remove_existing': True})
-            assert_raises(ValueError, ave.del_proj, 0)
-        else:
-            projs = deepcopy(ave.info['projs'])
-            n_proj = len(ave.info['projs'])
-            ave.del_proj(0)
-            assert_true(len(ave.info['projs']) == n_proj - 1)
-            ave.add_proj(projs, remove_existing=False)
-            assert_true(len(ave.info['projs']) == 2 * n_proj - 1)
-            ave.add_proj(projs, remove_existing=True)
-            assert_true(len(ave.info['projs']) == n_proj)
-
-    ave = read_evoked(fname, setno=0, proj=False)
-    data = ave.data.copy()
-    ave.apply_proj()
-    assert_allclose(np.dot(ave._projector, data), ave.data)
diff --git a/mne/filter.py b/mne/filter.py
index 5ca7f8c..b7b6cfa 100644
--- a/mne/filter.py
+++ b/mne/filter.py
@@ -1,5 +1,6 @@
 """IIR and FIR filtering functions"""
 
+from .externals.six import string_types, integer_types
 import warnings
 import numpy as np
 from scipy.fftpack import fft, ifftshift, fftfreq
@@ -61,7 +62,7 @@ def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
     zero_phase : bool
         If True: the filter is applied in forward and backward direction,
         resulting in a zero-phase filter.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices to filter. If None all indices will be filtered.
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
@@ -159,7 +160,7 @@ def _1d_overlap_filter(x, h_fft, n_edge, n_fft, zero_phase, n_segments, n_seg,
     filter_input = x_ext
     x_filtered = np.zeros_like(filter_input)
 
-    for pass_no in range(2) if zero_phase else range(1):
+    for pass_no in list(range(2)) if zero_phase else list(range(1)):
 
         if pass_no == 1:
             # second pass: flip signal
@@ -257,7 +258,7 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
         used (faster for long signals). If str, a human-readable time in
         units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
         to the shortest power-of-two length at least that duration.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices to filter. If None all indices will be filtered.
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
@@ -278,7 +279,7 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
     min_att_db = 20
 
     # normalize frequencies
-    freq = np.array([f / (Fs / 2) for f in freq])
+    freq = np.array(freq) / (Fs / 2.)
     gain = np.array(gain)
     filter_length = _get_filter_length(filter_length, Fs, len_x=x.shape[1])
 
@@ -443,7 +444,7 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
 
     >>> iir_params = dict(order=4, ftype='butter')
     >>> iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low', return_copy=False)
-    >>> print (len(iir_params['b']), len(iir_params['a']), iir_params['padlen'])
+    >>> print((len(iir_params['b']), len(iir_params['a']), iir_params['padlen']))
     (5, 5, 82)
 
     Filters can also be constructed using filter design methods. To get a
@@ -453,7 +454,7 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
 
     >>> iir_params = dict(ftype='cheby1', gpass=3, gstop=20)
     >>> iir_params = construct_iir_filter(iir_params, 40, 50, 1000, 'low')
-    >>> print (len(iir_params['b']), len(iir_params['a']), iir_params['padlen'])
+    >>> print((len(iir_params['b']), len(iir_params['a']), iir_params['padlen']))
     (6, 6, 439)
 
     Padding and/or filter coefficients can also be manually specified. For
@@ -462,7 +463,7 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
 
     >>> iir_params = dict(b=np.ones((10)), a=[1, 0], padlen=0)
     >>> iir_params = construct_iir_filter(iir_params, return_copy=False)
-    >>> print (iir_params['b'], iir_params['a'], iir_params['padlen'])
+    >>> print((iir_params['b'], iir_params['a'], iir_params['padlen']))
     (array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.]), [1, 0], 0)
 
     """
@@ -515,7 +516,7 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
 def _check_method(method, iir_params, extra_types):
     """Helper to parse method arguments"""
     allowed_types = ['iir', 'fft'] + extra_types
-    if not isinstance(method, basestring):
+    if not isinstance(method, string_types):
         raise TypeError('method must be a string')
     if method not in allowed_types:
         raise ValueError('method must be one of %s, not "%s"'
@@ -569,7 +570,7 @@ def band_pass_filter(x, Fs, Fp1, Fp2, filter_length='10s',
         Dictionary of parameters to use for IIR filtering.
         See mne.filter.construct_iir_filter for details. If iir_params
         is None and method="iir", 4th order Butterworth will be used.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices to filter. If None all indices will be filtered.
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
@@ -669,7 +670,7 @@ def band_stop_filter(x, Fs, Fp1, Fp2, filter_length='10s',
         Dictionary of parameters to use for IIR filtering.
         See mne.filter.construct_iir_filter for details. If iir_params
         is None and method="iir", 4th order Butterworth will be used.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices to filter. If None all indices will be filtered.
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
@@ -775,7 +776,7 @@ def low_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
         Dictionary of parameters to use for IIR filtering.
         See mne.filter.construct_iir_filter for details. If iir_params
         is None and method="iir", 4th order Butterworth will be used.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices to filter. If None all indices will be filtered.
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
@@ -858,7 +859,7 @@ def high_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
         Dictionary of parameters to use for IIR filtering.
         See mne.filter.construct_iir_filter for details. If iir_params
         is None and method="iir", 4th order Butterworth will be used.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices to filter. If None all indices will be filtered.
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
@@ -961,7 +962,7 @@ def notch_filter(x, Fs, freqs, filter_length='10s', notch_widths=None,
         sinusoidal components to remove when method='spectrum_fit' and
         freqs=None. Note that this will be Bonferroni corrected for the
         number of frequencies, so large p-values may be justified.
-    picks : list of int | None
+    picks : array-like of int | None
         Indices to filter. If None all indices will be filtered.
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
@@ -1175,7 +1176,8 @@ def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths,
 
 
 @verbose
-def resample(x, up, down, npad=100, window='boxcar', n_jobs=1, verbose=None):
+def resample(x, up, down, npad=100, axis=-1, window='boxcar', n_jobs=1,
+             verbose=None):
     """Resample the array x
 
     Operates along the last dimension of the array.
@@ -1190,6 +1192,8 @@ def resample(x, up, down, npad=100, window='boxcar', n_jobs=1, verbose=None):
         Factor to downsample by.
     npad : integer
         Number of samples to use at the beginning and end for padding.
+    axis : int
+        Axis along which to resample (default is the last axis).
     window : string or tuple
         See scipy.signal.resample for description.
     n_jobs : int | str
@@ -1216,56 +1220,70 @@ def resample(x, up, down, npad=100, window='boxcar', n_jobs=1, verbose=None):
     current implementation is functionally equivalent to passing
     up=up/down and down=1.
     """
+    # check explicitly for backwards compatibility
+    if not isinstance(axis, int):
+        err = ("The axis parameter needs to be an integer (got %s). "
+               "The axis parameter was missing from this function for a "
+               "period of time, you might be intending to specify the "
+               "subsequent window parameter." % repr(axis))
+        raise TypeError(err)
+
     # make sure our arithmetic will work
     ratio = float(up) / down
-    x, orig_shape = _prep_for_filtering(x, False)[:2]
-
-    x_len = x.shape[1]
-    if x_len > 0:
-        # prep for resampling now
-        orig_len = x_len + 2 * npad  # length after padding
-        new_len = int(round(ratio * orig_len))  # length after resampling
-        to_remove = np.round(ratio * npad).astype(int)
-
-        # figure out windowing function
-        if window is not None:
-            if callable(window):
-                W = window(fftfreq(orig_len))
-            elif isinstance(window, np.ndarray) and \
-                    window.shape == (orig_len,):
-                W = window
-            else:
-                W = ifftshift(get_window(window, orig_len))
+    if axis < 0:
+        axis = x.ndim + axis
+    orig_last_axis = x.ndim - 1
+    if axis != orig_last_axis:
+        x = x.swapaxes(axis, orig_last_axis)
+    orig_shape = x.shape
+    x_len = orig_shape[-1]
+    if x_len == 0:
+        warnings.warn('x has zero length along last axis, returning a copy of '
+                      'x')
+        return x.copy()
+
+    # prep for resampling now
+    x_flat = x.reshape((-1, x_len))
+    orig_len = x_len + 2 * npad  # length after padding
+    new_len = int(round(ratio * orig_len))  # length after resampling
+    to_remove = np.round(ratio * npad).astype(int)
+
+    # figure out windowing function
+    if window is not None:
+        if callable(window):
+            W = window(fftfreq(orig_len))
+        elif isinstance(window, np.ndarray) and \
+                window.shape == (orig_len,):
+            W = window
         else:
-            W = np.ones(orig_len)
-        W *= (float(new_len) / float(orig_len))
-        W = W.astype(np.complex128)
+            W = ifftshift(get_window(window, orig_len))
+    else:
+        W = np.ones(orig_len)
+    W *= (float(new_len) / float(orig_len))
+    W = W.astype(np.complex128)
 
-        # figure out if we should use CUDA
-        n_jobs, cuda_dict, W = setup_cuda_fft_resample(n_jobs, W, new_len)
+    # figure out if we should use CUDA
+    n_jobs, cuda_dict, W = setup_cuda_fft_resample(n_jobs, W, new_len)
 
-        # do the resampling using an adaptation of scipy's FFT-based resample()
-        # use of the 'flat' window is recommended for minimal ringing
-        if n_jobs == 1:
-            y = np.zeros((len(x), new_len - 2 * to_remove), dtype=x.dtype)
-            for xi, x_ in enumerate(x):
-                y[xi] = fft_resample(x_, W, new_len, npad, to_remove,
-                                     cuda_dict)
-        else:
-            _check_njobs(n_jobs, can_be_cuda=True)
-            parallel, p_fun, _ = parallel_func(fft_resample, n_jobs)
-            y = parallel(p_fun(x_, W, new_len, npad, to_remove, cuda_dict)
-                         for x_ in x)
-            y = np.array(y)
-
-        # Restore the original array shape (modified for resampling)
-        orig_shape = list(orig_shape)
-        orig_shape[-1] = y.shape[1]
-        y.shape = tuple(orig_shape)
+    # do the resampling using an adaptation of scipy's FFT-based resample()
+    # use of the 'flat' window is recommended for minimal ringing
+    if n_jobs == 1:
+        y = np.zeros((len(x_flat), new_len - 2 * to_remove), dtype=x.dtype)
+        for xi, x_ in enumerate(x_flat):
+            y[xi] = fft_resample(x_, W, new_len, npad, to_remove,
+                                 cuda_dict)
     else:
-        warnings.warn('x has zero length along last axis, returning a copy of '
-                      'x')
-        y = x.copy()
+        _check_njobs(n_jobs, can_be_cuda=True)
+        parallel, p_fun, _ = parallel_func(fft_resample, n_jobs)
+        y = parallel(p_fun(x_, W, new_len, npad, to_remove, cuda_dict)
+                     for x_ in x_flat)
+        y = np.array(y)
+
+    # Restore the original array shape (modified for resampling)
+    y.shape = orig_shape[:-1] + (y.shape[1],)
+    if axis != orig_last_axis:
+        y = y.swapaxes(axis, orig_last_axis)
+
     return y
 
 
@@ -1314,7 +1332,7 @@ def _get_filter_length(filter_length, sfreq, min_length=128, len_x=np.inf):
     """Helper to determine a reasonable filter length"""
     if not isinstance(min_length, int):
         raise ValueError('min_length must be an int')
-    if isinstance(filter_length, basestring):
+    if isinstance(filter_length, string_types):
         # parse time values
         if filter_length[-2:].lower() == 'ms':
             mult_fact = 1e-3
@@ -1346,7 +1364,7 @@ def _get_filter_length(filter_length, sfreq, min_length=128, len_x=np.inf):
                           % (filter_length, filter_length / float(sfreq)))
 
     if filter_length is not None:
-        if not isinstance(filter_length, int):
+        if not isinstance(filter_length, integer_types):
             raise ValueError('filter_length must be str, int, or None')
     return filter_length
 
diff --git a/mne/fixes.py b/mne/fixes.py
index e739b58..2b201bc 100644
--- a/mne/fixes.py
+++ b/mne/fixes.py
@@ -22,10 +22,33 @@ import scipy
 from scipy import linalg
 from math import ceil, log
 from numpy.fft import irfft
+from nose.tools import assert_true
 from scipy.signal import filtfilt as sp_filtfilt
 from distutils.version import LooseVersion
 from functools import partial
-import copy_reg
+from .externals import six
+from .externals.six.moves import copyreg
+from gzip import GzipFile
+
+
+###############################################################################
+# Misc
+
+class gzip_open(GzipFile):  # python2.6's GzipFile lacks context management
+    def __init__(self, *args, **kwargs):
+        return GzipFile.__init__(self, *args, **kwargs)
+
+    def __enter__(self):
+        if hasattr(GzipFile, '__enter__'):
+            return GzipFile.__enter__(self)
+        else:
+            return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if hasattr(GzipFile, '__exit__'):
+            return GzipFile.__exit__(self, exc_type, exc_value, traceback)
+        else:
+            return self.close()
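+
+
+# A usage sketch: gzip_open behaves like GzipFile but is always usable as a
+# context manager, even on Python 2.6 (hypothetical file name):
+#
+#     with gzip_open('test-ave.fif.gz', 'rb') as fid:
+#         header = fid.read(16)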
 
 
 class _Counter(collections.defaultdict):
@@ -35,12 +58,12 @@ class _Counter(collections.defaultdict):
         self.update(iterable)
 
     def most_common(self):
-        return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
+        return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
 
     def update(self, other):
         """Adds counts for elements in other"""
         if isinstance(other, self.__class__):
-            for x, n in other.iteritems():
+            for x, n in six.iteritems(other):
                 self[x] += n
         else:
             for x in other:
@@ -103,7 +126,7 @@ else:
 def _bincount(X, weights=None, minlength=None):
     """Replacing np.bincount in numpy < 1.6 to provide minlength."""
     result = np.bincount(X, weights)
-    if len(result) >= minlength:
+    if minlength is None or len(result) >= minlength:
         return result
     out = np.zeros(minlength, np.int)
     out[:len(result)] = result
@@ -151,6 +174,30 @@ else:
     in1d = np.in1d
 
 
+def _digitize(x, bins, right=False):
+    """Replacement for digitize with right kwarg (numpy < 1.7).
+
+    Notes
+    -----
+    This fix is only meant for integer arrays. If ``right == True`` but
+    either ``x`` or ``bins`` is of a non-integer type, a NotImplementedError
+    will be raised.
+    """
+    if right:
+        x = np.asarray(x)
+        bins = np.asarray(bins)
+        if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
+            raise NotImplementedError("Only implemented for integer input")
+        return np.digitize(x - 1e-5, bins)
+    else:
+        return np.digitize(x, bins)
+
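+# A quick sanity check of the right-edge emulation (integer inputs only):
+#
+#     >>> bins = np.array([1, 2, 3])
+#     >>> _digitize(np.array([1, 2, 3]), bins, right=True)
+#     array([0, 1, 2])
+#     >>> np.digitize(np.array([1, 2, 3]), bins)  # right=False default
+#     array([1, 2, 3])
+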
+if LooseVersion(np.__version__) < LooseVersion('1.7'):
+    digitize = _digitize
+else:
+    digitize = np.digitize
+
+
 def _tril_indices(n, k=0):
     """Replacement for tril_indices that is provided for numpy >= 1.4"""
     mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
@@ -192,7 +239,7 @@ def _qr_economic_old(A, **kwargs):
     Compat function for the QR-decomposition in economic mode
     Scipy 0.9 changed the keyword econ=True to mode='economic'
     """
-    with warnings.catch_warnings(True):
+    with warnings.catch_warnings(record=True):
         return linalg.qr(A, econ=True, **kwargs)
 
 
@@ -514,7 +561,7 @@ def _reduce_partial(p):
 
 # This adds pickling functionality to older Python 2.6
 # Please always import partial from here.
-copy_reg.pickle(partial, _reduce_partial)
+copyreg.pickle(partial, _reduce_partial)
 
 
 def normalize_colors(vmin, vmax, clip=False):
@@ -524,3 +571,18 @@ def normalize_colors(vmin, vmax, clip=False):
         return plt.Normalize(vmin, vmax, clip=clip)
     else:
         return plt.normalize(vmin, vmax, clip=clip)
+
+
+def _assert_is(expr1, expr2, msg=None):
+    """Fake assert_is without message"""
+    assert_true(expr1 is expr2)
+
+
+def _assert_is_not(expr1, expr2, msg=None):
+    """Fake assert_is_not without message"""
+    assert_true(expr1 is not expr2)
+
+try:
+    from nose.tools import assert_is, assert_is_not
+except ImportError:
+    assert_is = _assert_is
+    assert_is_not = _assert_is_not
diff --git a/mne/forward/__init__.py b/mne/forward/__init__.py
index 1616053..67292f5 100644
--- a/mne/forward/__init__.py
+++ b/mne/forward/__init__.py
@@ -1,4 +1,4 @@
-from .forward import (read_forward_solution, write_forward_solution,
+from .forward import (Forward, read_forward_solution, write_forward_solution,
                       is_fixed_orient, read_forward_meas_info,
                       write_forward_meas_info,
                       compute_orient_prior, compute_depth_prior,
@@ -10,3 +10,5 @@ from .forward import (read_forward_solution, write_forward_solution,
                       _subject_from_forward, convert_forward_solution,
                       _to_fixed_ori, prepare_bem_model)
 from ._make_forward import make_forward_solution
+from ._field_interpolation import _make_surface_mapping, make_field_map
+from . import _lead_dots  # for testing purposes
diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py
index 67b1e37..d4b67da 100644
--- a/mne/forward/_compute_forward.py
+++ b/mne/forward/_compute_forward.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larsoner at uw.edu>
@@ -10,12 +10,12 @@ from copy import deepcopy
 
 from ..surface import (fast_cross_3d, _find_nearest_tri_pt, _get_tri_supp_geom,
                        _triangle_coords)
-from ..fiff.constants import FIFF
+from ..io.constants import FIFF
 from ..transforms import apply_trans
 from ..utils import logger
 from ..parallel import parallel_func
-from ..fiff.compensator import get_current_comp, make_compensator
-from ..fiff.pick import pick_types
+from ..io.compensator import get_current_comp, make_compensator
+from ..io.pick import pick_types
 
 
 ##############################################################################
@@ -62,20 +62,22 @@ def _bem_lin_field_coeffs_simple(dest, normal, tri_rr, tri_nn, tri_area):
     return out
 
 
-def _lin_field_coeff(s, mult, rmags, cosmags, ws, lims, func, n_jobs):
+def _lin_field_coeff(s, mult, rmags, cosmags, ws, counts, func, n_jobs):
     """Use the linear field approximation to get field coefficients"""
     parallel, p_fun, _ = parallel_func(_do_lin_field_coeff, n_jobs)
     nas = np.array_split
-    coeffs = parallel(p_fun(s['rr'], t, tn, ta, rmags, cosmags, ws, lims, func)
+    coeffs = parallel(p_fun(s['rr'], t, tn, ta,
+                            rmags, cosmags, ws, counts, func)
                       for t, tn, ta in zip(nas(s['tris'], n_jobs),
                                            nas(s['tri_nn'], n_jobs),
                                            nas(s['tri_area'], n_jobs)))
     return mult * np.sum(coeffs, axis=0)
 
 
-def _do_lin_field_coeff(rr, t, tn, ta, rmags, cosmags, ws, lims, func):
+def _do_lin_field_coeff(rr, t, tn, ta, rmags, cosmags, ws, counts, func):
     """Actually get field coefficients (parallel-friendly)"""
-    coeff = np.zeros((len(lims) - 1, len(rr)))
+    coeff = np.zeros((len(counts), len(rr)))
+    bins = np.repeat(np.arange(len(counts)), counts)
     for tri, tri_nn, tri_area in zip(t, tn, ta):
         # Accumulate the coefficients for each triangle node
         # and add to the corresponding coefficient matrix
@@ -89,8 +91,9 @@ def _do_lin_field_coeff(rr, t, tn, ta, rmags, cosmags, ws, lims, func):
         #    coeff[j][tri + off] += mult * res
 
         xx = func(rmags, cosmags, tri_rr, tri_nn, tri_area)
-        yy = np.c_[np.zeros((3, 1)), np.cumsum(xx * ws, axis=1)]
-        zz = np.diff(yy[:, lims], axis=1)
+        # only loops 3x (one per direction)
+        zz = np.array([np.bincount(bins, weights=x * ws,
+                                   minlength=len(counts)) for x in xx])
         coeff[:, tri] += zz.T
     return coeff
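 
 # The bincount-based accumulation above is equivalent to the old
 # cumsum-then-diff over ``lims``; np.repeat expands per-coil counts into a
 # bin index per integration point:
 #
 #     >>> counts = np.array([2, 3])
 #     >>> np.repeat(np.arange(len(counts)), counts)
 #     array([0, 0, 1, 1, 1])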
 
@@ -110,15 +113,15 @@ def _bem_specify_coils(bem, coils, coord_frame, n_jobs):
     # Process each of the surfaces
     rmags = np.concatenate([coil['rmag'] for coil in coils])
     cosmags = np.concatenate([coil['cosmag'] for coil in coils])
-    lims = np.cumsum(np.r_[0, [len(coil['rmag']) for coil in coils]])
+    counts = np.array([len(coil['rmag']) for coil in coils])
     ws = np.concatenate([coil['w'] for coil in coils])
 
     lens = np.cumsum(np.r_[0, [len(s['rr']) for s in bem['surfs']]])
-    coeff = np.empty((len(lims) - 1, lens[-1]))
+    coeff = np.empty((len(counts), lens[-1]))
     for o1, o2, surf, mult in zip(lens[:-1], lens[1:],
                                   bem['surfs'], bem['field_mult']):
         coeff[:, o1:o2] = _lin_field_coeff(surf, mult, rmags, cosmags,
-                                           ws, lims, func, n_jobs)
+                                           ws, counts, func, n_jobs)
     # put through the bem
     sol = np.dot(coeff, bem['solution'])
     return sol
@@ -273,7 +276,7 @@ def _do_inf_pots(rr, srr, mri_Q, sol):
     # We chunk the source rr's in order to save memory
     bounds = np.r_[np.arange(0, len(rr), 1000), len(rr)]
     B = np.empty((len(rr) * 3, sol.shape[1]))
-    for bi in xrange(len(bounds) - 1):
+    for bi in range(len(bounds) - 1):
         v0s = _bem_inf_pots(rr[bounds[bi]:bounds[bi + 1]], srr, mri_Q)
         v0s.shape = (v0s.shape[0] * 3, v0s.shape[2])
         B[3 * bounds[bi]:3 * bounds[bi + 1]] = np.dot(v0s, sol)
diff --git a/mne/forward/_field_interpolation.py b/mne/forward/_field_interpolation.py
new file mode 100644
index 0000000..47ef1dd
--- /dev/null
+++ b/mne/forward/_field_interpolation.py
@@ -0,0 +1,286 @@
+import numpy as np
+from scipy import linalg
+from copy import deepcopy
+
+from ..io.constants import FIFF
+from ..io.pick import pick_types, pick_info
+from ..surface import get_head_surf, get_meg_helmet_surf
+
+from ..io.proj import _has_eeg_average_ref_proj, make_projector
+from ..transforms import transform_surface_to, read_trans, _find_trans
+from ._make_forward import _create_coils
+from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table,
+                         _get_legen_lut_fast, _get_legen_lut_accurate)
+from ..parallel import check_n_jobs
+from ..utils import logger, verbose
+from ..fixes import partial
+
+
+def _is_axial_coil(coil):
+    is_ax = coil['coil_class'] in (FIFF.FWD_COILC_MAG,
+                                   FIFF.FWD_COILC_AXIAL_GRAD,
+                                   FIFF.FWD_COILC_AXIAL_GRAD2)
+    return is_ax
+
+
+def _ad_hoc_noise(coils, ch_type='meg'):
+    v = np.empty(len(coils))
+    if ch_type == 'meg':
+        axs = np.array([_is_axial_coil(coil) for coil in coils], dtype=bool)
+        v[axs] = 4e-28  # 20e-15 ** 2
+        v[np.logical_not(axs)] = 2.5e-25  # 5e-13 ** 2
+    else:
+        v.fill(1e-12)  # 1e-6 ** 2
+    cov = dict(diag=True, data=v, eig=None, eigvec=None)
+    return cov
+
+
+def _compute_mapping_matrix(fmd, info):
+    """Do the hairy computations"""
+    logger.info('preparing the mapping matrix...')
+    # assemble a projector and apply it to the data
+    ch_names = fmd['ch_names']
+    projs = info.get('projs', list())
+    proj_op = make_projector(projs, ch_names)[0]
+    proj_dots = np.dot(proj_op.T, np.dot(fmd['self_dots'], proj_op))
+
+    noise_cov = fmd['noise']
+    # Whiten
+    if not noise_cov['diag']:
+        raise NotImplementedError  # this shouldn't happen
+    whitener = np.diag(1.0 / np.sqrt(noise_cov['data'].ravel()))
+    whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener))
+
+    # SVD is numerically better than an eigenvalue decomposition, even if
+    # the matrix is supposed to be symmetric and positive definite
+    uu, sing, vv = linalg.svd(whitened_dots, full_matrices=False,
+                              overwrite_a=True)
+
+    # Eigenvalue truncation
+    sumk = np.cumsum(sing)
+    sumk /= sumk[-1]
+    fmd['nest'] = np.where(sumk > (1.0 - fmd['miss']))[0][0]
+    logger.info('Truncate at %d missing %g' % (fmd['nest'], fmd['miss']))
+    sing = 1.0 / sing[:fmd['nest']]
+
+    # Put the inverse together
+    logger.info('Put the inverse together...')
+    inv = np.dot(uu[:, :fmd['nest']] * sing, vv[:fmd['nest']]).T
+
+    # Sandwich with the whitener
+    inv_whitened = np.dot(whitener.T, np.dot(inv, whitener))
+
+    # Take into account that the lead fields used to compute
+    # d->surface_dots were unprojected
+    inv_whitened_proj = (np.dot(inv_whitened.T, proj_op)).T
+
+    # Finally sandwich in the selection matrix
+    # This one picks up the correct lead field projection
+    mapping_mat = np.dot(fmd['surface_dots'], inv_whitened_proj)
+
+    # Optionally apply the average electrode reference to the final field map
+    if fmd['kind'] == 'eeg':
+        if _has_eeg_average_ref_proj(projs):
+            logger.info('The map will have average electrode reference')
+            mapping_mat -= np.mean(mapping_mat, axis=0)[np.newaxis, :]
+    return mapping_mat
+
+
+@verbose
+def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
+                          n_jobs=1, verbose=None):
+    """Re-map M/EEG data to a surface
+
+    Parameters
+    ----------
+    info : instance of io.meas_info.Info
+        Measurement info.
+    surf : dict
+        The surface to map the data to. The required fields are `'rr'`,
+        `'nn'`, and `'coord_frame'`. Must be in head coordinates.
+    ch_type : str
+        Must be either `'meg'` or `'eeg'`, determines the type of field.
+    trans : None | dict
+        If None, no transformation applied. Should be a Head<->MRI
+        transformation.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+    n_jobs : int
+        Number of jobs to run in parallel (requires joblib package).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    mapping : array
+        An n_vertices x n_sensors array that remaps the MEG or EEG data,
+        as `new_data = np.dot(mapping, data)`.
+    """
+    if not all([key in surf for key in ['rr', 'nn']]):
+        raise KeyError('surf must have both "rr" and "nn"')
+    if 'coord_frame' not in surf:
+        raise KeyError('The surface coordinate frame must be specified '
+                       'in surf["coord_frame"]')
+    if mode not in ['accurate', 'fast']:
+        raise ValueError('mode must be "accurate" or "fast", not "%s"' % mode)
+
+    # deal with coordinate frames here -- always go to "head" (easiest)
+    if surf['coord_frame'] == FIFF.FIFFV_COORD_MRI:
+        if trans is None or FIFF.FIFFV_COORD_MRI not in [trans['to'],
+                                                         trans['from']]:
+            raise ValueError('trans must be a Head<->MRI transform if the '
+                             'surface is not in head coordinates.')
+        surf = transform_surface_to(deepcopy(surf), 'head', trans)
+
+    n_jobs = check_n_jobs(n_jobs)
+
+    #
+    # Step 1. Prepare the coil definitions
+    # Do the dot products, assume surf in head coords
+    #
+    if ch_type not in ('meg', 'eeg'):
+        raise ValueError('unknown coil type "%s"' % ch_type)
+    if ch_type == 'meg':
+        picks = pick_types(info, meg=True, eeg=False, ref_meg=False)
+        logger.info('Prepare MEG mapping...')
+    else:
+        picks = pick_types(info, meg=False, eeg=True, ref_meg=False)
+        logger.info('Prepare EEG mapping...')
+    if len(picks) == 0:
+        raise RuntimeError('cannot map, no channels found')
+    chs = pick_info(info, picks)['chs']
+
+    # create coil defs in head coordinates
+    if ch_type == 'meg':
+        # Put them in head coordinates
+        coils = _create_coils(chs, FIFF.FWD_COIL_ACCURACY_NORMAL,
+                              info['dev_head_t'], coil_type='meg')[0]
+        type_str = 'coils'
+        miss = 1e-4  # Smoothing criterion for MEG
+    else:  # EEG
+        coils = _create_coils(chs, coil_type='eeg')[0]
+        type_str = 'electrodes'
+        miss = 1e-3  # Smoothing criterion for EEG
+
+    #
+    # Step 2. Calculate the dot products
+    #
+    my_origin = np.array([0.0, 0.0, 0.04])
+    int_rad = 0.06
+    noise = _ad_hoc_noise(coils, ch_type)
+    if mode == 'fast':
+        # Use 50 coefficients with nearest-neighbor interpolation
+        lut, n_fact = _get_legen_table(ch_type, False, 50)
+        lut_fun = partial(_get_legen_lut_fast, lut=lut)
+    else:  # 'accurate'
+        # Use 100 coefficients with linear interpolation
+        lut, n_fact = _get_legen_table(ch_type, False, 100)
+        lut_fun = partial(_get_legen_lut_accurate, lut=lut)
+    logger.info('Computing dot products for %i %s...' % (len(coils), type_str))
+    self_dots = _do_self_dots(int_rad, False, coils, my_origin, ch_type,
+                              lut_fun, n_fact, n_jobs)
+    sel = np.arange(len(surf['rr']))  # eventually we should do sub-selection
+    logger.info('Computing dot products for %i surface locations...'
+                % len(sel))
+    surface_dots = _do_surface_dots(int_rad, False, coils, surf, sel,
+                                    my_origin, ch_type, lut_fun, n_fact,
+                                    n_jobs)
+
+    #
+    # Step 3. Return the result
+    #
+    ch_names = [c['ch_name'] for c in chs]
+    fmd = dict(kind=ch_type, surf=surf, ch_names=ch_names, coils=coils,
+               origin=my_origin, noise=noise, self_dots=self_dots,
+               surface_dots=surface_dots, int_rad=int_rad, miss=miss)
+    logger.info('Field mapping data ready')
+
+    fmd['data'] = _compute_mapping_matrix(fmd, info)
+
+    # Remove some unnecessary fields
+    del fmd['self_dots']
+    del fmd['surface_dots']
+    del fmd['int_rad']
+    del fmd['miss']
+    return fmd
+
+
+def make_field_map(evoked, trans_fname='auto', subject=None, subjects_dir=None,
+                   ch_type=None, mode='fast', n_jobs=1):
+    """Compute surface maps used for field display in 3D
+
+    Parameters
+    ----------
+    evoked : Evoked | Epochs | Raw
+        The measurement file. Must have an info attribute.
+    trans_fname : str | 'auto' | None
+        The full path to the `*-trans.fif` file produced during
+        coregistration. If present, or found using 'auto', the maps
+        will be in MRI coordinates.
+        If None, map for EEG data will not be available.
+    subject : str | None
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT. If None, map for EEG data will not be available.
+    subjects_dir : str
+        The path to the FreeSurfer subjects reconstructions.
+        It corresponds to the FreeSurfer environment variable SUBJECTS_DIR.
+    ch_type : None | 'eeg' | 'meg'
+        If None, a map for each available channel type will be returned.
+        Else only the specified type will be used.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+    n_jobs : int
+        The number of jobs to run in parallel.
+
+    Returns
+    -------
+    surf_maps : list
+        The surface maps to be used for field plots. The list contains
+        separate ones for MEG and EEG (if both MEG and EEG are present).
+    """
+    info = evoked.info
+
+    if ch_type is None:
+        types = [t for t in ['eeg', 'meg'] if t in evoked]
+    else:
+        if ch_type not in ['eeg', 'meg']:
+            raise ValueError("ch_type should be 'eeg' or 'meg' (got %s)"
+                             % ch_type)
+        types = [ch_type]
+
+    if trans_fname == 'auto':
+        # let's try to do this in MRI coordinates so they're easy to plot
+        trans_fname = _find_trans(subject, subjects_dir)
+
+    if 'eeg' in types and trans_fname is None:
+        logger.info('No trans file available. EEG data ignored.')
+        types.remove('eeg')
+
+    if len(types) == 0:
+        raise RuntimeError('No data available for mapping.')
+
+    trans = None
+    if trans_fname is not None:
+        trans = read_trans(trans_fname)
+
+    surfs = []
+    for this_type in types:
+        if this_type == 'meg':
+            surf = get_meg_helmet_surf(info, trans)
+        else:
+            surf = get_head_surf(subject, subjects_dir=subjects_dir)
+        surfs.append(surf)
+
+    surf_maps = list()
+
+    for this_type, this_surf in zip(types, surfs):
+        this_map = _make_surface_mapping(evoked.info, this_surf, this_type,
+                                         trans, n_jobs=n_jobs)
+        this_map['surf'] = this_surf  # XXX : a bit weird...
+        surf_maps.append(this_map)
+
+    return surf_maps
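
A minimal usage sketch for the new `make_field_map` entry point (the file
names below are placeholders for the usual sample-dataset paths, not files
added by this commit):

    from mne import read_evokeds
    from mne.forward import make_field_map

    # hypothetical paths -- substitute your own evoked file, the -trans.fif
    # produced during coregistration, and a FreeSurfer subjects directory
    evoked = read_evokeds('sample_audvis-ave.fif', condition='Left Auditory')
    maps = make_field_map(evoked, trans_fname='sample_audvis_raw-trans.fif',
                          subject='sample', subjects_dir='/path/to/subjects')
    # one map per available channel type; each mapping matrix re-maps the
    # sensor data onto its surface as np.dot(maps[0]['data'], data)
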
diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py
new file mode 100644
index 0000000..ecc1153
--- /dev/null
+++ b/mne/forward/_lead_dots.py
@@ -0,0 +1,309 @@
+import os
+from os import path as op
+
+import numpy as np
+from numpy.polynomial import legendre
+
+from ..parallel import parallel_func
+from ..utils import logger, _get_extra_data_path
+
+
+##############################################################################
+# FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE
+
+def _next_legen_der(n, x, p0, p01, p0d, p0dd):
+    """Compute the next Legendre polynomial and its derivatives"""
+    # only good for n > 1 !
+    help_ = p0
+    helpd = p0d
+    p0 = ((2 * n - 1) * x * help_ - (n - 1) * p01) / n
+    p0d = n * help_ + x * helpd
+    p0dd = (n + 1) * helpd + x * p0dd
+    p01 = help_
+    return p0, p0d, p0dd
+
+
+def _get_legen(x, n_coeff=100):
+    """Get Legendre polynomials expanded about x"""
+    return legendre.legvander(x, n_coeff - 1)
+
+
+def _get_legen_der(xx, n_coeff=100):
+    """Get Legendre polynomial derivatives expanded about x"""
+    coeffs = np.empty((len(xx), n_coeff, 3))
+    for c, x in zip(coeffs, xx):
+        p0s, p0ds, p0dds = c[:, 0], c[:, 1], c[:, 2]
+        p0s[:2] = [1.0, x]
+        p0ds[:2] = [0.0, 1.0]
+        p0dds[:2] = [0.0, 0.0]
+        for n in range(2, n_coeff):
+            p0s[n], p0ds[n], p0dds[n] = _next_legen_der(n, x, p0s[n - 1],
+                                            p0s[n - 2], p0ds[n - 1],
+                                            p0dds[n - 1])
+    return coeffs
+
+
+def _get_legen_table(ch_type, volume_integral=False, n_coeff=100,
+                     n_interp=20000, force_calc=False):
+    """Return a (generated) LUT of Legendre (derivative) polynomial coeffs"""
+    if n_interp % 2 != 0:
+        raise RuntimeError('n_interp must be even')
+    fname = op.join(_get_extra_data_path(), 'tables')
+    if not op.isdir(fname):
+        # Updated due to API change (GH 1167)
+        os.makedirs(fname)
+    if ch_type == 'meg':
+        fname = op.join(fname, 'legder_%s_%s.bin' % (n_coeff, n_interp))
+        leg_fun = _get_legen_der
+        extra_str = ' derivative'
+        lut_shape = (n_interp + 1, n_coeff, 3)
+    else:  # 'eeg'
+        fname = op.join(fname, 'legval_%s_%s.bin' % (n_coeff, n_interp))
+        leg_fun = _get_legen
+        extra_str = ''
+        lut_shape = (n_interp + 1, n_coeff)
+    if not op.isfile(fname) or force_calc:
+        n_out = (n_interp // 2)
+        logger.info('Generating Legendre%s table...' % extra_str)
+        x_interp = np.arange(-n_out, n_out + 1, dtype=np.float64) / n_out
+        lut = leg_fun(x_interp, n_coeff).astype(np.float32)
+        if not force_calc:
+            with open(fname, 'wb') as fid:
+                fid.write(lut.tostring())
+    else:
+        logger.info('Reading Legendre%s table...' % extra_str)
+        with open(fname, 'rb', buffering=0) as fid:
+            lut = np.fromfile(fid, np.float32)
+    lut.shape = lut_shape
+
+    # we need this for the integration step
+    n_fact = np.arange(1, n_coeff, dtype=float)
+    if ch_type == 'meg':
+        n_facts = list()  # multn, then mult, then multn * (n + 1)
+        if volume_integral:
+            n_facts.append(n_fact / ((2.0 * n_fact + 1.0)
+                                     * (2.0 * n_fact + 3.0)))
+        else:
+            n_facts.append(n_fact / (2.0 * n_fact + 1.0))
+        n_facts.append(n_facts[0] / (n_fact + 1.0))
+        n_facts.append(n_facts[0] * (n_fact + 1.0))
+        # skip the first set of coefficients because they are not used
+        lut = lut[:, 1:, [0, 1, 1, 2]]  # for multiplicative convenience later
+        # reshape this for convenience, too
+        n_facts = np.array(n_facts)[[2, 0, 1, 1], :].T
+        n_facts = np.ascontiguousarray(n_facts)
+        n_fact = n_facts
+    else:  # 'eeg'
+        n_fact = (2.0 * n_fact + 1.0) * (2.0 * n_fact + 1.0) / n_fact
+        # skip the first set of coefficients because they are not used
+        lut = lut[:, 1:].copy()
+    return lut, n_fact
+
+
+def _get_legen_lut_fast(x, lut):
+    """Return Legendre coefficients for given x values in -1<=x<=1"""
+    # map into table vals (works for both vals and deriv tables)
+    n_interp = (lut.shape[0] - 1.0)
+    # equiv to "((x + 1.0) / 2.0) * n_interp" but faster
+    mm = x * (n_interp / 2.0) + 0.5 * n_interp
+    # nearest-neighbor version (could be decent enough...)
+    idx = np.round(mm).astype(int)
+    vals = lut[idx]
+    return vals
+
+
+def _get_legen_lut_accurate(x, lut):
+    """Return Legendre coefficients for given x values in -1<=x<=1"""
+    # map into table vals (works for both vals and deriv tables)
+    n_interp = (lut.shape[0] - 1.0)
+    # equiv to "((x + 1.0) / 2.0) * n_interp" but faster
+    mm = x * (n_interp / 2.0) + 0.5 * n_interp
+    # slower, more accurate interpolation version
+    mm = np.minimum(mm, n_interp - 0.0000000001)
+    idx = np.floor(mm).astype(int)
+    w2 = mm - idx
+    w2.shape += tuple([1] * (lut.ndim - w2.ndim))  # expand to correct size
+    vals = (1 - w2) * lut[idx] + w2 * lut[idx + 1]
+    return vals
+
+
+def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact):
+    """Lead field dot products using Legendre polynomial (P_n) series"""
+    # Compute the sum occurring in the evaluation.
+    # The result is
+    #   sums[:]    (2n+1)^2/n beta^n P_n
+    coeffs = lut_fun(ctheta)
+    betans = np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
+                        axis=1)
+    s0 = np.dot(coeffs * betans, n_fact)  # == weighted sum across cols
+    return s0
+
+
+def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral):
+    """Lead field dot products using Legendre polynomial (P_n) series"""
+    # Compute the sums occurring in the evaluation.
+    # Two point magnetometers on the xz plane are assumed.
+    # The four sums are:
+    #  * sums[:, 0]    n(n+1)/(2n+1) beta^(n+1) P_n
+    #  * sums[:, 1]    n/(2n+1) beta^(n+1) P_n'
+    #  * sums[:, 2]    n/((2n+1)(n+1)) beta^(n+1) P_n'
+    #  * sums[:, 3]    n/((2n+1)(n+1)) beta^(n+1) P_n''
+    coeffs = lut_fun(ctheta)
+    beta = (np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
+                       axis=1) * beta[:, np.newaxis])
+    # This is equivalent, but slower:
+    # sums = np.sum(beta[:, :, np.newaxis] * n_fact * coeffs, axis=1)
+    # sums = np.rollaxis(sums, 2)
+    sums = np.einsum('ij,jk,ijk->ki', beta, n_fact, coeffs)
+    return sums
+
+
+###############################################################################
+# SPHERE DOTS
+
+def _fast_sphere_dot_r0(r, rr1, rr2, lr1, lr2, cosmags1, cosmags2,
+                        w1, w2, volume_integral, lut, n_fact, ch_type):
+    """Lead field dot product computation for M/EEG in the sphere model"""
+    ct = np.einsum('ik,jk->ij', rr1, rr2)  # outer product, sum over coords
+
+    # expand axes
+    rr1 = rr1[:, np.newaxis, :]  # (n_rr1, n_rr2, n_coord) e.g. 4x4x3
+    rr2 = rr2[np.newaxis, :, :]
+    lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :]
+
+    beta = (r * r) / lr1lr2
+    if ch_type == 'meg':
+        sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact,
+                              volume_integral)
+        sums.shape = (4,) + beta.shape
+
+        # Accumulate the result, a little bit streamlined version
+        #cosmags1 = cosmags1[:, np.newaxis, :]
+        #cosmags2 = cosmags2[np.newaxis, :, :]
+        #n1c1 = np.sum(cosmags1 * rr1, axis=2)
+        #n1c2 = np.sum(cosmags1 * rr2, axis=2)
+        #n2c1 = np.sum(cosmags2 * rr1, axis=2)
+        #n2c2 = np.sum(cosmags2 * rr2, axis=2)
+        #n1n2 = np.sum(cosmags1 * cosmags2, axis=2)
+        n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1)
+        n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2)
+        n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1)
+        n2c2 = np.einsum('jk,ijk->ij', cosmags2, rr2)
+        n1n2 = np.einsum('ik,jk->ij', cosmags1, cosmags2)
+        part1 = ct * n1c1 * n2c2
+        part2 = n1c1 * n2c1 + n1c2 * n2c2
+
+        result = (n1c1 * n2c2 * sums[0] +
+                  (2.0 * part1 - part2) * sums[1] +
+                  (n1n2 + part1 - part2) * sums[2] +
+                  (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3])
+
+        # Give it a finishing touch!
+        const = 4e-14 * np.pi  # This is \mu_0^2/4\pi
+        result *= (const / lr1lr2)
+        if volume_integral:
+            result *= r
+    else:  # 'eeg'
+        sums = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact)
+        sums.shape = beta.shape
+
+        # Give it a finishing touch!
+        eeg_const = 1.0 / (4.0 * np.pi)
+        result = eeg_const * sums / lr1lr2
+    # now we add them all up with weights
+    if w1 is None:  # operating on surface, treat independently
+        #result = np.sum(w2[np.newaxis, :] * result, axis=1)
+        result = np.dot(result, w2)
+    else:
+        #result = np.sum((w1[:, np.newaxis] * w2[np.newaxis, :]) * result)
+        result = np.einsum('i,j,ij', w1, w2, result)
+    return result
+
+
+def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs):
+    """Perform the lead field dot product integrations"""
+    if ch_type == 'eeg':
+        intrad *= 0.7
+    # convert to normalized distances from expansion center
+    rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
+    rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
+    rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
+    cosmags = [coil['cosmag'] for coil in coils]
+    ws = [coil['w'] for coil in coils]
+    parallel, p_fun, _ = parallel_func(_do_self_dots_subset, n_jobs)
+    prods = parallel(p_fun(intrad, rmags, rlens, cosmags,
+                           ws, volume, lut, n_fact, ch_type, idx)
+                     for idx in np.array_split(np.arange(len(rmags)), n_jobs))
+    products = np.sum(prods, axis=0)
+    return products
+
+
+def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut,
+                         n_fact, ch_type, idx):
+    """Helper for parallelization"""
+    products = np.zeros((len(rmags), len(rmags)))
+    for ci1 in idx:
+        for ci2 in range(0, ci1 + 1):
+            res = _fast_sphere_dot_r0(intrad, rmags[ci1], rmags[ci2],
+                                      rlens[ci1], rlens[ci2],
+                                      cosmags[ci1], cosmags[ci2],
+                                      ws[ci1], ws[ci2], volume, lut,
+                                      n_fact, ch_type)
+            products[ci1, ci2] = res
+            products[ci2, ci1] = res
+    return products
+
+
+def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,
+                     lut, n_fact, n_jobs):
+    """Compute the map construction products"""
+    virt_ref = False
+    # convert to normalized distances from expansion center
+    rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
+    rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
+    rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
+    cosmags = [coil['cosmag'] for coil in coils]
+    ws = [coil['w'] for coil in coils]
+    rref = None
+    refl = None
+    if ch_type == 'eeg':
+        intrad *= 0.7
+        if virt_ref:
+            rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :]
+            refl = np.sqrt(np.sum(rref * rref, axis=1))
+            rref /= refl[:, np.newaxis]
+
+    rsurf = surf['rr'][sel] - r0[np.newaxis, :]
+    lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1))
+    rsurf /= lsurf[:, np.newaxis]
+    this_nn = surf['nn'][sel]
+
+    parallel, p_fun, _ = parallel_func(_do_surface_dots_subset, n_jobs)
+    prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
+                           this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
+                           idx)
+                     for idx in np.array_split(np.arange(len(rmags)), n_jobs))
+    products = np.sum(prods, axis=0)
+    return products
+
+
+def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
+                            this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
+                            idx):
+    """Helper for parallelization"""
+    products = np.zeros((len(rsurf), len(rmags)))
+    for ci in idx:
+        res = _fast_sphere_dot_r0(intrad, rsurf, rmags[ci],
+                                  lsurf, rlens[ci],
+                                  this_nn, cosmags[ci],
+                                  None, ws[ci], volume, lut,
+                                  n_fact, ch_type)
+        if rref is not None:
+            vres = _fast_sphere_dot_r0(intrad, rref, rmags[ci],
+                                       refl, rlens[ci],
+                                       None, ws[ci], volume,
+                                       lut, n_fact, ch_type)
+            products[:, ci] = res - vres
+        else:
+            products[:, ci] = res
+    return products
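
The lookup-table helpers above can be exercised directly, as the new tests
do; a short sketch (these are private helpers, so the exact names may
change):

    import numpy as np
    from mne.forward._lead_dots import _get_legen_table, _get_legen_lut_fast

    # generate (or read back from disk) the cached EEG coefficient table
    lut, n_fact = _get_legen_table('eeg', n_coeff=50)
    x = np.linspace(-1., 1., 5)          # cos(theta) values to evaluate at
    vals = _get_legen_lut_fast(x, lut)   # nearest-neighbor lookup ('fast')
    # 'accurate' mode swaps in _get_legen_lut_accurate, which linearly
    # interpolates between neighboring table rows
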
diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py
index 1470c0f..f4bb23d 100644
--- a/mne/forward/_make_forward.py
+++ b/mne/forward/_make_forward.py
@@ -1,18 +1,22 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larsoner at uw.edu>
 #
 # License: BSD (3-clause)
 
+from ..externals.six import string_types
 import os
 from os import path as op
 import numpy as np
 
-from ..fiff import read_info, pick_types, pick_info, FIFF, _has_kit_refs
-from .forward import write_forward_solution, _merge_meg_eeg_fwds
+from .. import pick_types, pick_info
+from ..io.pick import _has_kit_refs
+from ..io import read_info
+from ..io.constants import FIFF
+from .forward import Forward, write_forward_solution, _merge_meg_eeg_fwds
 from ._compute_forward import _compute_forwards
-from ..transforms import (invert_transform, transform_source_space_to,
+from ..transforms import (invert_transform, transform_surface_to,
                           read_trans, _get_mri_head_t_from_trans_file,
                           apply_trans, _print_coord_trans, _coord_frame_name)
 from ..utils import logger, verbose
@@ -21,8 +25,10 @@ from ..source_space import (read_source_spaces, _filter_source_spaces,
 from ..surface import read_bem_solution, _normalize_vectors
 
 
-def _read_coil_defs(fname):
+def _read_coil_defs(fname=None):
     """Read a coil definition file"""
+    if fname is None:
+        fname = op.join(op.split(__file__)[0], '..', 'data', 'coil_def.dat')
     big_val = 0.5
     with open(fname, 'r') as fid:
         lines = fid.readlines()
@@ -44,7 +50,7 @@ def _read_coil_defs(fname):
                 rmag = list()
                 cosmag = list()
                 w = list()
-                for p in xrange(npts):
+                for p in range(npts):
                     # get next non-comment line
                     line = lines.pop()
                     while(line[0] == '#'):
@@ -142,8 +148,10 @@ def _create_eeg_el(ch, t):
     return res
 
 
-def _create_coils(coilset, chs, acc, t, coil_type='meg'):
+def _create_coils(chs, acc=None, t=None, coil_type='meg', coilset=None):
     """Create a set of MEG or EEG coils"""
+    if coilset is None:  # auto-read defs if not supplied
+        coilset = _read_coil_defs()
     coils = list()
     if coil_type == 'meg':
         for ch in chs:
@@ -153,7 +161,7 @@ def _create_coils(coilset, chs, acc, t, coil_type='meg'):
             coils.append(_create_eeg_el(ch, t))
     else:
         raise RuntimeError('unknown coil type')
-    return coils, t['to']
+    return coils, coils[0]['coord_frame']  # all get the same coord_frame
 
 
 @verbose
@@ -164,7 +172,7 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
 
     Parameters
     ----------
-    info : instance of mne.fiff.meas_info.Info | str
+    info : instance of mne.io.meas_info.Info | str
         If str, then it should be a filename to a Raw, Epochs, or Evoked
         file with measurement information. If dict, should be an info
         dict (such as one from Raw, Epochs, or Evoked).
@@ -204,8 +212,8 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
 
     Returns
     -------
-    fwd : dict
-        The generated forward solution.
+    fwd : instance of Forward
+        The forward solution.
 
     Notes
     -----
@@ -220,7 +228,7 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     # 3. --fixed option (can be computed post-hoc)
     # 4. --mricoord option (probably not necessary)
 
-    if isinstance(mri, basestring):
+    if isinstance(mri, string_types):
         if not op.isfile(mri):
             raise IOError('mri file "%s" not found' % mri)
         if op.splitext(mri)[1] in ['.fif', '.gz']:
@@ -231,7 +239,7 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
         mri_head_t = mri
         mri = 'dict'
 
-    if not isinstance(src, basestring):
+    if not isinstance(src, string_types):
         if not isinstance(src, SourceSpaces):
             raise TypeError('src must be a string or SourceSpaces')
         src_extra = 'list'
@@ -244,9 +252,9 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     if fname is not None and op.isfile(fname) and not overwrite:
         raise IOError('file "%s" exists, consider using overwrite=True'
                       % fname)
-    if not isinstance(info, (dict, basestring)):
+    if not isinstance(info, (dict, string_types)):
         raise TypeError('info should be a dict or string')
-    if isinstance(info, basestring):
+    if isinstance(info, string_types):
         info_extra = op.split(info)[1]
         info_extra_long = info
         info = read_info(info, verbose=False)
@@ -261,7 +269,7 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     coord_frame = FIFF.FIFFV_COORD_HEAD
 
     # Report the setup
-    mri_extra = mri if isinstance(mri, basestring) else 'dict'
+    mri_extra = mri if isinstance(mri, string_types) else 'dict'
     logger.info('Source space                 : %s' % src)
     logger.info('MRI -> head transform source : %s' % mri_extra)
     logger.info('Measurement data             : %s' % info_extra_long)
@@ -274,7 +282,7 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
 
     # Read the source locations
     logger.info('')
-    if isinstance(src, basestring):
+    if isinstance(src, string_types):
         logger.info('Reading %s...' % src)
         src = read_source_spaces(src, verbose=False)
     else:
@@ -367,35 +375,33 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
         raise RuntimeError('Could not find any MEG or EEG channels')
 
     # Create coil descriptions with transformation to head or MRI frame
-    templates = _read_coil_defs(op.join(op.split(__file__)[0],
-                                        '..', 'data', 'coil_def.dat'))
+    templates = _read_coil_defs()
     if nmeg > 0 and ncomp > 0:  # Compensation channel information
         logger.info('%d compensation data sets in %s'
                     % (ncomp_data, info_extra))
 
     meg_xform = meg_head_t
-    eeg_xform = {'trans': np.eye(4), 'to': FIFF.FIFFV_COORD_HEAD,
-                 'from': FIFF.FIFFV_COORD_HEAD}
     extra_str = 'Head'
 
     megcoils, megcf, compcoils, compcf = None, None, None, None
     if nmeg > 0:
-        megcoils, megcf = _create_coils(templates, megchs,
+        megcoils, megcf = _create_coils(megchs,
                                         FIFF.FWD_COIL_ACCURACY_ACCURATE,
-                                        meg_xform, coil_type='meg')
+                                        meg_xform, coil_type='meg',
+                                        coilset=templates)
         if ncomp > 0:
-            compcoils, compcf = _create_coils(templates, compchs,
+            compcoils, compcf = _create_coils(compchs,
                                               FIFF.FWD_COIL_ACCURACY_NORMAL,
-                                              meg_xform, coil_type='meg')
+                                              meg_xform, coil_type='meg',
+                                              coilset=templates)
     eegels = None
     if neeg > 0:
-        eegels, _ = _create_coils(templates, eegchs, None,
-                                  eeg_xform, coil_type='eeg')
+        eegels, _ = _create_coils(eegchs, coil_type='eeg')
     logger.info('%s coordinate coil definitions created.' % extra_str)
 
     # Transform the source spaces into the appropriate coordinates
     for s in src:
-        transform_source_space_to(s, coord_frame, mri_head_t)
+        transform_surface_to(s, coord_frame, mri_head_t)
     logger.info('Source spaces are now in %s coordinates.'
                 % _coord_frame_name(coord_frame))
 
@@ -440,7 +446,7 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     megfwd, eegfwd = _compute_forwards(src, bem, coils, cfs, ccoils, ccfs,
                                        infos, coil_types, n_jobs)
 
-    # merge forwards into one
+    # merge forwards into one (two Forward objects are created, then merged)
     megfwd = _to_forward_dict(megfwd, None, megnames, coord_frame,
                               FIFF.FIFFV_MNE_FREE_ORI)
     eegfwd = _to_forward_dict(eegfwd, None, eegnames, coord_frame,
@@ -453,7 +459,7 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     info = pick_info(info, picks)
     source_rr = np.concatenate([s['rr'][s['vertno']] for s in src])
     # deal with free orientations:
-    nsource = fwd['sol']['data'].shape[1] / 3
+    nsource = fwd['sol']['data'].shape[1] // 3
     source_nn = np.tile(np.eye(3), (nsource, 1))
 
     # Don't transform the source spaces back into MRI coordinates (which is
@@ -481,10 +487,10 @@ def _to_forward_dict(fwd, fwd_grad, names, coord_frame, source_ori):
     if fwd is not None:
         sol = dict(data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0],
                    row_names=names, col_names=[])
-        fwd = dict(sol=sol, source_ori=source_ori, nsource=sol['ncol'],
-                   coord_frame=coord_frame, sol_grad=None,
-                   nchan=sol['nrow'], _orig_source_ori=source_ori,
-                   _orig_sol=sol['data'].copy(), _orig_sol_grad=None)
+        fwd = Forward(sol=sol, source_ori=source_ori, nsource=sol['ncol'],
+                      coord_frame=coord_frame, sol_grad=None,
+                      nchan=sol['nrow'], _orig_source_ori=source_ori,
+                      _orig_sol=sol['data'].copy(), _orig_sol_grad=None)
         if fwd_grad is not None:
             sol_grad = dict(data=fwd_grad.T, nrow=fwd_grad.shape[1],
                             ncol=fwd_grad.shape[0], row_names=names,
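
For context, a sketch of calling the refactored make_forward_solution with
file names (all paths below are placeholders for the usual sample-dataset
files):

    import mne

    fwd = mne.make_forward_solution(
        'sample_audvis_raw.fif',             # measurement info (or a dict)
        mri='sample_audvis_raw-trans.fif',   # MRI<->head transform
        src='sample-oct-6-src.fif',
        bem='sample-5120-bem-sol.fif',
        meg=True, eeg=True)
    print(fwd)  # Forward instances now print a readable summary
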
diff --git a/mne/forward/forward.py b/mne/forward/forward.py
index fd66990..99a417b 100644
--- a/mne/forward/forward.py
+++ b/mne/forward/forward.py
@@ -1,9 +1,10 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
+from ..externals.six import string_types
 from time import time
 import warnings
 from copy import deepcopy
@@ -17,29 +18,64 @@ import os
 from os import path as op
 import tempfile
 
-from ..fiff.constants import FIFF
-from ..fiff.open import fiff_open
-from ..fiff.tree import dir_tree_find
-from ..fiff.channels import read_bad_channels
-from ..fiff.tag import find_tag, read_tag
-from ..fiff.matrix import (_read_named_matrix, _transpose_named_matrix,
-                           write_named_matrix)
-from ..fiff.pick import (pick_channels_forward, pick_info, pick_channels,
-                         pick_types)
-from ..fiff.write import (write_int, start_block, end_block,
-                          write_coord_trans, write_ch_info, write_name_list,
-                          write_string, start_file, end_file, write_id)
-from ..fiff.raw import Raw
-from ..fiff.evoked import Evoked, write_evoked
-from ..event import make_fixed_length_events
+from ..io.constants import FIFF
+from ..io.open import fiff_open
+from ..io.tree import dir_tree_find
+from ..io.tag import find_tag, read_tag
+from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
+                         write_named_matrix)
+from ..io.meas_info import read_bad_channels, Info
+from ..io.pick import (pick_channels_forward, pick_info, pick_channels,
+                       pick_types)
+from ..io.write import (write_int, start_block, end_block,
+                        write_coord_trans, write_ch_info, write_name_list,
+                        write_string, start_file, end_file, write_id)
+from ..io.base import _BaseRaw
+from ..evoked import Evoked, write_evokeds
 from ..epochs import Epochs
 from ..source_space import (read_source_spaces_from_tree,
                             find_source_space_hemi,
                             _write_source_spaces_to_fid)
-from ..transforms import (transform_source_space_to, invert_transform,
+from ..transforms import (transform_surface_to, invert_transform,
                           write_trans)
 from ..utils import (_check_fname, get_subjects_dir, has_command_line_tools,
-                     run_subprocess, logger, verbose)
+                     run_subprocess, check_fname, logger, verbose)
+
+
+class Forward(dict):
+    """Forward class to represent info from forward solution
+    """
+
+    def __repr__(self):
+        """Summarize forward info instead of printing all"""
+
+        entr = '<Forward'
+
+        nchan = len(pick_types(self['info'], meg=True, eeg=False))
+        entr += ' | ' + 'MEG channels: %d' % nchan
+        nchan = len(pick_types(self['info'], meg=False, eeg=True))
+        entr += ' | ' + 'EEG channels: %d' % nchan
+
+        if self['src'][0]['type'] == 'surf':
+            entr += (' | Source space: Surface with %d vertices'
+                     % self['nsource'])
+        elif self['src'][0]['type'] == 'vol':
+            entr += (' | Source space: Volume with %d grid points'
+                     % self['nsource'])
+        elif self['src'][0]['type'] == 'discrete':
+            entr += (' | Source space: Discrete with %d dipoles'
+                     % self['nsource'])
+
+        if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
+            entr += (' | Source orientation: Unknown')
+        elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
+            entr += (' | Source orientation: Fixed')
+        elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            entr += (' | Source orientation: Free')
+
+        entr += '>'
+
+        return entr
 
 
 def prepare_bem_model(bem, sol_fname=None, method='linear'):
@@ -96,7 +132,7 @@ def _block_diag(A, n):
     if sparse.issparse(A):  # then make block sparse
         raise NotImplementedError('sparse reversal not implemented yet')
     ma, na = A.shape
-    bdn = na / int(n)  # number of submatrices
+    bdn = na // int(n)  # number of submatrices
 
     if na % n > 0:
         raise ValueError('Width of matrix must be a multiple of n')
@@ -135,14 +171,14 @@ def _inv_block_diag(A, n):
         The block diagonal matrix.
     """
     ma, na = A.shape
-    bdn = na / int(n)  # number of submatrices
+    bdn = na // int(n)  # number of submatrices
 
     if na % n > 0:
         raise ValueError('Width of matrix must be a multiple of n')
 
     # modify A in-place to invert each sub-block
     A = A.copy()
-    for start in xrange(0, na, 3):
+    for start in range(0, na, 3):
         # this is a view
         A[:, start:start + 3] = linalg.inv(A[:, start:start + 3])
 
@@ -165,7 +201,7 @@ def _read_one(fid, node):
     if node is None:
         return None
 
-    one = dict()
+    one = Forward()
 
     tag = find_tag(fid, node, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
     if tag is None:
@@ -202,16 +238,16 @@ def _read_one(fid, node):
         raise
 
     try:
-        one['sol_grad'] = _read_named_matrix(fid, node,
-                                        FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD)
+        fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD
+        one['sol_grad'] = _read_named_matrix(fid, node, fwd_type)
         one['sol_grad'] = _transpose_named_matrix(one['sol_grad'], copy=False)
         one['_orig_sol_grad'] = one['sol_grad']['data'].copy()
     except:
         one['sol_grad'] = None
 
     if one['sol']['data'].shape[0] != one['nchan'] or \
-                (one['sol']['data'].shape[1] != one['nsource'] and
-                 one['sol']['data'].shape[1] != 3 * one['nsource']):
+            (one['sol']['data'].shape[1] != one['nsource'] and
+             one['sol']['data'].shape[1] != 3 * one['nsource']):
         fid.close()
         raise ValueError('Forward solution matrix has wrong dimensions')
 
@@ -238,10 +274,10 @@ def read_forward_meas_info(tree, fid):
 
     Returns
     -------
-    info : instance of mne.fiff.meas_info.Info
+    info : instance of mne.io.meas_info.Info
         The measurement info.
     """
-    info = dict()
+    info = Info()
 
     # Information from the MRI file
     parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
@@ -363,7 +399,7 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
     Parameters
     ----------
     fname : string
-        The file name.
+        The file name, which should end with -fwd.fif or -fwd.fif.gz.
     force_fixed : bool, optional (default False)
         Force fixed source orientation mode?
     surf_ori : bool, optional (default False)
@@ -380,9 +416,10 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
 
     Returns
     -------
-    fwd : dict
+    fwd : instance of Forward
         The forward solution.
     """
+    check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
 
     #   Open the file, create directory
     logger.info('Reading forward solution from %s...' % fname)
@@ -500,7 +537,7 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
     nuse = 0
     for s in src:
         try:
-            s = transform_source_space_to(s, fwd['coord_frame'], mri_head_t)
+            s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)
         except Exception as inst:
             raise ValueError('Could not transform source space (%s)' % inst)
 
@@ -514,15 +551,16 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
     fwd['src'] = src
 
     #   Handle the source locations and orientations
-    fwd['source_rr'] = np.concatenate([s['rr'][s['vertno'], :] for s in src],
-                                      axis=0)
+    fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]
+                                       for ss in src], axis=0)
 
     # deal with transformations, storing orig copies so transforms can be done
     # as necessary later
     fwd['_orig_source_ori'] = fwd['source_ori']
     convert_forward_solution(fwd, surf_ori, force_fixed, copy=False)
     fwd = pick_channels_forward(fwd, include=include, exclude=exclude)
-    return fwd
+
+    return Forward(fwd)
 
 
 @verbose
@@ -588,7 +626,7 @@ def convert_forward_solution(fwd, surf_ori=False, force_fixed=False,
         nuse_total = sum([s['nuse'] for s in fwd['src']])
         fwd['source_nn'] = np.empty((3 * nuse_total, 3), dtype=np.float)
         logger.info('    Converting to surface-based source orientations...')
-        if s['patch_inds'] is not None:
+        if fwd['src'][0]['patch_inds'] is not None:
             use_ave_nn = True
             logger.info('    Average patch normals will be employed in the '
                         'rotation to the local surface coordinates....')
@@ -648,7 +686,8 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
     Parameters
     ----------
     fname : str
-        File name to save the forward solution to.
+        File name to save the forward solution to. It should end with -fwd.fif
+        or -fwd.fif.gz.
     fwd : dict
         Forward solution.
     overwrite : bool
@@ -656,6 +695,8 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
+    check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
+
     # check for file existence
     _check_fname(fname, overwrite)
     fid = start_file(fname)
@@ -692,8 +733,8 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
     for s in fwd['src']:
         s = deepcopy(s)
         try:
-            s = transform_source_space_to(s, fwd['mri_head_t']['from'],
-                                          fwd['mri_head_t'])
+            s = transform_surface_to(s, fwd['mri_head_t']['from'],
+                                     fwd['mri_head_t'])
         except Exception as inst:
             raise ValueError('Could not transform source space (%s)' % inst)
         src.append(s)
@@ -702,7 +743,7 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
     # Write the source spaces (again)
     #
     _write_source_spaces_to_fid(fid, src)
-    n_vert = sum([s['nuse'] for s in src])
+    n_vert = sum([ss['nuse'] for ss in src])
     n_col = fwd['sol']['data'].shape[1]
     if fwd['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
         assert n_col == n_vert
@@ -813,7 +854,7 @@ def write_forward_meas_info(fid, info):
     ----------
     fid : file id
         The file id
-    info : instance of mne.fiff.meas_info.Info
+    info : instance of mne.io.meas_info.Info
         The measurement info.
     """
     #
@@ -912,7 +953,7 @@ def _restrict_gain_matrix(G, info):
                 G = G[sel]
                 logger.info('    %d EEG channels' % len(sel))
             else:
-                logger.warn('Could not find MEG or EEG channels')
+                logger.warning('Could not find MEG or EEG channels')
     return G
 
 
@@ -932,7 +973,7 @@ def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
     else:
         n_pos = G.shape[1] // 3
         d = np.zeros(n_pos)
-        for k in xrange(n_pos):
+        for k in range(n_pos):
             Gk = G[:, 3 * k:3 * (k + 1)]
             d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]
 
@@ -962,7 +1003,7 @@ def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
 
     logger.info('    limit = %d/%d = %f'
                 % (n_limit + 1, len(d),
-                np.sqrt(limit / ws[0])))
+                   np.sqrt(limit / ws[0])))
     scale = 1.0 / limit
     logger.info('    scale = %g exp = %g' % (scale, exp))
     wpp = np.minimum(w / limit, 1) ** exp
@@ -1030,7 +1071,7 @@ def _apply_forward(fwd, stc, start=None, stop=None, verbose=None):
         warnings.warn('The maximum current magnitude is %0.1f nAm, which is '
                       'very large. Are you trying to apply the forward model '
                       'to dSPM values? The result will only be correct if '
-                      'currents are used.' % 1e9 * max_cur)
+                      'currents are used.' % (1e9 * max_cur))
 
     src_sel = _stc_src_sel(fwd['src'], stc)
     n_src = sum([len(v) for v in stc.vertno])
@@ -1165,8 +1206,8 @@ def apply_forward_raw(fwd, stc, raw_template, start=None, stop=None,
     data, times = _apply_forward(fwd, stc, start, stop)
 
     # store sensor data in Raw object using the template
-    raw = deepcopy(raw_template)
-    raw._preloaded = True
+    raw = raw_template.copy()
+    raw.preload = True
     raw._data = data
     raw._times = times
 
@@ -1372,15 +1413,15 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
         fname = op.join(temp_dir, 'temp-fwd.fif')
     _check_fname(fname, overwrite)
 
-    if not isinstance(subject, basestring):
+    if not isinstance(subject, string_types):
         raise ValueError('subject must be a string')
 
     # check for meas to exist as string, or try to make evoked
     meas_data = None
-    if isinstance(meas, basestring):
+    if isinstance(meas, string_types):
         if not op.isfile(meas):
             raise IOError('measurement file "%s" could not be found' % meas)
-    elif isinstance(meas, Raw):
+    elif isinstance(meas, _BaseRaw):
         events = np.array([[0, 0, 1]], dtype=np.int)
         end = 1. / meas.info['sfreq']
         meas_data = Epochs(meas, events, 1, 0, end, proj=False).average()
@@ -1393,7 +1434,7 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
 
     if meas_data is not None:
         meas = op.join(temp_dir, 'evoked.fif')
-        write_evoked(meas, meas_data)
+        write_evokeds(meas, meas_data)
 
     # deal with trans/mri
     if mri is not None and trans is not None:
@@ -1404,13 +1445,13 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
         raise ValueError('Either trans or mri must be specified')
 
     if trans is not None:
-        if not isinstance(trans, basestring):
+        if not isinstance(trans, string_types):
             raise ValueError('trans must be a string')
         if not op.isfile(trans):
             raise IOError('trans file "%s" not found' % trans)
     if mri is not None:
         # deal with trans
-        if not isinstance(mri, basestring):
+        if not isinstance(mri, string_types):
             if isinstance(mri, dict):
                 mri_data = deepcopy(mri)
                 mri = op.join(temp_dir, 'mri-trans.fif')
@@ -1435,7 +1476,7 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
 
     # deal with mindist
     if mindist is not None:
-        if isinstance(mindist, basestring):
+        if isinstance(mindist, string_types):
             if not mindist.lower() == 'all':
                 raise ValueError('mindist, if string, must be "all"')
             mindist = ['--all']
@@ -1444,13 +1485,13 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
 
     # src, spacing, bem
     if src is not None:
-        if not isinstance(src, basestring):
+        if not isinstance(src, string_types):
             raise ValueError('src must be a string or None')
     if spacing is not None:
-        if not isinstance(spacing, basestring):
+        if not isinstance(spacing, string_types):
             raise ValueError('spacing must be a string or None')
     if bem is not None:
-        if not isinstance(bem, basestring):
+        if not isinstance(bem, string_types):
             raise ValueError('bem must be a string or None')
 
     # put together the actual call
@@ -1537,6 +1578,7 @@ def average_forward_solutions(fwds, weights=None):
     # check weights
     if weights is None:
         weights = np.ones(len(fwds))
+    weights = np.asanyarray(weights)  # in case it's a list, convert it
     if not np.all(weights >= 0):
         raise ValueError('weights must be non-negative')
     if not len(weights) == len(fwds):
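
The reader/writer changes above amount to the following round-trip, sketched
with placeholder sample-dataset names:

    from mne import (read_forward_solution, convert_forward_solution,
                     average_forward_solutions)

    # file names should now end in -fwd.fif or -fwd.fif.gz (else a warning)
    fwd = read_forward_solution('sample_audvis-meg-oct-6-fwd.fif')
    print(fwd)  # Forward.__repr__ summarizes channels, sources, orientation
    fwd_surf = convert_forward_solution(fwd, surf_ori=True)
    # weights may now be passed as a plain list; it is converted internally
    ave = average_forward_solutions([fwd, fwd], weights=[0.5, 0.5])
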
diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py
new file mode 100644
index 0000000..7422e38
--- /dev/null
+++ b/mne/forward/tests/test_field_interpolation.py
@@ -0,0 +1,153 @@
+import numpy as np
+from os import path as op
+from numpy.polynomial import legendre
+from numpy.testing.utils import assert_allclose, assert_array_equal
+from nose.tools import assert_equal, assert_raises, assert_true
+
+from mne.forward import _make_surface_mapping, make_field_map
+from mne.surface import get_meg_helmet_surf, get_head_surf
+from mne.datasets import sample
+from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
+                                    _get_legen_table,
+                                    _get_legen_lut_fast,
+                                    _get_legen_lut_accurate)
+from mne import pick_types_evoked, read_evokeds
+from mne.fixes import partial
+from mne.externals.six.moves import zip
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+
+data_path = sample.data_path(download=False)
+trans_fname = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_raw-trans.fif')
+subjects_dir = op.join(data_path, 'subjects')
+
+
+def test_legendre_val():
+    """Test Legendre polynomial (derivative) equivalence
+    """
+    # check table equiv
+    xs = np.linspace(-1., 1., 1000)
+    n_terms = 100
+
+    # True, numpy
+    vals_np = legendre.legvander(xs, n_terms - 1)
+
+    # Table approximation
+    for fun, nc in zip([_get_legen_lut_fast, _get_legen_lut_accurate],
+                       [100, 50]):
+        lut, n_fact = _get_legen_table('eeg', n_coeff=nc)
+        vals_i = fun(xs, lut)
+        # Need a "1:" here because we omit the first coefficient in our table!
+        assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
+                        rtol=1e-2, atol=5e-3)
+
+        # Now let's look at our sums
+        ctheta = np.random.rand(20, 30) * 2.0 - 1.0
+        beta = np.random.rand(20, 30) * 0.8
+        lut_fun = partial(fun, lut=lut)
+        c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
+        c1.shape = beta.shape
+
+        # compare to numpy
+        n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
+        coeffs = np.zeros((n_terms,) + beta.shape)
+        coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0)
+                      * (2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
+        # can't use tensor=False here b/c it isn't in old numpy
+        c2 = np.empty((20, 30))
+        for ci1 in range(20):
+            for ci2 in range(30):
+                c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
+                                               coeffs[:, ci1, ci2])
+        assert_allclose(c1, c2, 1e-2, 1e-3)  # close enough...
+
+    # compare fast and slow for MEG
+    ctheta = np.random.rand(20 * 30) * 2.0 - 1.0
+    beta = np.random.rand(20 * 30) * 0.8
+    lut, n_fact = _get_legen_table('meg', n_coeff=50)
+    fun = partial(_get_legen_lut_fast, lut=lut)
+    coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
+    lut, n_fact = _get_legen_table('meg', n_coeff=100)
+    fun = partial(_get_legen_lut_accurate, lut=lut)
+    coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
+
+
+def test_legendre_table():
+    """Test Legendre table calculation
+    """
+    # double-check our table generation
+    n_do = 10
+    for ch_type in ['eeg', 'meg']:
+        lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=50)
+        lut1 = lut1[:, :n_do - 1].copy()
+        n_fact1 = n_fact1[:n_do - 1].copy()
+        lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n_do,
+                                         force_calc=True)
+        assert_allclose(lut1, lut2)
+        assert_allclose(n_fact1, n_fact2)
+
+
+@sample.requires_sample_data
+def test_make_field_map_eeg():
+    """Test interpolation of EEG field onto head
+    """
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory')
+    evoked.info['bads'] = ['MEG 2443', 'EEG 053']  # add some bads
+    surf = get_head_surf('sample', subjects_dir=subjects_dir)
+    # we must have trans if surface is in MRI coords
+    assert_raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
+
+    evoked = pick_types_evoked(evoked, meg=False, eeg=True)
+    fmd = make_field_map(evoked, trans_fname=trans_fname,
+                         subject='sample', subjects_dir=subjects_dir)
+
+    # trans is necessary for EEG only
+    assert_raises(RuntimeError, make_field_map, evoked, trans_fname=None,
+                  subject='sample', subjects_dir=subjects_dir)
+
+    fmd = make_field_map(evoked, trans_fname=trans_fname,
+                         subject='sample', subjects_dir=subjects_dir)
+    assert_true(len(fmd) == 1)
+    assert_array_equal(fmd[0]['data'].shape, (2562, 59))  # maps data onto surf
+    assert_equal(len(fmd[0]['ch_names']), 59)
+
+
+def test_make_field_map_meg():
+    """Test interpolation of MEG field onto helmet
+    """
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory')
+    info = evoked.info
+    surf = get_meg_helmet_surf(info)
+    # let's reduce the number of channels by a bunch to speed it up
+    info['bads'] = info['ch_names'][:200]
+    # bad ch_type
+    assert_raises(ValueError, _make_surface_mapping, info, surf, 'foo')
+    # bad mode
+    assert_raises(ValueError, _make_surface_mapping, info, surf, 'meg',
+                  mode='foo')
+    # no picks
+    evoked_eeg = pick_types_evoked(evoked, meg=False, eeg=True)
+    assert_raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
+                  surf, 'meg')
+    # bad surface def
+    nn = surf['nn']
+    del surf['nn']
+    assert_raises(KeyError, _make_surface_mapping, info, surf, 'meg')
+    surf['nn'] = nn
+    cf = surf['coord_frame']
+    del surf['coord_frame']
+    assert_raises(KeyError, _make_surface_mapping, info, surf, 'meg')
+    surf['coord_frame'] = cf
+
+    # now do it with make_field_map
+    evoked = pick_types_evoked(evoked, meg=True, eeg=False)
+    fmd = make_field_map(evoked, trans_fname=None,
+                         subject='sample', subjects_dir=subjects_dir)
+    assert_true(len(fmd) == 1)
+    assert_array_equal(fmd[0]['data'].shape, (304, 106))  # maps data onto surf
+    assert_equal(len(fmd[0]['ch_names']), 106)
+
+    assert_raises(ValueError, make_field_map, evoked, ch_type='foobar')
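
The shape assertions above pin down how a mapping is applied; a synthetic
illustration with the EEG test's dimensions (2562 head-surface vertices,
59 good channels):

    import numpy as np

    rng = np.random.RandomState(0)
    mapping = rng.randn(2562, 59)   # stands in for fmd[0]['data']
    data = rng.randn(59, 1)         # one time sample from the mapped channels
    field = np.dot(mapping, data)   # (2562, 1) interpolated values on the head
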
diff --git a/mne/forward/tests/test_forward.py b/mne/forward/tests/test_forward.py
index ce268ac..3ecf7a6 100644
--- a/mne/forward/tests/test_forward.py
+++ b/mne/forward/tests/test_forward.py
@@ -8,24 +8,25 @@ from numpy.testing import (assert_array_almost_equal, assert_equal,
                            assert_array_equal, assert_allclose)
 
 from mne.datasets import sample
-from mne.fiff import Raw, Evoked, pick_types_forward
+from mne.io import Raw
 from mne import (read_forward_solution, apply_forward, apply_forward_raw,
                  average_forward_solutions, write_forward_solution,
                  convert_forward_solution)
-from mne import SourceEstimate
+from mne import SourceEstimate, pick_types_forward, read_evokeds
 from mne.label import read_label
 from mne.utils import requires_mne, run_subprocess, _TempDir
-from mne.forward import restrict_forward_to_stc, restrict_forward_to_label
+from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
+                         Forward)
 
 data_path = sample.data_path(download=False)
 fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-oct-6-fwd.fif')
 fname_meeg = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis-meg-eeg-oct-6-fwd.fif')
 
-fname_raw = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data',
+fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
 
-fname_evoked = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                        'data', 'test-ave.fif')
 fname_mri = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
 subjects_dir = os.path.join(data_path, 'subjects')
@@ -33,7 +34,7 @@ fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
 temp_dir = _TempDir()
 # make a file that exists with some data in it
 existing_file = op.join(temp_dir, 'test.fif')
-with open(existing_file, 'wb') as fid:
+with open(existing_file, 'w') as fid:
     fid.write('aoeu')
 
 
@@ -56,21 +57,29 @@ def test_convert_forward():
     """Test converting forward solution between different representations
     """
     fwd = read_forward_solution(fname_meeg)
+    print(fwd)  # __repr__
+    assert_true(isinstance(fwd, Forward))
     # look at surface orientation
     fwd_surf = convert_forward_solution(fwd, surf_ori=True)
     fwd_surf_io = read_forward_solution(fname_meeg, surf_ori=True)
     compare_forwards(fwd_surf, fwd_surf_io)
     # go back
     fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
+    print(fwd_new)
+    assert_true(isinstance(fwd, Forward))
     compare_forwards(fwd, fwd_new)
     # now go to fixed
     fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
                                          force_fixed=True)
+    print(fwd_fixed)
+    assert_true(isinstance(fwd_fixed, Forward))
     fwd_fixed_io = read_forward_solution(fname_meeg, surf_ori=False,
                                          force_fixed=True)
     compare_forwards(fwd_fixed, fwd_fixed_io)
     # now go back to cartesian (original condition)
     fwd_new = convert_forward_solution(fwd_fixed)
+    print(fwd_new)
+    assert_true(isinstance(fwd_new, Forward))
     compare_forwards(fwd, fwd_new)
 
 
@@ -80,10 +89,11 @@ def test_io_forward():
     """
     # test M/EEG
     fwd_meeg = read_forward_solution(fname_meeg)
+    assert_true(isinstance(fwd_meeg, Forward))
     leadfield = fwd_meeg['sol']['data']
     assert_equal(leadfield.shape, (366, 22494))
     assert_equal(len(fwd_meeg['sol']['row_names']), 366)
-    fname_temp = op.join(temp_dir, 'fwd.fif')
+    fname_temp = op.join(temp_dir, 'test-fwd.fif')
     write_forward_solution(fname_temp, fwd_meeg, overwrite=True)
 
     fwd_meeg = read_forward_solution(fname_temp)
@@ -96,7 +106,7 @@ def test_io_forward():
     leadfield = fwd['sol']['data']
     assert_equal(leadfield.shape, (306, 22494))
     assert_equal(len(fwd['sol']['row_names']), 306)
-    fname_temp = op.join(temp_dir, 'fwd.fif')
+    fname_temp = op.join(temp_dir, 'test-fwd.fif')
     write_forward_solution(fname_temp, fwd, overwrite=True)
 
     fwd = read_forward_solution(fname, surf_ori=True)
@@ -118,6 +128,14 @@ def test_io_forward():
     assert_true('mri_head_t' in fwd)
     assert_true(fwd['surf_ori'])
 
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
+        write_forward_solution(fwd_badname, fwd_meeg)
+        read_forward_solution(fwd_badname)
+    assert_true(len(w) == 2)
+
 
 @sample.requires_sample_data
 def test_apply_forward():
@@ -131,6 +149,7 @@ def test_apply_forward():
 
     fwd = read_forward_solution(fname, force_fixed=True)
     fwd = pick_types_forward(fwd, meg=True)
+    assert_true(isinstance(fwd, Forward))
 
     vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
     stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
@@ -140,7 +159,7 @@ def test_apply_forward():
 
     # Evoked
     with warnings.catch_warnings(record=True) as w:
-        evoked = Evoked(fname_evoked, setno=0)
+        evoked = read_evokeds(fname_evoked, condition=0)
         evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)
         assert_equal(len(w), 2)
         data = evoked.data
@@ -182,6 +201,7 @@ def test_restrict_forward_to_stc():
     stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
 
     fwd_out = restrict_forward_to_stc(fwd, stc)
+    assert_true(isinstance(fwd_out, Forward))
 
     assert_equal(fwd_out['sol']['ncol'], 20)
     assert_equal(fwd_out['src'][0]['nuse'], 15)
@@ -278,11 +298,12 @@ def test_average_forward_solution():
 
     # try an easy case
     fwd_copy = average_forward_solutions([fwd])
+    assert_true(isinstance(fwd_copy, Forward))
     assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
 
     # modify a fwd solution, save it, use MNE to average with old one
     fwd_copy['sol']['data'] *= 0.5
-    fname_copy = op.join(temp_dir, 'fwd.fif')
+    fname_copy = op.join(temp_dir, 'copy-fwd.fif')
     write_forward_solution(fname_copy, fwd_copy, overwrite=True)
     cmd = ('mne_average_forward_solutions', '--fwd', fname, '--fwd',
            fname_copy, '--out', fname_copy)
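
The bad-filename test above relies on the stdlib warning-capture idiom;
`simplefilter('always')` keeps Python from deduplicating repeated warnings so
each emission is recorded. Stripped of the MNE calls, the pattern is:

    import warnings

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')   # record every occurrence
        warnings.warn('bad file name', RuntimeWarning)
        warnings.warn('bad file name', RuntimeWarning)
    assert len(w) == 2                    # one entry per emitted warning
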
diff --git a/mne/forward/tests/test_make_forward.py b/mne/forward/tests/test_make_forward.py
index 8fd17c8..4fb018e 100644
--- a/mne/forward/tests/test_make_forward.py
+++ b/mne/forward/tests/test_make_forward.py
@@ -1,29 +1,33 @@
+from __future__ import print_function
+
 import os
 import os.path as op
 from subprocess import CalledProcessError
+import warnings
 
-from nose.tools import assert_raises
+from nose.tools import assert_raises, assert_true
 from numpy.testing import (assert_equal, assert_allclose)
 
 from mne.datasets import sample
-from mne.fiff import Raw
-from mne.fiff.kit import read_raw_kit
-from mne.fiff.bti import read_raw_bti
+from mne.io import Raw
+from mne.io import read_raw_kit
+from mne.io import read_raw_bti
 from mne import (read_forward_solution, make_forward_solution,
                  do_forward_solution, setup_source_space, read_trans,
                  convert_forward_solution)
 from mne.utils import requires_mne, _TempDir
 from mne.tests.test_source_space import _compare_source_spaces
+from mne.forward import Forward
 
 data_path = sample.data_path(download=False)
 fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-oct-6-fwd.fif')
 fname_meeg = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis-meg-eeg-oct-6-fwd.fif')
 
-fname_raw = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data',
+fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
 
-fname_evoked = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                        'data', 'test-ave.fif')
 fname_mri = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
 subjects_dir = os.path.join(data_path, 'subjects')
@@ -31,7 +35,7 @@ temp_dir = _TempDir()
 
 # make a file that exists with some data in it
 existing_file = op.join(temp_dir, 'test.fif')
-with open(existing_file, 'wb') as fid:
+with open(existing_file, 'w') as fid:
     fid.write('aoeu')
 
 
@@ -49,7 +53,7 @@ def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
 
         for key in ['nchan', 'source_nn', 'source_rr', 'source_ori',
                     'surf_ori', 'coord_frame', 'nsource']:
-            print key
+            print(key)
             assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7)
         assert_allclose(fwd_py['mri_head_t']['trans'],
                         fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8)
@@ -59,13 +63,13 @@ def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
         assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
 
         # check MEG
-        print 'check MEG'
+        print('check MEG')
         assert_allclose(fwd['sol']['data'][:306],
                         fwd_py['sol']['data'][:306],
                         rtol=meg_rtol, atol=meg_atol)
         # check EEG
         if fwd['sol']['data'].shape[0] > 306:
-            print 'check EEG'
+            print('check EEG')
             assert_allclose(fwd['sol']['data'][306:],
                             fwd_py['sol']['data'][306:],
                             rtol=1e-3, atol=1e-3)
@@ -78,23 +82,23 @@ def test_make_forward_solution_kit():
     """
     fname_bem = op.join(subjects_dir, 'sample', 'bem',
                         'sample-5120-bem-sol.fif')
-    kit_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'kit',
+    kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
                       'tests', 'data')
     sqd_path = op.join(kit_dir, 'test.sqd')
     mrk_path = op.join(kit_dir, 'test_mrk.sqd')
     elp_path = op.join(kit_dir, 'test_elp.txt')
     hsp_path = op.join(kit_dir, 'test_hsp.txt')
     mri_path = op.join(kit_dir, 'trans-sample.fif')
-    fname_kit_raw = op.join(kit_dir, 'test_bin.fif')
+    fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
 
-    bti_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'bti',
+    bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
                       'tests', 'data')
     bti_pdf = op.join(bti_dir, 'test_pdf_linux')
     bti_config = op.join(bti_dir, 'test_config_linux')
     bti_hs = op.join(bti_dir, 'test_hs_linux')
-    fname_bti_raw = op.join(bti_dir, 'exported4D_linux.fif')
+    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
 
-    fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+    fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                             'data', 'test_ctf_comp_raw.fif')
 
     # first set up a testing source space
@@ -106,12 +110,14 @@ def test_make_forward_solution_kit():
     fwd = do_forward_solution('sample', fname_kit_raw, src=fname_src,
                               mindist=0.0, bem=fname_bem, mri=mri_path,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
+    assert_true(isinstance(fwd, Forward))
 
     # now let's use python with the same raw file
     fwd_py = make_forward_solution(fname_kit_raw, mindist=0.0,
                                    src=src, eeg=False, meg=True,
                                    bem=fname_bem, mri=mri_path)
     _compare_forwards(fwd, fwd_py, 157, 108)
+    assert_true(isinstance(fwd_py, Forward))
 
     # now let's use mne-python all the way
     raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
@@ -152,10 +158,11 @@ def test_make_forward_solution_kit():
     fwd_py = make_forward_solution(ctf_raw.info, mindist=0.0,
                                    src=src, eeg=False, meg=True,
                                    bem=fname_bem, mri=fname_mri)
-
-    fwd = do_forward_solution('sample', ctf_raw, src=fname_src,
-                              mindist=0.0, bem=fname_bem, mri=fname_mri,
-                              eeg=False, meg=True, subjects_dir=subjects_dir)
+    with warnings.catch_warnings(record=True):
+        fwd = do_forward_solution('sample', ctf_raw, src=fname_src,
+                                  mindist=0.0, bem=fname_bem, mri=fname_mri,
+                                  eeg=False, meg=True,
+                                  subjects_dir=subjects_dir)
     _compare_forwards(fwd, fwd_py, 274, 108)
 
 
@@ -169,7 +176,9 @@ def test_make_forward_solution():
     fwd_py = make_forward_solution(fname_raw, mindist=5.0,
                                    src=fname_src, eeg=True, meg=True,
                                    bem=fname_bem, mri=fname_mri)
+    assert_true(isinstance(fwd_py, Forward))
     fwd = read_forward_solution(fname_meeg)
+    assert_true(isinstance(fwd, Forward))
     _compare_forwards(fwd, fwd_py, 366, 22494)
 
 
diff --git a/mne/gui/__init__.py b/mne/gui/__init__.py
index d44b8a4..94fd9e4 100644
--- a/mne/gui/__init__.py
+++ b/mne/gui/__init__.py
@@ -4,6 +4,8 @@
 #
 # License: BSD (3-clause)
 
+from ..utils import _check_mayavi_version
+
 
 def combine_kit_markers():
     """Create a new KIT marker file by interpolating two marker files
@@ -12,13 +14,14 @@ def combine_kit_markers():
     -----
     The functionality in this GUI is also part of :func:`kit2fiff`.
     """
+    _check_mayavi_version()
     from ._marker_gui import CombineMarkersFrame
     gui = CombineMarkersFrame()
     gui.configure_traits()
     return gui
 
 
-def coregistration(tabbed=False, split=True, scene_width=01, raw=None,
+def coregistration(tabbed=False, split=True, scene_width=0o1, raw=None,
                    subject=None, subjects_dir=None):
     """Coregister an MRI with a subject's head shape
 
@@ -49,6 +52,7 @@ def coregistration(tabbed=False, split=True, scene_width=01, raw=None,
     subjects for which no MRI is available
     <http://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
     """
+    _check_mayavi_version()
     from ._coreg_gui import CoregFrame, _make_view
     view = _make_view(tabbed, split, scene_width)
     gui = CoregFrame(raw, subject, subjects_dir)
@@ -74,6 +78,7 @@ def fiducials(subject=None, fid_file=None, subjects_dir=None):
     All parameters are optional, since they can be set through the GUI.
     The functionality in this GUI is also part of :func:`coregistration`.
     """
+    _check_mayavi_version()
     from ._fiducials_gui import FiducialsFrame
     gui = FiducialsFrame(subject, subjects_dir, fid_file=fid_file)
     gui.configure_traits()
@@ -83,6 +88,7 @@ def fiducials(subject=None, fid_file=None, subjects_dir=None):
 def kit2fiff():
     """Convert KIT files to the fiff format
     """
+    _check_mayavi_version()
     from ._kit2fiff_gui import Kit2FiffFrame
     gui = Kit2FiffFrame()
     gui.configure_traits()
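
Every GUI entry point above now calls `_check_mayavi_version()` before
importing its frame, so a missing or outdated dependency fails fast with a
readable error instead of a deep import traceback. A sketch of the
guard-then-lazy-import pattern, with hypothetical names:

    def _check_backend():
        try:
            import mayavi  # noqa: only verifying availability
        except ImportError:
            raise RuntimeError("mayavi is required to launch this GUI")

    def launch_gui():
        _check_backend()          # fail fast, with a readable message
        from mayavi import mlab   # heavy import deferred until needed
        return mlab
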
diff --git a/mne/gui/_coreg_gui.py b/mne/gui/_coreg_gui.py
index e57b6c2..6b327a0 100644
--- a/mne/gui/_coreg_gui.py
+++ b/mne/gui/_coreg_gui.py
@@ -4,11 +4,11 @@
 #
 # License: BSD (3-clause)
 
-from copy import deepcopy
 import os
-from Queue import Queue
+from ..externals.six.moves import queue
 import re
 from threading import Thread
+import warnings
 
 import numpy as np
 from scipy.spatial.distance import cdist
@@ -63,7 +63,7 @@ except:
 
 
 from ..coreg import bem_fname, trans_fname
-from ..fiff import FIFF
+from ..io.constants import FIFF
 from ..forward import prepare_bem_model
 from ..transforms import (write_trans, read_trans, apply_trans, rotation,
                           translation, scaling, rotation_angles)
@@ -71,7 +71,7 @@ from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
                      _point_cloud_error)
 from ..utils import get_subjects_dir, logger
 from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
-from ._file_traits import (assert_env_set, trans_wildcard, RawSource,
+from ._file_traits import (set_mne_root, trans_wildcard, RawSource,
                            SubjectSelectorPanel)
 from ._viewer import defaults, HeadViewController, PointObject, SurfaceObject
 
@@ -84,7 +84,7 @@ class CoregModel(HasPrivateTraits):
 
     Notes
     -----
-    Transform from head to mri space is modeled with the following steps:
+    Transform from head to mri space is modelled with the following steps:
 
      * move the head shape to its nasion position
      * rotate the head shape with user defined rotation around its nasion
@@ -105,6 +105,9 @@ class CoregModel(HasPrivateTraits):
     hsp = Instance(RawSource, ())
 
     # parameters
+    grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
+                      "head outwards to compensate for hair on the digitizer "
+                      "head shape")
     n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
                           "subject's head shape (a new MRI subject will be "
                           "created with a name specified upon saving)")
@@ -118,6 +121,9 @@ class CoregModel(HasPrivateTraits):
     trans_y = Float(0, label="Anterior (Y)")
     trans_z = Float(0, label="Superior (Z)")
 
+    prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
+                             "after scaling the MRI")
+
     # secondary to parameters
     scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
                                  'scale_z'])
@@ -138,13 +144,17 @@ class CoregModel(HasPrivateTraits):
                               "match the scaled MRI.")
 
     # info
+    subject_has_bem = DelegatesTo('mri')
+    lock_fiducials = DelegatesTo('mri')
+    can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
+                                                       'subject_has_bem'])
     can_save = Property(Bool, depends_on=['head_mri_trans'])
     raw_subject = Property(depends_on='hsp.raw_fname', desc="Subject guess "
                            "based on the raw file name.")
-    lock_fiducials = DelegatesTo('mri')
 
     # transformed geometry
-    transformed_mri_points = Property(depends_on=['mri.points',
+    processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
+    transformed_mri_points = Property(depends_on=['processed_mri_points',
                                                   'mri_scale_trans'])
     transformed_hsp_points = Property(depends_on=['hsp.points',
                                                   'head_mri_trans'])
@@ -173,6 +183,10 @@ class CoregModel(HasPrivateTraits):
     points_eval_str = Property(depends_on='point_distance')
 
     @cached_property
+    def _get_can_prepare_bem_model(self):
+        return self.subject_has_bem and self.n_scale_params > 0
+
+    @cached_property
     def _get_can_save(self):
         return np.any(self.head_mri_trans != np.eye(4))
 
@@ -238,8 +252,28 @@ class CoregModel(HasPrivateTraits):
         return trans
 
     @cached_property
+    def _get_processed_mri_points(self):
+        if self.grow_hair:
+            if len(self.mri.norms):
+                if self.n_scale_params == 0:
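+                    # grow_hair is in mm; surface points are in m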
+                    scaled_hair_dist = self.grow_hair / 1000
+                else:
+                    scaled_hair_dist = self.grow_hair / self.scale / 1000
+
+                points = self.mri.points.copy()
+                hair = points[:, 2] > points[:, 1]
+                points[hair] += self.mri.norms[hair] * scaled_hair_dist
+                return points
+            else:
+                msg = "Norms missing form bem, can't grow hair"
+                error(None, msg)
+                self.grow_hair = 0
+        return self.mri.points
+
+    @cached_property
     def _get_transformed_mri_points(self):
-        return apply_trans(self.mri_scale_trans, self.mri.points)
+        points = apply_trans(self.mri_scale_trans, self.processed_mri_points)
+        return points
 
     @cached_property
     def _get_transformed_mri_lpa(self):
@@ -287,7 +321,7 @@ class CoregModel(HasPrivateTraits):
     @cached_property
     def _get_point_distance(self):
         if (len(self.transformed_hsp_points) == 0
-            or len(self.transformed_mri_points) == 0):
+                or len(self.transformed_mri_points) == 0):
             return
         dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
                       'euclidean')
@@ -341,7 +375,8 @@ class CoregModel(HasPrivateTraits):
         distance = float(distance)
         if reset:
             logger.info("Coregistration: Reset excluded head shape points")
-            self.hsp.points_filter = None
+            with warnings.catch_warnings(record=True):  # Traits None comp
+                self.hsp.points_filter = None
 
         if distance <= 0:
             return
@@ -364,7 +399,8 @@ class CoregModel(HasPrivateTraits):
             new_filter[old_filter] = new_sub_filter
 
         # set the filter
-        self.hsp.points_filter = new_filter
+        with warnings.catch_warnings(record=True):  # comp to None in Traits
+            self.hsp.points_filter = new_filter
 
     def fit_auricular_points(self):
         "Find rotation to fit LPA and RPA"
@@ -402,7 +438,7 @@ class CoregModel(HasPrivateTraits):
         "Find rotation to fit head shapes"
         src_pts = self.hsp.points - self.hsp.nasion
 
-        tgt_pts = self.mri.points - self.mri.nasion
+        tgt_pts = self.processed_mri_points - self.mri.nasion
         tgt_pts *= self.scale
         tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
 
@@ -449,7 +485,7 @@ class CoregModel(HasPrivateTraits):
         "Find MRI scaling and rotation to match head shape points"
         src_pts = self.hsp.points - self.hsp.nasion
 
-        tgt_pts = self.mri.points - self.mri.nasion
+        tgt_pts = self.processed_mri_points - self.mri.nasion
 
         if self.n_scale_params == 1:
             x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
@@ -477,12 +513,12 @@ class CoregModel(HasPrivateTraits):
         subjects_dir = self.mri.subjects_dir
         subject_from = self.mri.subject
 
-        bem_name = 'inner_skull'
+        bem_name = 'inner_skull-bem'
         bem_file = bem_fname.format(subjects_dir=subjects_dir,
                                     subject=subject_from, name=bem_name)
         if not os.path.exists(bem_file):
             pattern = bem_fname.format(subjects_dir=subjects_dir,
-                                       subject=subject_to, name='(.+)')
+                                       subject=subject_to, name='(.+-bem)')
             bem_dir, bem_file = os.path.split(pattern)
             m = None
             bem_file_pattern = re.compile(bem_file)
@@ -493,10 +529,10 @@ class CoregModel(HasPrivateTraits):
 
             if m is None:
                 pattern = bem_fname.format(subjects_dir=subjects_dir,
-                                           subject=subject_to, name='*')
+                                           subject=subject_to, name='*-bem')
                 err = ("No bem file found; looking for files matching "
                        "%s" % pattern)
-                error(err)
+                error(None, err)
 
             bem_name = m.group(1)
 
@@ -524,9 +560,9 @@ class CoregModel(HasPrivateTraits):
 
     def reset(self):
         """Reset all the parameters affecting the coregistration"""
-        self.reset_traits(('n_scaling_params', 'scale_x', 'scale_y', 'scale_z',
-                           'rot_x', 'rot_y', 'rot_z', 'trans_x', 'trans_y',
-                           'trans_z'))
+        self.reset_traits(('grow_hair', 'n_scaling_params', 'scale_x',
+                           'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
+                           'trans_x', 'trans_y', 'trans_z'))
 
     def set_trans(self, head_mri_trans):
         """Set rotation and translation parameters from a transformation matrix
@@ -564,13 +600,10 @@ class CoregModel(HasPrivateTraits):
         """
         if not self.can_save:
             raise RuntimeError("Not enough information for saving transform")
-        trans = self.head_mri_trans
-        dig = deepcopy(self.hsp.fid_dig)
-        for i in xrange(len(dig)):
-            dig[i]['r'] = apply_trans(trans, dig[i]['r'])
-        info = {'to': FIFF.FIFFV_COORD_MRI, 'from': FIFF.FIFFV_COORD_HEAD,
-                'trans': trans, 'dig': dig}
-        write_trans(fname, info)
+        trans_matrix = self.head_mri_trans
+        trans = {'to': FIFF.FIFFV_COORD_MRI, 'from': FIFF.FIFFV_COORD_HEAD,
+                 'trans': trans_matrix}
+        write_trans(fname, trans)
 
 
 class CoregFrameHandler(Handler):
@@ -592,6 +625,7 @@ class CoregPanel(HasPrivateTraits):
 
     # parameters
     reset_params = Button(label='Reset')
+    grow_hair = DelegatesTo('model')
     n_scale_params = DelegatesTo('model')
     scale_step = Float(1.01)
     scale_x = DelegatesTo('model')
@@ -641,18 +675,20 @@ class CoregPanel(HasPrivateTraits):
     points_eval_str = DelegatesTo('model')
 
     # saving
+    can_prepare_bem_model = DelegatesTo('model')
     can_save = DelegatesTo('model')
-    prepare_bem_model = Bool(True)
+    prepare_bem_model = DelegatesTo('model')
     save = Button(label="Save As...")
     load_trans = Button
-    queue = Instance(Queue, ())
+    queue = Instance(queue.Queue, ())
     queue_feedback = Str('')
     queue_current = Str('')
     queue_len = Int(0)
     queue_len_str = Property(Str, depends_on=['queue_len'])
     error = Str('')
 
-    view = View(VGroup(Item('n_scale_params', label='MRI Scaling',
+    view = View(VGroup(Item('grow_hair', show_label=True),
+                       Item('n_scale_params', label='MRI Scaling',
                             style='custom', show_label=True,
                             editor=EnumEditor(values={0: '1:No Scaling',
                                                       1: '2:1 Param',
@@ -763,7 +799,7 @@ class CoregPanel(HasPrivateTraits):
                        HGroup(Item('prepare_bem_model'),
                               Label("Run mne_prepare_bem_model"),
                               show_labels=False,
-                              enabled_when='n_scale_params > 0'),
+                              enabled_when='can_prepare_bem_model'),
                        HGroup(Item('save', enabled_when='can_save',
                                    tooltip="Save the trans file and (if "
                                    "scaling is enabled) the scaled MRI"),
@@ -881,12 +917,11 @@ class CoregPanel(HasPrivateTraits):
             return
 
         # Make sure that MNE_ROOT environment variable is set
-        if not assert_env_set(mne_root=True):
+        if not set_mne_root(True):
             err = ("MNE_ROOT environment variable could not be set. "
-                   "You will be able to scale MRIs, but the preparatory mne "
-                   "tools will fail. Please specify the MNE_ROOT environment "
-                   "variable. In Python this can be done using:\n\n"
-                   ">>> os.environ['MNE_ROOT'] = '/Applications/mne-2.7.3'")
+                   "You will be able to scale MRIs, but the "
+                   "mne_prepare_bem_model tool will fail. Please install "
+                   "MNE.")
             warning(None, err, "MNE_ROOT Not Set")
 
     def _reset_params_fired(self):
@@ -942,7 +977,7 @@ class CoregPanel(HasPrivateTraits):
             subject_to = mridlg.subject_to
 
         # find bem file to run mne_prepare_bem_model
-        if self.n_scale_params and self.prepare_bem_model:
+        if self.can_prepare_bem_model and self.prepare_bem_model:
             bem_job = self.model.get_prepare_bem_model_job(subject_to)
         else:
             bem_job = None
@@ -1263,11 +1298,13 @@ class CoregFrame(HasTraits):
 
         # MRI scalp
         color = defaults['mri_color']
-        self.mri_obj = SurfaceObject(points=self.model.mri.points, color=color,
-                                     tri=self.model.mri.tris, scene=self.scene)
+        self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
+                                     color=color, tri=self.model.mri.tris,
+                                     scene=self.scene)
         # on_trait_change was unreliable, so link it another way:
         self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
-        self.model.sync_trait('scale', self.mri_obj, 'trans', mutual=False)
+        self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
+                              mutual=False)
         self.fid_panel.hsp_obj = self.mri_obj
 
         # MRI Fiducials
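
The `Queue` import above is routed through `six.moves` so the same source
runs on Python 2 and 3; with only the stdlib the equivalent shim is:

    try:
        import queue             # Python 3 name
    except ImportError:
        import Queue as queue    # Python 2 name

    q = queue.Queue()
    q.put('save-job')
    assert q.get() == 'save-job'
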
diff --git a/mne/gui/_fiducials_gui.py b/mne/gui/_fiducials_gui.py
index 98fff2d..a127480 100644
--- a/mne/gui/_fiducials_gui.py
+++ b/mne/gui/_fiducials_gui.py
@@ -6,6 +6,8 @@
 
 from glob import glob
 import os
+from ..externals.six.moves import map
+from ..externals.six.moves import zip
 
 # allow import without traits
 try:
@@ -43,7 +45,8 @@ except:
     NoButtons = trait_wraith
 
 from ..coreg import fid_fname, fid_fname_general, head_bem_fname
-from ..fiff import FIFF, write_fiducials
+from ..io import write_fiducials
+from ..io.constants import FIFF
 from ..utils import get_subjects_dir, logger
 from ._file_traits import (BemSource, fid_wildcard, FiducialsSource,
                            MRISubjectSource, SubjectSelectorPanel)
@@ -76,7 +79,9 @@ class MRIHeadWithFiducialsModel(HasPrivateTraits):
     fid_points = DelegatesTo('fid', 'points')
     subjects_dir = DelegatesTo('subject_source')
     subject = DelegatesTo('subject_source')
+    subject_has_bem = DelegatesTo('subject_source')
     points = DelegatesTo('bem')
+    norms = DelegatesTo('bem')
     tris = DelegatesTo('bem')
     lpa = Array(float, (1, 3))
     nasion = Array(float, (1, 3))
@@ -294,7 +299,7 @@ class FiducialsPanel(HasPrivateTraits):
             idx = None
             pt = [picker.pick_position]
         elif self.hsp_obj.surf.actor.actor in picker.actors:
-            idxs = [i for i in xrange(n_pos) if picker.actors[i] is
+            idxs = [i for i in range(n_pos) if picker.actors[i] is
                     self.hsp_obj.surf.actor.actor]
             idx = idxs[-1]
             pt = [picker.picked_positions[idx]]
diff --git a/mne/gui/_file_traits.py b/mne/gui/_file_traits.py
index a0e9bdb..98a9425 100644
--- a/mne/gui/_file_traits.py
+++ b/mne/gui/_file_traits.py
@@ -7,6 +7,8 @@
 import os
 
 import numpy as np
+from ..externals.six.moves import map
+from ..externals.six.moves import zip
 
 # allow import without traits
 try:
@@ -41,10 +43,12 @@ except:
     Item = trait_wraith
     VGroup = trait_wraith
 
-from ..fiff import FIFF, Raw, read_fiducials
+from ..io.constants import FIFF
+from ..io import Raw, read_fiducials
 from ..surface import read_bem_surfaces
-from ..coreg import _is_mri_subject, create_default_subject
-from ..utils import get_config
+from ..coreg import (_is_mri_subject, _mri_subject_has_bem,
+                     create_default_subject)
+from ..utils import get_config, set_config
 
 
 fid_wildcard = "*.fif"
@@ -58,72 +62,153 @@ def _expand_path(p):
     return os.path.abspath(os.path.expandvars(os.path.expanduser(p)))
 
 
-def assert_env_set(mne_root=True, fs_home=False):
-    """Make sure that environment variables are correctly set
+def get_fs_home():
+    """Get the FREESURFER_HOME directory
 
-    Parameters
-    ----------
-    mne_root : bool
-        Make sure the MNE_ROOT environment variable is set correctly, and the
-        mne bin directory is in the PATH.
-    fs_home : bool
-        Make sure the FREESURFER_HOME environment variable is set correctly.
+    Returns
+    -------
+    fs_home : None | str
+        The FREESURFER_HOME path or None if the user cancels.
+
+    Notes
+    -----
+    If FREESURFER_HOME can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    fs_home = get_config('FREESURFER_HOME')
+    problem = _fs_home_problem(fs_home)
+    while problem:
+        info = ("Please select the FREESURFER_HOME directory. This is the "
+                "root directory of the freesurfer installation.")
+        msg = '\n\n'.join((problem, info))
+        information(None, msg, "Select the FREESURFER_HOME Directory")
+        msg = "Please select the FREESURFER_HOME Directory"
+        dlg = DirectoryDialog(message=msg, new_directory=False)
+        if dlg.open() == OK:
+            fs_home = dlg.path
+            problem = _fs_home_problem(fs_home)
+            if problem is None:
+                set_config('FREESURFER_HOME', fs_home)
+        else:
+            return None
+
+    return fs_home
+
+
+def set_fs_home():
+    """Set the FREESURFER_HOME environment variable
 
     Returns
     -------
     success : bool
-        Whether the requested environment variables are successfully set or
-        not.
+        True if the environment variable could be set, False if FREESURFER_HOME
+        could not be found.
 
     Notes
     -----
-    Environment variables are added to ``os.environ`` to make sure that bash
-    tools can find them.
+    If FREESURFER_HOME can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
     """
-    if fs_home:
-        fs_home = os.environ.get('FREESURFER_HOME', None)
-        test_dir = os.path.join('%s', 'subjects', 'fsaverage')
-        while (fs_home is None) or not os.path.exists(test_dir % fs_home):
-            msg = ("Please select the FREESURFER_HOME directory. This is the "
-                   "root directory of the freesurfer installation. In order "
-                   "to avoid this prompt in the future, set the "
-                   "FREESURFER_HOME environment variable. "
-                   "In Python, this can be done with:\n"
-                   ">>> os.environ['FREESURFER_HOME'] = path")
-            information(None, msg, "Select FREESURFER_HOME Directory")
-            msg = "Please select the FREESURFER_HOME Directory"
-            dlg = DirectoryDialog(message=msg, new_directory=False)
-            if dlg.open() == OK:
-                fs_home = dlg.path
-            else:
-                return False
+    fs_home = get_fs_home()
+    if fs_home is None:
+        return False
+    else:
         os.environ['FREESURFER_HOME'] = fs_home
+        return True
 
-    if mne_root:
-        mne_root = get_config('MNE_ROOT')
-        test_dir = os.path.join('%s', 'share', 'mne', 'mne_analyze')
-        while (mne_root is None) or not os.path.exists(test_dir % mne_root):
-            msg = ("Please select the MNE_ROOT directory. This is the root "
-                   "directory of the MNE installation. In order to "
-                   "avoid this prompt in the future, set the MNE_ROOT "
-                   "environment variable. "
-                   "In Python, this can be done with:\n"
-                   ">>> os.environ['MNE_ROOT'] = path")
-            information(None, msg, "Select MNE_ROOT Directory")
-            msg = "Please select the MNE_ROOT Directory"
-            dlg = DirectoryDialog(message=msg, new_directory=False)
-            if dlg.open() == OK:
-                mne_root = dlg.path
-            else:
-                return False
-        os.environ['MNE_ROOT'] = mne_root
+def _fs_home_problem(fs_home):
+    "Check FREESURFER_HOME path"
+    if fs_home is None:
+        problem = "FREESURFER_HOME is not set."
+    elif not os.path.exists(fs_home):
+        problem = "FREESURFER_HOME (%s) does not exist." % fs_home
+    elif not os.path.exists(os.path.join(fs_home, 'subjects', 'fsaverage')):
+        problem = ("FREESURFER_HOME (%s) does not contain the fsaverage "
+                   "subject." % fs_home)
+    else:
+        problem = None
+    return problem
+
+
+def get_mne_root():
+    """Get the MNE_ROOT directory
+
+    Returns
+    -------
+    mne_root : None | str
+        The MNE_ROOT path or None if the user cancels.
+
+    Notes
+    -----
+    If MNE_ROOT can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    mne_root = get_config('MNE_ROOT')
+    problem = _mne_root_problem(mne_root)
+    while problem:
+        info = ("Please select the MNE_ROOT directory. This is the root "
+                "directory of the MNE installation.")
+        msg = '\n\n'.join((problem, info))
+        information(None, msg, "Select the MNE_ROOT Directory")
+        msg = "Please select the MNE_ROOT Directory"
+        dlg = DirectoryDialog(message=msg, new_directory=False)
+        if dlg.open() == OK:
+            mne_root = dlg.path
+            problem = _mne_root_problem(mne_root)
+            if problem is None:
+                set_config('MNE_ROOT', mne_root)
+        else:
+            return None
+
+    return mne_root
+
+
+def set_mne_root(set_mne_bin=False):
+    """Set the MNE_ROOT environment variable
+
+    Parameters
+    ----------
+    set_mne_bin : bool
+        Also add the MNE binary directory to the PATH (default: False).
+
+    Returns
+    -------
+    success : bool
+        True if the environment variable could be set, False if MNE_ROOT
+        could not be found.
 
-        # add mne bin directory to PATH
-        mne_bin = os.path.realpath(os.path.join(mne_root, 'bin'))
-        if mne_bin not in map(_expand_path, os.environ['PATH'].split(':')):
-            os.environ['PATH'] += ':' + mne_bin
+    Notes
+    -----
+    If MNE_ROOT can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    mne_root = get_mne_root()
+    if mne_root is None:
+        return False
+    else:
+        os.environ['MNE_ROOT'] = mne_root
+        if set_mne_bin:
+            mne_bin = os.path.realpath(os.path.join(mne_root, 'bin'))
+            if mne_bin not in map(_expand_path, os.environ['PATH'].split(':')):
+                os.environ['PATH'] += ':' + mne_bin
+        return True
 
-    return True
+def _mne_root_problem(mne_root):
+    "Check MNE_ROOT path"
+    if mne_root is None:
+        problem = "MNE_ROOT is not set."
+    elif not os.path.exists(mne_root):
+        problem = "MNE_ROOT (%s) does not exist." % mne_root
+    elif not os.path.exists(os.path.join(mne_root, 'share', 'mne',
+                                         'mne_analyze')):
+        problem = ("MNE_ROOT (%s) is missing files. If this is your MNE "
+                   "installation, consider reinstalling." % mne_root)
+    else:
+        problem = None
+    return problem
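
Both `get_fs_home` and `get_mne_root` above implement the same loop: test
the configured path, keep prompting while the problem check returns a
problem string, and persist the path once it validates. Reduced to plain
Python, with hypothetical stand-ins for the dialog and the config store:

    def _problem(path):
        # stand-in validator; the real checks use os.path.exists()
        return None if path else "path is not set."

    def get_path(answers):
        # 'answers' stands in for successive dialog responses
        for path in answers:
            if _problem(path) is None:
                return path      # the real code calls set_config() here
        return None              # exhausting answers = user cancelled

    assert get_path([None, '/opt/mne']) == '/opt/mne'
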
 
 
 class BemSource(HasTraits):
@@ -148,6 +233,7 @@ class BemSource(HasTraits):
     """
     file = File(exists=True, filter=['*.fif'])
     points = Array(shape=(None, 3), value=np.empty((0, 3)))
+    norms = Array
     tris = Array(shape=(None, 3), value=np.empty((0, 3)))
 
     @on_trait_change('file')
@@ -155,9 +241,11 @@ class BemSource(HasTraits):
         if os.path.exists(self.file):
             bem = read_bem_surfaces(self.file)[0]
             self.points = bem['rr']
+            self.norms = bem['nn']
             self.tris = bem['tris']
         else:
             self.points = np.empty((0, 3))
+            self.norms = np.empty((0, 3))
             self.tris = np.empty((0, 3))
 
 
@@ -346,6 +434,9 @@ class MRISubjectSource(HasPrivateTraits):
     # info
     can_create_fsaverage = Property(Bool, depends_on=['subjects_dir',
                                                       'subjects'])
+    subject_has_bem = Property(Bool, depends_on=['subjects_dir', 'subject'],
+                               desc="whether the subject has a file matching "
+                               "the bem file name pattern")
     bem_pattern = Property(depends_on='mri_dir')
 
     @cached_property
@@ -379,18 +470,31 @@ class MRISubjectSource(HasPrivateTraits):
 
         return subjects
 
+    @cached_property
+    def _get_subject_has_bem(self):
+        if not self.subject:
+            return False
+        return _mri_subject_has_bem(self.subject, self.subjects_dir)
+
     def create_fsaverage(self):
         if not self.subjects_dir:
-            err = ("No subjects diretory is selected. Please specify "
+            err = ("No subjects directory is selected. Please specify "
                    "subjects_dir first.")
             raise RuntimeError(err)
 
-        if not assert_env_set(mne_root=True, fs_home=True):
-            err = ("Not all files required for creating the fsaverage brain "
-                   "were found. Both mne and freesurfer are required.")
+        mne_root = get_mne_root()
+        if mne_root is None:
+            err = ("MNE contains files that are needed for copying the "
+                   "fsaverage brain. Please install MNE and try again.")
+            raise RuntimeError(err)
+        fs_home = get_fs_home()
+        if fs_home is None:
+            err = ("FreeSurfer contains files that are needed for copying the "
+                   "fsaverage brain. Please install FreeSurfer and try again.")
             raise RuntimeError(err)
 
-        create_default_subject(subjects_dir=self.subjects_dir)
+        create_default_subject(mne_root, fs_home,
+                               subjects_dir=self.subjects_dir)
         self.refresh = True
         self.subject = 'fsaverage'
 
diff --git a/mne/gui/_kit2fiff_gui.py b/mne/gui/_kit2fiff_gui.py
index 3b09a01..f29a122 100644
--- a/mne/gui/_kit2fiff_gui.py
+++ b/mne/gui/_kit2fiff_gui.py
@@ -5,7 +5,7 @@
 # License: BSD (3-clause)
 
 import os
-from Queue import Queue
+from ..externals.six.moves import queue
 from threading import Thread
 
 import numpy as np
@@ -18,7 +18,7 @@ try:
     from pyface.api import confirm, error, FileDialog, OK, YES, information
     from traits.api import (HasTraits, HasPrivateTraits, cached_property,
                             Instance, Property, Bool, Button, Enum, File, Int,
-                            List, Str, DelegatesTo)
+                            List, Str, Array, DelegatesTo)
     from traitsui.api import (View, Item, HGroup, VGroup, spring,
                               CheckListEditor, EnumEditor, Handler)
     from traitsui.menu import NoButtons
@@ -41,6 +41,8 @@ except:
     List = trait_wraith
     Property = trait_wraith
     Str = trait_wraith
+    Array = trait_wraith
+    spring = trait_wraith
     View = trait_wraith
     Item = trait_wraith
     HGroup = trait_wraith
@@ -50,16 +52,16 @@ except:
     CheckListEditor = trait_wraith
     SceneEditor = trait_wraith
 
-from ..fiff.kit.coreg import read_hsp, read_elp
-from ..fiff.kit.kit import RawKIT, KIT
+from ..io.kit.coreg import read_hsp
+from ..io.kit.kit import RawKIT, KIT
 from ..transforms import apply_trans, als_ras_trans, als_ras_trans_mm
-from ..coreg import (_decimate_points, fit_matched_points,
+from ..coreg import (read_elp, _decimate_points, fit_matched_points,
                      get_ras_to_neuromag_trans)
 from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
 from ._viewer import HeadViewController, headview_item, PointObject
 
 
-use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in xrange(5)])
+use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
 backend_is_wx = False  # is there a way to determine this?
 if backend_is_wx:
     # wx backend allows labels for wildcards
@@ -87,10 +89,11 @@ class Kit2FiffModel(HasPrivateTraits):
                     "head shape")
     fid_file = File(exists=True, filter=hsp_fid_wildcard, desc="Digitizer "
                     "fiducials")
-    stim_chs = Enum(">", "<")
+    stim_chs = Enum(">", "<", "man")
+    stim_chs_manual = Array(int, (8,), range(168, 176))
     stim_slope = Enum("-", "+")
     # Marker Points
-    use_mrk = List(range(5), desc="Which marker points to use for the device "
+    use_mrk = List(list(range(5)), desc="Which marker points to use for the device "
                    "head coregistration.")
 
     # Derived Traits
@@ -157,7 +160,7 @@ class Kit2FiffModel(HasPrivateTraits):
     def _get_elp(self):
         if self.elp_raw is None:
             return np.empty((0, 3))
-        pts = self.elp_raw[3:]
+        pts = self.elp_raw[3:8]
         pts = apply_trans(self.polhemus_neuromag_trans, pts)
         return pts
 
@@ -168,6 +171,8 @@ class Kit2FiffModel(HasPrivateTraits):
 
         try:
             pts = read_elp(self.fid_file)
+            if len(pts) < 8:
+                raise ValueError("File contains %i points, need 8" % len(pts))
         except Exception as err:
             error(None, str(err), "Error Reading Fiducials")
             self.reset_traits(['fid_file'])
@@ -259,8 +264,7 @@ class Kit2FiffModel(HasPrivateTraits):
 
     def clear_all(self):
         """Clear all specified input parameters"""
-        self.markers.mrk1.clear = True
-        self.markers.mrk2.clear = True
+        self.markers.clear = True
         self.reset_traits(['sqd_file', 'hsp_file', 'fid_file'])
 
     def get_event_info(self):
@@ -284,12 +288,17 @@ class Kit2FiffModel(HasPrivateTraits):
         if not self.sqd_file:
             raise ValueError("sqd file not set")
 
-        raw = RawKIT(self.sqd_file, preload=preload)
-        raw._set_stimchannels(self.stim_chs, self.stim_slope)
+        if self.stim_chs == 'man':
+            stim = self.stim_chs_manual
+        else:
+            stim = self.stim_chs
+
+        raw = RawKIT(self.sqd_file, preload=preload, stim=stim,
+                     slope=self.stim_slope)
 
         if np.any(self.fid):
             raw._set_dig_neuromag(self.fid, self.elp, self.hsp,
-                                 self.dev_head_trans)
+                                  self.dev_head_trans)
         return raw
 
 
@@ -297,8 +306,7 @@ class Kit2FiffFrameHandler(Handler):
     """Handler that checks for unfinished processes before closing its window
     """
     def close(self, info, is_ok):
-        if info.object.kit2fiff_panel.kit2fiff_coreg_panel\
-                                                    .queue.unfinished_tasks:
+        if info.object.kit2fiff_panel.queue.unfinished_tasks:
             msg = ("Can not close the window while saving is still in "
                    "progress. Please wait until all files are processed.")
             title = "Saving Still in Progress"
@@ -318,6 +326,7 @@ class Kit2FiffPanel(HasPrivateTraits):
     hsp_file = DelegatesTo('model')
     fid_file = DelegatesTo('model')
     stim_chs = DelegatesTo('model')
+    stim_chs_manual = DelegatesTo('model')
     stim_slope = DelegatesTo('model')
 
     # info
@@ -338,7 +347,7 @@ class Kit2FiffPanel(HasPrivateTraits):
     # Output
     save_as = Button(label='Save FIFF...')
     clear_all = Button(label='Clear All')
-    queue = Instance(Queue, ())
+    queue = Instance(queue.Queue, ())
     queue_feedback = Str('')
     queue_current = Str('')
     queue_len = Int(0)
@@ -359,16 +368,7 @@ class Kit2FiffPanel(HasPrivateTraits):
                               Item('use_mrk', editor=use_editor,
                                    style='custom'),
                               label="Sources", show_border=True),
-                    VGroup(Item('stim_chs', label="Binary Coding",
-                                style='custom',
-                                editor=EnumEditor(values={'>': '1:1 ... 128',
-                                                          '<': '2:128 ... 1',
-                                                          },
-                                                  cols=2),
-                                help="Specifies the bit order in event "
-                                "channels. Assign the first bit (1) to the "
-                                "first or the last trigger channel."),
-                           Item('stim_slope', label="Event Onset",
+                    VGroup(Item('stim_slope', label="Event Onset",
                                 style='custom',
                                 editor=EnumEditor(
                                            values={'+': '2:Peak (0 to 5 V)',
@@ -377,6 +377,18 @@ class Kit2FiffPanel(HasPrivateTraits):
                                 help="Whether events are marked by a decrease "
                                 "(trough) or an increase (peak) in trigger "
                                 "channel values"),
+                           Item('stim_chs', label="Binary Coding",
+                                style='custom',
+                                editor=EnumEditor(values={'>': '1:1 ... 128',
+                                                          '<': '3:128 ... 1',
+                                                          'man': '2:Manual'},
+                                                  cols=2),
+                                help="Specifies the bit order in event "
+                                "channels. Assign the first bit (1) to the "
+                                "first or the last trigger channel."),
+                           Item('stim_chs_manual', label='Stim Channels',
+                                style='custom',
+                                visible_when="stim_chs == 'man'"),
                            label='Events', show_border=True),
                        HGroup(Item('save_as', enabled_when='can_save'), spring,
                               'clear_all', show_labels=False),
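
The "Binary Coding" help text above means the eight trigger channels are
read as bits of the event number: with '>' the first channel carries bit 1,
with '<' the last one does. A sketch with hypothetical sample values:

    import numpy as np

    triggers = np.array([1, 0, 1, 0, 0, 0, 0, 0])  # one sample, 8 channels
    fwd = 2 ** np.arange(8)          # '>' : weights 1, 2, 4, ..., 128
    rev = fwd[::-1]                  # '<' : weights 128, 64, ..., 1
    assert triggers.dot(fwd) == 5    # first channel counts as bit 1
    assert triggers.dot(rev) == 160  # same channels, reversed bit order
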
diff --git a/mne/gui/_marker_gui.py b/mne/gui/_marker_gui.py
index fb14b04..86e631e 100644
--- a/mne/gui/_marker_gui.py
+++ b/mne/gui/_marker_gui.py
@@ -48,7 +48,7 @@ except:
 
 from ..transforms import apply_trans, rotation, translation
 from ..coreg import fit_matched_points
-from ..fiff.kit import read_mrk, write_mrk
+from ..io.kit import read_mrk, write_mrk
 from ._viewer import HeadViewController, headview_borders, PointObject
 
 
@@ -67,8 +67,8 @@ else:
 out_ext = ['.txt', '.pickled']
 
 
-use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in xrange(5)])
-use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in xrange(5)])
+use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in range(5)])
+use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
 
 mrk_view_editable = View(
         VGroup('file',
@@ -149,7 +149,7 @@ class MarkerPointSource(MarkerPoints):
     name = Property(Str, depends_on='file')
     dir = Property(Str, depends_on='file')
 
-    use = List(range(5), desc="Which points to use for the interpolated "
+    use = List(list(range(5)), desc="Which points to use for the interpolated "
                "marker.")
     enabled = Property(Bool, depends_on=['points', 'use'])
     clear = Button(desc="Clear the current marker data")
@@ -186,7 +186,7 @@ class MarkerPointSource(MarkerPoints):
             self.points = pts
 
     def _clear_fired(self):
-        self.reset_traits(['file', 'points'])
+        self.reset_traits(['file', 'points', 'use'])
 
     def _edit_fired(self):
         self.edit_traits(view=mrk_view_edit)
@@ -312,9 +312,16 @@ class CombineMarkersModel(HasPrivateTraits):
     mrk2 = Instance(MarkerPointSource)
     mrk3 = Instance(MarkerPointDest)
 
+    clear = Button(desc="Clear the current marker data")
+
     # stats
     distance = Property(Str, depends_on=['mrk1.points', 'mrk2.points'])
 
+    def _clear_fired(self):
+        self.mrk1.clear = True
+        self.mrk2.clear = True
+        self.mrk3.reset_traits(['method'])
+
     def _mrk1_default(self):
         mrk = MarkerPointSource()
         return mrk
diff --git a/mne/gui/_viewer.py b/mne/gui/_viewer.py
index b132da6..6b57023 100644
--- a/mne/gui/_viewer.py
+++ b/mne/gui/_viewer.py
@@ -41,6 +41,7 @@ except:
     Item = trait_wraith
     Group = trait_wraith
     HGroup = trait_wraith
+    VGrid = trait_wraith
     VGroup = trait_wraith
     Glyph = trait_wraith
     Surface = trait_wraith
@@ -164,7 +165,7 @@ class Object(HasPrivateTraits):
             color = self.color.getRgbF()[:3]
         return color
 
-    @on_trait_change('trans')
+    @on_trait_change('trans,points')
     def _update_points(self):
         """Update the location of the plotted points"""
         if not hasattr(self.src, 'data'):
diff --git a/mne/gui/tests/test_coreg_gui.py b/mne/gui/tests/test_coreg_gui.py
index 2e80af1..65f4cd7 100644
--- a/mne/gui/tests/test_coreg_gui.py
+++ b/mne/gui/tests/test_coreg_gui.py
@@ -2,23 +2,26 @@
 #
 # License: BSD (3-clause)
 
+from ...externals.six import string_types
 import os
 
 import numpy as np
 from numpy.testing import assert_allclose
 from nose.tools import (assert_equal, assert_almost_equal, assert_false,
                         assert_raises, assert_true)
+import warnings
 
 import mne
 from mne.datasets import sample
-from mne.fiff.kit.tests import data_dir as kit_data_dir
+from mne.io.kit.tests import data_dir as kit_data_dir
 from mne.utils import _TempDir, requires_traits, requires_mne_fs_in_env
 
 
 data_path = sample.data_path(download=False)
 raw_path = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
-kit_raw_path = os.path.join(kit_data_dir, 'test_bin.fif')
+kit_raw_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
 subjects_dir = os.path.join(data_path, 'subjects')
+warnings.simplefilter('always')
 
 tempdir = _TempDir()
 
@@ -93,8 +96,8 @@ def test_coreg_model():
     assert_almost_equal(model.rot_z, rot_z)
 
     # info
-    assert_true(isinstance(model.fid_eval_str, basestring))
-    assert_true(isinstance(model.points_eval_str, basestring))
+    assert_true(isinstance(model.fid_eval_str, string_types))
+    assert_true(isinstance(model.points_eval_str, string_types))
 
 
 @sample.requires_sample_data
@@ -146,7 +149,7 @@ def test_coreg_model_with_fsaverage():
     assert_true(avg_point_distance_1param < avg_point_distance)
 
     desc, func, args, kwargs = model.get_scaling_job('test')
-    assert_true(isinstance(desc, basestring))
+    assert_true(isinstance(desc, string_types))
     assert_equal(args[0], 'fsaverage')
     assert_equal(args[1], 'test')
     assert_allclose(args[2], model.scale)
@@ -159,5 +162,6 @@ def test_coreg_model_with_fsaverage():
 
     # test switching raw disables point omission
     assert_equal(model.hsp.n_omitted, 1)
-    model.hsp.file = kit_raw_path
+    with warnings.catch_warnings(record=True):
+        model.hsp.file = kit_raw_path
     assert_equal(model.hsp.n_omitted, 0)
diff --git a/mne/gui/tests/test_file_traits.py b/mne/gui/tests/test_file_traits.py
index 953203c..096f438 100644
--- a/mne/gui/tests/test_file_traits.py
+++ b/mne/gui/tests/test_file_traits.py
@@ -9,7 +9,7 @@ from numpy.testing import assert_allclose
 from nose.tools import assert_equal, assert_false, assert_raises, assert_true
 
 from mne.datasets import sample
-from mne.fiff.tests import data_dir as fiff_data_dir
+from mne.io.tests import data_dir as fiff_data_dir
 from mne.utils import _TempDir, requires_mne_fs_in_env, requires_traits
 
 data_path = sample.data_path(download=False)
@@ -35,15 +35,6 @@ def test_bem_source():
     assert_equal(bem.tris.shape, (5120, 3))
 
 
- at requires_traits
- at requires_mne_fs_in_env
-def test_assert_env_set():
-    """Test environment variable detection"""
-    from mne.gui._file_traits import assert_env_set
-
-    assert_true(assert_env_set(True, True))
-
-
 @sample.requires_sample_data
 @requires_traits
 def test_fiducials_source():
diff --git a/mne/gui/tests/test_kit2fiff_gui.py b/mne/gui/tests/test_kit2fiff_gui.py
index 33631bd..f391629 100644
--- a/mne/gui/tests/test_kit2fiff_gui.py
+++ b/mne/gui/tests/test_kit2fiff_gui.py
@@ -5,11 +5,12 @@
 import os
 
 import numpy as np
-from numpy.testing import assert_allclose
+from numpy.testing import assert_allclose, assert_array_equal
 from nose.tools import assert_true, assert_false, assert_equal
 
-from mne.fiff.kit.tests import data_dir as kit_data_dir
-from mne.fiff import Raw
+import mne
+from mne.io.kit.tests import data_dir as kit_data_dir
+from mne.io import Raw
 from mne.utils import _TempDir, requires_traits
 
 mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
@@ -17,7 +18,7 @@ mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
 sqd_path = os.path.join(kit_data_dir, 'test.sqd')
 hsp_path = os.path.join(kit_data_dir, 'test_hsp.txt')
 fid_path = os.path.join(kit_data_dir, 'test_elp.txt')
-fif_path = os.path.join(kit_data_dir, 'test_bin.fif')
+fif_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
 
 tempdir = _TempDir()
 tgt_fname = os.path.join(tempdir, 'test-raw.fif')
@@ -63,3 +64,25 @@ def test_kit2fiff_model():
     assert_false(np.all(model.dev_head_trans == trans_transform))
     assert_false(np.all(model.dev_head_trans == trans_avg))
     assert_false(np.all(model.dev_head_trans == np.eye(4)))
+
+    # test setting stim channels
+    model.stim_slope = '+'
+    events_bin = mne.find_events(raw_bin, stim_channel='STI 014')
+
+    model.stim_chs = '<'
+    raw = model.get_raw()
+    events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events, events_bin)
+
+    events_rev = events_bin.copy()
+    events_rev[:, 2] = 1
+    model.stim_chs = '>'
+    raw = model.get_raw()
+    events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events, events_rev)
+
+    model.stim_chs = 'man'
+    model.stim_chs_manual = list(range(167, 159, -1))
+    raw = model.get_raw()
+    events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events, events_bin)
diff --git a/mne/gui/tests/test_marker_gui.py b/mne/gui/tests/test_marker_gui.py
index e9feb00..39e0c89 100644
--- a/mne/gui/tests/test_marker_gui.py
+++ b/mne/gui/tests/test_marker_gui.py
@@ -8,8 +8,8 @@ import numpy as np
 from numpy.testing import assert_array_equal
 from nose.tools import assert_true, assert_false
 
-from mne.fiff.kit.tests import data_dir as kit_data_dir
-from mne.fiff.kit import read_mrk
+from mne.io.kit.tests import data_dir as kit_data_dir
+from mne.io.kit import read_mrk
 from mne.utils import _TempDir, requires_traits
 
 mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
@@ -26,22 +26,45 @@ def test_combine_markers_model():
     from mne.gui._marker_gui import CombineMarkersModel
 
     model = CombineMarkersModel()
+
+    # set one marker file
     assert_false(model.mrk3.can_save)
     model.mrk1.file = mrk_pre_path
     assert_true(model.mrk3.can_save)
-    assert_array_equal(model.mrk1.points, model.mrk3.points)
+    assert_array_equal(model.mrk3.points, model.mrk1.points)
 
+    # setting second marker file
     model.mrk2.file = mrk_pre_path
-    assert_array_equal(model.mrk1.points, model.mrk3.points)
+    assert_array_equal(model.mrk3.points, model.mrk1.points)
 
-    model.mrk2._clear_fired()
+    # set second marker
+    model.mrk2.clear = True
     model.mrk2.file = mrk_post_path
     assert_true(np.any(model.mrk3.points))
+    points_interpolate_mrk1_mrk2 = model.mrk3.points
 
+    # change interpolation method
     model.mrk3.method = 'Average'
     mrk_avg = read_mrk(mrk_avg_path)
     assert_array_equal(model.mrk3.points, mrk_avg)
 
+    # clear second marker
+    model.mrk2.clear = True
+    assert_array_equal(model.mrk1.points, model.mrk3.points)
+
+    # I/O
+    model.mrk2.file = mrk_post_path
     model.mrk3.save(tgt_fname)
     mrk_io = read_mrk(tgt_fname)
     assert_array_equal(mrk_io, model.mrk3.points)
+
+    # exclude an individual marker
+    model.mrk1.use = [1, 2, 3, 4]
+    assert_array_equal(model.mrk3.points[0], model.mrk2.points[0])
+    assert_array_equal(model.mrk3.points[1:], mrk_avg[1:])
+
+    # reset model
+    model.clear = True
+    model.mrk1.file = mrk_pre_path
+    model.mrk2.file = mrk_post_path
+    assert_array_equal(model.mrk3.points, points_interpolate_mrk1_mrk2)
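
For orientation, a hedged sketch of the marker I/O these assertions build on, reusing the paths defined at the top of this test module; the averaging line is an assumption about what method='Average' computes:

    from mne.io.kit import read_mrk

    # each .sqd marker file holds the 3D positions of the five HPI coils
    mrk_pre = read_mrk(mrk_pre_path)     # array of shape (5, 3)
    mrk_post = read_mrk(mrk_post_path)
    mrk_avg = (mrk_pre + mrk_post) / 2.  # presumably what method='Average' yields
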
diff --git a/mne/html/bootstrap.min.css b/mne/html/bootstrap.min.css
new file mode 100644
index 0000000..c547283
--- /dev/null
+++ b/mne/html/bootstrap.min.css
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.0.3 (http://getbootstrap.com)
+ * Copyright 2013 Twitter, Inc.
+ * Licensed under http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/* minified Bootstrap CSS body (with bundled normalize.css) omitted; truncated in the archive */
diff --git a/mne/html/bootstrap.min.js b/mne/html/bootstrap.min.js
new file mode 100644
index 0000000..1a6258e
--- /dev/null
+++ b/mne/html/bootstrap.min.js
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.0.3 (http://getbootstrap.com)
+ * Copyright 2013 Twitter, Inc.
+ * Licensed under http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+if("undefined"==typeof jQuery)throw new Error("Bootstrap requires jQuery");+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]}}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one(a.support.transition.end,function(){c=!0});var e=function(){c||a(d).tri [...]
\ No newline at end of file
diff --git a/mne/html/d3.v3.min.js b/mne/html/d3.v3.min.js
new file mode 100644
index 0000000..eed58e6
--- /dev/null
+++ b/mne/html/d3.v3.min.js
@@ -0,0 +1,5 @@
+// minified D3 v3 body omitted; truncated in the archive
diff --git a/mne/html/jquery-1.10.2.min.js b/mne/html/jquery-1.10.2.min.js
new file mode 100644
index 0000000..da41706
--- /dev/null
+++ b/mne/html/jquery-1.10.2.min.js
@@ -0,0 +1,6 @@
+/*! jQuery v1.10.2 | (c) 2005, 2013 jQuery Foundation, Inc. | jquery.org/license
+//@ sourceMappingURL=jquery-1.10.2.min.map
+*/
+// minified jQuery 1.10.2 body omitted; truncated in the archive
diff --git a/mne/html/jquery-ui.min.css b/mne/html/jquery-ui.min.css
new file mode 100644
index 0000000..47047cf
--- /dev/null
+++ b/mne/html/jquery-ui.min.css
@@ -0,0 +1,6 @@
+/*! jQuery UI - v1.10.3 - 2013-05-03
+* http://jqueryui.com
+* Includes: jquery.ui.core.css, jquery.ui.accordion.css, jquery.ui.autocomplete.css, jquery.ui.button.css, jquery.ui.datepicker.css, jquery.ui.dialog.css, jquery.ui.menu.css, jquery.ui.progressbar.css, jquery.ui.resizable.css, jquery.ui.selectable.css, jquery.ui.slider.css, jquery.ui.spinner.css, jquery.ui.tabs.css, jquery.ui.tooltip.css
+* To view and modify this theme, visit http://jqueryui.com/themeroller/?ffDefault=Verdana%2CArial%2Csans-serif&fwDefault=normal&fsDefault=1.1em&cornerRadius=4px&bgColorHeader=cccccc&bgTextureHeader=highlight_soft&bgImgOpacityHeader=75&borderColorHeader=aaaaaa&fcHeader=222222&iconColorHeader=222222&bgColorContent=ffffff&bgTextureContent=flat&bgImgOpacityContent=75&borderColorContent=aaaaaa&fcContent=222222&iconColorContent=222222&bgColorDefault=e6e6e6&bgTextureDefault=glass&bgImgOpacityDe [...]
+* Copyright 2013 jQuery Foundation and other contributors Licensed MIT */
+/* minified jQuery UI CSS body omitted; truncated in the archive */
diff --git a/mne/html/jquery-ui.min.js b/mne/html/jquery-ui.min.js
new file mode 100644
index 0000000..82bbb67
--- /dev/null
+++ b/mne/html/jquery-ui.min.js
@@ -0,0 +1,12 @@
+/*! jQuery UI - v1.10.3 - 2013-05-03
+* http://jqueryui.com
+* Includes: jquery.ui.core.js, jquery.ui.widget.js, jquery.ui.mouse.js, jquery.ui.draggable.js, jquery.ui.droppable.js, jquery.ui.resizable.js, jquery.ui.selectable.js, jquery.ui.sortable.js, jquery.ui.effect.js, jquery.ui.accordion.js, jquery.ui.autocomplete.js, jquery.ui.button.js, jquery.ui.datepicker.js, jquery.ui.dialog.js, jquery.ui.effect-blind.js, jquery.ui.effect-bounce.js, jquery.ui.effect-clip.js, jquery.ui.effect-drop.js, jquery.ui.effect-explode.js, jquery.ui.effect-fade.js, [...]
+* Copyright 2013 jQuery Foundation and other contributors; Licensed MIT */
+// minified jQuery UI JS body omitted; truncated in the archive
diff --git a/mne/html/mpld3.v0.2.min.js b/mne/html/mpld3.v0.2.min.js
new file mode 100644
index 0000000..adefb15
--- /dev/null
+++ b/mne/html/mpld3.v0.2.min.js
@@ -0,0 +1,2 @@
+// minified mpld3 v0.2 body omitted; truncated in the archive
diff --git a/mne/inverse_sparse/__init__.py b/mne/inverse_sparse/__init__.py
index 981622d..e198907 100644
--- a/mne/inverse_sparse/__init__.py
+++ b/mne/inverse_sparse/__init__.py
@@ -1,6 +1,6 @@
 """Non-Linear sparse inverse solvers"""
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: Simplified BSD
 
diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py
index f26b839..01d11e5 100644
--- a/mne/inverse_sparse/_gamma_map.py
+++ b/mne/inverse_sparse/_gamma_map.py
@@ -1,5 +1,5 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Martin Luessi <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 # License: Simplified BSD
 from copy import deepcopy
 
@@ -7,7 +7,7 @@ import numpy as np
 from scipy import linalg
 
 from ..forward import is_fixed_orient, _to_fixed_ori
-from ..fiff.pick import pick_channels_evoked
+from ..io.pick import pick_channels_evoked
 from ..minimum_norm.inverse import _prepare_forward
 from ..utils import logger, verbose
 from .mxne_inverse import _make_sparse_stc, _prepare_gain
@@ -281,8 +281,8 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
         in_pos = 0
         if len(X) < 3 * len(active_src):
             X_xyz = np.zeros((3 * len(active_src), X.shape[1]), dtype=X.dtype)
-            for ii in xrange(len(active_src)):
-                for jj in xrange(3):
+            for ii in range(len(active_src)):
+                for jj in range(3):
                     if in_pos >= len(active_set):
                         break
                     if (active_set[in_pos] + jj) % 3 == 0:
diff --git a/mne/inverse_sparse/mxne_debiasing.py b/mne/inverse_sparse/mxne_debiasing.py
index f49a89d..7a4f66f 100755
--- a/mne/inverse_sparse/mxne_debiasing.py
+++ b/mne/inverse_sparse/mxne_debiasing.py
@@ -1,5 +1,5 @@
 # Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -100,7 +100,7 @@ def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None):
     Y = np.ones(n_sources)
     t = 1.0
 
-    for i in xrange(max_iter):
+    for i in range(max_iter):
         D0 = D
 
         # gradient step
diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py
index 8d32591..7223096 100644
--- a/mne/inverse_sparse/mxne_inverse.py
+++ b/mne/inverse_sparse/mxne_inverse.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: Simplified BSD
 
@@ -9,7 +9,7 @@ from scipy import linalg, signal
 from ..source_estimate import SourceEstimate
 from ..minimum_norm.inverse import combine_xyz, _prepare_forward
 from ..forward import compute_orient_prior, is_fixed_orient, _to_fixed_ori
-from ..fiff.pick import pick_channels_evoked
+from ..io.pick import pick_channels_evoked
 from .mxne_optim import mixed_norm_solver, norm_l2inf, tf_mixed_norm_solver
 from ..utils import logger, verbose
 
diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py
index 92429b7..6020d41 100644
--- a/mne/inverse_sparse/mxne_optim.py
+++ b/mne/inverse_sparse/mxne_optim.py
@@ -1,4 +1,5 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+from __future__ import print_function
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: Simplified BSD
 
@@ -48,16 +49,16 @@ def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
     -------
     >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1))
     >>> Y = np.r_[Y, np.zeros_like(Y)]
-    >>> print Y
+    >>> print(Y)
     [[ 0.  4.  3.  0.  0.]
      [ 0.  4.  3.  0.  0.]
      [ 0.  0.  0.  0.  0.]
      [ 0.  0.  0.  0.  0.]]
     >>> Yp, active_set = prox_l21(Y, 2, 2)
-    >>> print Yp
+    >>> print(Yp)
     [[ 0.          2.86862915  2.15147186  0.          0.        ]
      [ 0.          2.86862915  2.15147186  0.          0.        ]]
-    >>> print active_set
+    >>> print(active_set)
     [ True  True False False]
     """
     if len(Y) == 0:
@@ -71,7 +72,7 @@ def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
         rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
     else:
         rows_norm = np.sqrt(np.sum((np.abs(Y) ** 2).reshape(n_positions, -1),
-                                    axis=1))
+                                   axis=1))
     # Ensure shrink is >= 0 while avoiding any division by zero
     shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
     active_set = shrink > 0.0
@@ -96,16 +97,16 @@ def prox_l1(Y, alpha, n_orient):
     -------
     >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1))
     >>> Y = np.r_[Y, np.zeros_like(Y)]
-    >>> print Y
+    >>> print(Y)
     [[ 1.  2.  3.  2.  0.]
      [ 1.  2.  3.  2.  0.]
      [ 0.  0.  0.  0.  0.]
      [ 0.  0.  0.  0.  0.]]
     >>> Yp, active_set = prox_l1(Y, 2, 2)
-    >>> print Yp
+    >>> print(Yp)
     [[ 0.          0.58578644  1.58578644  0.58578644  0.        ]
      [ 0.          0.58578644  1.58578644  0.58578644  0.        ]]
-    >>> print active_set
+    >>> print(active_set)
     [ True  True False False]
     """
     n_positions = Y.shape[0] // n_orient
@@ -174,7 +175,7 @@ def dgap_l21(M, G, X, active_set, alpha, n_orient):
 
 @verbose
 def _mixed_norm_solver_prox(M, G, alpha, maxit=200, tol=1e-8, verbose=None,
-                       init=None, n_orient=1):
+                            init=None, n_orient=1):
     """Solves L21 inverse problem with proximal iterations and FISTA"""
     n_sensors, n_times = M.shape
     n_sensors, n_sources = G.shape
@@ -205,7 +206,7 @@ def _mixed_norm_solver_prox(M, G, alpha, maxit=200, tol=1e-8, verbose=None,
 
     active_set = np.ones(n_sources, dtype=np.bool)  # start with full AS
 
-    for i in xrange(maxit):
+    for i in range(maxit):
         X0, active_set_0 = X, active_set  # store previous values
         if gram is None:
             Y += np.dot(G.T, R) / lipschitz_constant  # ISTA step
@@ -350,7 +351,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
         active_set[idx_large_corr[-active_set_size:]] = True
         if n_orient > 1:
             active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
-        for k in xrange(maxit):
+        for k in range(maxit):
             X, as_, E = l21_solver(M, G[:, active_set], alpha,
                                    maxit=maxit, tol=tol, init=X_init,
                                    n_orient=n_orient)
@@ -366,8 +367,8 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
                                                          n_orient))
                 new_active_idx = idx_large_corr[-active_set_size:]
                 if n_orient > 1:
-                    new_active_idx = n_orient * new_active_idx[:, None] + \
-                                                np.arange(n_orient)[None, :]
+                    new_active_idx = (n_orient * new_active_idx[:, None] +
+                                      np.arange(n_orient)[None, :])
                     new_active_idx = new_active_idx.ravel()
                 idx_old_active_set = as_
                 active_set_old = active_set.copy()
@@ -382,7 +383,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
                     logger.info('Convergence stopped (AS did not change) !')
                     break
         else:
-            logger.warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
+            logger.warning('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
 
         active_set = np.zeros_like(active_set)
         active_set[as_] = True
@@ -537,7 +538,7 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
     n_dipoles = G.shape[1]
 
     n_step = int(ceil(n_times / float(tstep)))
-    n_freq = wsize / 2 + 1
+    n_freq = wsize // 2 + 1
     n_coefs = n_step * n_freq
     phi = _Phi(wsize, tstep, n_coefs)
     phiT = _PhiT(tstep, n_freq, n_step, n_times)
@@ -560,7 +561,7 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
 
     alpha_time_lc = alpha_time / lipschitz_constant
     alpha_space_lc = alpha_space / lipschitz_constant
-    for i in xrange(maxit):
+    for i in range(maxit):
         Z0, active_set_0 = Z, active_set  # store previous values
 
         if active_set.sum() < len(R) and Y_time_as is not None:
@@ -580,17 +581,17 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
             Z, active_set_l1 = prox_l1(Y, alpha_time_lc, n_orient)
 
         Z, active_set_l21 = prox_l21(Z, alpha_space_lc, n_orient,
-                                shape=(-1, n_freq, n_step), is_stft=True)
+                                     shape=(-1, n_freq, n_step), is_stft=True)
         active_set = active_set_l1
         active_set[active_set_l1] = active_set_l21
 
         # Check convergence : max(abs(Z - Z0)) < tol
-        stop = (safe_max_abs(Z, True - active_set_0[active_set]) < tol and
-                safe_max_abs(Z0, True - active_set[active_set_0]) < tol and
+        stop = (safe_max_abs(Z, ~active_set_0[active_set]) < tol and
+                safe_max_abs(Z0, ~active_set[active_set_0]) < tol and
                 safe_max_abs_diff(Z, active_set_0[active_set],
                                   Z0, active_set[active_set_0]) < tol)
         if stop:
-            print 'Convergence reached !'
+            print('Convergence reached !')
             break
 
         # FISTA 2 steps
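
The prox_l21/prox_l1 calls reindented above are the proximal operators behind the mixed-norm solvers. A self-contained sketch of the group soft-thresholding that prox_l21 applies, leaving out the real function's STFT branch and active-set pruning (prox_l21_sketch is a made-up name):

    import numpy as np

    def prox_l21_sketch(Y, alpha, n_orient=1):
        # rows come in groups of n_orient (one group per source position);
        # each group is scaled by max(1 - alpha / ||group||_2, 0)
        n_positions = Y.shape[0] // n_orient
        rows_norm = np.sqrt((np.abs(Y) ** 2).reshape(n_positions, -1).sum(axis=1))
        shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
        shrink = np.repeat(shrink, n_orient)
        return Y * shrink[:, None], shrink > 0.0

    Y = np.tile(np.array([0., 4., 3., 0., 0.]), (2, 1))
    Y = np.r_[Y, np.zeros_like(Y)]
    Yp, active = prox_l21_sketch(Y, 2, 2)
    # Yp[:2] reproduces the doctest values above; active is [True, True, False, False]
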
diff --git a/mne/inverse_sparse/tests/test_gamma_map.py b/mne/inverse_sparse/tests/test_gamma_map.py
index ce134bf..b350d1e 100644
--- a/mne/inverse_sparse/tests/test_gamma_map.py
+++ b/mne/inverse_sparse/tests/test_gamma_map.py
@@ -7,9 +7,9 @@ import numpy as np
 from nose.tools import assert_true
 from numpy.testing import assert_array_almost_equal
 
-import mne
 from mne.datasets import sample
-from mne import fiff, read_cov, read_forward_solution
+from mne import read_cov, read_forward_solution, read_evokeds
+from mne.cov import regularize
 from mne.inverse_sparse import gamma_map
 
 data_path = sample.data_path(download=False)
@@ -24,11 +24,11 @@ def test_gamma_map():
     """Test Gamma MAP inverse"""
     forward = read_forward_solution(fname_fwd, force_fixed=False,
                                     surf_ori=True)
-    evoked = fiff.Evoked(fname_evoked, setno=0, baseline=(None, 0))
+    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
     evoked.crop(tmin=0, tmax=0.3)
 
     cov = read_cov(fname_cov)
-    cov = mne.cov.regularize(cov, evoked.info)
+    cov = regularize(cov, evoked.info)
 
     alpha = 0.2
     stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
diff --git a/mne/inverse_sparse/tests/test_mxne_debiasing.py b/mne/inverse_sparse/tests/test_mxne_debiasing.py
index a2c3458..fb11586 100755
--- a/mne/inverse_sparse/tests/test_mxne_debiasing.py
+++ b/mne/inverse_sparse/tests/test_mxne_debiasing.py
@@ -1,5 +1,5 @@
 # Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
diff --git a/mne/inverse_sparse/tests/test_mxne_inverse.py b/mne/inverse_sparse/tests/test_mxne_inverse.py
index 93aca04..2a5ac19 100644
--- a/mne/inverse_sparse/tests/test_mxne_inverse.py
+++ b/mne/inverse_sparse/tests/test_mxne_inverse.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #         Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
 #
 # License: Simplified BSD
@@ -11,7 +11,7 @@ from nose.tools import assert_true
 
 from mne.datasets import sample
 from mne.label import read_label
-from mne import fiff, read_cov, read_forward_solution
+from mne import read_cov, read_forward_solution, read_evokeds
 from mne.inverse_sparse import mixed_norm, tf_mixed_norm
 from mne.minimum_norm import apply_inverse, make_inverse_operator
 
@@ -29,17 +29,16 @@ fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
 def test_mxne_inverse():
     """Test (TF-)MxNE inverse computation"""
     # Handling forward solution
-    evoked = fiff.Evoked(fname_data, setno=1, baseline=(None, 0))
+    evoked = read_evokeds(fname_data, condition=1, baseline=(None, 0))
 
     # Read noise covariance matrix
     cov = read_cov(fname_cov)
 
     # Handling average file
-    setno = 0
     loose = None
     depth = 0.9
 
-    evoked = fiff.read_evoked(fname_data, setno=setno, baseline=(None, 0))
+    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
     evoked.crop(tmin=-0.1, tmax=0.4)
 
     evoked_l21 = copy.deepcopy(evoked)
diff --git a/mne/inverse_sparse/tests/test_mxne_optim.py b/mne/inverse_sparse/tests/test_mxne_optim.py
index 21af4f4..b810529 100644
--- a/mne/inverse_sparse/tests/test_mxne_optim.py
+++ b/mne/inverse_sparse/tests/test_mxne_optim.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: Simplified BSD
 
@@ -70,7 +70,7 @@ def test_l21_mxne():
                             n_orient=2, solver='prox')
     assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
     # suppress a coordinate-descent warning here
-    with warnings.catch_warnings(True):
+    with warnings.catch_warnings(record=True):
         X_hat_cd, active_set, _ = mixed_norm_solver(M,
                             G, alpha, maxit=1000, tol=1e-8,
                             active_set_size=2, debias=True,
@@ -83,7 +83,7 @@ def test_l21_mxne():
                             active_set_size=2, debias=True,
                             n_orient=5)
     assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
-    with warnings.catch_warnings(True):  # coordinate-ascent warning
+    with warnings.catch_warnings(record=True):  # coordinate-ascent warning
         X_hat_cd, active_set, _ = mixed_norm_solver(M,
                             G, alpha, maxit=1000, tol=1e-8,
                             active_set_size=2, debias=True,
diff --git a/mne/io/__init__.py b/mne/io/__init__.py
new file mode 100644
index 0000000..0d74a62
--- /dev/null
+++ b/mne/io/__init__.py
@@ -0,0 +1,33 @@
+"""FIF module for IO with .fif files"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from .open import fiff_open, show_fiff, _fiff_get_fid
+from .meas_info import read_fiducials, write_fiducials, read_info, write_info
+
+from .proj import proj_equal, make_eeg_average_ref_proj
+from . import array
+from . import base
+from . import brainvision
+from . import bti
+from . import constants
+from . import edf
+from . import egi
+from . import fiff
+from . import kit
+from . import pick
+
+from .array import RawArray
+from .brainvision import read_raw_brainvision
+from .bti import read_raw_bti
+from .edf import read_raw_edf
+from .egi import read_raw_egi
+from .kit import read_raw_kit
+
+# for backward compatibility
+from .fiff import RawFIFF
+from .fiff import RawFIFF as Raw
+from .base import concatenate_raws, get_chpi_positions, set_eeg_reference
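
In short, mne.io is the new home of everything that lived under mne.fiff, and Raw stays importable as an alias of RawFIFF. A quick sketch (the file name is hypothetical):

    from mne.io import Raw, RawArray, read_raw_kit

    raw = Raw('sample_audvis_raw.fif')  # any FIF raw file
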
diff --git a/mne/io/array/__init__.py b/mne/io/array/__init__.py
new file mode 100644
index 0000000..112d5d8
--- /dev/null
+++ b/mne/io/array/__init__.py
@@ -0,0 +1,5 @@
+"""Module to convert user data to FIF"""
+
+# Author: Eric Larson <larson.eric.d at gmail.com>
+
+from .array import RawArray
diff --git a/mne/io/array/array.py b/mne/io/array/array.py
new file mode 100644
index 0000000..3affb91
--- /dev/null
+++ b/mne/io/array/array.py
@@ -0,0 +1,65 @@
+
+"""Tools for creating Raw objects from numpy arrays"""
+
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from ..constants import FIFF
+from ..meas_info import Info
+from ..base import _BaseRaw
+from ...utils import verbose, logger
+from ...externals.six import string_types
+
+
+class RawArray(_BaseRaw):
+    """Raw object from numpy array
+
+    Parameters
+    ----------
+    data : array, shape (n_channels, n_times)
+        The channels' time series.
+    info : instance of Info
+        Info dictionary. Consider using ``create_info`` to populate
+        this structure.
+    """
+    @verbose
+    def __init__(self, data, info, verbose=None):
+        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
+        data = np.asanyarray(data, dtype=dtype)
+
+        if data.ndim != 2:
+            raise ValueError('Data must be a 2D array of shape (n_channels, '
+                             'n_times)')
+
+        logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s'
+                    % (dtype.__name__, data.shape[0], data.shape[1]))
+
+        if len(data) != len(info['ch_names']):
+            raise ValueError('len(data) does not match len(info["ch_names"])')
+        assert len(info['ch_names']) == info['nchan']
+
+        cals = np.zeros(info['nchan'])
+        for k in range(info['nchan']):
+            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
+
+        self.verbose = verbose
+        self.cals = cals
+        self.rawdir = None
+        self.proj = None
+        self.comp = None
+        self._filenames = list()
+        self.preload = True
+        self.info = info
+        self._data = data
+        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
+        self._times = np.arange(self.first_samp,
+                                self.last_samp + 1) / info['sfreq']
+        self._projectors = list()
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                    self.first_samp, self.last_samp,
+                    float(self.first_samp) / info['sfreq'],
+                    float(self.last_samp) / info['sfreq']))
+        logger.info('Ready.')
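
A minimal sketch of building a RawArray from scratch, assuming create_info from mne.io.meas_info (as the tests below import it) and made-up channel names:

    import numpy as np
    from mne.io import RawArray
    from mne.io.meas_info import create_info

    sfreq = 100.  # Hz
    data = 1e-6 * np.random.randn(3, 1000)  # 3 channels, 10 s of fake EEG
    info = create_info(['EEG 001', 'EEG 002', 'EEG 003'], sfreq, ['eeg'] * 3)
    raw = RawArray(data, info)
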
diff --git a/mne/fiff/bti/tests/__init__.py b/mne/io/array/tests/__init__.py
similarity index 100%
copy from mne/fiff/bti/tests/__init__.py
copy to mne/io/array/tests/__init__.py
diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py
new file mode 100644
index 0000000..2dc03fc
--- /dev/null
+++ b/mne/io/array/tests/test_array.py
@@ -0,0 +1,104 @@
+from __future__ import print_function
+
+# Author: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+
+from numpy.testing import (assert_array_almost_equal, assert_allclose,
+                           assert_array_equal)
+from nose.tools import assert_equal, assert_raises, assert_true
+from mne import find_events, Epochs, pick_types
+from mne.io import Raw
+from mne.io.array import RawArray
+from mne.io.meas_info import create_info, _kind_dict
+from mne.utils import _TempDir
+
+warnings.simplefilter('always')  # enable b/c these tests might throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
+fif_fname = op.join(base_dir, 'test_raw.fif')
+
+tempdir = _TempDir()
+
+
+def test_array_raw():
+    """Test creating raw from array
+    """
+    # creating
+    raw = Raw(fif_fname).crop(2, 5, copy=False)
+    data, times = raw[:, :]
+    sfreq = raw.info['sfreq']
+    ch_names = [(ch[4:] if 'STI' not in ch else ch)
+                for ch in raw.info['ch_names']]  # change them, why not
+    # del raw
+    types = list()
+    for ci in range(102):
+        types.extend(('grad', 'grad', 'mag'))
+    types.extend(['stim'] * 9)
+    types.extend(['eeg'] * 60)
+    # wrong length
+    assert_raises(ValueError, create_info, ch_names, sfreq, types)
+    # bad entry
+    types.append('foo')
+    assert_raises(KeyError, create_info, ch_names, sfreq, types)
+    types[-1] = 'eog'
+    # default type
+    info = create_info(ch_names, sfreq)
+    assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
+    # use real types
+    info = create_info(ch_names, sfreq, types)
+    raw2 = RawArray(data, info)
+    data2, times2 = raw2[:, :]
+    assert_allclose(data, data2)
+    assert_allclose(times, times2)
+
+    # saving
+    temp_fname = op.join(tempdir, 'raw.fif')
+    raw2.save(temp_fname)
+    raw3 = Raw(temp_fname)
+    data3, times3 = raw3[:, :]
+    assert_allclose(data, data3)
+    assert_allclose(times, times3)
+
+    # filtering
+    picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
+    assert_equal(len(picks), 4)
+    raw_lp = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
+    raw_hp = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
+    raw_bp = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
+    raw_bs = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
+    data, _ = raw2[picks, :]
+    lp_data, _ = raw_lp[picks, :]
+    hp_data, _ = raw_hp[picks, :]
+    bp_data, _ = raw_bp[picks, :]
+    bs_data, _ = raw_bs[picks, :]
+    sig_dec = 11
+    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
+    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
+
+    # plotting
+    import matplotlib
+    matplotlib.use('Agg')  # for testing don't use X server
+    raw2.plot()
+    raw2.plot_psds()
+
+    # epoching
+    events = find_events(raw2, stim_channel='STI 014')
+    events[:, 2] = 1
+    assert_true(len(events) > 2)
+    epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
+    epochs.plot_drop_log(return_fig=True)
+    epochs.plot()
+    evoked = epochs.average()
+    evoked.plot()
diff --git a/mne/fiff/raw.py b/mne/io/base.py
similarity index 71%
rename from mne/fiff/raw.py
rename to mne/io/base.py
index 18c00da..98b2b31 100644
--- a/mne/fiff/raw.py
+++ b/mne/io/base.py
@@ -1,7 +1,7 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -17,111 +17,36 @@ from scipy.signal import hilbert
 from scipy import linalg
 
 from .constants import FIFF
-from .open import fiff_open
-from .meas_info import read_meas_info, write_meas_info
-from .tree import dir_tree_find
-from .tag import read_tag
-from .pick import pick_types, channel_type
+from .pick import pick_types, channel_type, pick_channels
+from .meas_info import write_meas_info
 from .proj import (setup_proj, activate_proj, proj_equal, ProjMixin,
                    _has_eeg_average_ref_proj, make_eeg_average_ref_proj)
-from .compensator import get_current_comp, set_current_comp, make_compensator
+from ..channels import ContainsMixin, PickDropChannelsMixin
+from .compensator import set_current_comp
+from .write import (start_file, end_file, start_block, end_block,
+                    write_dau_pack16, write_float, write_double,
+                    write_complex64, write_complex128, write_int,
+                    write_id, write_string)
 
 from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
                       notch_filter, band_stop_filter, resample)
 from ..parallel import parallel_func
 from ..utils import (_check_fname, estimate_rank, _check_pandas_installed,
+                     check_fname, _get_stim_channel, object_hash,
                      logger, verbose)
 from ..viz import plot_raw, plot_raw_psds, _mutable_defaults
+from ..externals.six import string_types
+from ..event import concatenate_events
 
 
-class Raw(ProjMixin):
-    """Raw data
-
-    Parameters
-    ----------
-    fnames : list, or string
-        A list of the raw files to treat as a Raw instance, or a single
-        raw file.
-    allow_maxshield : bool, (default False)
-        allow_maxshield if True, allow loading of data that has been
-        processed with Maxshield. Maxshield-processed data should generally
-        not be loaded directly, but should be processed using SSS first.
-    preload : bool or str (default False)
-        Preload data into memory for data manipulation and faster indexing.
-        If True, the data will be preloaded into memory (fast, requires
-        large amount of memory). If preload is a string, preload is the
-        file name of a memory-mapped file which is used to store the data
-        on the hard drive (slower, requires less memory).
-    proj : bool
-        Apply the signal space projection (SSP) operators present in
-        the file to the data. Note: Once the projectors have been
-        applied, they can no longer be removed. It is usually not
-        recommended to apply the projectors at this point as they are
-        applied automatically later on (e.g. when computing inverse
-        solutions).
-    compensation : None | int
-        If None the compensation in the data is not modified.
-        If set to n, e.g. 3, apply gradient compensation of grade n as
-        for CTF systems.
-    add_eeg_ref : bool
-        If True, add average EEG reference projector (if it's not already
-        present).
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    Attributes
-    ----------
-    info : dict
-        Measurement info.
-    ch_names : list of string
-        List of channels' names.
-    n_times : int
-        Total number of time points in the raw file.
-    verbose : bool, str, int, or None
-        See above.
-    """
+class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
+    """Base class for Raw data"""
     @verbose
-    def __init__(self, fnames, allow_maxshield=False, preload=False,
-                 proj=False, compensation=None, add_eeg_ref=True,
-                 verbose=None):
-
-        if not isinstance(fnames, list):
-            fnames = [op.abspath(fnames)] if not op.isabs(fnames) else [fnames]
-        else:
-            fnames = [op.abspath(f) if not op.isabs(f) else f for f in fnames]
-
-        raws = [self._read_raw_file(fname, allow_maxshield, preload,
-                                    compensation) for fname in fnames]
-        _check_raw_compatibility(raws)
+    def __init__(self, *args, **kwargs):
+        raise NotImplementedError
 
-        # combine information from each raw file to construct self
-        self.first_samp = raws[0].first_samp  # meta first sample
-        self._first_samps = np.array([r.first_samp for r in raws])
-        self._last_samps = np.array([r.last_samp for r in raws])
-        self._raw_lengths = np.array([r.n_times for r in raws])
-        self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
-        self.cals = raws[0].cals
-        self.rawdirs = [r.rawdir for r in raws]
-        self.comp = copy.deepcopy(raws[0].comp)
-        self._orig_comp_grade = raws[0]._orig_comp_grade
-        self.fids = [r.fid for r in raws]
-        self.info = copy.deepcopy(raws[0].info)
-        self.verbose = verbose
-        self.info['filenames'] = fnames
-        self.orig_format = raws[0].orig_format
-        self.proj = False
-        self._add_eeg_ref(add_eeg_ref)
-
-        if preload:
-            self._preload_data(preload)
-        else:
-            self._preloaded = False
-
-        self._projector = None
-        # setup the SSP projector
-        self.proj = proj
-        if proj:
-            self.apply_proj()
+    def _read_segment(self, start, stop, sel, projector, verbose):
+        raise NotImplementedError
 
     def __del__(self):
         # remove file for memmap
@@ -143,6 +68,11 @@ class Raw(ProjMixin):
         except:
             return exception_type, exception_val, trace
 
+    def __hash__(self):
+        if not self.preload:
+            raise RuntimeError('Cannot hash raw unless preloaded')
+        return object_hash(dict(info=self.info, data=self._data))
+
     def _add_eeg_ref(self, add_eeg_ref):
         """Helper to add an average EEG reference"""
         if add_eeg_ref:
@@ -152,189 +82,6 @@ class Raw(ProjMixin):
                 eeg_ref = make_eeg_average_ref_proj(self.info, activate=False)
                 projs.append(eeg_ref)
 
-    def _preload_data(self, preload):
-        """This function actually preloads the data"""
-        if isinstance(preload, basestring):
-            # we will use a memmap: preload is a filename
-            data_buffer = preload
-        else:
-            data_buffer = None
-
-        self._data, self._times = self._read_segment(data_buffer=data_buffer)
-        self._preloaded = True
-        # close files once data are preloaded
-        self.close()
-
-    @verbose
-    def _read_raw_file(self, fname, allow_maxshield, preload, compensation,
-                       verbose=None):
-        """Read in header information from a raw file"""
-        logger.info('Opening raw data file %s...' % fname)
-
-        #   Read in the whole file if preload is on and .fif.gz (saves time)
-        ext = os.path.splitext(fname)[1].lower()
-        whole_file = preload if '.gz' in ext else False
-        fid, tree, _ = fiff_open(fname, preload=whole_file)
-
-        #   Read the measurement info
-        info, meas = read_meas_info(fid, tree)
-
-        #   Locate the data of interest
-        raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
-        if len(raw_node) == 0:
-            raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
-            if allow_maxshield:
-                raw_node = dir_tree_find(meas, FIFF.FIFFB_SMSH_RAW_DATA)
-                if len(raw_node) == 0:
-                    raise ValueError('No raw data in %s' % fname)
-            else:
-                if len(raw_node) == 0:
-                    raise ValueError('No raw data in %s' % fname)
-
-        if len(raw_node) == 1:
-            raw_node = raw_node[0]
-
-        #   Set up the output structure
-        info['filename'] = fname
-
-        #   Process the directory
-        directory = raw_node['directory']
-        nent = raw_node['nent']
-        nchan = int(info['nchan'])
-        first = 0
-        first_samp = 0
-        first_skip = 0
-
-        #   Get first sample tag if it is there
-        if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
-            tag = read_tag(fid, directory[first].pos)
-            first_samp = int(tag.data)
-            first += 1
-
-        #   Omit initial skip
-        if directory[first].kind == FIFF.FIFF_DATA_SKIP:
-            # This first skip can be applied only after we know the buffer size
-            tag = read_tag(fid, directory[first].pos)
-            first_skip = int(tag.data)
-            first += 1
-
-        #  Get first sample tag if it is there
-        if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
-            tag = read_tag(fid, directory[first].pos)
-            first_samp += int(tag.data)
-            first += 1
-
-        raw = _RawShell()
-        raw.first_samp = first_samp
-
-        #   Go through the remaining tags in the directory
-        rawdir = list()
-        nskip = 0
-        orig_format = None
-        for k in range(first, nent):
-            ent = directory[k]
-            if ent.kind == FIFF.FIFF_DATA_SKIP:
-                tag = read_tag(fid, ent.pos)
-                nskip = int(tag.data)
-            elif ent.kind == FIFF.FIFF_DATA_BUFFER:
-                #   Figure out the number of samples in this buffer
-                if ent.type == FIFF.FIFFT_DAU_PACK16:
-                    nsamp = ent.size / (2 * nchan)
-                elif ent.type == FIFF.FIFFT_SHORT:
-                    nsamp = ent.size / (2 * nchan)
-                elif ent.type == FIFF.FIFFT_FLOAT:
-                    nsamp = ent.size / (4 * nchan)
-                elif ent.type == FIFF.FIFFT_DOUBLE:
-                    nsamp = ent.size / (8 * nchan)
-                elif ent.type == FIFF.FIFFT_INT:
-                    nsamp = ent.size / (4 * nchan)
-                elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
-                    nsamp = ent.size / (8 * nchan)
-                elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
-                    nsamp = ent.size / (16 * nchan)
-                else:
-                    fid.close()
-                    raise ValueError('Cannot handle data buffers of type %d' %
-                                     ent.type)
-                if orig_format is None:
-                    if ent.type == FIFF.FIFFT_DAU_PACK16:
-                        orig_format = 'short'
-                    elif ent.type == FIFF.FIFFT_SHORT:
-                        orig_format = 'short'
-                    elif ent.type == FIFF.FIFFT_FLOAT:
-                        orig_format = 'single'
-                    elif ent.type == FIFF.FIFFT_DOUBLE:
-                        orig_format = 'double'
-                    elif ent.type == FIFF.FIFFT_INT:
-                        orig_format = 'int'
-                    elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
-                        orig_format = 'single'
-                    elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
-                        orig_format = 'double'
-
-                #  Do we have an initial skip pending?
-                if first_skip > 0:
-                    first_samp += nsamp * first_skip
-                    raw.first_samp = first_samp
-                    first_skip = 0
-
-                #  Do we have a skip pending?
-                if nskip > 0:
-                    rawdir.append(dict(ent=None, first=first_samp,
-                                       last=first_samp + nskip * nsamp - 1,
-                                       nsamp=nskip * nsamp))
-                    first_samp += nskip * nsamp
-                    nskip = 0
-
-                #  Add a data buffer
-                rawdir.append(dict(ent=ent, first=first_samp,
-                                   last=first_samp + nsamp - 1,
-                                   nsamp=nsamp))
-                first_samp += nsamp
-
-        raw.last_samp = first_samp - 1
-        raw.orig_format = orig_format
-
-        #   Add the calibration factors
-        cals = np.zeros(info['nchan'])
-        for k in range(info['nchan']):
-            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
-
-        raw.cals = cals
-        raw.rawdir = rawdir
-        raw.comp = None
-        raw._orig_comp_grade = None
-
-        #   Set up the CTF compensator
-        current_comp = get_current_comp(info)
-        if current_comp is not None:
-            logger.info('Current compensation grade : %d' % current_comp)
-
-        if compensation is not None:
-            raw.comp = make_compensator(info, current_comp, compensation)
-            if raw.comp is not None:
-                logger.info('Appropriate compensator added to change to '
-                            'grade %d.' % (compensation))
-                raw._orig_comp_grade = current_comp
-                set_current_comp(info, compensation)
-
-        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
-                    raw.first_samp, raw.last_samp,
-                    float(raw.first_samp) / info['sfreq'],
-                    float(raw.last_samp) / info['sfreq']))
-
-        # store the original buffer size
-        info['buffer_size_sec'] = (np.median([r['nsamp'] for r in rawdir])
-                                   / info['sfreq'])
-
-        raw.fid = fid
-        raw.info = info
-        raw.verbose = verbose
-
-        logger.info('Ready.')
-
-        return raw
-
     def _parse_get_set_params(self, item):
         # make sure item is a tuple
         if not isinstance(item, tuple):  # only channel selection passed
@@ -349,7 +96,7 @@ class Raw(ProjMixin):
             nchan = self.info['nchan']
             stop = item[0].stop if item[0].stop is not None else nchan
             step = item[0].step if item[0].step is not None else 1
-            sel = range(start, stop, step)
+            sel = list(range(start, stop, step))
         else:
             sel = item[0]
 
@@ -357,10 +104,15 @@ class Raw(ProjMixin):
             time_slice = item[1]
             start, stop, step = (time_slice.start, time_slice.stop,
                                  time_slice.step)
-        elif isinstance(item[1], int):
-            start, stop, step = item[1], item[1] + 1, 1
         else:
-            raise ValueError('Must pass int or slice to __getitem__')
+            item1 = item[1]
+            # Let's do automated type conversion to integer here
+            if np.array(item[1]).dtype.kind == 'i':
+                item1 = int(item1)
+            if isinstance(item1, int):
+                start, stop, step = item1, item1 + 1, 1
+            else:
+                raise ValueError('Must pass int or slice to __getitem__')
 
         if start is None:
             start = 0
@@ -378,7 +130,7 @@ class Raw(ProjMixin):
     def __getitem__(self, item):
         """getting raw data content with python slicing"""
         sel, start, stop = self._parse_get_set_params(item)
-        if self._preloaded:
+        if self.preload:
             data, times = self._data[sel, start:stop], self._times[start:stop]
         else:
             data, times = self._read_segment(start=start, stop=stop, sel=sel,
@@ -388,7 +140,7 @@ class Raw(ProjMixin):
 
     def __setitem__(self, item, value):
         """setting raw data content with python slicing"""
-        if not self._preloaded:
+        if not self.preload:
             raise RuntimeError('Modifying data of Raw is only supported '
                                'when preloading is used. Use preload=True '
                                '(or string) in the constructor.')
@@ -396,6 +148,12 @@ class Raw(ProjMixin):
         # set the data
         self._data[sel, start:stop] = value
 
+    def anonymize(self):
+        """Anonymize data
+
+        This function will remove info['subject_info'] if it exists."""
+        self.info._anonymize()
+
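
Usage of the new method is a one-liner on any Raw instance (sketch; raw stands for any instance as in the examples above):

    raw.anonymize()  # removes info['subject_info'] if present
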
     @verbose
     def apply_function(self, fun, picks, dtype, n_jobs, verbose=None, *args,
                        **kwargs):
@@ -422,7 +180,7 @@ class Raw(ProjMixin):
             A function to be applied to the channels. The first argument of
             fun has to be a timeseries (numpy.ndarray). The function must
             return an numpy.ndarray with the same size as the input.
-        picks : list of int
+        picks : array-like of int
             Indices of channels to apply the function to.
         dtype : numpy.dtype
             Data type to use for raw data after applying the function. If None
@@ -438,7 +196,7 @@ class Raw(ProjMixin):
         **kwargs :
             Keyword arguments to pass to fun.
         """
-        if not self._preloaded:
+        if not self.preload:
             raise RuntimeError('Raw data needs to be preloaded. Use '
                                'preload=True (or string) in the constructor.')
 
@@ -488,7 +246,7 @@ class Raw(ProjMixin):
 
         Parameters
         ----------
-        picks : list of int
+        picks : array-like of int
             Indices of channels to apply the function to.
         envelope : bool (default: False)
             Compute the envelope signal of each channel.
@@ -549,7 +307,7 @@ class Raw(ProjMixin):
         h_freq : float | None
             High cut-off frequency in Hz. If None the data are only
             high-passed.
-        picks : list of int | None
+        picks : array-like of int | None
             Indices of channels to filter. If None only the data (MEG/EEG)
             channels will be filtered.
         filter_length : str (Default: '10s') | int | None
@@ -582,14 +340,14 @@ class Raw(ProjMixin):
         fs = float(self.info['sfreq'])
         if l_freq == 0:
             l_freq = None
-        if h_freq > (fs / 2.):
+        if h_freq is not None and h_freq > (fs / 2.):
             h_freq = None
         if l_freq is not None and not isinstance(l_freq, float):
             l_freq = float(l_freq)
         if h_freq is not None and not isinstance(h_freq, float):
             h_freq = float(h_freq)
 
-        if not self._preloaded:
+        if not self.preload:
             raise RuntimeError('Raw data needs to be preloaded to filter. Use '
                                'preload=True (or string) in the constructor.')
         if picks is None:
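
The added None guard above keeps a pure high-pass (h_freq=None) from tripping the Nyquist comparison. A hedged usage sketch with a hypothetical file name:

    import mne

    raw = mne.io.Raw('sample_audvis_raw.fif', preload=True)  # filtering requires preload
    raw.filter(l_freq=1., h_freq=40.)   # band-pass
    raw.filter(l_freq=8., h_freq=None)  # pure high-pass, exercised by this fix
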
@@ -668,7 +426,7 @@ class Raw(ProjMixin):
             np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in
             Europe. None can only be used with the mode 'spectrum_fit',
             where an F test is used to find sinusoidal components.
-        picks : list of int | None
+        picks : array-like of int | None
             Indices of channels to filter. If None only the data (MEG/EEG)
             channels will be filtered.
         filter_length : str (Default: '10s') | int | None
@@ -724,7 +482,7 @@ class Raw(ProjMixin):
                 raise RuntimeError('Could not find any valid channels for '
                                    'your Raw object. Please contact the '
                                    'MNE-Python developers.')
-        if not self._preloaded:
+        if not self.preload:
             raise RuntimeError('Raw data needs to be preloaded to filter. Use '
                                'preload=True (or string) in the constructor.')
 
@@ -765,7 +523,7 @@ class Raw(ProjMixin):
             supersampled (without applying any filtering). This reduces
             resampling artifacts in stim channels, but may lead to missing
             triggers. If None, stim channels are automatically chosen using
-            mne.fiff.pick_types(raw.info, meg=False, stim=True, exclude=[]).
+            mne.pick_types(raw.info, meg=False, stim=True, exclude=[]).
         n_jobs : int | str
             Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
             is installed properly and CUDA is initialized.
@@ -778,7 +536,7 @@ class Raw(ProjMixin):
         For some data, it may be more accurate to use npad=0 to reduce
         artifacts. This is dataset dependent -- check your data!
         """
-        if not self._preloaded:
+        if not self.preload:
             raise RuntimeError('Can only resample preloaded data')
         sfreq = float(sfreq)
         o_sfreq = float(self.info['sfreq'])
@@ -862,7 +620,7 @@ class Raw(ProjMixin):
         smin = raw.time_as_index(tmin)[0]
         smax = raw.time_as_index(tmax)[0]
         cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
-                                     dtype='int')))
+                                                   dtype='int')))
         cumul_lens = np.cumsum(cumul_lens)
         keepers = np.logical_and(np.less(smin, cumul_lens[1:]),
                                  np.greater_equal(smax, cumul_lens[:-1]))
@@ -873,12 +631,11 @@ class Raw(ProjMixin):
         raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
         raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
         raw._raw_lengths = raw._last_samps - raw._first_samps + 1
-        raw.fids = [f for fi, f in enumerate(raw.fids) if fi in keepers]
         raw.rawdirs = [r for ri, r in enumerate(raw.rawdirs)
                        if ri in keepers]
         raw.first_samp = raw._first_samps[0]
         raw.last_samp = raw.first_samp + (smax - smin)
-        if raw._preloaded:
+        if raw.preload:
             raw._data = raw._data[:, smin:smax + 1]
             raw._times = np.arange(raw.n_times) / raw.info['sfreq']
         return raw
@@ -886,20 +643,24 @@ class Raw(ProjMixin):
     @verbose
     def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
              drop_small_buffer=False, proj=False, format='single',
-             overwrite=False, verbose=None):
+             overwrite=False, split_size='2GB', verbose=None):
         """Save raw data to file
 
         Parameters
         ----------
         fname : string
             File name of the new dataset. This has to be a new filename
-            unless data have been preloaded.
-        picks : list of int
-            Indices of channels to include.
-        tmin : float
-            Time in seconds of first sample to save.
-        tmax : float
-            Time in seconds of last sample to save.
+            unless data have been preloaded. Filenames should end with
+            raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif
+            or raw_tsss.fif.gz.
+        picks : array-like of int | None
+            Indices of channels to include. If None all channels are kept.
+        tmin : float | None
+            Time in seconds of first sample to save. If None first sample
+            is used.
+        tmax : float | None
+            Time in seconds of last sample to save. If None last sample
+            is used.
         buffer_size_sec : float | None
             Size of data chunks in seconds. If None, the buffer size of
             the original file is used.
@@ -923,6 +684,12 @@ class Raw(ProjMixin):
         overwrite : bool
             If True, the destination file (if it exists) will be overwritten.
             If False (default), an error will be raised if the file exists.
+        split_size : string | int
+            Large raw files are automatically split into multiple pieces. This
+            parameter specifies the maximum size of each piece. If the
+            parameter is an integer, it specifies the size in bytes. It is
+            also possible to pass a human-readable string, e.g., 100MB.
+            Note: Due to FIFF file limitations, the maximum split size is 2GB.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to self.verbose.
@@ -936,12 +703,26 @@ class Raw(ProjMixin):
         or all forms of SSS). It is recommended not to concatenate and
         then save raw files for this reason.
         """
-        fname = op.abspath(fname)
-        if not self._preloaded and fname in self.info['filenames']:
+        check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
+                                   'raw.fif.gz', 'raw_sss.fif.gz',
+                                   'raw_tsss.fif.gz'))
+
+        if isinstance(split_size, string_types):
+            exp = dict(MB=20, GB=30).get(split_size[-2:], None)
+            if exp is None:
+                raise ValueError('split_size has to end with either '
+                                 '"MB" or "GB"')
+            split_size = int(float(split_size[:-2]) * 2 ** exp)
+
+        if split_size > 2147483648:
+            raise ValueError('split_size cannot be larger than 2GB')
+
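The string parsing above maps 'MB' to 2 ** 20 and 'GB' to 2 ** 30, so
'100MB' becomes 100 * 2 ** 20 = 104857600 bytes. A standalone sketch of
the same logic (helper name illustrative, not part of the diff):

    def parse_split_size(split_size):
        if isinstance(split_size, str):
            exp = {'MB': 20, 'GB': 30}.get(split_size[-2:])
            if exp is None:
                raise ValueError('split_size has to end with "MB" or "GB"')
            split_size = int(float(split_size[:-2]) * 2 ** exp)
        if split_size > 2 ** 31:  # FIFF format limit, 2GB
            raise ValueError('split_size cannot be larger than 2GB')
        return split_size

    parse_split_size('100MB')  # -> 104857600
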
+        fname = op.realpath(fname)
+        if not self.preload and fname in self._filenames:
             raise ValueError('You cannot save data to the same file.'
                              ' Please use a different filename.')
 
-        if self._preloaded:
+        if self.preload:
             if np.iscomplexobj(self._data):
                 warnings.warn('Saving raw file with complex data. Loading '
                               'with command-line MNE tools will not work.')
@@ -954,6 +735,8 @@ class Raw(ProjMixin):
             raise ValueError('format must be "short", "int", "single", '
                              'or "double"')
         reset_dict = dict(short=False, int=False, single=True, double=True)
+        reset_range = reset_dict[format]
+        data_type = type_dict[format]
 
         data_test = self[0, 0][0]
         if format == 'short' and np.iscomplexobj(data_test):
@@ -977,15 +760,12 @@ class Raw(ProjMixin):
             inv_comp = linalg.inv(self.comp)
             set_current_comp(info, self._orig_comp_grade)
 
-        outfid, cals = start_writing_raw(fname, info, picks, type_dict[format],
-                                         reset_range=reset_dict[format])
         #
         #   Set up the reading parameters
         #
 
         #   Convert to samples
         start = int(floor(tmin * self.info['sfreq']))
-        first_samp = self.first_samp + start
 
         if tmax is None:
             stop = self.last_samp + 1 - self.first_samp
@@ -998,34 +778,11 @@ class Raw(ProjMixin):
             else:
                 buffer_size_sec = 10.0
         buffer_size = int(ceil(buffer_size_sec * self.info['sfreq']))
-        #
-        #   Read and write all the data
-        #
-        if first_samp != 0:
-            write_int(outfid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
-        for first in range(start, stop, buffer_size):
-            last = first + buffer_size
-            if last >= stop:
-                last = stop + 1
-
-            if picks is None:
-                data, times = self[:, first:last]
-            else:
-                data, times = self[picks, first:last]
-
-            if projector is not None:
-                data = np.dot(projector, data)
-
-            if (drop_small_buffer and (first > start)
-                                            and (len(times) < buffer_size)):
-                logger.info('Skipping data chunk due to small buffer ... '
-                            '[done]')
-                break
-            logger.info('Writing ...')
-            write_raw_buffer(outfid, data, cals, format, inv_comp)
-            logger.info('[done]')
 
-        finish_writing_raw(outfid)
+        # write the raw file
+        _write_raw(fname, self, info, picks, format, data_type, reset_range,
+                   start, stop, buffer_size, projector, inv_comp,
+                   drop_small_buffer, split_size, 0, None)
 
     def plot(raw, events=None, duration=10.0, start=0.0, n_channels=20,
              bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
@@ -1115,7 +872,7 @@ class Raw(ProjMixin):
             Apply projection.
         n_fft : int
             Number of points to use in Welch FFT calculations.
-        picks : list | None
+        picks : array-like of int | None
             List of channels to use. Cannot be None if `ax` is supplied. If
             both `picks` and `ax` are None, separate subplots will be created
             for each standard channel type (`mag`, `grad`, and `eeg`).
@@ -1158,7 +915,7 @@ class Raw(ProjMixin):
                               use_first_samp)
 
     def index_as_time(self, index, use_first_samp=False):
-        """Convert time to indices
+        """Convert indices to time
 
         Parameters
         ----------
@@ -1177,7 +934,7 @@ class Raw(ProjMixin):
                               use_first_samp)
 
     def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
-                      return_singular=False):
+                      return_singular=False, picks=None):
         """Estimate rank of the raw data
 
         This function is meant to provide a reasonable estimate of the rank.
@@ -1187,7 +944,7 @@ class Raw(ProjMixin):
         Parameters
         ----------
         tstart : float
-            Start time to use for rank estimation. Defaul is 0.0.
+            Start time to use for rank estimation. Default is 0.0.
         tstop : float | None
             End time to use for rank estimation. Default is 30.0.
             If None, the end time of the raw file is used.
@@ -1199,6 +956,9 @@ class Raw(ProjMixin):
         return_singular : bool
             If True, also return the singular values that were used
             to determine the rank.
+        picks : array-like of int | None
+            The channels to be considered for rank estimation.
+            If None (default), MEG and EEG channels are included.
 
         Returns
         -------
@@ -1225,8 +985,10 @@ class Raw(ProjMixin):
         else:
             stop = min(self.n_times - 1, self.time_as_index(tstop)[0])
         tslice = slice(start, stop + 1)
-        picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
-                           exclude='bads')
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
+                               exclude='bads')
+
         # ensure we don't get a view of data
         if len(picks) == 1:
             return 1.0, 1.0
@@ -1236,10 +998,12 @@ class Raw(ProjMixin):
 
     @property
     def ch_names(self):
+        """Channel names"""
         return self.info['ch_names']
 
     @property
     def n_times(self):
+        """Number of time points"""
         return self.last_samp - self.first_samp + 1
 
     def __len__(self):
@@ -1266,7 +1030,8 @@ class Raw(ProjMixin):
         if bad_file is not None:
             # Check to make sure bad channels are there
             names = frozenset(self.info['ch_names'])
-            bad_names = filter(None, open(bad_file).read().splitlines())
+            with open(bad_file) as fid:
+                bad_names = [l for l in fid.read().splitlines() if l]
             names_there = [ci for ci in bad_names if ci in names]
             count_diff = len(bad_names) - len(names_there)
 
@@ -1274,11 +1039,11 @@ class Raw(ProjMixin):
                 if not force:
                     raise ValueError('Bad channels from:\n%s\n not found '
                                      'in:\n%s' % (bad_file,
-                                                  self.info['filenames'][0]))
+                                                  self._filenames[0]))
                 else:
                     warnings.warn('%d bad channels from:\n%s\nnot found '
                                   'in:\n%s' % (count_diff, bad_file,
-                                               self.info['filenames'][0]))
+                                               self._filenames[0]))
             self.info['bads'] = names_there
         else:
             self.info['bads'] = []
@@ -1310,7 +1075,7 @@ class Raw(ProjMixin):
         _check_raw_compatibility(all_raws)
 
         # deal with preloading data first (while files are separate)
-        all_preloaded = self._preloaded and all(r._preloaded for r in raws)
+        all_preloaded = self.preload and all(r.preload for r in raws)
         if preload is None:
             if all_preloaded:
                 preload = True
@@ -1318,23 +1083,23 @@ class Raw(ProjMixin):
                 preload = False
 
         if preload is False:
-            if self._preloaded:
+            if self.preload:
                 self._data = None
                 self._times = None
-            self._preloaded = False
+            self.preload = False
         else:
             # do the concatenation ourselves since preload might be a string
             nchan = self.info['nchan']
             c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])
             nsamp = c_ns[-1]
 
-            if not self._preloaded:
+            if not self.preload:
                 this_data = self._read_segment()[0]
             else:
                 this_data = self._data
 
             # allocate the buffer
-            if isinstance(preload, basestring):
+            if isinstance(preload, string_types):
                 _data = np.memmap(preload, mode='w+', dtype=this_data.dtype,
                                   shape=(nchan, nsamp))
             else:
@@ -1343,14 +1108,14 @@ class Raw(ProjMixin):
             _data[:, 0:c_ns[0]] = this_data
 
             for ri in range(len(raws)):
-                if not raws[ri]._preloaded:
+                if not raws[ri].preload:
                     # read the data directly into the buffer
                     data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]
                     raws[ri]._read_segment(data_buffer=data_buffer)
                 else:
                     _data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data
             self._data = _data
-            self._preloaded = True
+            self.preload = True
 
         # now combine information from each raw file to construct new self
         for r in raws:
@@ -1358,35 +1123,24 @@ class Raw(ProjMixin):
             self._last_samps = np.r_[self._last_samps, r._last_samps]
             self._raw_lengths = np.r_[self._raw_lengths, r._raw_lengths]
             self.rawdirs += r.rawdirs
-            self.info['filenames'] += r.info['filenames']
-        # reconstruct fids in case some were preloaded and others weren't
-        self._initialize_fids()
+            self._filenames += r._filenames
         self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
 
         # this has to be done after first and last sample are set appropriately
-        if self._preloaded:
+        if self.preload:
             self._times = np.arange(self.n_times) / self.info['sfreq']
 
     def close(self):
-        """Close the files on disk."""
-        [f.close() for f in self.fids]
-        self.fids = []
+        """Clean up the object.
 
-    def copy(self):
-        """ Return copy of Raw instance
+        Does nothing for now.
         """
-        new = deepcopy(self)
-        new._initialize_fids()
-        return new
+        pass
 
-    def _initialize_fids(self):
-        """Initialize self.fids based on self.info['filenames']
+    def copy(self):
+        """ Return copy of Raw instance
         """
-        if not self._preloaded:
-            self.fids = [open(fname, "rb") for fname in self.info['filenames']]
-            [fid.seek(0, 0) for fid in self.fids]
-        else:
-            self.fids = []
+        return deepcopy(self)
 
     def as_data_frame(self, picks=None, start=None, stop=None, scale_time=1e3,
                       scalings=None, use_time_index=True, copy=True):
@@ -1399,7 +1153,7 @@ class Raw(ProjMixin):
 
         Parameters
         ----------
-        picks : None | array of int
+        picks : array-like of int | None
             If None only MEG and EEG channels are kept
             otherwise the channels indices in picks are kept.
         start : int | None
@@ -1427,7 +1181,7 @@ class Raw(ProjMixin):
 
         pd = _check_pandas_installed()
         if picks is None:
-            picks = range(self.info['nchan'])
+            picks = list(range(self.info['nchan']))
 
         data, times = self[picks, start:stop]
 
@@ -1459,7 +1213,7 @@ class Raw(ProjMixin):
         if use_time_index is True:
             if 'time' in df:
                 df['time'] = df['time'].astype(np.int64)
-            with warnings.catch_warnings(True):
+            with warnings.catch_warnings(record=True):
                 df.set_index('time', inplace=True)
 
         return df
@@ -1470,7 +1224,7 @@ class Raw(ProjMixin):
 
         Parameters
         ----------
-        picks : array-like | None
+        picks : array-like of int | None
             Indices of channels to apply. If None, all channels will be
             exported.
         start : int | None
@@ -1506,197 +1260,48 @@ class Raw(ProjMixin):
 
         return raw_ts
 
-    @verbose
-    def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
-                      verbose=None, projector=None):
-        """Read a chunk of raw data
+    def __repr__(self):
+        s = "n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
+                                                self.n_times)
+        return "<Raw  |  %s>" % s
+
+    def add_events(self, events, stim_channel=None):
+        """Add events to stim channel
 
         Parameters
         ----------
-        start : int, (optional)
-            first sample to include (first is 0). If omitted, defaults to the
-            first sample in data.
-        stop : int, (optional)
-            First sample to not include.
-            If omitted, data is included to the end.
-        sel : array, optional
-            Indices of channels to select.
-        data_buffer : array or str, optional
-            numpy array to fill with data read, must have the correct shape.
-            If str, a np.memmap with the correct data type will be used
-            to store the data.
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
-        projector : array
-            SSP operator to apply to the data.
+        events : ndarray, shape (n_events, 3)
+            Events to add. The first column specifies the sample number of
+            each event, the second column is ignored, and the third column
+            provides the event value. If events already exist in the Raw
+            instance at the given sample numbers, the event values will be
+            added together.
+        stim_channel : str | None
+            Name of the stim channel to add to. If None, the config variable
+            'MNE_STIM_CHANNEL' is used. If this is not found, it will default
+            to 'STI 014'.
 
-        Returns
-        -------
-        data : array, [channels x samples]
-           the data matrix (channels x samples).
-        times : array, [samples]
-            returns the time values corresponding to the samples.
+        Notes
+        -----
+        Data must be preloaded in order to add events.
         """
-        #  Initial checks
-        start = int(start)
-        stop = self.n_times if stop is None else min([int(stop), self.n_times])
-
-        if start >= stop:
-            raise ValueError('No data in this range')
-
-        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
-                    (start, stop - 1, start / float(self.info['sfreq']),
-                     (stop - 1) / float(self.info['sfreq'])))
-
-        #  Initialize the data and calibration vector
-        nchan = self.info['nchan']
-
-        n_sel_channels = nchan if sel is None else len(sel)
-        # convert sel to a slice if possible for efficiency
-        if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
-            sel = slice(sel[0], sel[-1] + 1)
-        idx = slice(None, None, None) if sel is None else sel
-        data_shape = (n_sel_channels, stop - start)
-        if isinstance(data_buffer, np.ndarray):
-            if data_buffer.shape != data_shape:
-                raise ValueError('data_buffer has incorrect shape')
-            data = data_buffer
-        else:
-            data = None  # we will allocate it later, once we know the type
-
-        mult = list()
-        for ri in range(len(self._raw_lengths)):
-            mult.append(np.diag(self.cals.ravel()))
-            if self.comp is not None:
-                mult[ri] = np.dot(self.comp, mult[ri])
-            if projector is not None:
-                mult[ri] = np.dot(projector, mult[ri])
-            mult[ri] = mult[ri][idx]
-
-        # deal with having multiple files accessed by the raw object
-        cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
-                                                   dtype='int')))
-        cumul_lens = np.cumsum(cumul_lens)
-        files_used = np.logical_and(np.less(start, cumul_lens[1:]),
-                                    np.greater_equal(stop - 1,
-                                                     cumul_lens[:-1]))
-
-        first_file_used = False
-        s_off = 0
-        dest = 0
-        if isinstance(idx, slice):
-            cals = self.cals.ravel()[idx][:, np.newaxis]
-        else:
-            cals = self.cals.ravel()[:, np.newaxis]
-
-        for fi in np.nonzero(files_used)[0]:
-            start_loc = self._first_samps[fi]
-            # first iteration (only) could start in the middle somewhere
-            if not first_file_used:
-                first_file_used = True
-                start_loc += start - cumul_lens[fi]
-            stop_loc = np.min([stop - 1 - cumul_lens[fi] +
-                               self._first_samps[fi], self._last_samps[fi]])
-            if start_loc < self._first_samps[fi]:
-                raise ValueError('Bad array indexing, could be a bug')
-            if stop_loc > self._last_samps[fi]:
-                raise ValueError('Bad array indexing, could be a bug')
-            if stop_loc < start_loc:
-                raise ValueError('Bad array indexing, could be a bug')
-            len_loc = stop_loc - start_loc + 1
-
-            for this in self.rawdirs[fi]:
-
-                #  Do we need this buffer
-                if this['last'] >= start_loc:
-                    #  The picking logic is a bit complicated
-                    if stop_loc > this['last'] and start_loc < this['first']:
-                        #    We need the whole buffer
-                        first_pick = 0
-                        last_pick = this['nsamp']
-                        logger.debug('W')
-
-                    elif start_loc >= this['first']:
-                        first_pick = start_loc - this['first']
-                        if stop_loc <= this['last']:
-                            #   Something from the middle
-                            last_pick = this['nsamp'] + stop_loc - this['last']
-                            logger.debug('M')
-                        else:
-                            #   From the middle to the end
-                            last_pick = this['nsamp']
-                            logger.debug('E')
-                    else:
-                        #    From the beginning to the middle
-                        first_pick = 0
-                        last_pick = stop_loc - this['first'] + 1
-                        logger.debug('B')
-
-                    #   Now we are ready to pick
-                    picksamp = last_pick - first_pick
-                    if picksamp > 0:
-                        # only read data if it exists
-                        if this['ent'] is not None:
-                            one = read_tag(self.fids[fi], this['ent'].pos,
-                                           shape=(this['nsamp'], nchan),
-                                           rlims=(first_pick, last_pick)).data
-                            if np.isrealobj(one):
-                                dtype = np.float
-                            else:
-                                dtype = np.complex128
-                            one.shape = (picksamp, nchan)
-                            one = one.T.astype(dtype)
-                            # use proj + cal factors in mult
-                            if mult is not None:
-                                one[idx] = np.dot(mult[fi], one)
-                            else:  # apply just the calibration factors
-                                # this logic is designed to limit memory copies
-                                if isinstance(idx, slice):
-                                    # This is a view operation, so it's fast
-                                    one[idx] *= cals
-                                else:
-                                    # Extra operations are actually faster here
-                                    # than creating a new array
-                                    # (fancy indexing)
-                                    one *= cals
-
-                            # if not already done, allocate array with
-                            # right type
-                            data = _allocate_data(data, data_buffer,
-                                                  data_shape, dtype)
-                            if isinstance(idx, slice):
-                                # faster to slice in data than doing
-                                # one = one[idx] sooner
-                                data[:, dest:(dest + picksamp)] = one[idx]
-                            else:
-                                # faster than doing one = one[idx]
-                                data_view = data[:, dest:(dest + picksamp)]
-                                for ii, ix in enumerate(idx):
-                                    data_view[ii] = one[ix]
-                        dest += picksamp
-
-                #   Done?
-                if this['last'] >= stop_loc:
-                    # if not already done, allocate array with float dtype
-                    data = _allocate_data(data, data_buffer, data_shape,
-                                          np.float)
-                    break
-
-            self.fids[fi].seek(0, 0)  # Go back to beginning of the file
-            s_off += len_loc
-            # double-check our math
-            if not s_off == dest:
-                raise ValueError('Incorrect file reading')
-
-        logger.info('[done]')
-        times = np.arange(start, stop) / self.info['sfreq']
-
-        return data, times
-
-    def __repr__(self):
-        s = "n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
-                                                self.n_times)
-        return "<Raw  |  %s>" % s
+        if not self.preload:
+            raise RuntimeError('cannot add events unless data are preloaded')
+        events = np.asarray(events)
+        if events.ndim != 2 or events.shape[1] != 3:
+            raise ValueError('events must be shape (n_events, 3)')
+        stim_channel = _get_stim_channel(stim_channel)
+        pick = pick_channels(self.ch_names, stim_channel)
+        if len(pick) == 0:
+            raise ValueError('Channel %s not found' % stim_channel)
+        pick = pick[0]
+        idx = events[:, 0].astype(int)
+        if np.any(idx < self.first_samp) or np.any(idx > self.last_samp):
+            raise ValueError('event sample numbers must be between %s and %s'
+                             % (self.first_samp, self.last_samp))
+        if not all(idx == events[:, 0]):
+            raise ValueError('event sample numbers must be integers')
+        self._data[pick, idx - self.first_samp] += events[:, 2]
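A usage sketch for the new method (channel name and offsets illustrative;
the Raw instance must be preloaded and contain the stim channel):

    import numpy as np
    sample = raw.first_samp + 1000          # sample numbers are absolute
    events = np.array([[sample, 0, 5]])     # add trigger value 5
    raw.add_events(events, stim_channel='STI 014')
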
 
 
 def set_eeg_reference(raw, ref_channels, copy=True):
@@ -1724,7 +1329,7 @@ def set_eeg_reference(raw, ref_channels, copy=True):
         Array of reference data subtracted from eeg channels.
     """
     # Check to see that raw data is preloaded
-    if not raw._preloaded:
+    if not raw.preload:
         raise RuntimeError('Raw data needs to be preloaded. Use '
                            'preload=True (or string) in the constructor.')
     # Make sure that reference channels are loaded as list of string
@@ -1756,7 +1361,7 @@ def set_eeg_reference(raw, ref_channels, copy=True):
 def _allocate_data(data, data_buffer, data_shape, dtype):
     if data is None:
         # if not already done, allocate array with right type
-        if isinstance(data_buffer, basestring):
+        if isinstance(data_buffer, string_types):
             # use a memmap
             data = np.memmap(data_buffer, mode='w+',
                              dtype=dtype, shape=data_shape)
@@ -1787,7 +1392,7 @@ def _time_as_index(times, sfreq, first_samp=0, use_first_samp=False):
 
 
 def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
-    """Convert time to indices
+    """Convert indices to time
 
     Parameters
     ----------
@@ -1808,6 +1413,7 @@ def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
 
 class _RawShell():
     """Used for creating a temporary raw object"""
+
     def __init__(self):
         self.first_samp = None
         self.last_samp = None
@@ -1822,14 +1428,102 @@ class _RawShell():
 
 ###############################################################################
 # Writing
+def _write_raw(fname, raw, info, picks, format, data_type, reset_range, start,
+               stop, buffer_size, projector, inv_comp, drop_small_buffer,
+               split_size, part_idx, prev_fname):
+    """Write raw file with splitting
+    """
 
-from .write import (start_file, end_file, start_block, end_block,
-                    write_dau_pack16, write_float, write_double,
-                    write_complex64, write_complex128, write_int, write_id)
+    if part_idx > 0:
+        # insert index in filename
+        path, base = op.split(fname)
+        idx = base.find('.')
+        use_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
+                                                base[idx + 1:]))
+    else:
+        use_fname = fname
+    logger.info('Writing %s' % use_fname)
+
+    meas_id = info['meas_id']
+    if meas_id is None:
+        meas_id = 0
+
+    fid, cals = _start_writing_raw(use_fname, info, picks, data_type,
+                                   reset_range)
+
+    first_samp = raw.first_samp + start
+    if first_samp != 0:
+        write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
+
+    # previous file name and id
+    if part_idx > 0 and prev_fname is not None:
+        start_block(fid, FIFF.FIFFB_REF)
+        write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)
+        write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)
+        write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
+        write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)
+        end_block(fid, FIFF.FIFFB_REF)
+
+    pos_prev = None
+    for first in range(start, stop, buffer_size):
+        last = first + buffer_size
+        if last >= stop:
+            last = stop + 1
+
+        if picks is None:
+            data, times = raw[:, first:last]
+        else:
+            data, times = raw[picks, first:last]
+
+        if projector is not None:
+            data = np.dot(projector, data)
+
+        if ((drop_small_buffer and (first > start)
+             and (len(times) < buffer_size))):
+            logger.info('Skipping data chunk due to small buffer ... '
+                        '[done]')
+            break
+        logger.info('Writing ...')
+
+        if pos_prev is None:
+            pos_prev = fid.tell()
 
+        _write_raw_buffer(fid, data, cals, format, inv_comp)
 
-def start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
-                      reset_range=True):
+        pos = fid.tell()
+        this_buff_size_bytes = pos - pos_prev
+        if this_buff_size_bytes > split_size / 2:
+            raise ValueError('buffer size is too large for the given split '
+                             'size: decrease "buffer_size_sec" or increase '
+                             '"split_size".')
+        if pos > split_size:
+            logger.warning('file is larger than "split_size"')
+
+        # Split files if necessary, leave some space for next file info
+        if pos >= split_size - this_buff_size_bytes - 2 ** 20:
+            next_fname, next_idx = _write_raw(
+                fname, raw, info, picks, format, data_type, reset_range,
+                first + buffer_size, stop, buffer_size, projector, inv_comp,
+                drop_small_buffer, split_size, part_idx + 1, use_fname)
+
+            start_block(fid, FIFF.FIFFB_REF)
+            write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
+            write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
+            write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
+            write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
+            end_block(fid, FIFF.FIFFB_REF)
+            break
+
+        pos_prev = pos
+
+    logger.info('Closing %s [done]' % use_fname)
+    _finish_writing_raw(fid)
+
+    return use_fname, part_idx
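The `part_idx > 0` branch above inserts the part index before the first
dot of the base name, so successive splits are named, e.g. (name
illustrative):

    # part_idx 0 -> 'test_raw.fif'
    # part_idx 1 -> 'test_raw-1.fif'
    # part_idx 2 -> 'test_raw-2.fif'
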
+
+
+def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
+                       reset_range=True):
     """Start write raw data in file
 
     Data will be written in float
@@ -1904,7 +1598,7 @@ def start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
     return fid, cals
 
 
-def write_raw_buffer(fid, buf, cals, format, inv_comp):
+def _write_raw_buffer(fid, buf, cals, format, inv_comp):
     """Write raw buffer
 
     Parameters
@@ -1954,14 +1648,8 @@ def write_raw_buffer(fid, buf, cals, format, inv_comp):
 
     write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
 
-    # make sure we didn't go over the 2GB file size limit
-    pos = fid.tell()
-    if pos >= 2147483647:  # np.iinfo(np.int32).max
-        raise IOError('2GB file size limit reached. Support for larger '
-                      'raw files will be added in the future.')
-
 
-def finish_writing_raw(fid):
+def _finish_writing_raw(fid):
     """Finish writing raw FIF file
 
     Parameters
@@ -2005,7 +1693,7 @@ def _check_raw_compatibility(raw):
         raw[0].orig_format = 'unknown'
 
 
-def concatenate_raws(raws, preload=None):
+def concatenate_raws(raws, preload=None, events_list=None):
     """Concatenate raw instances as if they were continuous. Note that raws[0]
     is modified in-place to achieve the concatenation.
 
@@ -2013,19 +1701,32 @@ def concatenate_raws(raws, preload=None):
     ----------
     raws : list
         list of Raw instances to concatenate (in order).
-
     preload : bool, or None
         If None, preload status is inferred using the preload status of the
         raw files passed in. True or False sets the resulting raw file to
         have or not have data preloaded.
+    events_list : None | list
+        The events to concatenate. Defaults to None.
 
     Returns
     -------
     raw : instance of Raw
         The result of the concatenation (first Raw instance passed in).
+    events : ndarray of int, shape (n_events, 3)
+        The events. Only returned if `events_list` is not None.
     """
+    if events_list is not None:
+        if len(events_list) != len(raws):
+            raise ValueError('`raws` and `events_list` are required '
+                             'to be of the same length')
+        first, last = zip(*[(r.first_samp, r.last_samp) for r in raws])
+        events = concatenate_events(events_list, first, last)
     raws[0].append(raws[1:], preload)
-    return raws[0]
+
+    if events_list is None:
+        return raws[0]
+    else:
+        return raws[0], events
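A usage sketch of the extended signature (variable names illustrative);
concatenate_events offsets each events array so the sample numbers remain
valid in the concatenated recording:

    raw, events = concatenate_raws([raw1, raw2],
                                   events_list=[events1, events2])
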
 
 
 def get_chpi_positions(raw, t_step=None):
@@ -2064,7 +1765,7 @@ def get_chpi_positions(raw, t_step=None):
     may not use the same reference point as the rest of mne-python (i.e.,
     it could be referenced relative to raw.first_samp or something else).
     """
-    if isinstance(raw, Raw):
+    if isinstance(raw, _BaseRaw):
         # for simplicity, we'll sample at 1 sec intervals like maxfilter
         if t_step is None:
             t_step = 1.0
@@ -2081,8 +1782,8 @@ def get_chpi_positions(raw, t_step=None):
         data = np.array([d[0][:, 0] for d in data])
         data = np.c_[t, data]
     else:
-        if not isinstance(raw, basestring):
-            raise TypeError('raw must be an instance of fiff.Raw or string')
+        if not isinstance(raw, string_types):
+            raise TypeError('raw must be an instance of Raw or string')
         if not op.isfile(raw):
             raise IOError('File "%s" does not exist' % raw)
         if t_step is not None:
@@ -2101,8 +1802,8 @@ def _quart_to_rot(q):
     q2 = q[:, 1]
     q3 = q[:, 2]
     rotation = np.array((np.c_[(q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 ** 2,
-                               2 * (q1 * q2 - q0 * q3),
-                               2 * (q1 * q3 + q0 * q2))],
+                                2 * (q1 * q2 - q0 * q3),
+                                2 * (q1 * q3 + q0 * q2))],
                          np.c_[(2 * (q1 * q2 + q0 * q3),
                                 q0 ** 2 + q2 ** 2 - q1 ** 2 - q3 ** 2,
                                 2 * (q2 * q3 - q0 * q1))],
diff --git a/mne/fiff/brainvision/__init__.py b/mne/io/brainvision/__init__.py
similarity index 100%
rename from mne/fiff/brainvision/__init__.py
rename to mne/io/brainvision/__init__.py
diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py
new file mode 100644
index 0000000..cc33b16
--- /dev/null
+++ b/mne/io/brainvision/brainvision.py
@@ -0,0 +1,684 @@
+"""Conversion tool from Brain Vision EEG to FIF"""
+
+# Authors: Teon Brooks <teon at nyu.edu>
+#          Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import time
+import re
+import warnings
+
+import numpy as np
+
+from ...coreg import get_ras_to_neuromag_trans, read_elp
+from ...transforms import als_ras_trans, apply_trans
+from ...utils import verbose, logger
+from ..constants import FIFF
+from ..meas_info import Info
+from ..base import _BaseRaw
+
+from ...externals.six import StringIO, u
+from ...externals.six.moves import configparser
+
+
+class RawBrainVision(_BaseRaw):
+    """Raw object from Brain Vision EEG file
+
+    Parameters
+    ----------
+    vhdr_fname : str
+        Path to the EEG header file.
+    elp_fname : str | None
+        Path to the elp file containing electrode positions.
+        If None, sensor locations are (0,0,0).
+    elp_names : list | None
+        A list of channel names in the same order as the points in the elp
+        file. Electrode positions should be specified with the same names as
+        in the vhdr file, and fiducials should be specified as "lpa", "nasion",
+        "rpa". ELP positions with other names are ignored. If elp_names is not
+        None and channels are missing, a KeyError is raised.
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+    reference : None | str
+        Name of the electrode which served as the reference in the recording.
+        If a name is provided, a corresponding channel is added and its data
+        is set to 0. This is useful for later re-referencing. The name should
+        correspond to a name in elp_names.
+    eog : list of str
+        Names of channels that should be designated EOG channels. Names should
+        correspond to the vhdr file (default: ['HEOGL', 'HEOGR', 'VEOGb']).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
+    """
+    @verbose
+    def __init__(self, vhdr_fname, elp_fname=None, elp_names=None,
+                 preload=False, reference=None,
+                 eog=['HEOGL', 'HEOGR', 'VEOGb'], ch_names=None, verbose=None):
+        # backwards compatibility
+        if ch_names is not None:
+            if elp_names is not None:
+                err = ("ch_names is a deprecated parameter, don't specify "
+                       "ch_names if elp_names are specified.")
+                raise TypeError(err)
+            msg = "The ch_names parameter is deprecated. Use elp_names."
+            warnings.warn(msg, DeprecationWarning)
+            elp_names = ['nasion', 'lpa', 'rpa', None, None, None, None,
+                         None] + list(ch_names)
+
+        # Preliminary Raw attributes
+        self._events = np.empty((0, 3))
+        self.preload = False
+
+        # Channel info and events
+        logger.info('Extracting EEG parameters from %s...' % vhdr_fname)
+        vhdr_fname = os.path.abspath(vhdr_fname)
+        self.info, self._eeg_info, events = _get_eeg_info(vhdr_fname,
+                                                          elp_fname, elp_names,
+                                                          reference, eog)
+        self.set_brainvision_events(events)
+        logger.info('Creating Raw.info structure...')
+
+        # Raw attributes
+        self.verbose = verbose
+        self._filenames = list()
+        self._projector = None
+        self.comp = None  # no compensation for EEG
+        self.proj = False
+        self.first_samp = 0
+        with open(self.info['file_id'], 'rb') as f:
+            f.seek(0, os.SEEK_END)
+            n_samples = f.tell()
+        dtype = int(self._eeg_info['dtype'][-1])
+        n_chan = self.info['nchan']
+        self.last_samp = (n_samples // (dtype * (n_chan - 1))) - 1
+        self._reference = reference
+
+        if preload:
+            self.preload = preload
+            logger.info('Reading raw data from %s...' % vhdr_fname)
+            self._data, _ = self._read_segment()
+            assert len(self._data) == self.info['nchan']
+
+            # Add time info
+            self._times = np.arange(self.first_samp, self.last_samp + 1,
+                                    dtype=np.float64)
+            self._times /= self.info['sfreq']
+            logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
+                        % (self.first_samp, self.last_samp,
+                           float(self.first_samp) / self.info['sfreq'],
+                           float(self.last_samp) / self.info['sfreq']))
+        logger.info('Ready.')
+
+    def __repr__(self):
+        n_chan = self.info['nchan']
+        data_range = self.last_samp - self.first_samp + 1
+        s = ('%r' % os.path.basename(self.info['file_id']),
+             "n_channels x n_times : %s x %s" % (n_chan, data_range))
+        return "<RawEEG  |  %s>" % ', '.join(s)
+
+    def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
+                      projector=None):
+        """Read a chunk of raw data
+
+        Parameters
+        ----------
+        start : int, (optional)
+            first sample to include (first is 0). If omitted, defaults to the
+            first sample in data.
+        stop : int, (optional)
+            First sample to not include.
+            If omitted, data is included to the end.
+        sel : array, optional
+            Indices of channels to select.
+        projector : array
+            SSP operator to apply to the data.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        data : array, shape (n_channels, n_samples)
+           The data.
+        times : array, shape (n_samples,)
+            returns the time values corresponding to the samples.
+        """
+        if sel is not None:
+            if len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
+                return (666, 666)
+        if projector is not None:
+            raise NotImplementedError('Currently does not handle projections.')
+        if stop is None:
+            stop = self.last_samp + 1
+        elif stop > self.last_samp + 1:
+            stop = self.last_samp + 1
+
+        #  Initial checks
+        start = int(start)
+        stop = int(stop)
+        if start >= stop:
+            raise ValueError('No data in this range')
+
+        # assemble channel information
+        eeg_info = self._eeg_info
+        sfreq = self.info['sfreq']
+        chs = self.info['chs']
+        if self._reference:
+            chs = chs[:-1]
+        if len(self._events):
+            chs = chs[:-1]
+        n_eeg = len(chs)
+        cals = np.atleast_2d([chan_info['cal'] for chan_info in chs])
+        mults = np.atleast_2d([chan_info['unit_mul'] for chan_info in chs])
+
+        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
+                    (start, stop - 1, start / float(sfreq),
+                     (stop - 1) / float(sfreq)))
+
+        # read data
+        dtype = np.dtype(eeg_info['dtype'])
+        buffer_size = (stop - start)
+        pointer = start * n_eeg * dtype.itemsize
+        with open(self.info['file_id'], 'rb') as f:
+            f.seek(pointer)
+            # extract data
+            data = np.fromfile(f, dtype=dtype, count=buffer_size * n_eeg)
+        if eeg_info['data_orientation'] == 'MULTIPLEXED':
+            data = data.reshape((n_eeg, -1), order='F')
+        elif eeg_info['data_orientation'] == 'VECTORIZED':
+            data = data.reshape((n_eeg, -1), order='C')
+
+        gains = cals * mults
+        data = data * gains.T
+
+        # add reference channel and stim channel (if applicable)
+        data_segments = [data]
+        if self._reference:
+            shape = (1, data.shape[1])
+            ref_channel = np.zeros(shape)
+            data_segments.append(ref_channel)
+        if len(self._events):
+            stim_channel = _synthesize_stim_channel(self._events, start, stop)
+            data_segments.append(stim_channel)
+        if len(data_segments) > 1:
+            data = np.vstack(data_segments)
+
+        if sel is not None:
+            data = data[sel]
+
+        logger.info('[done]')
+        times = np.arange(start, stop, dtype=float) / sfreq
+
+        return data, times
+
+    def get_brainvision_events(self):
+        """Retrieve the events associated with the Brain Vision Raw object
+
+        Returns
+        -------
+        events : array, shape (n_events, 3)
+            Events, each row consisting of an (onset, duration, trigger)
+            sequence.
+        """
+        return self._events.copy()
+
+    def set_brainvision_events(self, events):
+        """Set the events (automatically updates the synthesized stim channel)
+
+        Parameters
+        ----------
+        events : array, shape (n_events, 3)
+            Events, each row consisting of an (onset, duration, trigger)
+            sequence.
+        """
+        events = np.copy(events)
+        if events.ndim != 2 or events.shape[1] != 3:
+            raise ValueError("[n_events x 3] shaped array required")
+
+        # update info based on presence of stim channel
+        had_events = bool(len(self._events))
+        has_events = bool(len(events))
+        if had_events and not has_events:  # remove stim channel
+            if self.info['ch_names'][-1] != 'STI 014':
+                err = "Last channel is not stim channel; info was modified"
+                raise RuntimeError(err)
+            self.info['nchan'] -= 1
+            del self.info['ch_names'][-1]
+            del self.info['chs'][-1]
+            if self.preload:
+                self._data = self._data[:-1]
+        elif has_events and not had_events:  # add stim channel
+            idx = len(self.info['chs']) + 1
+            chan_info = {'ch_name': 'STI 014',
+                         'kind': FIFF.FIFFV_STIM_CH,
+                         'coil_type': FIFF.FIFFV_COIL_NONE,
+                         'logno': idx,
+                         'scanno': idx,
+                         'cal': 1,
+                         'range': 1,
+                         'unit_mul': 0,
+                         'unit': FIFF.FIFF_UNIT_NONE,
+                         'eeg_loc': np.zeros(3),
+                         'loc': np.zeros(12)}
+            self.info['nchan'] += 1
+            self.info['ch_names'].append(chan_info['ch_name'])
+            self.info['chs'].append(chan_info)
+            if self.preload:
+                shape = (1, self._data.shape[1])
+                self._data = np.vstack((self._data, np.empty(shape)))
+
+        # update events
+        self._events = events
+        if has_events and self.preload:
+            start = self.first_samp
+            stop = self.last_samp + 1
+            self._data[-1] = _synthesize_stim_channel(events, start, stop)
+
+
+def _read_vmrk_events(fname):
+    """Read events from a vmrk file
+
+    Parameters
+    ----------
+    fname : str
+        vmrk file to be read.
+
+    Returns
+    -------
+    events : array, shape (n_events, 3)
+        An array containing the whole recording's events, each row representing
+        an event as (onset, duration, trigger) sequence.
+    """
+    # read vmrk file
+    with open(fname) as fid:
+        txt = fid.read()
+
+    start_tag = 'Brain Vision Data Exchange Marker File, Version 1.0'
+    if not txt.startswith(start_tag):
+        raise ValueError("vmrk file should start with %r" % start_tag)
+
+    # extract Marker Infos block
+    m = re.search("\[Marker Infos\]", txt)
+    if not m:
+        return np.zeros(0)
+    mk_txt = txt[m.end():]
+    m = re.search("\[.*\]", mk_txt)
+    if m:
+        mk_txt = mk_txt[:m.start()]
+
+    # extract event information
+    items = re.findall("^Mk\d+=(.*)", mk_txt, re.MULTILINE)
+    events = []
+    for info in items:
+        mtype, mdesc, onset, duration = info.split(',')[:4]
+        if mtype == 'Stimulus':
+            trigger = int(re.findall('S\s*?(\d+)', mdesc)[0])
+            onset = int(onset)
+            duration = int(duration)
+            events.append((onset, duration, trigger))
+
+    events = np.array(events)
+    return events
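A sketch of the marker payload parsed by the loop above (line content
illustrative, following the Brain Vision marker format 'Mk<N>=<payload>'):

    import re
    payload = 'Stimulus,S  1,1000,1,0'   # from a line like 'Mk2=Stimulus,...'
    mtype, mdesc, onset, duration = payload.split(',')[:4]
    trigger = int(re.findall('S\s*?(\d+)', mdesc)[0])
    event = (int(onset), int(duration), trigger)  # -> (1000, 1, 1)
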
+
+
+def _synthesize_stim_channel(events, start, stop):
+    """Synthesize a stim channel from events read from a vmrk file
+
+    Parameters
+    ----------
+    events : array, shape (n_events, 3)
+        Each row representing an event as (onset, duration, trigger) sequence
+        (the format returned by _read_vmrk_events).
+    start : int
+        First sample to return.
+    stop : int
+        Last sample to return.
+
+    Returns
+    -------
+    stim_channel : array, shape (n_samples,)
+        An array containing the whole recording's event marking
+    """
+    # select events overlapping buffer
+    onset = events[:, 0]
+    offset = onset + events[:, 1]
+    idx = np.logical_and(onset < stop, offset > start)
+    events = events[idx]
+
+    # make onset relative to buffer
+    events[:, 0] -= start
+
+    # fix onsets before buffer start
+    idx = events[:, 0] < 0
+    events[idx, 0] = 0
+
+    # create output buffer
+    stim_channel = np.zeros(stop - start)
+    for onset, duration, trigger in events:
+        stim_channel[onset:onset + duration] = trigger
+
+    return stim_channel
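A worked example of the synthesis (values illustrative):

    import numpy as np
    ev = np.array([[2, 3, 1],    # trigger 1 at sample 2, lasting 3 samples
                   [7, 1, 5]])   # trigger 5 at sample 7, lasting 1 sample
    _synthesize_stim_channel(ev, 0, 10)
    # -> array([0., 0., 1., 1., 1., 0., 0., 5., 0., 0.])
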
+
+
+def _get_elp_locs(elp_fname, elp_names):
+    """Read a Polhemus ascii file
+
+    Parameters
+    ----------
+    elp_fname : str
+        Path to head shape file acquired from Polhemus system and saved in
+        ascii format.
+    elp_names : list
+        A list in order of EEG electrodes found in the Polhemus digitizer file.
+
+    Returns
+    -------
+    ch_locs : dict
+        Dictionary whose keys are the names from elp_names and whose values
+        are the coordinates from the elp file transformed to Neuromag space.
+    """
+    coords_orig = read_elp(elp_fname)
+    coords_ras = apply_trans(als_ras_trans, coords_orig)
+    chs_ras = dict(zip(elp_names, coords_ras))
+    nasion = chs_ras['nasion']
+    lpa = chs_ras['lpa']
+    rpa = chs_ras['rpa']
+    trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+    coords_neuromag = apply_trans(trans, coords_ras)
+    chs_neuromag = dict(zip(elp_names, coords_neuromag))
+    return chs_neuromag
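A usage sketch (file name and channel order illustrative):

    elp_names = ['nasion', 'lpa', 'rpa', 'Fp1', 'Fp2']
    ch_locs = _get_elp_locs('montage.elp', elp_names)
    ch_locs['Fp1']  # 3D position in Neuromag head coordinates
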
+
+
+def _get_eeg_info(vhdr_fname, elp_fname, elp_names, reference, eog):
+    """Extracts all the information from the header file.
+
+    Parameters
+    ----------
+    vhdr_fname : str
+        Raw EEG header to be read.
+    elp_fname : str | None
+        Path to the elp file containing electrode positions.
+        If None, sensor locations are (0, 0, 0).
+    elp_names : list | None
+        A list of channel names in the same order as the points in the elp
+        file. Electrode positions should be specified with the same names as
+        in the vhdr file, and fiducials should be specified as "lpa", "nasion",
+        "rpa". ELP positions with other names are ignored. If elp_names is not
+        None and channels are missing, a KeyError is raised.
+    reference : None | str
+        Name of the electrode which served as the reference in the recording.
+        If a name is provided, a corresponding channel is added and its data
+        is set to 0. This is useful for later re-referencing. The name should
+        correspond to a name in elp_names.
+    eog : list of str
+        Names of channels that should be designated EOG channels. Names should
+        correspond to the vhdr file.
+
+    Returns
+    -------
+    info : Info
+        The measurement info.
+    eeg_info : dict
+        A dict containing Brain Vision specific parameters.
+    events : array, shape (n_events, 3)
+        Events from the corresponding vmrk file.
+    """
+
+    info = Info()
+    # Some keys to be consistent with FIF measurement info
+    info['meas_id'] = None
+    info['projs'] = []
+    info['comps'] = []
+    info['bads'] = []
+    info['acq_pars'], info['acq_stim'] = None, None
+    info['filename'] = vhdr_fname
+    info['ctf_head_t'] = None
+    info['dev_ctf_t'] = []
+    info['dig'] = None
+    info['dev_head_t'] = None
+    info['proj_id'] = None
+    info['proj_name'] = None
+    info['experimenter'] = None
+    info['description'] = None
+    info['buffer_size_sec'] = 10.
+    info['orig_blocks'] = None
+    info['line_freq'] = None
+    info['subject_info'] = None
+
+    eeg_info = {}
+
+    with open(vhdr_fname, 'r') as f:
+        # extract the first section to resemble a cfg
+        l = f.readline().strip()
+        assert l == 'Brain Vision Data Exchange Header File Version 1.0'
+        settings = f.read()
+
+    params, settings = settings.split('[Comment]')
+    cfg = configparser.ConfigParser()
+    if hasattr(cfg, 'read_file'):  # newer API
+        cfg.read_file(StringIO(params))
+    else:
+        cfg.readfp(StringIO(params))
+
+    # get sampling info
+    # Sampling interval is given in microsec
+    sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
+    sfreq = int(sfreq)
+    n_data_chan = cfg.getint('Common Infos', 'NumberOfChannels')
+    n_eeg_chan = n_data_chan + bool(reference)
+
+    # check binary format
+    assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
+    eeg_info['data_orientation'] = cfg.get('Common Infos', 'DataOrientation')
+    if not (eeg_info['data_orientation'] == 'MULTIPLEXED' or
+            eeg_info['data_orientation'] == 'VECTORIZED'):
+        raise NotImplementedError('Data Orientation %s is not supported'
+                                  % eeg_info['data_orientation'])
+
+    binary_format = cfg.get('Binary Infos', 'BinaryFormat')
+    if binary_format == 'INT_16':
+        eeg_info['dtype'] = '<i2'
+    elif binary_format == 'INT_32':
+        eeg_info['dtype'] = '<i4'
+    elif binary_format == 'IEEE_FLOAT_32':
+        eeg_info['dtype'] = '<f4'
+    else:
+        raise NotImplementedError('Datatype %s is not supported'
+                                  % binary_format)
+
+    # load channel labels
+    ch_names = ['UNKNOWN'] * n_eeg_chan
+    cals = np.empty(n_eeg_chan)
+    cals[:] = np.nan
+    units = ['UNKNOWN'] * n_eeg_chan
+    for chan, props in cfg.items('Channel Infos'):
+        n = int(re.findall(r'ch(\d+)', chan)[0])
+        name, _, resolution, unit = props.split(',')[:4]
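+        # e.g. 'Fp1,,0.5,µV' gives name='Fp1', resolution=0.5, unit='µV'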
+        ch_names[n - 1] = name
+        cals[n - 1] = float(resolution)
+        unit = unit.replace('\xc2', '')  # strip UTF-8 lead byte from 'µV'
+        if u(unit) == u('\xb5V'):
+            units[n - 1] = 1e-6
+        elif unit == 'V':
+            units[n - 1] = 0
+        else:
+            units[n - 1] = unit
+
+    # add reference channel info
+    if reference:
+        ch_names[-1] = reference
+        cals[-1] = cals[-2]
+        units[-1] = units[-2]
+
+    # Attempt to extract filtering info from the header. If not found, both
+    # highpass and lowpass are set to None.
+    settings = settings.splitlines()
+    idx = None
+    if 'Channels' in settings:
+        idx = settings.index('Channels')
+        settings = settings[idx + 1:]
+        for idx, setting in enumerate(settings):
+            if re.match(r'#\s+Name', setting):
+                break
+            else:
+                idx = None
+    if idx is not None:
+        lowpass = []
+        highpass = []
+        for i, ch in enumerate(ch_names, 1):
+            if ch == reference:
+                continue
+            line = settings[idx + i].split()
+            assert ch in line
+            highpass.append(line[5])
+            lowpass.append(line[6])
+        if len(highpass) == 0:
+            info['highpass'] = None
+        elif len(set(highpass)) == 1:
+            if highpass[0] == 'NaN':
+                info['highpass'] = None
+            elif highpass[0] == 'DC':
+                info['highpass'] = 0
+            else:
+                info['highpass'] = int(highpass[0])
+        else:
+            info['highpass'] = np.min(highpass)
+            warnings.warn('Channels contain different highpass filters. '
+                          'Lowest filter setting will be stored.')
+        if len(lowpass) == 0:
+            info['lowpass'] = None
+        elif len(set(lowpass)) == 1:
+            if lowpass[0] == 'NaN':
+                info['lowpass'] = None
+            else:
+                info['lowpass'] = int(lowpass[0])
+        else:
+            info['lowpass'] = np.min(lowpass)
+            warnings.warn('Channels contain different lowpass filters. '
+                          'Lowest filter setting will be stored.')
+    else:
+        info['highpass'] = None
+        info['lowpass'] = None
+
+    # locate EEG and marker files
+    path = os.path.dirname(vhdr_fname)
+    info['file_id'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
+    eeg_info['marker_id'] = os.path.join(path, cfg.get('Common Infos',
+                                                       'MarkerFile'))
+    info['meas_date'] = int(time.time())
+
+    # Creates a list of dicts of eeg channels for raw.info
+    logger.info('Setting channel info structure...')
+    info['chs'] = []
+    info['nchan'] = n_eeg_chan
+    info['ch_names'] = ch_names
+    info['sfreq'] = sfreq
+    if elp_fname and elp_names:
+        ch_locs = _get_elp_locs(elp_fname, elp_names)
+        info['dig'] = [{'r': ch_locs['nasion'],
+                        'ident': FIFF.FIFFV_POINT_NASION,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame': FIFF.FIFFV_COORD_HEAD},
+                       {'r': ch_locs['lpa'], 'ident': FIFF.FIFFV_POINT_LPA,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame': FIFF.FIFFV_COORD_HEAD},
+                       {'r': ch_locs['rpa'], 'ident': FIFF.FIFFV_POINT_RPA,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame': FIFF.FIFFV_COORD_HEAD}]
+    else:
+        ch_locs = None
+
+    missing_positions = []
+    idxs = range(1, len(ch_names) + 1)
+    for idx, ch_name, cal, unit_mul in zip(idxs, ch_names, cals, units):
+        is_eog = ch_name in eog
+        if ch_locs is None:
+            loc = np.zeros(3)
+        elif ch_name in ch_locs:
+            loc = ch_locs[ch_name]
+        else:
+            loc = np.zeros(3)
+            if not is_eog:
+                missing_positions.append(ch_name)
+
+        if is_eog:
+            kind = FIFF.FIFFV_EOG_CH
+        else:
+            kind = FIFF.FIFFV_EEG_CH
+
+        chan_info = {'ch_name': ch_name,
+                     'coil_type': FIFF.FIFFV_COIL_EEG,
+                     'kind': kind,
+                     'logno': idx,
+                     'scanno': idx,
+                     'cal': cal,
+                     'range': 1.,
+                     'unit_mul': unit_mul,
+                     'unit': FIFF.FIFF_UNIT_V,
+                     'coord_frame': FIFF.FIFFV_COORD_HEAD,
+                     'eeg_loc': loc,
+                     'loc': np.hstack((loc, np.zeros(9)))}
+
+        info['chs'].append(chan_info)
+
+    # raise error if positions are missing
+    if missing_positions:
+        err = ("The following positions are missing from the ELP "
+               "definitions: %s. If those channels lack positions because "
+               "they are EOG channels use the eog "
+               "parameter" % str(missing_positions))
+        raise KeyError(err)
+
+    # for stim channel
+    events = _read_vmrk_events(eeg_info['marker_id'])
+
+    return info, eeg_info, events
+
+
+def read_raw_brainvision(vhdr_fname, elp_fname=None, elp_names=None,
+                         preload=False, reference=None,
+                         eog=['HEOGL', 'HEOGR', 'VEOGb'], ch_names=None,
+                         verbose=None):
+    """Reader for Brain Vision EEG file
+
+    Parameters
+    ----------
+    vhdr_fname : str
+        Path to the EEG header file.
+    elp_fname : str | None
+        Path to the elp file containing electrode positions.
+        If None, sensor locations are (0, 0, 0).
+    elp_names : list | None
+        A list of channel names in the same order as the points in the elp
+        file. Electrode positions should be specified with the same names as
+        in the vhdr file, and fiducials should be specified as "lpa", "nasion",
+        "rpa". ELP positions with other names are ignored. If elp_names is not
+        None and channels are missing, a KeyError is raised.
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+    reference : None | str
+        Name of the electrode which served as the reference in the recording.
+        If a name is provided, a corresponding channel is added and its data
+        is set to 0. This is useful for later re-referencing. The name should
+        correspond to a name in elp_names.
+    eog : list of str
+        Names of channels that should be designated EOG channels. Names should
+        correspond to the vhdr file (default: ['HEOGL', 'HEOGR', 'VEOGb']).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of RawBrainVision
+        A Raw object containing Brain Vision data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
+    """
+    raw = RawBrainVision(vhdr_fname, elp_fname, elp_names, preload,
+                         reference, eog, ch_names, verbose)
+    return raw
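
For orientation, a minimal usage sketch of the reader above ('subject.vhdr' is
a hypothetical path). Without an elp file all sensor locations default to
(0, 0, 0); with elp_fname/elp_names, every non-EOG channel must have a
position or a KeyError is raised:

    from mne.io import read_raw_brainvision

    raw = read_raw_brainvision('subject.vhdr', preload=True,
                               eog=['HEOGL', 'HEOGR', 'VEOGb'])
    print(raw.info['sfreq'], raw.info['nchan'])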
diff --git a/mne/fiff/brainvision/tests/__init__.py b/mne/io/brainvision/tests/__init__.py
similarity index 100%
rename from mne/fiff/brainvision/tests/__init__.py
rename to mne/io/brainvision/tests/__init__.py
diff --git a/mne/fiff/brainvision/tests/data/test.eeg b/mne/io/brainvision/tests/data/test.eeg
similarity index 100%
rename from mne/fiff/brainvision/tests/data/test.eeg
rename to mne/io/brainvision/tests/data/test.eeg
diff --git a/mne/fiff/brainvision/tests/data/test.vhdr b/mne/io/brainvision/tests/data/test.vhdr
similarity index 100%
rename from mne/fiff/brainvision/tests/data/test.vhdr
rename to mne/io/brainvision/tests/data/test.vhdr
diff --git a/mne/fiff/brainvision/tests/data/test.vmrk b/mne/io/brainvision/tests/data/test.vmrk
similarity index 100%
rename from mne/fiff/brainvision/tests/data/test.vmrk
rename to mne/io/brainvision/tests/data/test.vmrk
diff --git a/mne/fiff/brainvision/tests/data/test_bin_raw.fif b/mne/io/brainvision/tests/data/test_bin_raw.fif
similarity index 100%
rename from mne/fiff/brainvision/tests/data/test_bin_raw.fif
rename to mne/io/brainvision/tests/data/test_bin_raw.fif
diff --git a/mne/fiff/brainvision/tests/data/test_elp.txt b/mne/io/brainvision/tests/data/test_elp.txt
similarity index 100%
rename from mne/fiff/brainvision/tests/data/test_elp.txt
rename to mne/io/brainvision/tests/data/test_elp.txt
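
The renames above track the 0.8 move of the fiff subpackage to mne.io; the
matching one-line change in user code:

    # old: from mne.fiff import Raw
    from mne.io import Raw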
diff --git a/mne/io/brainvision/tests/test_brainvision.py b/mne/io/brainvision/tests/test_brainvision.py
new file mode 100644
index 0000000..3ffbed7
--- /dev/null
+++ b/mne/io/brainvision/tests/test_brainvision.py
@@ -0,0 +1,157 @@
+"""Data Equivalence Tests"""
+from __future__ import print_function
+
+# Author: Teon Brooks <teon at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import inspect
+
+from nose.tools import assert_equal
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+
+import mne
+from mne.utils import _TempDir
+from mne import pick_types
+from mne.io.constants import FIFF
+from mne.io import Raw
+from mne.io import read_raw_brainvision
+
+FILE = inspect.getfile(inspect.currentframe())
+data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
+vhdr_path = op.join(data_dir, 'test.vhdr')
+elp_path = op.join(data_dir, 'test_elp.txt')
+eeg_bin = op.join(data_dir, 'test_bin_raw.fif')
+elp_names = ['nasion', 'lpa', 'rpa', None, None, None, None, None,
+             'FP1', 'FP2', 'F7', 'GND', 'F8',
+             'FC5', 'F3', 'Fz', 'F4', 'FC6',
+             'FC1', 'FCz', 'FC2', 'CP5', 'C3',
+             'Cz', 'C4', 'CP6', 'CP1', 'CPz',
+             'CP2', 'P7', 'P3', 'Pz', 'P4',
+             'P8', 'O1', 'POz', 'O2', 'A1',
+             'ReRef', 'HL', 'HR', 'Vb']
+eog = ('HL', 'HR', 'Vb')
+
+tempdir = _TempDir()
+
+
+def test_brainvision_data():
+    """Test reading raw Brain Vision files
+    """
+    raw_py = read_raw_brainvision(vhdr_path, elp_path, elp_names, preload=True)
+    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
+    data_py, times_py = raw_py[picks]
+
+    print(raw_py)  # to test repr
+    print(raw_py.info)  # to test Info repr
+
+    # compare with a file that was generated using MNE-C
+    raw_bin = Raw(eeg_bin, preload=True)
+    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
+    data_bin, times_bin = raw_bin[picks]
+
+    assert_array_almost_equal(data_py, data_bin)
+    assert_array_almost_equal(times_py, times_bin)
+
+    # Make sure EOG channels are marked correctly
+    raw_py = read_raw_brainvision(vhdr_path, elp_path, elp_names, eog=eog,
+                                  preload=True)
+    for ch in raw_py.info['chs']:
+        if ch['ch_name'] in eog:
+            assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)
+        elif ch['ch_name'] in elp_names:
+            assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)
+        elif ch['ch_name'] == 'STI 014':
+            assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)
+        else:
+            raise RuntimeError("Unknown Channel: %s" % ch['ch_name'])
+
+
+def test_events():
+    """Test reading and modifying events"""
+    raw = read_raw_brainvision(vhdr_path, preload=True)
+
+    # check that events are read and stim channel is synthesized correctly
+    events = raw.get_brainvision_events()
+    assert_array_equal(events, [[ 487, 1, 253],
+                                [ 497, 1, 255],
+                                [1770, 1, 254],
+                                [1780, 1, 255],
+                                [3253, 1, 254],
+                                [3263, 1, 255],
+                                [4936, 1, 253],
+                                [4946, 1, 255],
+                                [6620, 1, 254],
+                                [6630, 1, 255]])
+
+    mne_events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
+
+    # modify events and check that stim channel is updated
+    index = events[:, 2] == 255
+    events = events[index]
+    raw.set_brainvision_events(events)
+    mne_events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
+
+    # remove events
+    nchan = raw.info['nchan']
+    ch_name = raw.info['chs'][-2]['ch_name']
+    events = np.empty((0, 3))
+    raw.set_brainvision_events(events)
+    assert_equal(raw.info['nchan'], nchan - 1)
+    assert_equal(len(raw._data), nchan - 1)
+    assert_equal(raw.info['chs'][-1]['ch_name'], ch_name)
+    fname = op.join(tempdir, 'evt_raw.fif')
+    raw.save(fname)
+
+    # add events back in
+    events = [[10, 1, 2]]
+    raw.set_brainvision_events(events)
+    assert_equal(raw.info['nchan'], nchan)
+    assert_equal(len(raw._data), nchan)
+    assert_equal(raw.info['chs'][-1]['ch_name'], 'STI 014')
+
+
+def test_read_segment():
+    """Test writing raw eeg files when preload is False
+    """
+    raw1 = read_raw_brainvision(vhdr_path, preload=False)
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
+    raw1.save(raw1_file, overwrite=True)
+    raw11 = Raw(raw1_file, preload=True)
+    data1, times1 = raw1[:, :]
+    data11, times11 = raw11[:, :]
+    assert_array_almost_equal(data1, data11, 8)
+    assert_array_almost_equal(times1, times11)
+    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
+
+    raw2 = read_raw_brainvision(vhdr_path, preload=True)
+    raw2_file = op.join(tempdir, 'test2-raw.fif')
+    raw2.save(raw2_file, overwrite=True)
+    data2, times2 = raw2[:, :]
+    assert_array_equal(data1, data2)
+    assert_array_equal(times1, times2)
+
+    raw1 = Raw(raw1_file, preload=True)
+    raw2 = Raw(raw2_file, preload=True)
+    assert_array_equal(raw1._data, raw2._data)
+
+    # save with buffer size smaller than file
+    raw3_file = op.join(tempdir, 'test3-raw.fif')
+    raw3 = read_raw_brainvision(vhdr_path)
+    raw3.save(raw3_file, buffer_size_sec=2)
+    raw3 = Raw(raw3_file, preload=True)
+    assert_array_equal(raw3._data, raw1._data)
+
+    # add reference channel
+    raw4_file = op.join(tempdir, 'test4-raw.fif')
+    raw4 = read_raw_brainvision(vhdr_path, reference='A1')
+    raw4.save(raw4_file, buffer_size_sec=2)
+    raw4 = Raw(raw4_file, preload=True)
+    ref_idx = raw4.ch_names.index('A1')
+    assert_equal(len(raw4._data), len(raw1._data) + 1)
+    ref_data, _ = raw4[ref_idx]
+    assert_array_equal(ref_data, 0)
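
A condensed sketch of the event round-trip these tests exercise
('subject.vhdr' is a hypothetical path; the methods are the ones covered by
test_events above):

    import mne
    from mne.io import read_raw_brainvision

    raw = read_raw_brainvision('subject.vhdr', preload=True)
    events = raw.get_brainvision_events()  # (n_events, 3) from the vmrk file
    events = events[events[:, 2] == 255]   # keep a single trigger value
    raw.set_brainvision_events(events)     # re-synthesize 'STI 014'
    found = mne.find_events(raw, stim_channel='STI 014')
    assert (found[:, 2] == 255).all()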
diff --git a/mne/io/bti/__init__.py b/mne/io/bti/__init__.py
new file mode 100644
index 0000000..1272b62
--- /dev/null
+++ b/mne/io/bti/__init__.py
@@ -0,0 +1,5 @@
+"""Bti module for conversion to FIF"""
+
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+
+from .bti import read_raw_bti
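
A minimal usage sketch for the 4D/BTi reader, assuming the conventional file
names next to a posted data file (all names hypothetical):

    from mne.io import read_raw_bti

    raw = read_raw_bti('c,rfDC', config_fname='config',
                       head_shape_fname='hs_file')
    print(raw)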
diff --git a/mne/fiff/bti/raw.py b/mne/io/bti/bti.py
similarity index 58%
rename from mne/fiff/bti/raw.py
rename to mne/io/bti/bti.py
index c0f0431..b6e88bb 100644
--- a/mne/fiff/bti/raw.py
+++ b/mne/io/bti/bti.py
@@ -1,7 +1,7 @@
 
-# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Yuval Harpaz <yuvharpaz at gmail.com>
 #
@@ -12,8 +12,8 @@ from itertools import count
 import numpy as np
 
 from ...utils import logger, verbose, sum_squared
-from .. import Raw
-from .. import FIFF
+from ..constants import FIFF
+from ..base import _BaseRaw
 from .constants import BTI
 from .read import (read_int32, read_int16, read_str, read_float, read_double,
                    read_transform, read_char, read_int64, read_uint16,
@@ -22,7 +22,7 @@ from .read import (read_int32, read_int16, read_str, read_float, read_double,
 from .transforms import (bti_identity_trans, bti_to_vv_trans,
                          bti_to_vv_coil_trans, inverse_trans, merge_trans)
 from ..meas_info import Info
-
+from ...externals import six
 
 FIFF_INFO_CHS_FIELDS = ('loc', 'ch_name', 'unit_mul', 'coil_trans',
                         'coord_frame', 'coil_type', 'range', 'unit', 'cal',
@@ -38,12 +38,12 @@ FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD)
 BTI_WH2500_REF_MAG = ['MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA']
 BTI_WH2500_REF_GRAD = ['GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA']
 
-dtypes = zip(range(1, 5), ('>i2', '>i4', '>f4', '>f8'))
+dtypes = zip(list(range(1, 5)), ('>i2', '>i4', '>f4', '>f8'))
 DTYPES = dict((i, np.dtype(t)) for i, t in dtypes)
 
 RAW_INFO_FIELDS = ['dev_head_t', 'nchan', 'bads', 'projs', 'dev_ctf_t',
                    'meas_date', 'meas_id', 'dig', 'sfreq', 'highpass',
-                   'filenames', 'comps', 'chs', 'ch_names', 'file_id',
+                   'comps', 'chs', 'ch_names', 'file_id',
                    'lowpass', 'acq_pars', 'acq_stim', 'filename',
                    'ctf_head_t']
 
@@ -71,19 +71,19 @@ def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')):
         elif name == 'TRIGGER':
             name = 'STI 014'
         elif any([name == k for k in eog_ch]):
-            name = 'EOG %3.3d' % eog.next()
+            name = 'EOG %3.3d' % six.advance_iterator(eog)
         elif name == ecg_ch:
             name = 'ECG 001'
         elif name.startswith('E'):
-            name = 'EEG %3.3d' % eeg.next()
+            name = 'EEG %3.3d' % six.advance_iterator(eeg)
         elif name == 'UACurrent':
             name = 'UTL 001'
         elif name.startswith('M'):
-            name = 'RFM %3.3d' % ref_mag.next()
+            name = 'RFM %3.3d' % six.advance_iterator(ref_mag)
         elif name.startswith('G'):
-            name = 'RFG %3.3d' % ref_grad.next()
+            name = 'RFG %3.3d' % six.advance_iterator(ref_grad)
         elif name.startswith('X'):
-            name = 'EXT %3.3d' % ext.next()
+            name = 'EXT %3.3d' % six.advance_iterator(ext)
 
         new += [name]
 
@@ -154,9 +154,9 @@ def _setup_head_shape(fname, use_hpi=True):
     idx_points, dig_points, t = _convert_head_shape(idx_points, dig_points)
     all_points = np.r_[idx_points, dig_points].astype('>f4')
 
-    idx_idents = range(1, 4) + range(1, (len(idx_points) + 1) - 3)
+    idx_idents = list(range(1, 4)) + list(range(1, (len(idx_points) + 1) - 3))
     dig = []
-    for idx in xrange(all_points.shape[0]):
+    for idx in range(all_points.shape[0]):
         point_info = dict(zip(FIFF_INFO_DIG_FIELDS, FIFF_INFO_DIG_DEFAULTS))
         point_info['r'] = all_points[idx]
         if idx < 3:
@@ -218,343 +218,342 @@ def _read_config(fname):
         The config blocks found.
 
     """
-    fid = open(fname, 'rb')
-
-    cfg = dict()
-
-    cfg['hdr'] = {'version': read_int16(fid),
-                  'site_name': read_str(fid, 32),
-                  'dap_hostname': read_str(fid, 16),
-                  'sys_type': read_int16(fid),
-                  'sys_options': read_int32(fid),
-                  'supply_freq': read_int16(fid),
-                  'total_chans': read_int16(fid),
-                  'system_fixed_gain': read_float(fid),
-                  'volts_per_bit': read_float(fid),
-                  'total_sensors': read_int16(fid),
-                  'total_user_blocks': read_int16(fid),
-                  'next_der_chan_no': read_int16(fid)}
-
-    fid.seek(2, 1)
-
-    cfg['checksum'] = read_uint32(fid)
-    cfg['reserved'] = read_char(fid, 32)
-    cfg['transforms'] = [read_transform(fid) for t in
-                         range(cfg['hdr']['total_sensors'])]
-
-    cfg['user_blocks'] = dict()
-    for block in range(cfg['hdr']['total_user_blocks']):
-        ub = dict()
-
-        ub['hdr'] = {'nbytes': read_int32(fid),
-                     'kind': read_str(fid, 20),
-                     'checksum': read_int32(fid),
-                     'username': read_str(fid, 32),
-                     'timestamp': read_int32(fid),
-                     'user_space_size': read_int32(fid),
-                     'reserved': read_char(fid, 32)}
+    with open(fname, 'rb') as fid:
+        cfg = dict()
+        cfg['hdr'] = {'version': read_int16(fid),
+                      'site_name': read_str(fid, 32),
+                      'dap_hostname': read_str(fid, 16),
+                      'sys_type': read_int16(fid),
+                      'sys_options': read_int32(fid),
+                      'supply_freq': read_int16(fid),
+                      'total_chans': read_int16(fid),
+                      'system_fixed_gain': read_float(fid),
+                      'volts_per_bit': read_float(fid),
+                      'total_sensors': read_int16(fid),
+                      'total_user_blocks': read_int16(fid),
+                      'next_der_chan_no': read_int16(fid)}
 
-        _correct_offset(fid)
-        kind = ub['hdr'].pop('kind')
-        if not kind:  # make sure reading goes right. Should never be empty
-            raise RuntimeError('Could not read user block. Probably you'
-                               ' acquired data using a BTi version currently'
-                               'not supported. Please contact the mne-python'
-                               ' developers.')
-        dta, cfg['user_blocks'][kind] = dict(), ub
-        if kind in [v for k, v in BTI.items() if k[:5] == 'UB_B_']:
-            if kind == BTI.UB_B_MAG_INFO:
-                dta['version'] = read_int32(fid)
-                fid.seek(20, 1)
-                dta['headers'] = list()
-                for hdr in range(6):
-                    d = {'name': read_str(fid, 16),
-                         'transform': read_transform(fid),
-                         'units_per_bit': read_float(fid)}
-                    dta['headers'] += [d]
+        fid.seek(2, 1)
+
+        cfg['checksum'] = read_uint32(fid)
+        cfg['reserved'] = read_char(fid, 32)
+        cfg['transforms'] = [read_transform(fid) for t in
+                             range(cfg['hdr']['total_sensors'])]
+
+        cfg['user_blocks'] = dict()
+        for block in range(cfg['hdr']['total_user_blocks']):
+            ub = dict()
+
+            ub['hdr'] = {'nbytes': read_int32(fid),
+                         'kind': read_str(fid, 20),
+                         'checksum': read_int32(fid),
+                         'username': read_str(fid, 32),
+                         'timestamp': read_int32(fid),
+                         'user_space_size': read_int32(fid),
+                         'reserved': read_char(fid, 32)}
+
+            _correct_offset(fid)
+            kind = ub['hdr'].pop('kind')
+            if not kind:  # make sure reading goes right. Should never be empty
+                raise RuntimeError('Could not read user block. Probably you '
+                                   'acquired data using a BTi version '
+                                   'currently not supported. Please contact '
+                                   'the mne-python developers.')
+            dta, cfg['user_blocks'][kind] = dict(), ub
+            if kind in [v for k, v in BTI.items() if k[:5] == 'UB_B_']:
+                if kind == BTI.UB_B_MAG_INFO:
+                    dta['version'] = read_int32(fid)
                     fid.seek(20, 1)
+                    dta['headers'] = list()
+                    for hdr in range(6):
+                        d = {'name': read_str(fid, 16),
+                             'transform': read_transform(fid),
+                             'units_per_bit': read_float(fid)}
+                        dta['headers'] += [d]
+                        fid.seek(20, 1)
+
+                elif kind == BTI.UB_B_COH_POINTS:
+                    dta['n_points'] = read_int32(fid)
+                    dta['status'] = read_int32(fid)
+                    dta['points'] = []
+                    for pnt in range(16):
+                        d = {'pos': read_double_matrix(fid, 1, 3),
+                             'direction': read_double_matrix(fid, 1, 3),
+                             'error': read_double(fid)}
+                        dta['points'] += [d]
+
+                elif kind == BTI.UB_B_CCP_XFM_BLOCK:
+                    dta['method'] = read_int32(fid)
+                    # handle difference between Linux (0) and Solaris (4)
+                    size = 0 if ub['hdr']['user_space_size'] == 132 else 4
+                    fid.seek(size, 1)
+                    dta['transform'] = read_transform(fid)
+
+                elif kind == BTI.UB_B_EEG_LOCS:
+                    dta['electrodes'] = []
+                    while True:
+                        d = {'label': read_str(fid, 16),
+                             'location': read_double_matrix(fid, 1, 3)}
+                        if not d['label']:
+                            break
+                        dta['electrodes'] += [d]
+
+                elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER,
+                              BTI.UB_B_WHS_SUBSYS_VER]:
+                    dta['version'] = read_int16(fid)
+                    dta['struct_size'] = read_int16(fid)
+                    dta['entries'] = read_int16(fid)
 
-            elif kind == BTI.UB_B_COH_POINTS:
-                dta['n_points'] = read_int32(fid)
-                dta['status'] = read_int32(fid)
-                dta['points'] = []
-                for pnt in xrange(16):
-                    d = {'pos': read_double_matrix(fid, 1, 3),
-                         'direction': read_double_matrix(fid, 1, 3),
-                         'error': read_double(fid)}
-                    dta['points'] += [d]
-
-            elif kind == BTI.UB_B_CCP_XFM_BLOCK:
-                dta['method'] = read_int32(fid)
-                # handle difference btw/ linux (0) and solaris (4)
-                size = 0 if ub['hdr']['user_space_size'] == 132 else 4
-                fid.seek(size, 1)
-                dta['transform'] = read_transform(fid)
-
-            elif kind == BTI.UB_B_EEG_LOCS:
-                dta['electrodes'] = []
-                while True:
-                    d = {'label': read_str(fid, 16),
-                         'location': read_double_matrix(fid, 1, 3)}
-                    if not d['label']:
-                        break
-                    dta['electrodes'] += [d]
-
-            elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER,
-                          BTI.UB_B_WHS_SUBSYS_VER]:
-                dta['version'] = read_int16(fid)
-                dta['struct_size'] = read_int16(fid)
-                dta['entries'] = read_int16(fid)
-
-                fid.seek(8, 1)
-
-            elif kind == BTI.UB_B_WHC_CHAN_MAP:
-                num_channels = None
-                for name, data in cfg['user_blocks'].items():
-                    if name == BTI.UB_B_WHC_CHAN_MAP_VER:
-                        num_channels = data['entries']
-                        break
-
-                if num_channels is None:
-                    raise ValueError('Cannot find block %s to determine number'
-                                     'of channels' % BTI.UB_B_WHC_CHAN_MAP_VER)
-
-                dta['channels'] = list()
-                for i in xrange(num_channels):
-                    d = {'subsys_type': read_int16(fid),
-                         'subsys_num': read_int16(fid),
-                         'card_num': read_int16(fid),
-                         'chan_num': read_int16(fid),
-                         'recdspnum': read_int16(fid)}
-                    dta['channels'] += [d]
                     fid.seek(8, 1)
 
-            elif kind == BTI.UB_B_WHS_SUBSYS:
-                num_subsys = None
-                for name, data in cfg['user_blocks'].items():
-                    if name == BTI.UB_B_WHS_SUBSYS_VER:
-                        num_subsys = data['entries']
-                        break
-
-                if num_subsys is None:
-                    raise ValueError('Cannot find block %s to determine'
-                                     ' number of subsystems'
-                                     % BTI.UB_B_WHS_SUBSYS_VER)
-
-                dta['subsys'] = list()
-                for sub_key in range(num_subsys):
-                    d = {'subsys_type': read_int16(fid),
-                         'subsys_num': read_int16(fid),
-                         'cards_per_sys': read_int16(fid),
-                         'channels_per_card': read_int16(fid),
-                         'card_version': read_int16(fid)}
-
+                elif kind == BTI.UB_B_WHC_CHAN_MAP:
+                    num_channels = None
+                    for name, data in cfg['user_blocks'].items():
+                        if name == BTI.UB_B_WHC_CHAN_MAP_VER:
+                            num_channels = data['entries']
+                            break
+
+                    if num_channels is None:
+                        raise ValueError('Cannot find block %s to determine '
+                                         'number of channels'
+                                         % BTI.UB_B_WHC_CHAN_MAP_VER)
+
+                    dta['channels'] = list()
+                    for i in range(num_channels):
+                        d = {'subsys_type': read_int16(fid),
+                             'subsys_num': read_int16(fid),
+                             'card_num': read_int16(fid),
+                             'chan_num': read_int16(fid),
+                             'recdspnum': read_int16(fid)}
+                        dta['channels'] += [d]
+                        fid.seek(8, 1)
+
+                elif kind == BTI.UB_B_WHS_SUBSYS:
+                    num_subsys = None
+                    for name, data in cfg['user_blocks'].items():
+                        if name == BTI.UB_B_WHS_SUBSYS_VER:
+                            num_subsys = data['entries']
+                            break
+
+                    if num_subsys is None:
+                        raise ValueError('Cannot find block %s to determine'
+                                         ' number of subsystems'
+                                         % BTI.UB_B_WHS_SUBSYS_VER)
+
+                    dta['subsys'] = list()
+                    for sub_key in range(num_subsys):
+                        d = {'subsys_type': read_int16(fid),
+                             'subsys_num': read_int16(fid),
+                             'cards_per_sys': read_int16(fid),
+                             'channels_per_card': read_int16(fid),
+                             'card_version': read_int16(fid)}
+
+                        fid.seek(2, 1)
+
+                        d.update({'offsetdacgain': read_float(fid),
+                                  'squid_type': read_int32(fid),
+                                  'timesliceoffset': read_int16(fid),
+                                  'padding': read_int16(fid),
+                                  'volts_per_bit': read_float(fid)})
+
+                        dta['subsys'] += [d]
+
+                elif kind == BTI.UB_B_CH_LABELS:
+                    dta['version'] = read_int32(fid)
+                    dta['entries'] = read_int32(fid)
+                    fid.seek(16, 1)
+
+                    dta['labels'] = list()
+                    for label in range(dta['entries']):
+                        dta['labels'] += [read_str(fid, 16)]
+
+                elif kind == BTI.UB_B_CALIBRATION:
+                    dta['sensor_no'] = read_int16(fid)
                     fid.seek(2, 1)
+                    dta['timestamp'] = read_int32(fid)
+                    dta['logdir'] = read_str(fid, 256)
+
+                elif kind == BTI.UB_B_SYS_CONFIG_TIME:
+                    # handle difference between Linux (256) and Solaris (512)
+                    size = 256 if ub['hdr']['user_space_size'] == 260 else 512
+                    dta['sysconfig_name'] = read_str(fid, size)
+                    dta['timestamp'] = read_int32(fid)
+
+                elif kind == BTI.UB_B_DELTA_ENABLED:
+                    dta['delta_enabled'] = read_int16(fid)
+
+                elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]:
+                    dta['hdr'] = {'version': read_int32(fid),
+                                  'entry_size': read_int32(fid),
+                                  'n_entries': read_int32(fid),
+                                  'filtername': read_str(fid, 16),
+                                  'n_e_values': read_int32(fid),
+                                  'reserved': read_str(fid, 28)}
+
+                    if dta['hdr']['version'] == 2:
+                        size = 16
+                        dta['ch_names'] = [read_str(fid, size) for ch in
+                                           range(dta['hdr']['n_entries'])]
+                        dta['e_ch_names'] = [read_str(fid, size) for ch in
+                                             range(dta['hdr']['n_e_values'])]
+
+                        rows = dta['hdr']['n_entries']
+                        cols = dta['hdr']['n_e_values']
+                        dta['etable'] = read_float_matrix(fid, rows, cols)
+                    else:  # handle MAGNES2500 naming scheme
+                        dta['ch_names'] = ['WH2500'] * dta['hdr']['n_e_values']
+                        dta['hdr']['n_e_values'] = 6
+                        dta['e_ch_names'] = BTI_WH2500_REF_MAG
+                        rows = dta['hdr']['n_entries']
+                        cols = dta['hdr']['n_e_values']
+                        dta['etable'] = read_float_matrix(fid, rows, cols)
+
+                        _correct_offset(fid)
+
+                elif any([kind == BTI.UB_B_WEIGHTS_USED,
+                          kind[:4] == BTI.UB_B_WEIGHT_TABLE]):
+                    dta['hdr'] = {'version': read_int32(fid),
+                                  'entry_size': read_int32(fid),
+                                  'n_entries': read_int32(fid),
+                                  'name': read_str(fid, 32),
+                                  'description': read_str(fid, 80),
+                                  'n_anlg': read_int32(fid),
+                                  'n_dsp': read_int32(fid),
+                                  'reserved': read_str(fid, 72)}
+
+                    if dta['hdr']['version'] == 2:
+                        dta['ch_names'] = [read_str(fid, 16) for ch in
+                                           range(dta['hdr']['n_entries'])]
+                        dta['anlg_ch_names'] = [read_str(fid, 16) for ch in
+                                                range(dta['hdr']['n_anlg'])]
+
+                        dta['dsp_ch_names'] = [read_str(fid, 16) for ch in
+                                               range(dta['hdr']['n_dsp'])]
+
+                        rows = dta['hdr']['n_entries']
+                        cols = dta['hdr']['n_dsp']
+                        dta['dsp_wts'] = read_float_matrix(fid, rows, cols)
+                        cols = dta['hdr']['n_anlg']
+                        dta['anlg_wts'] = read_int16_matrix(fid, rows, cols)
+
+                    else:  # handle MAGNES2500 naming scheme
+                        dta['ch_names'] = ['WH2500'] * dta['hdr']['n_entries']
+                        dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3]
+                        dta['hdr']['n_anlg'] = len(dta['anlg_ch_names'])
+                        dta['dsp_ch_names'] = BTI_WH2500_REF_GRAD
+                        dta['hdr']['n_dsp'] = len(dta['dsp_ch_names'])
+                        dta['anlg_wts'] = np.zeros((dta['hdr']['n_entries'],
+                                                    dta['hdr']['n_anlg']),
+                                                   dtype='i2')
+                        dta['dsp_wts'] = np.zeros((dta['hdr']['n_entries'],
+                                                   dta['hdr']['n_dsp']),
+                                                  dtype='f4')
+                        for n in range(dta['hdr']['n_entries']):
+                            dta['anlg_wts'][n] = read_int16_matrix(fid, 1,
+                                                       dta['hdr']['n_anlg'])
+                            read_int16(fid)
+                            dta['dsp_wts'][n] = read_float_matrix(fid, 1,
+                                                        dta['hdr']['n_dsp'])
+
+                        _correct_offset(fid)
+
+                elif kind == BTI.UB_B_TRIG_MASK:
+                    dta['version'] = read_int32(fid)
+                    dta['entries'] = read_int32(fid)
+                    fid.seek(16, 1)
+
+                    dta['masks'] = []
+                    for entry in range(dta['entries']):
+                        d = {'name': read_str(fid, 20),
+                             'nbits': read_uint16(fid),
+                             'shift': read_uint16(fid),
+                             'mask': read_uint32(fid)}
+                        dta['masks'] += [d]
+                        fid.seek(8, 1)
+
+            else:
+                dta['unknown'] = {'hdr': read_char(fid,
+                                  ub['hdr']['user_space_size'])}
+
+            ub.update(dta)  # finally update the userblock data
+            _correct_offset(fid)  # after reading.
+
+        cfg['chs'] = list()
+
+        # prepare reading channels
+        def dev_header(x):
+            return {'size': read_int32(x),
+                    'checksum': read_int32(x),
+                    'reserved': read_str(x, 32)}
+
+        for channel in range(cfg['hdr']['total_chans']):
+            ch = {'name': read_str(fid, 16),
+                  'chan_no': read_int16(fid),
+                  'ch_type': read_uint16(fid),
+                  'sensor_no': read_int16(fid),
+                  'data': dict()}
 
-                    d.update({'offsetdacgain': read_float(fid),
-                              'squid_type': read_int32(fid),
-                              'timesliceoffset': read_int16(fid),
-                              'padding': read_int16(fid),
-                              'volts_per_bit': read_float(fid)})
-
-                    dta['subsys'] += [d]
-
-            elif kind == BTI.UB_B_CH_LABELS:
-                dta['version'] = read_int32(fid)
-                dta['entries'] = read_int32(fid)
-                fid.seek(16, 1)
-
-                dta['labels'] = list()
-                for label in xrange(dta['entries']):
-                    dta['labels'] += [read_str(fid, 16)]
-
-            elif kind == BTI.UB_B_CALIBRATION:
-                dta['sensor_no'] = read_int16(fid)
-                fid.seek(2, 1)
-                dta['timestamp'] = read_int32(fid)
-                dta['logdir'] = read_str(fid, 256)
-
-            elif kind == BTI.UB_B_SYS_CONFIG_TIME:
-                # handle difference btw/ linux (256) and solaris (512)
-                size = 256 if ub['hdr']['user_space_size'] == 260 else 512
-                dta['sysconfig_name'] = read_str(fid, size)
-                dta['timestamp'] = read_int32(fid)
-
-            elif kind == BTI.UB_B_DELTA_ENABLED:
-                dta['delta_enabled'] = read_int16(fid)
-
-            elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]:
-                dta['hdr'] = {'version': read_int32(fid),
-                              'entry_size': read_int32(fid),
-                              'n_entries': read_int32(fid),
-                              'filtername': read_str(fid, 16),
-                              'n_e_values': read_int32(fid),
-                              'reserved': read_str(fid, 28)}
-
-                if dta['hdr']['version'] == 2:
-                    size = 16
-                    dta['ch_names'] = [read_str(fid, size) for ch in
-                                       range(dta['hdr']['n_entries'])]
-                    dta['e_ch_names'] = [read_str(fid, size) for ch in
-                                         range(dta['hdr']['n_e_values'])]
-
-                    rows = dta['hdr']['n_entries']
-                    cols = dta['hdr']['n_e_values']
-                    dta['etable'] = read_float_matrix(fid, rows, cols)
-                else:  # handle MAGNES2500 naming scheme
-                    dta['ch_names'] = ['WH2500'] * dta['hdr']['n_e_values']
-                    dta['hdr']['n_e_values'] = 6
-                    dta['e_ch_names'] = BTI_WH2500_REF_MAG
-                    rows = dta['hdr']['n_entries']
-                    cols = dta['hdr']['n_e_values']
-                    dta['etable'] = read_float_matrix(fid, rows, cols)
-
-                    _correct_offset(fid)
-
-            elif any([kind == BTI.UB_B_WEIGHTS_USED,
-                      kind[:4] == BTI.UB_B_WEIGHT_TABLE]):
-                dta['hdr'] = {'version': read_int32(fid),
-                              'entry_size': read_int32(fid),
-                              'n_entries': read_int32(fid),
-                              'name': read_str(fid, 32),
-                              'description': read_str(fid, 80),
-                              'n_anlg': read_int32(fid),
-                              'n_dsp': read_int32(fid),
-                              'reserved': read_str(fid, 72)}
-
-                if dta['hdr']['version'] == 2:
-                    dta['ch_names'] = [read_str(fid, 16) for ch in
-                                       range(dta['hdr']['n_entries'])]
-                    dta['anlg_ch_names'] = [read_str(fid, 16) for ch in
-                                            range(dta['hdr']['n_anlg'])]
-
-                    dta['dsp_ch_names'] = [read_str(fid, 16) for ch in
-                                           range(dta['hdr']['n_dsp'])]
-
-                    rows = dta['hdr']['n_entries']
-                    cols = dta['hdr']['n_dsp']
-                    dta['dsp_wts'] = read_float_matrix(fid, rows, cols)
-                    cols = dta['hdr']['n_anlg']
-                    dta['anlg_wts'] = read_int16_matrix(fid, rows, cols)
-
-                else:  # handle MAGNES2500 naming scheme
-                    dta['ch_names'] = ['WH2500'] * dta['hdr']['n_entries']
-                    dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3]
-                    dta['hdr']['n_anlg'] = len(dta['anlg_ch_names'])
-                    dta['dsp_ch_names'] = BTI_WH2500_REF_GRAD
-                    dta['hdr.n_dsp'] = len(dta['dsp_ch_names'])
-                    dta['anlg_wts'] = np.zeros((dta['hdr']['n_entries'],
-                                                dta['hdr']['n_anlg']),
-                                               dtype='i2')
-                    dta['dsp_wts'] = np.zeros((dta['hdr']['n_entries'],
-                                               dta['hdr']['n_dsp']),
-                                              dtype='f4')
-                    for n in range(dta['hdr']['n_entries']):
-                        dta['anlg_wts'][d] = read_int16_matrix(fid, 1,
-                                                    dta['hdr']['n_anlg'])
-                        read_int16(fid)
-                        dta['dsp_wts'][d] = read_float_matrix(fid, 1,
-                                                    dta['hdr']['n_dsp'])
-
-                    _correct_offset(fid)
-
-            elif kind == BTI.UB_B_TRIG_MASK:
-                dta['version'] = read_int32(fid)
-                dta['entries'] = read_int32(fid)
-                fid.seek(16, 1)
-
-                dta['masks'] = []
-                for entry in range(dta['entries']):
-                    d = {'name': read_str(fid, 20),
-                         'nbits': read_uint16(fid),
-                         'shift': read_uint16(fid),
-                         'mask': read_uint32(fid)}
-                    dta['masks'] += [d]
-                    fid.seek(8, 1)
-
-        else:
-            dta['unknown'] = {'hdr': read_char(fid,
-                              ub['hdr']['user_space_size'])}
-
-        ub.update(dta)  # finally update the userblock data
-        _correct_offset(fid)  # after reading.
-
-    cfg['chs'] = list()
-
-    # prepare reading channels
-    dev_header = lambda x: {'size': read_int32(x),
-                            'checksum': read_int32(x),
-                            'reserved': read_str(x, 32)}
-
-    for channel in range(cfg['hdr']['total_chans']):
-        ch = {'name': read_str(fid, 16),
-              'chan_no': read_int16(fid),
-              'ch_type': read_uint16(fid),
-              'sensor_no': read_int16(fid),
-              'data': dict()}
-
-        fid.seek(2, 1)
-        ch.update({'gain': read_float(fid),
-                   'units_per_bit': read_float(fid),
-                   'yaxis_label': read_str(fid, 16),
-                   'aar_val': read_double(fid),
-                   'checksum': read_int32(fid),
-                   'reserved': read_str(fid, 32)})
-
-        cfg['chs'] += [ch]
-        _correct_offset(fid)  # before and after
-        dta = dict()
-        if ch['ch_type'] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]:
-            dev = {'device_info': dev_header(fid),
-                   'inductance': read_float(fid),
-                   'padding': read_str(fid, 4),
-                   'transform': read_transform(fid),
-                   'xform_flag': read_int16(fid),
-                   'total_loops': read_int16(fid)}
-
-            fid.seek(4, 1)
-            dev['reserved'] = read_str(fid, 32)
-            dta.update({'dev': dev, 'loops': []})
-            for loop in range(dev['total_loops']):
-                d = {'position': read_double_matrix(fid, 1, 3),
-                     'orientation': read_double_matrix(fid, 1, 3),
-                     'radius': read_double(fid),
-                     'wire_radius': read_double(fid),
-                     'turns': read_int16(fid)}
+            fid.seek(2, 1)
+            ch.update({'gain': read_float(fid),
+                       'units_per_bit': read_float(fid),
+                       'yaxis_label': read_str(fid, 16),
+                       'aar_val': read_double(fid),
+                       'checksum': read_int32(fid),
+                       'reserved': read_str(fid, 32)})
+
+            cfg['chs'] += [ch]
+            _correct_offset(fid)  # before and after
+            dta = dict()
+            if ch['ch_type'] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]:
+                dev = {'device_info': dev_header(fid),
+                       'inductance': read_float(fid),
+                       'padding': read_str(fid, 4),
+                       'transform': read_transform(fid),
+                       'xform_flag': read_int16(fid),
+                       'total_loops': read_int16(fid)}
+
+                fid.seek(4, 1)
+                dev['reserved'] = read_str(fid, 32)
+                dta.update({'dev': dev, 'loops': []})
+                for loop in range(dev['total_loops']):
+                    d = {'position': read_double_matrix(fid, 1, 3),
+                         'orientation': read_double_matrix(fid, 1, 3),
+                         'radius': read_double(fid),
+                         'wire_radius': read_double(fid),
+                         'turns': read_int16(fid)}
+                    fid.seek(2, 1)
+                    d['checksum'] = read_int32(fid)
+                    d['reserved'] = read_str(fid, 32)
+                    dta['loops'] += [d]
+
+            elif ch['ch_type'] == BTI.CHTYPE_EEG:
+                dta = {'device_info': dev_header(fid),
+                       'impedance': read_float(fid),
+                       'padding': read_str(fid, 4),
+                       'transform': read_transform(fid),
+                       'reserved': read_char(fid, 32)}
+
+            elif ch['ch_type'] == BTI.CHTYPE_EXTERNAL:
+                dta = {'device_info': dev_header(fid),
+                       'user_space_size': read_int32(fid),
+                       'reserved': read_str(fid, 32)}
+
+            elif ch['ch_type'] == BTI.CHTYPE_TRIGGER:
+                dta = {'device_info': dev_header(fid),
+                       'user_space_size': read_int32(fid)}
                 fid.seek(2, 1)
-                d['checksum'] = read_int32(fid)
-                d['reserved'] = read_str(fid, 32)
-                dta['loops'] += [d]
-
-        elif ch['ch_type'] == BTI.CHTYPE_EEG:
-            dta = {'device_info': dev_header(fid),
-                   'impedance': read_float(fid),
-                   'padding': read_str(fid, 4),
-                   'transform': read_transform(fid),
-                   'reserved': read_char(fid, 32)}
-
-        elif ch['ch_type'] == BTI.CHTYPE_EXTERNAL:
-            dta = {'device_info': dev_header(fid),
-                   'user_space_size': read_int32(fid),
-                   'reserved': read_str(fid, 32)}
+                dta['reserved'] = read_str(fid, 32)
 
-        elif ch['ch_type'] == BTI.CHTYPE_TRIGGER:
-            dta = {'device_info': dev_header(fid),
-                   'user_space_size': read_int32(fid)}
-            fid.seek(2, 1)
-            dta['reserved'] = read_str(fid, 32)
+            elif ch['ch_type'] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
+                dta = {'device_info': dev_header(fid),
+                       'user_space_size': read_int32(fid),
+                       'reserved': read_str(fid, 32)}
 
-        elif ch['ch_type'] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
-            dta = {'device_info': dev_header(fid),
-                   'user_space_size': read_int32(fid),
-                   'reserved': read_str(fid, 32)}
+            elif ch['ch_type'] == BTI.CHTYPE_SHORTED:
+                dta = {'device_info': dev_header(fid),
+                       'reserved': read_str(fid, 32)}
 
-        elif ch['ch_type'] == BTI.CHTYPE_SHORTED:
-            dta = {'device_info': dev_header(fid),
-                   'reserved': read_str(fid, 32)}
-
-        ch.update(dta)  # add data collected
-        _correct_offset(fid)  # after each reading
+            ch.update(dta)  # add data collected
+            _correct_offset(fid)  # after each reading
 
     return cfg
 
@@ -758,73 +757,72 @@ def _read_ch_config(fid):
 def _read_bti_header(pdf_fname, config_fname):
     """ Read bti PDF header
     """
-    fid = open(pdf_fname, 'rb')
-
-    fid.seek(-8, 2)
-    start = fid.tell()
-    header_position = read_int64(fid)
-    check_value = header_position & BTI.FILE_MASK
-
-    if ((start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK):
-        header_position = check_value
-
-    # Check header position for alignment issues
-    if ((header_position % 8) != 0):
-        header_position += (8 - (header_position % 8))
+    with open(pdf_fname, 'rb') as fid:
+        fid.seek(-8, 2)
+        start = fid.tell()
+        header_position = read_int64(fid)
+        check_value = header_position & BTI.FILE_MASK
+
+        if ((start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK):
+            header_position = check_value
+
+        # Check header position for alignment issues
+        if ((header_position % 8) != 0):
+            header_position += (8 - (header_position % 8))
+
+        fid.seek(header_position, 0)
+
+        # actual header starts here
+        info = {'version': read_int16(fid),
+                'file_type': read_str(fid, 5),
+                'hdr_size': start - header_position,  # add to info for convenience
+                'start': start}
+
+        fid.seek(1, 1)
+
+        info.update({'data_format': read_int16(fid),
+                    'acq_mode': read_int16(fid),
+                    'total_epochs': read_int32(fid),
+                    'input_epochs': read_int32(fid),
+                    'total_events': read_int32(fid),
+                    'total_fixed_events': read_int32(fid),
+                    'sample_period': read_float(fid),
+                    'xaxis_label': read_str(fid, 16),
+                    'total_processes': read_int32(fid),
+                    'total_chans': read_int16(fid)})
 
-    fid.seek(header_position, 0)
-
-    # actual header starts here
-    info = {'version': read_int16(fid),
-            'file_type': read_str(fid, 5),
-            'hdr_size': start - header_position,  # add to info for convenience
-            'start': start}
-
-    fid.seek(1, 1)
+        fid.seek(2, 1)
+        info.update({'checksum': read_int32(fid),
+                    'total_ed_classes': read_int32(fid),
+                    'total_associated_files': read_int16(fid),
+                    'last_file_index': read_int16(fid),
+                    'timestamp': read_int32(fid)})
 
-    info.update({'data_format': read_int16(fid),
-                'acq_mode': read_int16(fid),
-                'total_epochs': read_int32(fid),
-                'input_epochs': read_int32(fid),
-                'total_events': read_int32(fid),
-                'total_fixed_events': read_int32(fid),
-                'sample_period': read_float(fid),
-                'xaxis_label': read_str(fid, 16),
-                'total_processes': read_int32(fid),
-                'total_chans': read_int16(fid)})
-
-    fid.seek(2, 1)
-    info.update({'checksum': read_int32(fid),
-                'total_ed_classes': read_int32(fid),
-                'total_associated_files': read_int16(fid),
-                'last_file_index': read_int16(fid),
-                'timestamp': read_int32(fid)})
-
-    fid.seek(20, 1)
-    _correct_offset(fid)
+        fid.seek(20, 1)
+        _correct_offset(fid)
 
-    # actual header ends here, so dar seems ok.
+        # actual header ends here, so far seems ok.
 
-    info['epochs'] = [_read_epoch(fid) for epoch in
-                      range(info['total_epochs'])]
+        info['epochs'] = [_read_epoch(fid) for epoch in
+                          range(info['total_epochs'])]
 
-    info['chs'] = [_read_channel(fid) for ch in
-                   range(info['total_chans'])]
+        info['chs'] = [_read_channel(fid) for ch in
+                       range(info['total_chans'])]
 
-    info['events'] = [_read_event(fid) for event in
-                      range(info['total_events'])]
+        info['events'] = [_read_event(fid) for event in
+                          range(info['total_events'])]
 
-    info['processes'] = [_read_process(fid) for process in
-                         range(info['total_processes'])]
+        info['processes'] = [_read_process(fid) for process in
+                             range(info['total_processes'])]
 
-    info['assocfiles'] = [_read_assoc_file(fid) for af in
-                          range(info['total_associated_files'])]
+        info['assocfiles'] = [_read_assoc_file(fid) for af in
+                              range(info['total_associated_files'])]
 
-    info['edclasses'] = [_read_pfid_ed(fid) for ed_class in
-                         range(info['total_ed_classes'])]
+        info['edclasses'] = [_read_pfid_ed(fid) for ed_class in
+                             range(info['total_ed_classes'])]
 
-    info['extra_data'] = fid.read(start - fid.tell())
-    info['fid'] = fid
+        info['extra_data'] = fid.read(start - fid.tell())
+        info['pdf_fname'] = pdf_fname
 
     info['total_slices'] = sum(e['pts_in_epoch'] for e in
                                info['epochs'])
@@ -869,7 +867,10 @@ def _read_bti_header(pdf_fname, config_fname):
     info['chs'] = [chans[pos] for pos in by_index]
 
     by_name = [(i, d['name']) for i, d in enumerate(info['chs'])]
-    by_name.sort(key=lambda c: int(c[1][1:]) if c[1][0] == 'A' else c[1])
+    a_chs = filter(lambda c: c[1].startswith('A'), by_name)
+    other_chs = filter(lambda c: not c[1].startswith('A'), by_name)
+    by_name = sorted(a_chs, key=lambda c: int(c[1][1:])) + sorted(other_chs)
+
     by_name = [idx[0] for idx in by_name]
     info['chs'] = [chans[pos] for pos in by_name]
     info['order'] = by_name
@@ -913,12 +914,12 @@ def _read_data(info, start=None, stop=None):
         raise RuntimeError('Invalid data range supplied:'
                            ' %d, %d' % (start, stop))
 
-    info['fid'].seek(info['bytes_per_slice'] * start, 0)
-
-    cnt = (stop - start) * info['total_chans']
-    shape = [stop - start, info['total_chans']]
-    data = np.fromfile(info['fid'], dtype=info['dtype'],
-                       count=cnt).astype('f4').reshape(shape)
+    with open(info['pdf_fname'], 'rb') as fid:
+        fid.seek(info['bytes_per_slice'] * start, 0)
+        cnt = (stop - start) * info['total_chans']
+        shape = [stop - start, info['total_chans']]
+        data = np.fromfile(fid, dtype=info['dtype'],
+                           count=cnt).astype('f4').reshape(shape)
 
     for ch in info['chs']:
         data[:, ch['index']] *= ch['cal']
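
    The change above drops the persistent file handle that used to live in
    info['fid'] and instead stores only 'pdf_fname', reopening the file for
    each read, so handles stay short-lived. A minimal sketch of the pattern
    (function and parameter names are hypothetical):

        import numpy as np

        def read_slice(pdf_fname, start, stop, n_chans, bytes_per_slice,
                       dtype='>i2'):
            # reopen the data file per request instead of caching a handle
            with open(pdf_fname, 'rb') as fid:
                fid.seek(bytes_per_slice * start, 0)
                count = (stop - start) * n_chans
                data = np.fromfile(fid, dtype=dtype, count=count)
            return data.astype('f4').reshape(stop - start, n_chans)
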
@@ -926,7 +927,7 @@ def _read_data(info, start=None, stop=None):
     return data[:, info['order']].T
 
 
-class RawBTi(Raw):
+class RawBTi(_BaseRaw):
     """ Raw object from 4D Neuroimaging MagnesWH3600 data
 
     Parameters
@@ -956,7 +957,7 @@ class RawBTi(Raw):
 
     Attributes & Methods
     --------------------
-    See documentation for mne.fiff.Raw
+    See documentation for mne.io.Raw
 
     """
     @verbose
@@ -1021,7 +1022,6 @@ class RawBTi(Raw):
         info['lowpass'] = lp
         info['acq_pars'], info['acq_stim'] = None, None
         info['filename'] = None
-        info['filenames'] = []
         chs = []
 
         ch_names = [ch['name'] for ch in bti_info['chs']]
@@ -1067,6 +1067,13 @@ class RawBTi(Raw):
                     chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA
                 elif chan_4d in ('GyxA', 'GzxA', 'GzyA'):
                     chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF
+
+            elif chan_vv.startswith('EEG'):
+                chan_info['kind'] = FIFF.FIFFV_EEG_CH
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
+                chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+                chan_info['unit'] = FIFF.FIFF_UNIT_V
+
             elif chan_vv == 'STI 013':
                 chan_info['kind'] = FIFF.FIFFV_RESP_CH
             elif chan_vv == 'STI 014':
@@ -1151,14 +1158,18 @@ class RawBTi(Raw):
         self.rawdir = None
         self.proj = None
         self.comp = None
-        self.fids = list()
-        self._preloaded = True
+        self._filenames = list()
+        self.preload = True
         self._projector_hashes = [None]
         self.info = info
 
         logger.info('Reading raw data from %s...' % pdf_fname)
         self._data = _read_data(bti_info)
         self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
+        self._raw_lengths = np.array([self._data.shape[1]])
+        self._first_samps = np.array([0])
+        self._last_samps = self._raw_lengths - 1
+        self.rawdirs = [[]]
 
         assert len(self._data) == len(self.info['ch_names'])
         self._times = np.arange(self.first_samp,
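
    The extra arrays mirror the multi-file bookkeeping that _BaseRaw (the
    new parent class of RawBTi) expects; for a single fully preloaded file
    they collapse to one-element arrays. A sketch with a hypothetical length:

        import numpy as np

        n_samples = 1017                      # hypothetical
        raw_lengths = np.array([n_samples])
        first_samps = np.array([0])
        last_samps = raw_lengths - 1          # -> array([1016])
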
diff --git a/mne/fiff/bti/constants.py b/mne/io/bti/constants.py
similarity index 96%
rename from mne/fiff/bti/constants.py
rename to mne/io/bti/constants.py
index e283c25..727278c 100644
--- a/mne/fiff/bti/constants.py
+++ b/mne/io/bti/constants.py
@@ -1,8 +1,8 @@
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-from .. constants import Bunch
+from ..constants import Bunch
 
 BTI = Bunch()
 
diff --git a/mne/fiff/bti/read.py b/mne/io/bti/read.py
similarity index 90%
rename from mne/fiff/bti/read.py
rename to mne/io/bti/read.py
index b14eb0c..aadb07c 100644
--- a/mne/fiff/bti/read.py
+++ b/mne/io/bti/read.py
@@ -1,8 +1,9 @@
-# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
 #          simplified BSD-3 license
 
 import struct
 import numpy as np
+from ...externals.six import b
 
 
 def _unpack_matrix(fid, format, rows, cols, dtype):
@@ -11,7 +12,7 @@ def _unpack_matrix(fid, format, rows, cols, dtype):
     bsize = struct.calcsize(format)
     string = fid.read(bsize)
     data = struct.unpack(format, string)
-    iter_mat = [(r, c) for r in xrange(rows) for c in xrange(cols)]
+    iter_mat = [(r, c) for r in range(rows) for c in range(cols)]
     for idx, (row, col) in enumerate(iter_mat):
         out[row, col] = data[idx]
 
@@ -36,7 +37,10 @@ def read_str(fid, count=1):
     format = '>' + ('c' * count)
     data = list(struct.unpack(format, fid.read(struct.calcsize(format))))
 
-    return ''.join(data[0:data.index('\x00') if '\x00' in data else count])
+    bytestr = b('').join(data[0:data.index(b('\x00')) if b('\x00') in data else
+                         count])
+
+    return str(bytestr.decode('ascii'))  # return native str type for Py2/3
 
 
 def read_char(fid, count=1):
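
    The rewritten read_str works on bytes under both Python 2 and 3: it
    truncates at the first NUL byte and decodes to the native str type. A
    self-contained sketch of the same logic, using plain b'' literals
    instead of six.b:

        import struct
        from io import BytesIO

        def read_str(fid, count=1):
            fmt = '>' + ('c' * count)
            data = list(struct.unpack(fmt, fid.read(struct.calcsize(fmt))))
            # truncate at the first NUL byte, then decode to a native str
            end = data.index(b'\x00') if b'\x00' in data else count
            return b''.join(data[:end]).decode('ascii')

        print(read_str(BytesIO(b'MAGNES'.ljust(16, b'\x00')), 16))
        # -> 'MAGNES'
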
diff --git a/mne/fiff/edf/tests/__init__.py b/mne/io/bti/tests/__init__.py
similarity index 100%
rename from mne/fiff/edf/tests/__init__.py
rename to mne/io/bti/tests/__init__.py
diff --git a/mne/fiff/bti/tests/data/exported4D_linux.fif b/mne/io/bti/tests/data/exported4D_linux_raw.fif
similarity index 100%
rename from mne/fiff/bti/tests/data/exported4D_linux.fif
rename to mne/io/bti/tests/data/exported4D_linux_raw.fif
diff --git a/mne/fiff/bti/tests/data/exported4D_solaris.fif b/mne/io/bti/tests/data/exported4D_solaris_raw.fif
similarity index 100%
rename from mne/fiff/bti/tests/data/exported4D_solaris.fif
rename to mne/io/bti/tests/data/exported4D_solaris_raw.fif
diff --git a/mne/fiff/bti/tests/data/test_config_linux b/mne/io/bti/tests/data/test_config_linux
similarity index 100%
rename from mne/fiff/bti/tests/data/test_config_linux
rename to mne/io/bti/tests/data/test_config_linux
diff --git a/mne/fiff/bti/tests/data/test_config_solaris b/mne/io/bti/tests/data/test_config_solaris
similarity index 100%
rename from mne/fiff/bti/tests/data/test_config_solaris
rename to mne/io/bti/tests/data/test_config_solaris
diff --git a/mne/fiff/bti/tests/data/test_hs_linux b/mne/io/bti/tests/data/test_hs_linux
similarity index 100%
rename from mne/fiff/bti/tests/data/test_hs_linux
rename to mne/io/bti/tests/data/test_hs_linux
diff --git a/mne/fiff/bti/tests/data/test_hs_solaris b/mne/io/bti/tests/data/test_hs_solaris
similarity index 100%
rename from mne/fiff/bti/tests/data/test_hs_solaris
rename to mne/io/bti/tests/data/test_hs_solaris
diff --git a/mne/fiff/bti/tests/data/test_pdf_linux b/mne/io/bti/tests/data/test_pdf_linux
similarity index 100%
rename from mne/fiff/bti/tests/data/test_pdf_linux
rename to mne/io/bti/tests/data/test_pdf_linux
diff --git a/mne/fiff/bti/tests/data/test_pdf_solaris b/mne/io/bti/tests/data/test_pdf_solaris
similarity index 100%
rename from mne/fiff/bti/tests/data/test_pdf_solaris
rename to mne/io/bti/tests/data/test_pdf_solaris
diff --git a/mne/fiff/bti/tests/test_bti.py b/mne/io/bti/tests/test_bti.py
similarity index 73%
rename from mne/fiff/bti/tests/test_bti.py
rename to mne/io/bti/tests/test_bti.py
index 2c725ec..6750112 100644
--- a/mne/fiff/bti/tests/test_bti.py
+++ b/mne/io/bti/tests/test_bti.py
@@ -1,4 +1,5 @@
-# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+from __future__ import print_function
+# Authors: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -9,10 +10,12 @@ import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 from nose.tools import assert_true, assert_raises, assert_equal
 
-from mne.fiff import Raw as Raw
-from mne.fiff.bti.raw import (_read_config, _setup_head_shape,
-                              read_raw_bti, _read_data, _read_bti_header)
+from mne.io import Raw as Raw
+from mne.io.bti.bti import (_read_config, _setup_head_shape,
+                            _read_data, _read_bti_header)
+from mne.io import read_raw_bti
 from mne.utils import _TempDir
+from functools import reduce
 
 base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
 
@@ -20,7 +23,7 @@ archs = 'linux', 'solaris'
 pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
 config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
 hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
-exported_fnames = [op.join(base_dir, 'exported4D_%s.fif' % a) for a in archs]
+exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
+                   for a in archs]
 tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
 tempdir = _TempDir()
 
@@ -44,7 +47,18 @@ def test_read_pdf():
         data = _read_data(info)
         shape = (info['total_chans'], info['total_slices'])
         assert_true(data.shape == shape)
-        info['fid'].close()
+
+
+def test_crop():
+    """ Test crop raw """
+    raw = read_raw_bti(pdf_fnames[0], config_fnames[0], hs_fnames[0])
+    y, t = raw[:]
+    t0, t1 = 0.25 * t[-1], 0.75 * t[-1]
+    mask = (t0 <= t) * (t <= t1)
+    raw_ = raw.crop(t0, t1)
+    y_, _ = raw_[:]
+    assert_true(y_.shape[1] == mask.sum())
+    assert_true(y_.shape[0] == y.shape[0])
 
 
 def test_raw():
@@ -63,23 +77,24 @@ def test_raw():
                 assert_array_almost_equal(ex.info['dev_head_t']['trans'],
                                           ra.info['dev_head_t']['trans'], 7)
                 dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
-                              for r_ in ra, ex]
+                              for r_ in (ra, ex)]
                 assert_array_equal(dig1, dig2)
 
                 coil1, coil2 = [np.concatenate([d['coil_trans'].flatten()
                                 for d in r_.info['chs'][:NCH]])
-                                for r_ in ra, ex]
+                                for r_ in (ra, ex)]
                 assert_array_almost_equal(coil1, coil2, 7)
 
                 loc1, loc2 = [np.concatenate([d['loc'].flatten()
-                              for d in r_.info['chs'][:NCH]]) for r_ in ra, ex]
+                              for d in r_.info['chs'][:NCH]])
+                              for r_ in (ra, ex)]
                 assert_array_equal(loc1, loc2)
 
                 assert_array_equal(ra._data[:NCH], ex._data[:NCH])
                 assert_array_equal(ra.cals[:NCH], ex.cals[:NCH])
                 ra.save(tmp_raw_fname)
             with Raw(tmp_raw_fname) as r:
-                print r
+                print(r)
         os.remove(tmp_raw_fname)
 
 
@@ -88,5 +103,6 @@ def test_setup_headshape():
     for hs in hs_fnames:
         dig, t = _setup_head_shape(hs)
         expected = set(['kind', 'ident', 'r'])
-        found = set(reduce(lambda x, y: x + y, [d.keys() for d in dig]))
+        found = set(reduce(lambda x, y: list(x) + list(y),
+                           [d.keys() for d in dig]))
         assert_true(not expected - found)
diff --git a/mne/fiff/bti/transforms.py b/mne/io/bti/transforms.py
similarity index 97%
rename from mne/fiff/bti/transforms.py
rename to mne/io/bti/transforms.py
index 9627c3d..a19b83b 100644
--- a/mne/fiff/bti/transforms.py
+++ b/mne/io/bti/transforms.py
@@ -1,4 +1,4 @@
-# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
 #
 #          simplified BSD-3 license
 
diff --git a/mne/fiff/compensator.py b/mne/io/compensator.py
similarity index 100%
rename from mne/fiff/compensator.py
rename to mne/io/compensator.py
diff --git a/mne/fiff/constants.py b/mne/io/constants.py
similarity index 98%
rename from mne/fiff/constants.py
rename to mne/io/constants.py
index b95135d..afa431b 100644
--- a/mne/fiff/constants.py
+++ b/mne/io/constants.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
@@ -77,6 +77,10 @@ FIFF.FIFF_BLOCK_NAME      = 111
 FIFF.FIFF_BLOCK_VERSION   = 112
 FIFF.FIFF_CREATOR         = 113  # Program that created the file (string)
 FIFF.FIFF_MODIFIER        = 114  # Program that modified the file (string)
+FIFF.FIFF_REF_ROLE        = 115
+FIFF.FIFF_REF_FILE_ID     = 116
+FIFF.FIFF_REF_FILE_NUM    = 117
+FIFF.FIFF_REF_FILE_NAME   = 118
 #
 #  Megacq saves the parameters in these tags
 #
@@ -119,6 +123,7 @@ FIFF.FIFF_SUBAVE_FIRST   = 231    # The first epoch # contained in the subaverag
 FIFF.FIFF_NAME           = 233          # Intended to be a short name.
 FIFF.FIFF_DESCRIPTION    = FIFF.FIFF_COMMENT # (Textual) Description of an object
 FIFF.FIFF_DIG_STRING     = 234          # String of digitized points
+FIFF.FIFF_LINE_FREQ      = 235    # Line frequency
 #
 # HPI fitting program tags
 #
@@ -225,6 +230,13 @@ FIFF.FIFF_SQUID_GATE        = 703
 FIFF.FIFFV_ASPECT_IFII_LOW  = 1100
 FIFF.FIFFV_ASPECT_IFII_HIGH = 1101
 FIFF.FIFFV_ASPECT_GATE      = 1102
+
+#
+# Values for file references
+#
+FIFF.FIFFV_ROLE_PREV_FILE = 1
+FIFF.FIFFV_ROLE_NEXT_FILE = 2
+
 #
 # References
 #
@@ -761,5 +773,9 @@ FIFF.FIFFV_COIL_CTF_GRAD           = 5001  # CTF axial gradiometer
 FIFF.FIFFV_COIL_KIT_GRAD           = 6001  # KIT system axial gradiometer
 
 # MNE RealTime
-FIFF.FIFF_MNE_RT_COMMAND           = 3700
-FIFF.FIFF_MNE_RT_CLIENT_ID         = 3701
+FIFF.FIFF_MNE_RT_COMMAND           = 3700  # realtime command
+FIFF.FIFF_MNE_RT_CLIENT_ID         = 3701  # realtime client
+
+# MNE epochs bookkeeping
+FIFF.FIFFB_MNE_EPOCHS_SELECTION    = 3800  # the epochs selection
+FIFF.FIFFB_MNE_EPOCHS_DROP_LOG     = 3801  # the drop log
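
    For reference, the FIFF constants are exposed as attributes on a single
    namespace object, so the new epochs bookkeeping tags added above can be
    read directly (sketch; the import path follows the fiff -> io rename in
    this commit):

        from mne.io.constants import FIFF

        print(FIFF.FIFFB_MNE_EPOCHS_SELECTION)  # -> 3800
        print(FIFF.FIFFB_MNE_EPOCHS_DROP_LOG)   # -> 3801
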
diff --git a/mne/fiff/ctf.py b/mne/io/ctf.py
similarity index 98%
rename from mne/fiff/ctf.py
rename to mne/io/ctf.py
index abe13b2..3f789f6 100644
--- a/mne/fiff/ctf.py
+++ b/mne/io/ctf.py
@@ -1,6 +1,6 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -11,6 +11,7 @@ import numpy as np
 from .constants import FIFF
 from .tag import find_tag, has_tag, read_tag
 from .tree import dir_tree_find
+
 from ..utils import logger, verbose
 
 
diff --git a/mne/fiff/diff.py b/mne/io/diff.py
similarity index 94%
rename from mne/fiff/diff.py
rename to mne/io/diff.py
index 810381a..ac39412 100644
--- a/mne/fiff/diff.py
+++ b/mne/io/diff.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD Style.
 
diff --git a/mne/fiff/edf/__init__.py b/mne/io/edf/__init__.py
similarity index 100%
rename from mne/fiff/edf/__init__.py
rename to mne/io/edf/__init__.py
diff --git a/mne/fiff/edf/edf.py b/mne/io/edf/edf.py
similarity index 72%
rename from mne/fiff/edf/edf.py
rename to mne/io/edf/edf.py
index e2beba0..7282db6 100644
--- a/mne/fiff/edf/edf.py
+++ b/mne/io/edf/edf.py
@@ -2,7 +2,8 @@
 
 """
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Authors: Teon Brooks <teon at nyu.edu>
+#          Martin Billinger <martin.billinger at tugraz.at>
 #
 # License: BSD (3-clause)
 
@@ -11,19 +12,22 @@ import calendar
 import datetime
 import re
 import warnings
+from math import ceil, floor
 
 import numpy as np
+from scipy.interpolate import interp1d
 
 from ...transforms import als_ras_trans_mm, apply_trans
 from ...utils import verbose, logger
-from ..raw import Raw
+from ..base import _BaseRaw
 from ..meas_info import Info
 from ..constants import FIFF
 from ...coreg import get_ras_to_neuromag_trans
 from ...filter import resample
+from ...externals.six.moves import zip
 
 
-class RawEDF(Raw):
+class RawEDF(_BaseRaw):
     """Raw object from EDF+,BDF file
 
     Parameters
@@ -49,6 +53,13 @@ class RawEDF(Raw):
         Path to annotation map file containing mapping from label to trigger.
         Must be specified if annot is not None.
 
+    tal_channel : int | None
+        Index (starting at 0) of the channel containing EDF+ annotations;
+        -1 corresponds to the last channel. If None, the annotation
+        channel is not used. Note: this is overruled by the annotation
+        file if specified.
+
     hpts : str | None
         Path to the hpts file containing electrode positions.
         If None, sensor locations are (0,0,0).
@@ -66,16 +77,18 @@ class RawEDF(Raw):
 
     See Also
     --------
-    mne.fiff.Raw : Documentation of attribute and methods.
+    mne.io.Raw : Documentation of attribute and methods.
     """
     @verbose
     def __init__(self, input_fname, n_eeg=None, stim_channel=-1, annot=None,
-                 annotmap=None, hpts=None, preload=False, verbose=None):
+                 annotmap=None, tal_channel=None, hpts=None, preload=False,
+                 verbose=None):
         logger.info('Extracting edf Parameters from %s...' % input_fname)
         input_fname = os.path.abspath(input_fname)
         self.info, self._edf_info = _get_edf_info(input_fname, n_eeg,
                                                   stim_channel, annot,
-                                                  annotmap, hpts, preload)
+                                                  annotmap, tal_channel,
+                                                  hpts, preload)
         logger.info('Creating Raw.info structure...')
 
         if bool(annot) != bool(annotmap):
@@ -84,16 +97,22 @@ class RawEDF(Raw):
 
         # Raw attributes
         self.verbose = verbose
-        self._preloaded = False
-        self.fids = list()
+        self.preload = False
+        self._filenames = list()
         self._projector = None
         self.first_samp = 0
         self.last_samp = self._edf_info['nsamples'] - 1
         self.comp = None  # no compensation for EDF
         self.proj = False
+        self._first_samps = np.array([self.first_samp])
+        self._last_samps = np.array([self.last_samp])
+        self._raw_lengths = np.array([self._edf_info['nsamples']])
+        self.rawdirs = np.array([])
+        self.cals = np.array([])
+        self.orig_format = 'int'
 
         if preload:
-            self._preloaded = preload
+            self.preload = preload
             logger.info('Reading raw data from %s...' % input_fname)
             self._data, _ = self._read_segment()
             assert len(self._data) == self.info['nchan']
@@ -148,7 +167,7 @@ class RawEDF(Raw):
             returns the time values corresponding to the samples.
         """
         if sel is None:
-            sel = range(self.info['nchan'])
+            sel = list(range(self.info['nchan']))
         elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
             return (666, 666)
         if projector is not None:
@@ -167,9 +186,13 @@ class RawEDF(Raw):
         data_size = self._edf_info['data_size']
         data_offset = self._edf_info['data_offset']
         stim_channel = self._edf_info['stim_channel']
+        tal_channel = self._edf_info['tal_channel']
         annot = self._edf_info['annot']
         annotmap = self._edf_info['annotmap']
 
+        blockstart = int(floor(float(start) / sfreq) * sfreq)
+        blockstop = int(ceil(float(stop) / sfreq) * sfreq)
+
         if start >= stop:
             raise ValueError('No data in this range')
 
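
    EDF stores samples in fixed-size records, so the requested range is
    first rounded out to whole records and the surplus is trimmed after
    reading (see datastart/datastop at the end of this function). A small
    worked example, assuming one-second records at 256 Hz:

        from math import ceil, floor

        sfreq, start, stop = 256.0, 300, 900
        blockstart = int(floor(float(start) / sfreq) * sfreq)  # -> 256
        blockstop = int(ceil(float(stop) / sfreq) * sfreq)     # -> 1024
        datastart, datastop = start - blockstart, stop - blockstart  # 44, 644
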
@@ -188,8 +211,8 @@ class RawEDF(Raw):
         with open(self.info['file_id'], 'rb') as fid:
             # extract data
             fid.seek(data_offset)
-            buffer_size = stop - start
-            pointer = start * n_chan
+            buffer_size = blockstop - blockstart
+            pointer = blockstart * n_chan * data_size
             fid.seek(data_offset + pointer)
 
             if 'n_samps' in self._edf_info:
@@ -197,7 +220,7 @@ class RawEDF(Raw):
                 max_samp = float(np.max(n_samps))
                 blocks = int(buffer_size / max_samp)
             else:
-                blocks = int(buffer_size / sfreq)
+                blocks = int(ceil(float(buffer_size) / sfreq))
             datas = []
             # bdf data: 24bit data
             if self._edf_info['subtype'] == '24BIT':
@@ -209,7 +232,7 @@ class RawEDF(Raw):
                 data = (data[:, 0] + (data[:, 1] << 8) + (data[:, 2] << 16))
                 # 24th bit determines the sign
                 data[data >= (1 << 23)] -= (1 << 24)
-                data = data.reshape((sfreq, n_chan, blocks), order='F')
+                data = data.reshape((int(sfreq), n_chan, blocks), order='F')
                 for i in range(blocks):
                     datas.append(data[:, :, i].T)
             else:
@@ -223,7 +246,25 @@ class RawEDF(Raw):
                     for i, samp in enumerate(n_samps):
                         chan_data = data[i::n_chan]
                         chan_data = np.hstack(chan_data)
-                        if samp != max_samp:
+                        if i == tal_channel:
+                            # don't resample tal_channel,
+                            # pad with zeros instead.
+                            n_missing = int(max_samp - samp) * blocks
+                            chan_data = np.hstack([chan_data, [0] * n_missing])
+                        elif i == stim_channel and samp < max_samp:
+                            if (annot and annotmap) or tal_channel is not None:
+                                # don't bother with resampling the stim channel
+                                # because it gets overwritten later on.
+                                chan_data = np.zeros(int(max_samp))
+                            else:
+                                warnings.warn('Interpolating stim channel. '
+                                              'Events may jitter.')
+                                oldrange = np.linspace(0, 1, samp + 1, True)
+                                newrange = np.linspace(0, 1, int(max_samp),
+                                                       False)
+                                chan_data = interp1d(oldrange,
+                                                     np.append(chan_data, 0),
+                                                     kind='zero')(newrange)
+                        elif samp != max_samp:
                             mult = max_samp / samp
                             chan_data = resample(x=chan_data, up=mult,
                                                  down=1, npad=0)
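
    The kind='zero' interpolation above is a zero-order hold: each trigger
    value is held constant while upsampling, so no spurious intermediate
    trigger codes are invented. A minimal sketch with hypothetical rates:

        import numpy as np
        from scipy.interpolate import interp1d

        samp, max_samp = 4, 8            # samples per record: actual, target
        chan_data = np.array([0., 5., 5., 0.])
        oldrange = np.linspace(0, 1, samp + 1, True)
        newrange = np.linspace(0, 1, max_samp, False)
        upsampled = interp1d(oldrange, np.append(chan_data, 0),
                             kind='zero')(newrange)
        print(upsampled)  # -> [0. 0. 5. 5. 5. 5. 0. 0.]
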
@@ -231,7 +272,8 @@ class RawEDF(Raw):
                 else:
                     data = np.fromfile(fid, dtype='<i2',
                                        count=buffer_size * n_chan)
-                    data = data.reshape((sfreq, n_chan, blocks), order='F')
+                    data = data.reshape((int(sfreq), n_chan, blocks),
+                                        order='F')
                     for i in range(blocks):
                         datas.append(data[:, :, i].T)
         if 'n_samps' in self._edf_info:
@@ -245,12 +287,32 @@ class RawEDF(Raw):
                 data[stim_channel] = 0
                 evts = _read_annot(annot, annotmap, sfreq, self.last_samp)
                 data[stim_channel, :evts.size] = evts[start:stop]
+            elif tal_channel is not None:
+                evts = _parse_tal_channel(data[tal_channel])
+                self._edf_info['events'] = evts
+
+                unique_annots = sorted(set([e[2] for e in evts]))
+                mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
+
+                data[stim_channel] = 0
+                for t_start, t_duration, annotation in evts:
+                    evid = mapping[annotation]
+                    n_start = int(t_start * sfreq)
+                    n_stop = int(t_duration * sfreq) + n_start - 1
+                    # make sure events without duration get one sample
+                    n_stop = n_stop if n_stop > n_start else n_start + 1
+                    if any(data[stim_channel][n_start:n_stop]):
+                        raise NotImplementedError('EDF+ with overlapping '
+                                                  'events not supported.')
+                    data[stim_channel][n_start:n_stop] = evid
             else:
                 stim = np.array(data[stim_channel], int)
                 mask = 255 * np.ones(stim.shape, int)
                 stim = np.bitwise_and(stim, mask)
                 data[stim_channel] = stim
-        data = data[sel]
+        datastart = start - blockstart
+        datastop = stop - blockstart
+        data = data[sel, datastart:datastop]
 
         logger.info('[done]')
         times = np.arange(start, stop, dtype=float) / self.info['sfreq']
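
    Annotations are mapped to integer event ids by sorting the unique
    annotation strings, so the ids are stable across reads of the same
    file. Sketch with hypothetical events:

        evts = [(0.0, 1.0, 'Lights off'), (2.5, 0.0, 'Door'),
                (4.0, 0.5, 'Lights off')]
        unique_annots = sorted(set(e[2] for e in evts))
        mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
        print(mapping)  # -> {'Door': 1, 'Lights off': 2}
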
@@ -258,7 +320,46 @@ class RawEDF(Raw):
         return data, times
 
 
-def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
+def _parse_tal_channel(tal_channel_data):
+    """Parse time-stamped annotation lists (TALs) in stim_channel
+    and return list of events.
+
+    Parameters
+    ----------
+    tal_channel_data : ndarray, shape = [n_samples]
+        channel data in EDF+ TAL format
+
+    Returns
+    -------
+    events : list
+        List of events. Each event contains [start, duration, annotation].
+
+    References
+    ----------
+    http://www.edfplus.info/specs/edfplus.html#tal
+    """
+
+    # convert tal_channel to an ascii string
+    tals = bytearray()
+    for s in tal_channel_data:
+        i = int(s)
+        tals.extend([i % 256, i // 256])
+
+    regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00'
+    tal_list = re.findall(regex_tal, tals.decode('ascii'))
+    events = []
+    for ev in tal_list:
+        onset = float(ev[0])
+        duration = float(ev[2]) if ev[2] else 0
+        for annotation in ev[3].split('\x14')[1:]:
+            if annotation:
+                events.append([onset, duration, annotation])
+
+    return events
+
+
+def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, tal_channel,
+                  hpts, preload):
     """Extracts all the information from the EDF+,BDF file.
 
     Parameters
@@ -284,6 +385,13 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
         Path to annotation map file containing mapping from label to trigger.
         Must be specified if annot is not None.
 
+    tal_channel : int | None
+        Index (starting at 0) of the channel containing EDF+ annotations;
+        -1 corresponds to the last channel. If None, the annotation
+        channel is not used. Note: this is overruled by the annotation
+        file if specified.
+
     hpts : str | None
         Path to the hpts file containing electrode positions.
         If None, sensor locations are (0,0,0).
@@ -311,16 +419,18 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
     info['filename'] = fname
     info['ctf_head_t'] = None
     info['dev_ctf_t'] = []
-    info['filenames'] = []
     info['dig'] = None
     info['dev_head_t'] = None
     info['proj_id'] = None
     info['proj_name'] = None
     info['experimenter'] = None
+    info['line_freq'] = None
+    info['subject_info'] = None
 
     edf_info = dict()
     edf_info['annot'] = annot
     edf_info['annotmap'] = annotmap
+    edf_info['events'] = []
 
     with open(fname, 'rb') as fid:
         assert(fid.tell() == 0)
@@ -328,13 +438,15 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
 
         _ = fid.read(80).strip()  # subject id
         _ = fid.read(80).strip()  # recording id
-        day, month, year = [int(x) for x in re.findall('(\d+)', fid.read(8))]
-        hour, minute, sec = [int(x) for x in re.findall('(\d+)', fid.read(8))]
+        day, month, year = [int(x) for x in re.findall('(\d+)',
+                                                       fid.read(8).decode())]
+        hour, minute, sec = [int(x) for x in re.findall('(\d+)',
+                                                        fid.read(8).decode())]
         date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
         info['meas_date'] = calendar.timegm(date.utctimetuple())
 
         edf_info['data_offset'] = header_nbytes = int(fid.read(8))
-        subtype = fid.read(44).strip()[:5]
+        subtype = fid.read(44).strip().decode()[:5]
         edf_info['subtype'] = subtype
 
         edf_info['n_records'] = n_records = int(fid.read(8))
@@ -343,10 +455,10 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
         info['nchan'] = int(fid.read(4))
         if n_eeg is None:
             n_eeg = info['nchan']
-        channels = range(info['nchan'])
-        ch_names = [fid.read(16).strip() for _ in channels]
+        channels = list(range(info['nchan']))
+        ch_names = [fid.read(16).strip().decode() for _ in channels]
         _ = [fid.read(80).strip() for _ in channels]  # transducer type
-        units = [fid.read(8).strip() for _ in channels]
+        units = [fid.read(8).strip().decode() for _ in channels]
         for i, unit in enumerate(units):
             if unit == 'uV':
                 units[i] = -6
@@ -358,18 +470,20 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
         physical_max = np.array([float(fid.read(8)) for _ in channels])
         digital_min = np.array([float(fid.read(8)) for _ in channels])
         digital_max = np.array([float(fid.read(8)) for _ in channels])
-        prefiltering = [fid.read(80).strip() for _ in channels][:-1]
+        prefiltering = [fid.read(80).strip().decode() for _ in channels][:-1]
         highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
                              for filt in prefiltering])
         lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
                             for filt in prefiltering])
+
+        high_pass_default = 0.
         if highpass.size == 0:
-            info['highpass'] = None
+            info['highpass'] = high_pass_default
         elif all(highpass):
             if highpass[0] == 'NaN':
-                info['highpass'] = None
+                info['highpass'] = high_pass_default
             elif highpass[0] == 'DC':
-                info['highpass'] = 0
+                info['highpass'] = 0.
             else:
                 info['highpass'] = int(highpass[0])
         else:
@@ -377,6 +491,7 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
             warnings.warn('%s' % ('Channels contain different highpass '
                                   + 'filters. Highest filter setting will '
                                   + 'be stored.'))
+
         if lowpass.size == 0:
             info['lowpass'] = None
         elif all(lowpass):
@@ -398,16 +513,19 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
         n_samples_per_record = n_samples_per_record[0]
         fid.read(32 * info['nchan'])  # reserved
         assert fid.tell() == header_nbytes
+
     physical_ranges = physical_max - physical_min
     cals = digital_max - digital_min
-    info['sfreq'] = int(n_samples_per_record / record_length)
+    info['sfreq'] = n_samples_per_record / float(record_length)
     edf_info['nsamples'] = n_records * n_samples_per_record
 
+    if info['lowpass'] is None:
+        info['lowpass'] = info['sfreq'] / 2.
+
     # Some keys to be consistent with FIF measurement info
     info['description'] = None
     info['buffer_size_sec'] = 10.
     info['orig_blocks'] = None
-    info['orig_fid_str'] = None
 
     if edf_info['subtype'] == '24BIT':
         edf_info['data_size'] = 3  # 24-bit (3 byte) integers
@@ -415,14 +533,15 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
         edf_info['data_size'] = 2  # 16-bit (2 byte) integers
 
     if hpts and os.path.lexists(hpts):
-        fid = open(hpts, 'rb').read()
+        with open(hpts, 'rb') as fid:
+            ff = fid.read().decode()
         locs = {}
         temp = re.findall('eeg\s(\w+)\s(-?[\d,.]+)\s(-?[\d,.]+)\s(-?[\d,.]+)',
-                          fid)
+                          ff)
         temp += re.findall('cardinal\s([\d,.]+)\s(-?[\d,.]+)\s(-?[\d,.]+)\s(-?'
-                           '[\d,.]+)', fid)
+                           '[\d,.]+)', ff)
         for loc in temp:
-            coord = np.array(map(float, loc[1:]))
+            coord = np.array(loc[1:], dtype=float)
             coord = apply_trans(als_ras_trans_mm, coord)
             locs[loc[0].lower()] = coord
         trans = get_ras_to_neuromag_trans(nasion=locs['2'], lpa=locs['1'],
@@ -505,6 +624,17 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, hpts, preload):
     else:
         edf_info['stim_channel'] = stim_channel - 1
 
+    # TODO: automatic detection of the tal_channel?
+    if tal_channel == -1:
+        edf_info['tal_channel'] = info['nchan'] - 1
+    else:
+        edf_info['tal_channel'] = tal_channel
+
+    if tal_channel is not None and not preload:
+        raise RuntimeError('EDF+ Annotations (TAL) channel needs to be '
+                           'parsed completely on loading. '
+                           'Must set preload=True.')
+
     return info, edf_info
 
 
@@ -519,7 +649,7 @@ def _read_annot(annot, annotmap, sfreq, data_length):
     annotmap : str
         Path to annotation map file containing mapping from label to trigger.
 
-    sfreq : int
+    sfreq : float
         Sampling frequency.
 
     data_length : int
@@ -534,8 +664,7 @@ def _read_annot(annot, annotmap, sfreq, data_length):
     annot = open(annot).read()
     triggers = re.findall(pat, annot)
     times, values = zip(*triggers)
-    times = map(float, times)
-    times = [time * sfreq for time in times]
+    times = [float(time) * sfreq for time in times]
 
     pat = '(\w+):(\d+)'
     annotmap = open(annotmap).read()
@@ -553,7 +682,8 @@ def _read_annot(annot, annotmap, sfreq, data_length):
 
 
 def read_raw_edf(input_fname, n_eeg=None, stim_channel=-1, annot=None,
-                 annotmap=None, hpts=None, preload=False, verbose=None):
+                 annotmap=None, tal_channel=None, hpts=None,
+                 preload=False, verbose=None):
     """Reader function for EDF+, BDF conversion to FIF
 
     Parameters
@@ -579,6 +709,13 @@ def read_raw_edf(input_fname, n_eeg=None, stim_channel=-1, annot=None,
         Path to annotation map file containing mapping from label to trigger.
         Must be specified if annot is not None.
 
+    tal_channel : int | None
+        Index (starting at 0) of the channel containing EDF+ annotations;
+        -1 corresponds to the last channel. If None, the annotation
+        channel is not used. Note: this is overruled by the annotation
+        file if specified.
+
     hpts : str | None
         Path to the hpts file containing electrode positions.
         If None, sensor locations are (0,0,0).
@@ -592,4 +729,5 @@ def read_raw_edf(input_fname, n_eeg=None, stim_channel=-1, annot=None,
     """
     return RawEDF(input_fname=input_fname, n_eeg=n_eeg,
                   stim_channel=stim_channel, annot=annot, annotmap=annotmap,
-                  hpts=hpts, preload=preload, verbose=verbose)
+                  tal_channel=tal_channel, hpts=hpts, preload=preload,
+                  verbose=verbose)
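
    A hypothetical usage sketch of the extended reader (file name made up):
    because the TAL channel must be parsed completely at load time,
    preload=True is required whenever tal_channel is given.

        from mne.io import read_raw_edf

        raw = read_raw_edf('recording.edf', tal_channel=-1, preload=True)
        # after loading, parsed annotations are kept as
        # [onset, duration, annotation] triples:
        events = raw._edf_info['events']
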
diff --git a/mne/fiff/bti/tests/__init__.py b/mne/io/edf/tests/__init__.py
similarity index 100%
copy from mne/fiff/bti/tests/__init__.py
copy to mne/io/edf/tests/__init__.py
diff --git a/mne/fiff/edf/tests/data/biosemi.hpts b/mne/io/edf/tests/data/biosemi.hpts
similarity index 100%
rename from mne/fiff/edf/tests/data/biosemi.hpts
rename to mne/io/edf/tests/data/biosemi.hpts
diff --git a/mne/fiff/edf/tests/data/test.bdf b/mne/io/edf/tests/data/test.bdf
similarity index 100%
rename from mne/fiff/edf/tests/data/test.bdf
rename to mne/io/edf/tests/data/test.bdf
diff --git a/mne/fiff/edf/tests/data/test.edf b/mne/io/edf/tests/data/test.edf
similarity index 97%
rename from mne/fiff/edf/tests/data/test.edf
rename to mne/io/edf/tests/data/test.edf
index 198feec..1800e25 100644
--- a/mne/fiff/edf/tests/data/test.edf
+++ b/mne/io/edf/tests/data/test.edf
   [binary EDF content omitted: the header record is rewritten (subject and
   recording fields anonymized, start date 29-APR-2014, 'EDF+C' continuous
   marker added, 140 signal headers instead of 139 -- consistent with an
   added EDF+ annotations channel); the remaining hunks change unprintable
   sample data.]
diff --git a/mne/fiff/edf/tests/data/test_bdf_eeglab.mat b/mne/io/edf/tests/data/test_bdf_eeglab.mat
similarity index 100%
rename from mne/fiff/edf/tests/data/test_bdf_eeglab.mat
rename to mne/io/edf/tests/data/test_bdf_eeglab.mat
diff --git a/mne/fiff/edf/tests/data/test_edf_eeglab.mat b/mne/io/edf/tests/data/test_edf_eeglab.mat
similarity index 100%
rename from mne/fiff/edf/tests/data/test_edf_eeglab.mat
rename to mne/io/edf/tests/data/test_edf_eeglab.mat
diff --git a/mne/fiff/edf/tests/data/test_eeglab.mat b/mne/io/edf/tests/data/test_eeglab.mat
similarity index 100%
rename from mne/fiff/edf/tests/data/test_eeglab.mat
rename to mne/io/edf/tests/data/test_eeglab.mat
diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py
new file mode 100644
index 0000000..baf4a71
--- /dev/null
+++ b/mne/io/edf/tests/test_edf.py
@@ -0,0 +1,196 @@
+"""Data Equivalence Tests"""
+from __future__ import print_function
+
+# Authors: Teon Brooks <teon at nyu.edu>
+#          Martin Billinger <martin.billinger at tugraz.at>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import inspect
+
+from nose.tools import assert_equal, assert_true
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import assert_raises
+from scipy import io
+import numpy as np
+
+from mne.externals.six import iterbytes
+from mne.utils import _TempDir
+from mne import pick_types
+from mne.io import Raw
+from mne.io import read_raw_edf
+import mne.io.edf.edf as edfmodule
+from mne.event import find_events
+
+FILE = inspect.getfile(inspect.currentframe())
+data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
+hpts_path = op.join(data_dir, 'biosemi.hpts')
+bdf_path = op.join(data_dir, 'test.bdf')
+edf_path = op.join(data_dir, 'test.edf')
+bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
+edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
+
+tempdir = _TempDir()
+
+
+def test_bdf_data():
+    """Test reading raw bdf files
+    """
+    raw_py = read_raw_edf(bdf_path, hpts=hpts_path, preload=True)
+    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
+    data_py, _ = raw_py[picks]
+
+    print(raw_py)  # to test repr
+    print(raw_py.info)  # to test Info repr
+
+    # this .mat was generated using the EEG Lab Biosemi Reader
+    raw_eeglab = io.loadmat(bdf_eeglab_path)
+    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
+    data_eeglab = raw_eeglab[picks]
+
+    assert_array_almost_equal(data_py, data_eeglab)
+
+    # Manually checking that float coordinates are imported
+    assert_true((raw_py.info['chs'][0]['eeg_loc']).any())
+    assert_true((raw_py.info['chs'][25]['eeg_loc']).any())
+    assert_true((raw_py.info['chs'][63]['eeg_loc']).any())
+
+
+def test_edf_data():
+    """Test reading raw edf files
+    """
+    raw_py = read_raw_edf(edf_path, stim_channel=139, preload=True)
+
+    picks = pick_types(raw_py.info, meg=False, eeg=True,
+                       exclude=['EDF Annotations'])
+    data_py, _ = raw_py[picks]
+
+    print(raw_py)  # to test repr
+    print(raw_py.info)  # to test Info repr
+
+    # this .mat was generated using the EEG Lab Biosemi Reader
+    raw_eeglab = io.loadmat(edf_eeglab_path)
+    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
+    data_eeglab = raw_eeglab[picks]
+
+    assert_array_almost_equal(data_py, data_eeglab)
+
+
+def test_read_segment():
+    """Test writing raw edf files when preload is False
+    """
+    raw1 = read_raw_edf(edf_path, stim_channel=139, preload=False)
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
+    raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
+    raw11 = Raw(raw1_file, preload=True)
+    data1, times1 = raw1[:139, :]
+    data11, times11 = raw11[:139, :]
+    assert_array_almost_equal(data1, data11, 10)
+    assert_array_almost_equal(times1, times11)
+    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
+
+    raw2 = read_raw_edf(edf_path, stim_channel=139, preload=True)
+    raw2_file = op.join(tempdir, 'test2-raw.fif')
+    raw2.save(raw2_file, overwrite=True)
+    data2, times2 = raw2[:139, :]
+    assert_array_equal(data1, data2)
+    assert_array_equal(times1, times2)
+
+    raw1 = Raw(raw1_file, preload=True)
+    raw2 = Raw(raw2_file, preload=True)
+    assert_array_equal(raw1._data, raw2._data)
+
+    # test the _read_segment function by only loading some of the data
+    raw1 = read_raw_edf(edf_path, preload=False)
+    raw2 = read_raw_edf(edf_path, preload=True)
+
+    # select some random range of data to compare
+    data1, times1 = raw1[:, 345:417]
+    data2, times2 = raw2[:, 345:417]
+    assert_array_equal(data1, data2)
+    assert_array_equal(times1, times2)
+
+
+def test_append():
+    """Test appending raw edf objects using Raw.append
+    """
+    # Author: Alan Leggitt <alan.leggitt at ucsf.edu>
+    raw = read_raw_edf(bdf_path, hpts=hpts_path, preload=False)
+    raw0 = raw.copy()
+    raw1 = raw.copy()
+    raw0.append(raw1)
+    assert_true(2 * len(raw) == len(raw0))
+
+
+def test_parse_annotation():
+    """Test parsing the tal channel
+    """
+
+    # test the parser
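+    # A sketch of the EDF+ TAL byte format being parsed (per the EDF+
+    # spec as we read it): each entry is "+onset[\x15duration]\x14"
+    # followed by zero or more annotations, each terminated by \x14,
+    # with the record padded by NUL (\x00) bytes.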
+    annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
+             b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
+             b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
+             b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
+             b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
+             b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
+    # pack consecutive byte pairs into little-endian 16-bit samples
+    annot = [a for a in iterbytes(annot)]
+    annot[1::2] = [a * 256 for a in annot[1::2]]
+    tal_channel = map(sum, zip(annot[0::2], annot[1::2]))
+    events = edfmodule._parse_tal_channel(tal_channel)
+    assert_equal(events, [[180.0, 0, 'Lights off'],
+                          [180.0, 0, 'Close door'],
+                          [180.0, 0, 'Lights off'],
+                          [180.0, 0, 'Close door'],
+                          [3.14, 4.2, 'nothing'],
+                          [1800.2, 25.5, 'Apnea']])
+
+
+def test_edf_annotations():
+    """Test if events are detected correctly in a typical MNE workflow.
+    """
+
+    # test an actual file
+    raw = read_raw_edf(edf_path, tal_channel=-1,
+                       hpts=hpts_path, preload=True)
+    edf_events = find_events(raw, output='step', shortest_event=0,
+                             stim_channel='STI 014')
+
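+    # With output='step', find_events returns two rows per event: an
+    # onset row [sample, 0, id] and an offset row [sample, id, 0]; the
+    # block below constructs that expected array from (onset, duration,
+    # id) triples sampled at 512 Hz.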
+    # onset, duration, id (in seconds)
+    events = [[0.1344, 0.2560, 2],
+              [0.3904, 1.0000, 2],
+              [2.0000, 0.0000, 3],
+              [2.5000, 2.5000, 2]]
+    events = np.array(events)
+    events[:, :2] *= 512  # convert times to samples (sfreq = 512 Hz)
+    events = np.array(events, dtype=int)
+    events[:, 1] -= 1  # inclusive offset: duration - 1 samples
+    events[events[:, 1] <= 0, 1] = 1  # enforce a minimum one-sample step
+    events[:, 1] += events[:, 0]  # convert durations to offset samples
+
+    onsets = events[:, [0, 2]]
+    offsets = events[:, [1, 2]]
+
+    events = np.zeros((2 * events.shape[0], 3), dtype=int)
+    events[0::2, [0, 2]] = onsets
+    events[1::2, [0, 1]] = offsets
+
+    assert_array_equal(edf_events, events)
+
+
+def test_write_annotations():
+    """Test writing raw files when annotations were parsed.
+    """
+    raw1 = read_raw_edf(edf_path, tal_channel=-1, preload=True)
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
+    raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
+    raw11 = Raw(raw1_file, preload=True)
+    data1, times1 = raw1[:, :]
+    data11, times11 = raw11[:, :]
+
+    assert_array_almost_equal(data1, data11)
+    assert_array_almost_equal(times1, times11)
+    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
+
+    assert_raises(RuntimeError, read_raw_edf,
+                  edf_path, tal_channel=-1, preload=False)
diff --git a/mne/io/egi/__init__.py b/mne/io/egi/__init__.py
new file mode 100644
index 0000000..59f9db1
--- /dev/null
+++ b/mne/io/egi/__init__.py
@@ -0,0 +1,5 @@
+"""EGI module for conversion to FIF"""
+
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+
+from .egi import read_raw_egi, _combine_triggers
diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py
new file mode 100644
index 0000000..103e5d2
--- /dev/null
+++ b/mne/io/egi/egi.py
@@ -0,0 +1,322 @@
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: simplified BSD-3
+
+import datetime
+import os
+import time
+import warnings
+
+import numpy as np
+
+from ..base import _BaseRaw
+from ..meas_info import Info
+from ..constants import FIFF
+from ...utils import verbose, logger
+
+_other_fields = [
+    'lowpass', 'buffer_size_sec', 'dev_ctf_t',
+    'meas_id', 'subject_info',
+    'dev_head_t', 'line_freq', 'acq_stim', 'proj_id', 'description',
+    'highpass', 'experimenter', 'file_id', 'proj_name',
+    'dig', 'ctf_head_t', 'orig_blocks', 'acq_pars'
+]
+
+
+def _read_header(fid):
+    """Read EGI binary header"""
+
+    version = np.fromfile(fid, np.int32, 1)[0]
+
+    if version > 6 & ~np.bitwise_and(version, 6):
+        version = version.byteswap().astype(np.uint32)
+    else:
+        raise ValueError('Watch out, this does not seem to be a simple '
+                         'binary EGI file.')
+    my_fread = lambda *x, **y: np.fromfile(*x, **y)[0]
+    info = dict(
+        version=version,
+        year=my_fread(fid, '>i2', 1),
+        month=my_fread(fid, '>i2', 1),
+        day=my_fread(fid, '>i2', 1),
+        hour=my_fread(fid, '>i2', 1),
+        minute=my_fread(fid, '>i2', 1),
+        second=my_fread(fid, '>i2', 1),
+        millisecond=my_fread(fid, '>i4', 1),
+        samp_rate=my_fread(fid, '>i2', 1),
+        n_channels=my_fread(fid, '>i2', 1),
+        gain=my_fread(fid, '>i2', 1),
+        bits=my_fread(fid, '>i2', 1),
+        value_range=my_fread(fid, '>i2', 1)
+    )
+
+    unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0
+    precision = np.bitwise_and(version, 6)
+    if precision == 0:
+        raise RuntimeError('Floating point precision is undefined.')
+
+    if unsegmented:
+        info.update(dict(n_categories=0,
+                         n_segments=1,
+                         n_samples=np.fromfile(fid, '>i4', 1)[0],
+                         n_events=np.fromfile(fid, '>i2', 1)[0],
+                         event_codes=[],
+                         category_names=[],
+                         category_lengths=[],
+                         pre_baseline=0))
+        for event in range(info['n_events']):
+            event_codes = ''.join(np.fromfile(fid, 'S1', 4).astype('U1'))
+            info['event_codes'].append(event_codes)
+        info['event_codes'] = np.array(info['event_codes'])
+    else:
+        raise NotImplementedError('Only continuous files are supported')
+
+    info.update(dict(precision=precision, unsegmented=unsegmented))
+
+    return info
+
+
+def _read_events(fid, info):
+    """Read events"""
+    unpack = [info[k] for k in ['n_events', 'n_segments', 'n_channels']]
+    n_events, n_segments, n_channels = unpack
+    n_samples = 1 if info['unsegmented'] else info['n_samples']
+    events = np.zeros([n_events, n_segments * info['n_samples']])
+    dtype, bytesize = {2: ('>i2', 2), 4: ('>f4', 4),
+                       6: ('>f8', 8)}[info['precision']]
+
+    info.update({'dtype': dtype, 'bytesize': bytesize})
+    beg_dat = fid.tell()
+
+    for ii in range(info['n_events']):
+        fid.seek(beg_dat + (int(n_channels) + ii) * bytesize, 0)
+        events[ii] = np.fromfile(fid, dtype, n_samples)
+        fid.seek(int((n_channels + n_events) * bytesize), 1)
+    return events
+
+
+def _read_data(fid, info):
+    """Aux function"""
+    if not info['unsegmented']:
+        raise NotImplementedError('Only continuous files are supported')
+
+    fid.seek(36 + info['n_events'] * 4, 0)  # skip header
+    readsize = (info['n_channels'] + info['n_events']) * info['n_samples']
+    final_shape = (info['n_samples'], info['n_channels'] + info['n_events'])
+    data = np.fromfile(fid, info['dtype'], readsize).reshape(final_shape).T
+    return data
+
+
+def _combine_triggers(data, remapping=None):
+    """Combine binary triggers"""
+    new_trigger = np.zeros(data[0].shape)
+    first = np.nonzero(data[0])[0]
+    for d in data[1:]:
+        if np.intersect1d(d.nonzero()[0], first).any():
+            raise RuntimeError('Events must be mutually exclusive')
+
+    if remapping is None:
+        remapping = np.arange(len(data)) + 1
+
+    for d, event_id in zip(data, remapping):
+        idx = d.nonzero()
+        if np.any(idx):
+            new_trigger[idx] += event_id
+
+    return new_trigger[None]
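+# A minimal usage sketch for _combine_triggers (values here are
+# illustrative): two mutually exclusive binary trigger channels are
+# merged into a single channel carrying the remapped event ids.
+#
+#     >>> triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
+#     >>> _combine_triggers(triggers, remapping=[12, 24])
+#     array([[  0.,  12.,  24.,   0.]])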
+
+
+@verbose
+def read_raw_egi(input_fname, include=None, exclude=None, verbose=None):
+    """Read EGI simple binary as raw object
+
+    Note. The trigger channel names are based on the arbitrary,
+    user-dependent event codes used. However, this function will
+    attempt to generate a synthetic trigger channel named ``STI 014``
+    in accordance with the general Neuromag / MNE naming pattern.
+    The event_id assignment equals ``np.arange(n_events - n_excluded) + 1``.
+    The resulting `event_id` mapping is stored as an attribute of
+    the resulting raw object but will be ignored when saving to a fiff.
+    Note. The trigger channel is artificially constructed based on
+    timestamps received by the Netstation. As a consequence, triggers
+    have only short durations.
+    This step will fail if events are not mutually exclusive.
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the raw file.
+    include : None | list
+       The event channels to be included when creating the synthetic
+       trigger. Defaults to None.
+       Note. Overrides the `exclude` parameter.
+    exclude : None | list
+       The event channels to be ignored when creating the synthetic
+       trigger. Defaults to None. If None, channels that have more than
+       one event and the ``sync`` and ``TREV`` channels will be
+       ignored.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of mne.io.Raw
+        A raw object containing EGI data.
+    """
+    return _RawEGI(input_fname, include, exclude, verbose)
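+# A short usage sketch ('data.raw' is a placeholder path):
+#
+#     >>> raw = read_raw_egi('data.raw')  # doctest: +SKIP
+#     >>> events = mne.find_events(raw, stim_channel='STI 014')  # doctest: +SKIP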
+
+
+class _RawEGI(_BaseRaw):
+    """Raw object from EGI simple binary file
+    """
+    @verbose
+    def __init__(self, input_fname, include=None, exclude=None,
+                 verbose=None):
+        """docstring for __init__"""
+        with open(input_fname, 'rb') as fid:  # 'rb' important for py3k
+            logger.info('Reading EGI header from %s...' % input_fname)
+            egi_info = _read_header(fid)
+            logger.info('    Reading events ...')
+            _ = _read_events(fid, egi_info)  # update info + jump
+            logger.info('    Reading data ...')
+            # reads events as well
+            data = _read_data(fid, egi_info).astype(np.float64)
+            if egi_info['value_range'] and egi_info['bits']:
+                # scale by the per-bit value range given in the header
+                mv = egi_info['value_range'] / 2 ** egi_info['bits']
+            else:
+                mv = 1e-6  # assume data are stored in microvolts
+            data[:egi_info['n_channels']] = data[:egi_info['n_channels']] * mv
+
+        logger.info('    Assembling measurement info ...')
+
+        event_codes = list(egi_info['event_codes'])
+        egi_events = data[-egi_info['n_events']:]
+
+        if include is None:
+            exclude_list = ['sync', 'TREV'] if exclude is None else exclude
+            exclude_inds = [i for i, k in enumerate(event_codes) if k in
+                            exclude_list]
+            more_excludes = []
+            if exclude is None:
+                for ii, event in enumerate(egi_events):
+                    if event.sum() <= 1 and event_codes[ii]:
+                        more_excludes.append(ii)
+            if len(exclude_inds) + len(more_excludes) == len(event_codes):
+                warnings.warn('Did not find any event code with more '
+                              'than one event.', RuntimeWarning)
+            else:
+                exclude_inds.extend(more_excludes)
+
+            exclude_inds.sort()
+            include_ = [i for i in np.arange(egi_info['n_events']) if
+                        i not in exclude_inds]
+            include_names = [k for i, k in enumerate(event_codes)
+                             if i in include_]
+        else:
+            include_ = [i for i, k in enumerate(event_codes) if k in include]
+            include_names = include
+
+        for kk, v in [('include', include_names), ('exclude', exclude)]:
+            if isinstance(v, list):
+                for k in v:
+                    if k not in event_codes:
+                        raise ValueError('Could not find event named "%s"' % k)
+            elif v is not None:
+                raise ValueError('`%s` must be None or of type list' % kk)
+
+        event_ids = np.arange(len(include_)) + 1
+        try:
+            logger.info('    Synthesizing trigger channel "STI 014" ...')
+            logger.info('    Excluding events {%s} ...' %
+                        ", ".join([k for i, k in enumerate(event_codes)
+                                   if i not in include_]))
+            new_trigger = _combine_triggers(egi_events[include_],
+                                            remapping=event_ids)
+            data = np.concatenate([data, new_trigger])
+        except RuntimeError:
+            logger.info('    Found multiple events at the same time sample. '
+                        'Could not create trigger channel.')
+            new_trigger = None
+
+        self.event_id = dict(zip([e for e in event_codes if e in
+                                  include_names], event_ids))
+        self._data = data
+        self.verbose = verbose
+        self.info = info = Info(dict((k, None) for k in _other_fields))
+        info['sfreq'] = egi_info['samp_rate']
+        info['filename'] = input_fname
+        my_time = datetime.datetime(
+            egi_info['year'],
+            egi_info['month'],
+            egi_info['day'],
+            egi_info['hour'],
+            egi_info['minute'],
+            egi_info['second']
+        )
+        my_timestamp = time.mktime(my_time.timetuple())
+        info['meas_date'] = np.array([my_timestamp], dtype=np.float32)
+        info['projs'] = []
+        ch_names = ['EEG %03d' % (i + 1) for i in range(egi_info['n_channels'])]
+        ch_names.extend(list(egi_info['event_codes']))
+        if new_trigger is not None:
+            ch_names.append('STI 014')  # our new_trigger
+        info['nchan'] = len(data)
+        info['chs'] = []
+        info['ch_names'] = ch_names
+        info['bads'] = []
+        info['comps'] = []
+        for ii, ch_name in enumerate(ch_names):
+            ch_info = {'cal': 1.0,
+                       'logno': ii + 1,
+                       'scanno': ii + 1,
+                       'range': 1.0,
+                       'unit_mul': 0,
+                       'ch_name': ch_name,
+                       'unit': FIFF.FIFF_UNIT_V,
+                       'coord_frame': FIFF.FIFFV_COORD_HEAD,
+                       'coil_type': FIFF.FIFFV_COIL_EEG,
+                       'kind': FIFF.FIFFV_EEG_CH,
+                       'eeg_loc': None,
+                       'loc': np.array([0, 0, 0, 1] * 3, dtype='f4')}
+
+            # 4-character names are the original event-code channels
+            if len(ch_name) == 4 or ch_name.startswith('STI'):
+                u = {'unit_mul': 0,
+                     'coil_type': FIFF.FIFFV_COIL_NONE,
+                     'unit': FIFF.FIFF_UNIT_NONE,
+                     'kind': FIFF.FIFFV_STIM_CH}
+                ch_info.update(u)
+            info['chs'].append(ch_info)
+
+        self.preload = True
+        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
+        self._times = np.arange(self.first_samp, self.last_samp + 1,
+                                dtype=np.float64)
+        self._times /= self.info['sfreq']
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
+                    % (self.first_samp, self.last_samp,
+                       float(self.first_samp) / self.info['sfreq'],
+                       float(self.last_samp) / self.info['sfreq']))
+
+        # Raw attributes
+        self._filenames = list()
+        self._projector = None
+        self.first_samp = 0
+        self.last_samp = egi_info['n_samples'] - 1
+        self.comp = None  # no compensation for egi
+        self.proj = False
+        self._first_samps = np.array([self.first_samp])
+        self._last_samps = np.array([self.last_samp])
+        self._raw_lengths = np.array([egi_info['n_samples']])
+        self.rawdirs = np.array([])
+        self.cals = np.ones(self.info['nchan'])
+        # use information from egi
+        self.orig_format = {'>f4': 'single', '>f8': 'double',
+                            '>i2': 'int'}[egi_info['dtype']]
+        logger.info('Ready.')
+
+    def __repr__(self):
+        n_chan = self.info['nchan']
+        data_range = self.last_samp - self.first_samp + 1
+        s = ('%r' % os.path.basename(self.info['filename']),
+             "n_channels x n_times : %s x %s" % (n_chan, data_range))
+        return "<RawEGI  |  %s>" % ', '.join(s)
diff --git a/mne/fiff/bti/tests/__init__.py b/mne/io/egi/tests/__init__.py
similarity index 100%
copy from mne/fiff/bti/tests/__init__.py
copy to mne/io/egi/tests/__init__.py
diff --git a/mne/io/egi/tests/data/test_egi.raw b/mne/io/egi/tests/data/test_egi.raw
new file mode 100644
index 0000000..9c123ab
Binary files /dev/null and b/mne/io/egi/tests/data/test_egi.raw differ
diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py
new file mode 100644
index 0000000..589df2f
--- /dev/null
+++ b/mne/io/egi/tests/test_egi.py
@@ -0,0 +1,80 @@
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: simplified BSD-3
+
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true, assert_raises, assert_equal
+
+from mne import find_events
+from mne.io import read_raw_egi
+from mne.io.egi import _combine_triggers
+from mne import pick_types
+from mne.io import Raw
+from mne.utils import _TempDir
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+tempdir = _TempDir()
+
+base_dir = op.join(op.dirname(op.realpath(__file__)), 'data')
+egi_fname = op.join(base_dir, 'test_egi.raw')
+
+
+def test_io_egi():
+    """Test importing EGI simple binary files"""
+    # test default
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always', category=RuntimeWarning)
+        _ = read_raw_egi(egi_fname, include=None)
+        assert_equal(len(w), 1)
+        assert_true(w[0].category == RuntimeWarning)
+        msg = 'Did not find any event code with more than one event.'
+        assert_true(msg in '%s' % w[0].message)
+
+    include = ['TRSP', 'XXX1']
+    raw = read_raw_egi(egi_fname, include=include)
+
+    _ = repr(raw)
+    _ = repr(raw.info)  # analysis:ignore, noqa
+
+    assert_equal('eeg' in raw, True)
+    out_fname = op.join(tempdir, 'test_egi_raw.fif')
+    raw.save(out_fname)
+
+    raw2 = Raw(out_fname, preload=True)
+    data1, times1 = raw[:10, :]
+    data2, times2 = raw2[:10, :]
+
+    assert_array_almost_equal(data1, data2)
+    assert_array_almost_equal(times1, times2)
+
+    eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
+    assert_equal(len(eeg_chan), 256)
+    picks = pick_types(raw.info, eeg=True)
+    assert_equal(len(picks), 256)
+    assert_equal('STI 014' in raw.ch_names, True)
+
+    events = find_events(raw, stim_channel='STI 014')
+    assert_equal(len(events), 2)  # ground truth
+    assert_equal(np.unique(events[:, 1])[0], 0)
+    assert_true(np.unique(events[:, 0])[0] != 0)
+    assert_true(np.unique(events[:, 2])[0] != 0)
+    triggers = np.array([[0, 1, 1, 0], [0, 0, 1, 0]])
+
+    # test trigger functionality
+    assert_raises(RuntimeError, _combine_triggers, triggers, None)
+    triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
+    events_ids = [12, 24]
+    new_trigger = _combine_triggers(triggers, events_ids)
+    assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))
+
+    assert_raises(ValueError, read_raw_egi, egi_fname,
+                  include=['Foo'])
+    assert_raises(ValueError, read_raw_egi, egi_fname,
+                  exclude=['Bar'])
+    for ii, k in enumerate(include, 1):
+        assert_true(k in raw.event_id)
+        assert_true(raw.event_id[k] == ii)
diff --git a/mne/io/fiff/__init__.py b/mne/io/fiff/__init__.py
new file mode 100644
index 0000000..084e30a
--- /dev/null
+++ b/mne/io/fiff/__init__.py
@@ -0,0 +1 @@
+from .raw import RawFIFF
diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py
new file mode 100644
index 0000000..0ed3348
--- /dev/null
+++ b/mne/io/fiff/raw.py
@@ -0,0 +1,598 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import copy
+import warnings
+import os
+import os.path as op
+
+import numpy as np
+
+from ..constants import FIFF
+from ..open import fiff_open, _fiff_get_fid
+from ..meas_info import read_meas_info
+from ..tree import dir_tree_find
+from ..tag import read_tag
+from ..proj import proj_equal
+from ..compensator import get_current_comp, set_current_comp, make_compensator
+from ..base import _BaseRaw
+
+from ...utils import check_fname, logger, verbose
+from ...externals.six import string_types
+
+
+class RawFIFF(_BaseRaw):
+    """Raw data
+
+    Parameters
+    ----------
+    fnames : list, or string
+        A list of the raw files to treat as a Raw instance, or a single
+        raw file. For files that have automatically been split, only the
+        name of the first file has to be specified. Filenames should end
+        with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz,
+        raw_tsss.fif or raw_tsss.fif.gz.
+    allow_maxshield : bool, (default False)
+        If True, allow loading of data that has been processed with
+        MaxShield. MaxShield-processed data should generally not be
+        loaded directly, but should be processed using SSS first.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    proj : bool
+        Apply the signal space projection (SSP) operators present in
+        the file to the data. Note: Once the projectors have been
+        applied, they can no longer be removed. It is usually not
+        recommended to apply the projectors at this point as they are
+        applied automatically later on (e.g. when computing inverse
+        solutions).
+    compensation : None | int
+        If None the compensation in the data is not modified.
+        If set to n, e.g. 3, apply gradient compensation of grade n as
+        for CTF systems.
+    add_eeg_ref : bool
+        If True, add average EEG reference projector (if it's not already
+        present).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    ch_names : list of string
+        List of channels' names.
+    n_times : int
+        Total number of time points in the raw file.
+    preload : bool
+        Indicates whether raw data are in memory.
+    verbose : bool, str, int, or None
+        See above.
+    """
+    @verbose
+    def __init__(self, fnames, allow_maxshield=False, preload=False,
+                 proj=False, compensation=None, add_eeg_ref=True,
+                 verbose=None):
+
+        if not isinstance(fnames, list):
+            fnames = [fnames]
+        fnames = [op.realpath(f) for f in fnames]
+        split_fnames = []
+
+        raws = []
+        for ii, fname in enumerate(fnames):
+            do_check_fname = fname not in split_fnames
+            raw, next_fname = self._read_raw_file(fname, allow_maxshield,
+                                                  preload, compensation,
+                                                  do_check_fname)
+            raws.append(raw)
+            if next_fname is not None:
+                if not op.exists(next_fname):
+                    logger.warning('Split raw file detected but next file %s '
+                                   'does not exist.' % next_fname)
+                    continue
+                if next_fname in fnames:
+                    # the user manually specified the split files
+                    logger.info('Note: %s is part of a split raw file. It is '
+                                'not necessary to manually specify the parts '
+                                'in this case; simply construct Raw using '
+                                'the name of the first file.' % next_fname)
+                    continue
+
+                # process this file next
+                fnames.insert(ii + 1, next_fname)
+                split_fnames.append(next_fname)
+
+        _check_raw_compatibility(raws)
+
+        # combine information from each raw file to construct self
+        self._filenames = [r.filename for r in raws]
+        self.first_samp = raws[0].first_samp  # meta first sample
+        self._first_samps = np.array([r.first_samp for r in raws])
+        self._last_samps = np.array([r.last_samp for r in raws])
+        self._raw_lengths = np.array([r.n_times for r in raws])
+        self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
+        self.cals = raws[0].cals
+        self.rawdirs = [r.rawdir for r in raws]
+        self.comp = copy.deepcopy(raws[0].comp)
+        self._orig_comp_grade = raws[0]._orig_comp_grade
+        self.info = copy.deepcopy(raws[0].info)
+        self.verbose = verbose
+        self.orig_format = raws[0].orig_format
+        self.proj = False
+        self._add_eeg_ref(add_eeg_ref)
+
+        if preload:
+            self._preload_data(preload)
+        else:
+            self.preload = False
+
+        self._projector = None
+        # setup the SSP projector
+        self.proj = proj
+        if proj:
+            self.apply_proj()
+
+    def _preload_data(self, preload):
+        """This function actually preloads the data"""
+        if isinstance(preload, string_types):
+            # we will use a memmap: preload is a filename
+            data_buffer = preload
+        else:
+            data_buffer = None
+
+        self._data, self._times = self._read_segment(data_buffer=data_buffer)
+        self.preload = True
+        # close files once data are preloaded
+        self.close()
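+    # Sketch: passing a file name as ``preload`` memory-maps the data
+    # instead of loading it into RAM (path is illustrative):
+    #
+    #     >>> raw = RawFIFF('sample_raw.fif', preload='/tmp/raw.dat')  # doctest: +SKIP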
+
+    @verbose
+    def _read_raw_file(self, fname, allow_maxshield, preload, compensation,
+                       do_check_fname=True, verbose=None):
+        """Read in header information from a raw file"""
+        logger.info('Opening raw data file %s...' % fname)
+
+        if do_check_fname:
+            check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif',
+                                       'raw_tsss.fif', 'raw.fif.gz',
+                                       'raw_sss.fif.gz', 'raw_tsss.fif.gz'))
+
+        #   Read in the whole file if preload is on and .fif.gz (saves time)
+        ext = os.path.splitext(fname)[1].lower()
+        whole_file = preload if '.gz' in ext else False
+        ff, tree, _ = fiff_open(fname, preload=whole_file)
+        with ff as fid:
+            #   Read the measurement info
+            info, meas = read_meas_info(fid, tree)
+
+            #   Locate the data of interest
+            raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
+            if len(raw_node) == 0:
+                raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
+                if allow_maxshield:
+                    raw_node = dir_tree_find(meas, FIFF.FIFFB_SMSH_RAW_DATA)
+                    if len(raw_node) == 0:
+                        raise ValueError('No raw data in %s' % fname)
+                else:
+                    if len(raw_node) == 0:
+                        raise ValueError('No raw data in %s' % fname)
+
+            if len(raw_node) == 1:
+                raw_node = raw_node[0]
+
+            #   Set up the output structure
+            info['filename'] = fname
+
+            #   Process the directory
+            directory = raw_node['directory']
+            nent = raw_node['nent']
+            nchan = int(info['nchan'])
+            first = 0
+            first_samp = 0
+            first_skip = 0
+
+            #   Get first sample tag if it is there
+            if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
+                tag = read_tag(fid, directory[first].pos)
+                first_samp = int(tag.data)
+                first += 1
+
+            #   Omit initial skip
+            if directory[first].kind == FIFF.FIFF_DATA_SKIP:
+                # This first skip can be applied only after we know the bufsize
+                tag = read_tag(fid, directory[first].pos)
+                first_skip = int(tag.data)
+                first += 1
+
+            raw = _RawShell()
+            raw.filename = fname
+            raw.first_samp = first_samp
+
+            #   Go through the remaining tags in the directory
+            rawdir = list()
+            nskip = 0
+            orig_format = None
+            for k in range(first, nent):
+                ent = directory[k]
+                if ent.kind == FIFF.FIFF_DATA_SKIP:
+                    tag = read_tag(fid, ent.pos)
+                    nskip = int(tag.data)
+                elif ent.kind == FIFF.FIFF_DATA_BUFFER:
+                    #   Figure out the number of samples in this buffer
+                    if ent.type == FIFF.FIFFT_DAU_PACK16:
+                        nsamp = ent.size // (2 * nchan)
+                    elif ent.type == FIFF.FIFFT_SHORT:
+                        nsamp = ent.size // (2 * nchan)
+                    elif ent.type == FIFF.FIFFT_FLOAT:
+                        nsamp = ent.size // (4 * nchan)
+                    elif ent.type == FIFF.FIFFT_DOUBLE:
+                        nsamp = ent.size // (8 * nchan)
+                    elif ent.type == FIFF.FIFFT_INT:
+                        nsamp = ent.size // (4 * nchan)
+                    elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                        nsamp = ent.size // (8 * nchan)
+                    elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                        nsamp = ent.size // (16 * nchan)
+                    else:
+                        raise ValueError('Cannot handle data buffers of type '
+                                         '%d' % ent.type)
+                    if orig_format is None:
+                        if ent.type == FIFF.FIFFT_DAU_PACK16:
+                            orig_format = 'short'
+                        elif ent.type == FIFF.FIFFT_SHORT:
+                            orig_format = 'short'
+                        elif ent.type == FIFF.FIFFT_FLOAT:
+                            orig_format = 'single'
+                        elif ent.type == FIFF.FIFFT_DOUBLE:
+                            orig_format = 'double'
+                        elif ent.type == FIFF.FIFFT_INT:
+                            orig_format = 'int'
+                        elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                            orig_format = 'single'
+                        elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                            orig_format = 'double'
+
+                    #  Do we have an initial skip pending?
+                    if first_skip > 0:
+                        first_samp += nsamp * first_skip
+                        raw.first_samp = first_samp
+                        first_skip = 0
+
+                    #  Do we have a skip pending?
+                    if nskip > 0:
+                        rawdir.append(dict(ent=None, first=first_samp,
+                                           last=first_samp + nskip * nsamp - 1,
+                                           nsamp=nskip * nsamp))
+                        first_samp += nskip * nsamp
+                        nskip = 0
+
+                    #  Add a data buffer
+                    rawdir.append(dict(ent=ent, first=first_samp,
+                                       last=first_samp + nsamp - 1,
+                                       nsamp=nsamp))
+                    first_samp += nsamp
+
+            # Try to get the next filename tag for split files
+            nodes_list = dir_tree_find(tree, FIFF.FIFFB_REF)
+            next_fname = None
+            for nodes in nodes_list:
+                next_fname = None
+                for ent in nodes['directory']:
+                    if ent.kind == FIFF.FIFF_REF_ROLE:
+                        tag = read_tag(fid, ent.pos)
+                        role = int(tag.data)
+                        if role != FIFF.FIFFV_ROLE_NEXT_FILE:
+                            next_fname = None
+                            break
+                    if ent.kind == FIFF.FIFF_REF_FILE_NAME:
+                        tag = read_tag(fid, ent.pos)
+                        next_fname = op.join(op.dirname(fname), tag.data)
+                    if ent.kind == FIFF.FIFF_REF_FILE_NUM:
+                        # Some files don't have the name, just the number. So
+                        # we construct the name from the current name.
+                        if next_fname is not None:
+                            continue
+                        next_num = read_tag(fid, ent.pos).data
+                        path, base = op.split(fname)
+                        idx = base.find('.')
+                        idx2 = base.rfind('-')
+                        if idx2 < 0 and next_num == 1:
+                            # this is the first file, which may not be numbered
+                            next_fname = op.join(path, '%s-%d.%s' % (base[:idx],
+                                next_num, base[idx + 1:]))
+                            continue
+                        num_str = base[idx2 + 1:idx]
+                        if not num_str.isdigit():
+                            continue
+                        next_fname = op.join(path, '%s-%d.%s' % (base[:idx2],
+                                             next_num, base[idx + 1:]))
+                if next_fname is not None:
+                    break
+
+        raw.last_samp = first_samp - 1
+        raw.orig_format = orig_format
+
+        #   Add the calibration factors
+        cals = np.zeros(info['nchan'])
+        for k in range(info['nchan']):
+            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
+
+        raw.cals = cals
+        raw.rawdir = rawdir
+        raw.comp = None
+        raw._orig_comp_grade = None
+
+        #   Set up the CTF compensator
+        current_comp = get_current_comp(info)
+        if current_comp is not None:
+            logger.info('Current compensation grade : %d' % current_comp)
+
+        if compensation is not None:
+            raw.comp = make_compensator(info, current_comp, compensation)
+            if raw.comp is not None:
+                logger.info('Appropriate compensator added to change to '
+                            'grade %d.' % (compensation))
+                raw._orig_comp_grade = current_comp
+                set_current_comp(info, compensation)
+
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                    raw.first_samp, raw.last_samp,
+                    float(raw.first_samp) / info['sfreq'],
+                    float(raw.last_samp) / info['sfreq']))
+
+        # store the original buffer size
+        info['buffer_size_sec'] = (np.median([r['nsamp'] for r in rawdir])
+                                   / info['sfreq'])
+
+        raw.info = info
+        raw.verbose = verbose
+
+        logger.info('Ready.')
+
+        return raw, next_fname
+
+    def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
+                      verbose=None, projector=None):
+        """Read a chunk of raw data
+
+        Parameters
+        ----------
+        start : int, (optional)
+            first sample to include (first is 0). If omitted, defaults to the
+            first sample in data.
+        stop : int, (optional)
+            First sample to not include.
+            If omitted, data is included to the end.
+        sel : array, optional
+            Indices of channels to select.
+        data_buffer : array or str, optional
+            numpy array to fill with data read, must have the correct shape.
+            If str, a np.memmap with the correct data type will be used
+            to store the data.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        projector : array
+            SSP operator to apply to the data.
+
+        Returns
+        -------
+        data : array, [channels x samples]
+           the data matrix (channels x samples).
+        times : array, [samples]
+            returns the time values corresponding to the samples.
+        """
+        #  Initial checks
+        start = int(start)
+        stop = self.n_times if stop is None else min([int(stop), self.n_times])
+
+        if start >= stop:
+            raise ValueError('No data in this range')
+
+        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
+                    (start, stop - 1, start / float(self.info['sfreq']),
+                     (stop - 1) / float(self.info['sfreq'])))
+
+        #  Initialize the data and calibration vector
+        nchan = self.info['nchan']
+
+        n_sel_channels = nchan if sel is None else len(sel)
+        # convert sel to a slice if possible for efficiency
+        if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
+            sel = slice(sel[0], sel[-1] + 1)
+        idx = slice(None, None, None) if sel is None else sel
+        data_shape = (n_sel_channels, stop - start)
+        if isinstance(data_buffer, np.ndarray):
+            if data_buffer.shape != data_shape:
+                raise ValueError('data_buffer has incorrect shape')
+            data = data_buffer
+        else:
+            data = None  # we will allocate it later, once we know the type
+
+        mult = list()
+        for ri in range(len(self._raw_lengths)):
+            mult.append(np.diag(self.cals.ravel()))
+            if self.comp is not None:
+                mult[ri] = np.dot(self.comp, mult[ri])
+            if projector is not None:
+                mult[ri] = np.dot(projector, mult[ri])
+            mult[ri] = mult[ri][idx]
+
+        # deal with having multiple files accessed by the raw object
+        cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
+                                                   dtype='int')))
+        cumul_lens = np.cumsum(cumul_lens)
+        files_used = np.logical_and(np.less(start, cumul_lens[1:]),
+                                    np.greater_equal(stop - 1,
+                                                     cumul_lens[:-1]))
+
+        first_file_used = False
+        s_off = 0
+        dest = 0
+        if isinstance(idx, slice):
+            cals = self.cals.ravel()[idx][:, np.newaxis]
+        else:
+            cals = self.cals.ravel()[:, np.newaxis]
+
+        for fi in np.nonzero(files_used)[0]:
+            start_loc = self._first_samps[fi]
+            # first iteration (only) could start in the middle somewhere
+            if not first_file_used:
+                first_file_used = True
+                start_loc += start - cumul_lens[fi]
+            stop_loc = np.min([stop - 1 - cumul_lens[fi] +
+                               self._first_samps[fi], self._last_samps[fi]])
+            if start_loc < self._first_samps[fi]:
+                raise ValueError('Bad array indexing, could be a bug')
+            if stop_loc > self._last_samps[fi]:
+                raise ValueError('Bad array indexing, could be a bug')
+            if stop_loc < start_loc:
+                raise ValueError('Bad array indexing, could be a bug')
+            len_loc = stop_loc - start_loc + 1
+            fid = _fiff_get_fid(self._filenames[fi])
+
+            for this in self.rawdirs[fi]:
+
+                #  Do we need this buffer
+                if this['last'] >= start_loc:
+                    #  The picking logic is a bit complicated
+                    if stop_loc > this['last'] and start_loc < this['first']:
+                        #    We need the whole buffer
+                        first_pick = 0
+                        last_pick = this['nsamp']
+                        logger.debug('W')
+
+                    elif start_loc >= this['first']:
+                        first_pick = start_loc - this['first']
+                        if stop_loc <= this['last']:
+                            #   Something from the middle
+                            last_pick = this['nsamp'] + stop_loc - this['last']
+                            logger.debug('M')
+                        else:
+                            #   From the middle to the end
+                            last_pick = this['nsamp']
+                            logger.debug('E')
+                    else:
+                        #    From the beginning to the middle
+                        first_pick = 0
+                        last_pick = stop_loc - this['first'] + 1
+                        logger.debug('B')
+
+                    #   Now we are ready to pick
+                    picksamp = last_pick - first_pick
+                    if picksamp > 0:
+                        # only read data if it exists
+                        if this['ent'] is not None:
+                            one = read_tag(fid, this['ent'].pos,
+                                           shape=(this['nsamp'], nchan),
+                                           rlims=(first_pick, last_pick)).data
+                            if np.isrealobj(one):
+                                dtype = np.float
+                            else:
+                                dtype = np.complex128
+                            one.shape = (picksamp, nchan)
+                            one = one.T.astype(dtype)
+                            # use proj + cal factors in mult
+                            if mult is not None:
+                                one[idx] = np.dot(mult[fi], one)
+                            else:  # apply just the calibration factors
+                                # this logic is designed to limit memory copies
+                                if isinstance(idx, slice):
+                                    # This is a view operation, so it's fast
+                                    one[idx] *= cals
+                                else:
+                                    # Extra operations are actually faster here
+                                    # than creating a new array
+                                    # (fancy indexing)
+                                    one *= cals
+
+                            # if not already done, allocate array with
+                            # right type
+                            data = _allocate_data(data, data_buffer,
+                                                  data_shape, dtype)
+                            if isinstance(idx, slice):
+                                # faster to slice in data than doing
+                                # one = one[idx] sooner
+                                data[:, dest:(dest + picksamp)] = one[idx]
+                            else:
+                                # faster than doing one = one[idx]
+                                data_view = data[:, dest:(dest + picksamp)]
+                                for ii, ix in enumerate(idx):
+                                    data_view[ii] = one[ix]
+                        dest += picksamp
+
+                #   Done?
+                if this['last'] >= stop_loc:
+                    # if not already done, allocate array with float dtype
+                    data = _allocate_data(data, data_buffer, data_shape,
+                                          np.float)
+                    break
+
+            fid.close()  # clean it up
+            s_off += len_loc
+            # double-check our math
+            if not s_off == dest:
+                raise ValueError('Incorrect file reading')
+
+        logger.info('[done]')
+        times = np.arange(start, stop) / self.info['sfreq']
+
+        return data, times
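+# For reference, the per-buffer transform applied above is equivalent
+# to the following sketch (names mirror the locals in _read_segment):
+#
+#     mult = np.diag(cals.ravel())          # calibration factors
+#     if comp is not None:
+#         mult = np.dot(comp, mult)         # CTF compensation
+#     if projector is not None:
+#         mult = np.dot(projector, mult)    # SSP projection
+#     one = np.dot(mult[idx], one)          # applied to one data buffer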
+
+
+def _allocate_data(data, data_buffer, data_shape, dtype):
+    if data is None:
+        # if not already done, allocate array with right type
+        if isinstance(data_buffer, string_types):
+            # use a memmap
+            data = np.memmap(data_buffer, mode='w+',
+                             dtype=dtype, shape=data_shape)
+        else:
+            data = np.zeros(data_shape, dtype=dtype)
+    return data
+
+
+class _RawShell():
+    """Used for creating a temporary raw object"""
+    def __init__(self):
+        self.first_samp = None
+        self.last_samp = None
+        self.cals = None
+        self.rawdir = None
+        self._projector = None
+
+    @property
+    def n_times(self):
+        return self.last_samp - self.first_samp + 1
+
+
+def _check_raw_compatibility(raw):
+    """Check to make sure all instances of Raw
+    in the input list raw have compatible parameters"""
+    for ri in range(1, len(raw)):
+        if not raw[ri].info['nchan'] == raw[0].info['nchan']:
+            raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
+        if not raw[ri].info['bads'] == raw[0].info['bads']:
+            raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
+        if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
+            raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
+        if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
+            raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
+        if not all(raw[ri].cals == raw[0].cals):
+            raise ValueError('raw[%d].cals must match' % ri)
+        if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
+            raise ValueError('SSP projectors in raw files must be the same')
+        if not all(proj_equal(p1, p2) for p1, p2 in
+                   zip(raw[0].info['projs'], raw[ri].info['projs'])):
+            raise ValueError('SSP projectors in raw files must be the same')
+    if not all([r.orig_format == raw[0].orig_format for r in raw]):
+        warnings.warn('raw files do not all have the same data format, '
+                      'could result in precision mismatch. Setting '
+                      'raw.orig_format="unknown"')
+        raw[0].orig_format = 'unknown'
diff --git a/mne/fiff/bti/tests/__init__.py b/mne/io/fiff/tests/__init__.py
similarity index 100%
copy from mne/fiff/bti/tests/__init__.py
copy to mne/io/fiff/tests/__init__.py
diff --git a/mne/fiff/tests/test_raw.py b/mne/io/fiff/tests/test_raw.py
similarity index 81%
rename from mne/fiff/tests/test_raw.py
rename to mne/io/fiff/tests/test_raw.py
index 2cf4d18..8302b74 100644
--- a/mne/fiff/tests/test_raw.py
+++ b/mne/io/fiff/tests/test_raw.py
@@ -1,27 +1,35 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#         Denis Engemann <d.engemann at fz-juelich.de>
+from __future__ import print_function
+
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
 import os
 import os.path as op
+import glob
 from copy import deepcopy
 import warnings
 
 import numpy as np
 from numpy.testing import (assert_array_almost_equal, assert_array_equal,
                            assert_allclose)
-from nose.tools import assert_true, assert_raises, assert_equal
-
-from mne.fiff import (Raw, pick_types, pick_channels, concatenate_raws, FIFF,
-                      get_chpi_positions, set_eeg_reference)
-from mne import concatenate_events, find_events
-from mne.utils import (_TempDir, requires_nitime, requires_pandas, requires_mne,
-                       run_subprocess)
+from nose.tools import (assert_true, assert_raises, assert_equal,
+                        assert_not_equal)
+
+from mne import pick_types, pick_channels
+from mne.io.constants import FIFF
+from mne.io import (Raw, concatenate_raws,
+                    get_chpi_positions, set_eeg_reference)
+from mne import concatenate_events, find_events, equalize_channels
+from mne.utils import (_TempDir, requires_nitime, requires_pandas,
+                       requires_mne, run_subprocess)
+from mne.externals.six.moves import zip
+from mne.externals.six.moves import cPickle as pickle
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-base_dir = op.join(op.dirname(__file__), 'data')
+base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
 fif_fname = op.join(base_dir, 'test_raw.fif')
 fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
 ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
@@ -35,6 +43,48 @@ hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
 tempdir = _TempDir()
 
 
+def test_hash_raw():
+    """Test hashing raw objects
+    """
+    raw = Raw(fif_fname)
+    assert_raises(RuntimeError, raw.__hash__)
+    raw = Raw(fif_fname, preload=True).crop(0, 0.5)
+    raw_2 = Raw(fif_fname, preload=True).crop(0, 0.5)
+    assert_equal(hash(raw), hash(raw_2))
+    # do NOT use assert_equal here, failing output is terrible
+    assert_true(pickle.dumps(raw) == pickle.dumps(raw_2))
+
+    raw_2._data[0, 0] -= 1
+    assert_not_equal(hash(raw), hash(raw_2))
+
+
+def test_subject_info():
+    """Test reading subject information
+    """
+    raw = Raw(fif_fname)
+    raw.crop(0, 1, False)
+    assert_true(raw.info['subject_info'] is None)
+    # fake some subject data
+    keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
+            'hand']
+    vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
+    subject_info = dict()
+    for key, val in zip(keys, vals):
+        subject_info[key] = val
+    raw.info['subject_info'] = subject_info
+    out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
+    raw.save(out_fname, overwrite=True)
+    raw_read = Raw(out_fname)
+    for key in keys:
+        assert_equal(subject_info[key], raw_read.info['subject_info'][key])
+    raw_read.anonymize()
+    assert_true(raw_read.info.get('subject_info') is None)
+    out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
+    raw_read.save(out_fname_anon, overwrite=True)
+    raw_read = Raw(out_fname_anon)
+    assert_true(raw_read.info.get('subject_info') is None)
+
+
 def test_get_chpi():
     """Test CHPI position computation
     """
@@ -66,10 +116,13 @@ def test_rank_estimation():
     """Test raw rank estimation
     """
     raw = Raw(fif_fname)
-    n_meg = len(pick_types(raw.info, meg=True, eeg=False, exclude='bads'))
-    n_eeg = len(pick_types(raw.info, meg=False, eeg=True, exclude='bads'))
+    picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
+    n_meg = len(picks_meg)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+    n_eeg = len(picks_eeg)
     raw = Raw(fif_fname, preload=True)
     assert_array_equal(raw.estimate_rank(), n_meg + n_eeg)
+    assert_array_equal(raw.estimate_rank(picks=picks_eeg), n_eeg)
     raw = Raw(fif_fname, preload=False)
     raw.apply_proj()
     n_proj = len(raw.info['projs'])
@@ -131,7 +184,9 @@ def test_multiple_files():
     first_samps = [r.first_samp for r in raws]
 
     # test concatenation of split file
-    all_raw_1 = concatenate_raws(raws, preload=False)
+    assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
+    all_raw_1, events1 = concatenate_raws(raws, preload=False,
+                                          events_list=events)
     assert_true(raw.first_samp == all_raw_1.first_samp)
     assert_true(raw.last_samp == all_raw_1.last_samp)
     assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
@@ -140,15 +195,16 @@ def test_multiple_files():
     assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
 
     # test proper event treatment for split files
-    events = concatenate_events(events, first_samps, last_samps)
-    events2 = find_events(all_raw_2, stim_channel='STI 014')
-    assert_array_equal(events, events2)
+    events2 = concatenate_events(events, first_samps, last_samps)
+    events3 = find_events(all_raw_2, stim_channel='STI 014')
+    assert_array_equal(events1, events2)
+    assert_array_equal(events1, events3)
 
     # test various methods of combining files
     raw = Raw(fif_fname, preload=True)
     n_times = len(raw._times)
     # make sure that all our data match
-    times = range(0, 2 * n_times, 999)
+    times = list(range(0, 2 * n_times, 999))
     # add potentially problematic points
     times.extend([n_times - 1, n_times, 2 * n_times - 1])
 
@@ -166,14 +222,14 @@ def test_multiple_files():
     # with all data preloaded, result should be preloaded
     raw_combo = Raw(fif_fname, preload=True)
     raw_combo.append(Raw(fif_fname, preload=True))
-    assert_true(raw_combo._preloaded is True)
+    assert_true(raw_combo.preload is True)
     assert_true(len(raw_combo._times) == raw_combo._data.shape[1])
     _compare_combo(raw, raw_combo, times, n_times)
 
     # with any data not preloaded, don't set result as preloaded
     raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
                                   Raw(fif_fname, preload=False)])
-    assert_true(raw_combo._preloaded is False)
+    assert_true(raw_combo.preload is False)
     assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
                        find_events(raw_combo0, stim_channel='STI 014'))
     _compare_combo(raw, raw_combo, times, n_times)
@@ -182,7 +238,7 @@ def test_multiple_files():
     raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
                                   Raw(fif_fname, preload=True)],
                                  preload=True)
-    assert_true(raw_combo._preloaded is True)
+    assert_true(raw_combo.preload is True)
     _compare_combo(raw, raw_combo, times, n_times)
 
     raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
@@ -218,6 +274,30 @@ def test_multiple_files():
     assert_true(len(raw) == raw.last_samp - raw.first_samp + 1)
 
 
+def test_split_files():
+    """Test writing and reading of split raw files
+    """
+    raw_1 = Raw(fif_fname, preload=True)
+    split_fname = op.join(tempdir, 'split_raw.fif')
+    raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
+
+    raw_2 = Raw(split_fname)
+    data_1, times_1 = raw_1[:, :]
+    data_2, times_2 = raw_2[:, :]
+    assert_array_equal(data_1, data_2)
+    assert_array_equal(times_1, times_2)
+
+    # test the case where the silly user specifies the split files
+    fnames = [split_fname]
+    fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        raw_2 = Raw(fnames)
+    data_2, times_2 = raw_2[:, :]
+    assert_array_equal(data_1, data_2)
+    assert_array_equal(times_1, times_2)
+
+
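# Sketch of the split-save round trip tested above (hypothetical paths):
# parts beyond `split_size` are auto-named 'split_raw-1.fif', 'split_raw-2.fif',
# etc., and the reader follows the chain from the first file automatically.
raw = Raw('sample_raw.fif', preload=True)
raw.save('split_raw.fif', split_size='10MB')
raw_back = Raw('split_raw.fif')  # no need to list the '-N' parts explicitly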
 def test_load_bad_channels():
     """Test reading/writing of bad channels
     """
@@ -242,8 +322,10 @@ def test_load_bad_channels():
 
     # Test forcing the bad case
     with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         raw.load_bad_channels(bad_file_wrong, force=True)
-        assert_equal(len(w), 1)
+        n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
+        assert_equal(n_found, 1)  # there could be other irrelevant warnings
         # write it out, read it in, and check
         raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
         raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
@@ -259,6 +341,16 @@ def test_load_bad_channels():
 def test_io_raw():
     """Test IO for raw data (Neuromag + CTF + gz)
     """
+    # test unicode io
+    for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
+        with Raw(fif_fname) as r:
+            desc1 = r.info['description'] = chars.decode('utf-8')
+            temp_file = op.join(tempdir, 'raw.fif')
+            r.save(temp_file, overwrite=True)
+            with Raw(temp_file) as r2:
+                desc2 = r2.info['description']
+            assert_equal(desc1, desc2)
+
     # Let's construct a simple test for IO first
     raw = Raw(fif_fname, preload=True)
     raw.crop(0, 3.5)
@@ -348,6 +440,14 @@ def test_io_raw():
         if fname_in == fif_fname or fname_in == fif_fname + '.gz':
             assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
 
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        raw.save(raw_badname)
+        Raw(raw_badname)
+    assert_true(len(w) > 0)  # len(w) should be 2 but Travis sometimes has more
+
 
 def test_io_complex():
     """Test IO with complex data types
@@ -369,6 +469,7 @@ def test_io_complex():
         raw_cp._data[picks, start:stop] += imag_rand
         # this should throw an error because it's complex
         with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
             raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
                         overwrite=True)
             # warning gets thrown on every instance b/c simplefilter('always')
@@ -461,7 +562,7 @@ def test_proj():
 
 
 def test_preload_modify():
-    """ Test preloading and modifying data
+    """Test preloading and modifying data
     """
     for preload in [False, True, 'memmap.dat']:
         raw = Raw(fif_fname, preload=preload)
@@ -469,10 +570,10 @@ def test_preload_modify():
         nsamp = raw.last_samp - raw.first_samp + 1
         picks = pick_types(raw.info, meg='grad', exclude='bads')
 
-        data = np.random.randn(len(picks), nsamp / 2)
+        data = np.random.randn(len(picks), nsamp // 2)
 
         try:
-            raw[picks, :nsamp / 2] = data
+            raw[picks, :nsamp // 2] = data
         except RuntimeError as err:
             if not preload:
                 continue
@@ -489,7 +590,8 @@ def test_preload_modify():
 
 
 def test_filter():
-    """ Test filtering (FIR and IIR) and Raw.apply_function interface """
+    """Test filtering (FIR and IIR) and Raw.apply_function interface
+    """
     raw = Raw(fif_fname, preload=True).crop(0, 7, False)
     sig_dec = 11
     sig_dec_notch = 12
@@ -541,7 +643,8 @@ def test_filter():
 
     # do a very simple check on line filtering
     raw_bs = raw.copy()
-    with warnings.catch_warnings(True) as _:
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
         raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
         data_bs, _ = raw_bs[picks, :]
         raw_notch = raw.copy()
@@ -601,15 +704,16 @@ def test_crop():
 
 
 def test_resample():
-    """ Test resample (with I/O and multiple files) """
+    """Test resample (with I/O and multiple files)
+    """
     raw = Raw(fif_fname, preload=True).crop(0, 3, False)
     raw_resamp = raw.copy()
     sfreq = raw.info['sfreq']
     # test parallel on upsample
     raw_resamp.resample(sfreq * 2, n_jobs=2)
     assert_true(raw_resamp.n_times == len(raw_resamp._times))
-    raw_resamp.save(op.join(tempdir, 'raw_resamp.fif'))
-    raw_resamp = Raw(op.join(tempdir, 'raw_resamp.fif'), preload=True)
+    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
+    raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
     assert_true(sfreq == raw_resamp.info['sfreq'] / 2)
     assert_true(raw.n_times == raw_resamp.n_times / 2)
     assert_true(raw_resamp._data.shape[1] == raw_resamp.n_times)
@@ -652,7 +756,8 @@ def test_resample():
 
 
 def test_hilbert():
-    """ Test computation of analytic signal using hilbert """
+    """Test computation of analytic signal using hilbert
+    """
     raw = Raw(fif_fname, preload=True)
     picks_meg = pick_types(raw.info, meg=True, exclude='bads')
     picks = picks_meg[:4]
@@ -666,7 +771,8 @@ def test_hilbert():
 
 
 def test_raw_copy():
-    """ Test Raw copy"""
+    """Test Raw copy
+    """
     raw = Raw(fif_fname, preload=True)
     data, _ = raw[:, :]
     copied = raw.copy()
@@ -764,7 +870,7 @@ def test_save():
     assert_raises(IOError, raw.save, fif_fname)
 
     # test abspath support
-    new_fname = op.join(op.abspath(op.curdir), 'break.fif')
+    new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
     raw.save(op.join(tempdir, new_fname), overwrite=True)
     new_raw = Raw(op.join(tempdir, new_fname), preload=False)
     assert_raises(ValueError, new_raw.save, new_fname)
@@ -778,7 +884,7 @@ def test_with_statement():
     """ Test with statement """
     for preload in [True, False]:
         with Raw(fif_fname, preload=preload) as raw_:
-            print raw_
+            print(raw_)
 
 
 def test_compensation_raw():
@@ -866,3 +972,61 @@ def test_set_eeg_reference():
     reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
                                         copy=False)
     assert_true(raw is reref)
+
+
+def test_drop_channels_mixin():
+    """Test channels-dropping functionality
+    """
+    raw = Raw(fif_fname, preload=True)
+    drop_ch = raw.ch_names[:3]
+    ch_names = raw.ch_names[3:]
+
+    ch_names_orig = raw.ch_names
+    dummy = raw.drop_channels(drop_ch, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, raw.ch_names)
+    assert_equal(len(ch_names_orig), raw._data.shape[0])
+
+    raw.drop_channels(drop_ch)
+    assert_equal(ch_names, raw.ch_names)
+    assert_equal(len(ch_names), len(raw.cals))
+    assert_equal(len(ch_names), raw._data.shape[0])
+
+
+def test_pick_channels_mixin():
+    """Test channel-picking functionality
+    """
+    # preload is True
+
+    raw = Raw(fif_fname, preload=True)
+    ch_names = raw.ch_names[:3]
+
+    ch_names_orig = raw.ch_names
+    dummy = raw.pick_channels(ch_names, copy=True)  # copy is True
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, raw.ch_names)
+    assert_equal(len(ch_names_orig), raw._data.shape[0])
+
+    raw.pick_channels(ch_names, copy=False)  # copy is False
+    assert_equal(ch_names, raw.ch_names)
+    assert_equal(len(ch_names), len(raw.cals))
+    assert_equal(len(ch_names), raw._data.shape[0])
+
+    raw = Raw(fif_fname, preload=False)
+    assert_raises(RuntimeError, raw.pick_channels, ch_names)
+    assert_raises(RuntimeError, raw.drop_channels, ch_names)
+
+
+def test_equalize_channels():
+    """Test equalization of channels
+    """
+    raw1 = Raw(fif_fname, preload=True)
+
+    raw2 = raw1.copy()
+    ch_names = raw1.ch_names[2:]
+    raw1.drop_channels(raw1.ch_names[:1])
+    raw2.drop_channels(raw2.ch_names[1:2])
+    my_comparison = [raw1, raw2]
+    equalize_channels(my_comparison)
+    for e in my_comparison:
+        assert_equal(ch_names, e.ch_names)
diff --git a/mne/fiff/kit/__init__.py b/mne/io/kit/__init__.py
similarity index 62%
rename from mne/fiff/kit/__init__.py
rename to mne/io/kit/__init__.py
index 32c546b..6bb193a 100644
--- a/mne/fiff/kit/__init__.py
+++ b/mne/io/kit/__init__.py
@@ -4,8 +4,9 @@
 #
 # License: BSD (3-clause)
 
+from ...coreg import read_elp  # for backwards compatibility
 from .kit import read_raw_kit
-from .coreg import read_elp, read_hsp, read_mrk, write_hsp, write_mrk
+from .coreg import read_hsp, read_mrk, write_hsp, write_mrk
 from . import kit
 from . import coreg
 from . import constants
diff --git a/mne/fiff/kit/constants.py b/mne/io/kit/constants.py
similarity index 100%
rename from mne/fiff/kit/constants.py
rename to mne/io/kit/constants.py
diff --git a/mne/fiff/kit/coreg.py b/mne/io/kit/coreg.py
similarity index 76%
rename from mne/fiff/kit/coreg.py
rename to mne/io/kit/coreg.py
index cddba6e..a9075c1 100644
--- a/mne/fiff/kit/coreg.py
+++ b/mne/io/kit/coreg.py
@@ -5,18 +5,17 @@
 # License: BSD (3-clause)
 
 from datetime import datetime
-import cPickle as pickle
+from ...externals.six.moves import cPickle as pickle
 import os
 from os import SEEK_CUR
 import re
 from struct import unpack
 
 import numpy as np
-from scipy.linalg import norm
 
 from ... import __version__
-from ...transforms import translation
 from .constants import KIT
+from ...externals.six import b
 
 
 def read_mrk(fname):
@@ -35,7 +34,7 @@ def read_mrk(fname):
     """
     ext = os.path.splitext(fname)[-1]
     if ext in ('.sqd', '.mrk'):
-        with open(fname, 'r') as fid:
+        with open(fname, 'rb', buffering=0) as fid:
             fid.seek(KIT.MRK_INFO)
             mrk_offset = unpack('i', fid.read(KIT.INT))[0]
             fid.seek(mrk_offset)
@@ -51,7 +50,7 @@ def read_mrk(fname):
     elif ext == '.txt':
         mrk_points = np.loadtxt(fname)
     elif ext == '.pickled':
-        with open(fname) as fid:
+        with open(fname, 'rb') as fid:
             food = pickle.load(fid)
         try:
             mrk_points = food['mrk']
@@ -92,7 +91,7 @@ def write_mrk(fname, points):
         raise ValueError(err)
 
     if ext == '.pickled':
-        with open(fname, 'w') as fid:
+        with open(fname, 'wb') as fid:
             pickle.dump({'mrk': mrk}, fid, pickle.HIGHEST_PROTOCOL)
     elif ext == '.txt':
         np.savetxt(fname, mrk, fmt='%.18e', delimiter='\t', newline='\n')
@@ -101,34 +100,6 @@ def write_mrk(fname, points):
         raise ValueError(err)
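# Why the binary modes above matter (illustrative sketch; hypothetical path):
# on Python 3 pickle streams are bytes, so a text-mode handle would raise.
import pickle
with open('points.pickled', 'wb') as fid:   # 'w' breaks under Python 3
    pickle.dump({'mrk': [[0.0, 0.0, 0.0]]}, fid, pickle.HIGHEST_PROTOCOL)
with open('points.pickled', 'rb') as fid:
    mrk = pickle.load(fid)['mrk']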
 
 
-def read_elp(fname):
-    """ELP point extraction in Polhemus head space
-
-    Parameters
-    ----------
-    fname : str
-        Absolute path to laser point file acquired from Polhemus system.
-        File formats allowed: *.txt
-
-    Returns
-    -------
-    elp_points : numpy.array, shape = (n_points, 3)
-        Fiducial and marker points in Polhemus head space.
-    """
-    pattern = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
-    elp_points = pattern.findall(open(fname).read())
-    elp_points = np.array(elp_points, dtype=float)
-    if elp_points.shape[1] != 3:
-        err = ("File %r does not contain 3 columns as required; got shape "
-               "%s." % (fname, elp_points.shape))
-        raise ValueError(err)
-    elif len(elp_points) < 8:
-        err = ("File %r contains fewer than 8 points; got shape "
-               "%s." % (fname, elp_points.shape))
-        raise ValueError(err)
-    return elp_points
-
-
 def read_hsp(fname):
     """Read a Polhemus ascii head shape file
 
@@ -166,12 +137,12 @@ def write_hsp(fname, pts):
         err = "pts must be of shape (n_pts, 3), not %r" % str(pts.shape)
         raise ValueError(err)
 
-    with open(fname, 'w') as fid:
+    with open(fname, 'wb') as fid:
         version = __version__
         now = datetime.now().strftime("%I:%M%p on %B %d, %Y")
-        fid.write("% Ascii 3D points file created by mne-python version "
-                  "{version} at {now}\n".format(version=version, now=now))
-        fid.write("% {N} 3D points, x y z per line\n".format(N=len(pts)))
+        fid.write(b("% Ascii 3D points file created by mne-python version "
+                    "{version} at {now}\n".format(version=version, now=now)))
+        fid.write(b("% {N} 3D points, x y z per line\n".format(N=len(pts))))
         np.savetxt(fid, pts, '%8.2f', ' ')
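# Sketch of the six.b() pattern used above: with the file opened in binary
# mode, header strings must be byte-encoded to run on both Python 2 and 3.
from mne.externals.six import b
with open('headshape.txt', 'wb') as fid:  # hypothetical output path
    fid.write(b("% 3 3D points, x y z per line\n"))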
 
 
diff --git a/mne/fiff/kit/kit.py b/mne/io/kit/kit.py
similarity index 91%
rename from mne/fiff/kit/kit.py
rename to mne/io/kit/kit.py
index b3ba1ad..5d7a5ce 100644
--- a/mne/fiff/kit/kit.py
+++ b/mne/io/kit/kit.py
@@ -16,29 +16,31 @@ import time
 import numpy as np
 from scipy import linalg
 
-from ...fiff import pick_types
-from ...coreg import fit_matched_points, _decimate_points
-from ...coreg import get_ras_to_neuromag_trans
+from ..pick import pick_types
+from ...coreg import (read_elp, fit_matched_points, _decimate_points,
+                      get_ras_to_neuromag_trans)
 from ...utils import verbose, logger
 from ...transforms import apply_trans, als_ras_trans, als_ras_trans_mm
-from ..raw import Raw
+from ..base import _BaseRaw
 from ..constants import FIFF
 from ..meas_info import Info
 from ..tag import _loc_to_trans
 from .constants import KIT, KIT_NY, KIT_AD
-from .coreg import read_elp, read_hsp, read_mrk
+from .coreg import read_hsp, read_mrk
+from ...externals.six import string_types
 
 
-class RawKIT(Raw):
+class RawKIT(_BaseRaw):
     """Raw object from KIT SQD file adapted from bti/raw.py
 
     Parameters
     ----------
     input_fname : str
         Path to the sqd file.
-    mrk : None | str | array_like, shape = (5, 3)
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
         Marker points representing the location of the marker coils with
         respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
     elp : None | str | array_like, shape = (8, 3)
         Digitizer points representing the location of the fiducials and the
         marker coils with respect to the digitized head shape, or path to a
@@ -60,15 +62,15 @@ class RawKIT(Raw):
     stimthresh : float
         The threshold level for accepting voltage changes in KIT trigger
         channels as a trigger event.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
     preload : bool
         If True, all data are loaded at initialization.
         If False, data are not read until save.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     See Also
     --------
-    mne.fiff.Raw : Documentation of attribute and methods.
+    mne.io.Raw : Documentation of attribute and methods.
     """
     @verbose
     def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
@@ -82,8 +84,7 @@ class RawKIT(Raw):
 
         # Raw attributes
         self.verbose = verbose
-        self._preloaded = False
-        self.fids = list()
+        self.preload = False
         self._projector = None
         self.first_samp = 0
         self.last_samp = self._sqd_params['nsamples'] - 1
@@ -107,13 +108,18 @@ class RawKIT(Raw):
         self.info['filename'] = None
         self.info['ctf_head_t'] = None
         self.info['dev_ctf_t'] = []
-        self.info['filenames'] = []
+        self._filenames = []
         self.info['dig'] = None
         self.info['dev_head_t'] = None
 
-        if (mrk and elp and hsp):
+        if isinstance(mrk, list):
+            mrk = [read_mrk(marker) if isinstance(marker, string_types)
+                   else marker for marker in mrk]
+            mrk = np.mean(mrk, axis=0)
+
+        if (mrk is not None and elp is not None and hsp is not None):
             self._set_dig_kit(mrk, elp, hsp)
-        elif (mrk or elp or hsp):
+        elif (mrk is not None or elp is not None or hsp is not None):
             err = ("mrk, elp and hsp need to be provided as a group (all or "
                    "none)")
             raise ValueError(err)
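# Equivalent of the marker averaging above (sketch; paths hypothetical):
# each file yields a (5, 3) marker array, averaged element-wise across files.
import numpy as np
from mne.io.kit import read_mrk
mrk = np.mean([read_mrk(p) for p in ['pre_mrk.sqd', 'post_mrk.sqd']], axis=0)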
@@ -204,7 +210,7 @@ class RawKIT(Raw):
 
         self._set_stimchannels(stim, slope)
         if preload:
-            self._preloaded = preload
+            self.preload = preload
             logger.info('Reading raw data from %s...' % input_fname)
             self._data, _ = self._read_segment()
             assert len(self._data) == self.info['nchan']
@@ -237,7 +243,8 @@ class RawKIT(Raw):
     def __repr__(self):
         s = ('%r' % os.path.basename(self._sqd_params['fname']),
              "n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
-                                       self.last_samp - self.first_samp + 1))
+                                                 self.last_samp -
+                                                 self.first_samp + 1))
         return "<RawKIT  |  %s>" % ', '.join(s)
 
     def read_stim_ch(self, buffer_size=1e5):
@@ -294,7 +301,7 @@ class RawKIT(Raw):
             returns the time values corresponding to the samples.
         """
         if sel is None:
-            sel = range(self.info['nchan'])
+            sel = list(range(self.info['nchan']))
         elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
             return (666, 666)
         if projector is not None:
@@ -313,9 +320,9 @@ class RawKIT(Raw):
 
         logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
                     (start, stop - 1, start / float(self.info['sfreq']),
-                               (stop - 1) / float(self.info['sfreq'])))
+                     (stop - 1) / float(self.info['sfreq'])))
 
-        with open(self._sqd_params['fname'], 'rb') as fid:
+        with open(self._sqd_params['fname'], 'rb', buffering=0) as fid:
             # extract data
             fid.seek(KIT.DATA_OFFSET)
             # data offset info
@@ -380,7 +387,7 @@ class RawKIT(Raw):
             Decimate hsp points for head shape files with more than 10'000
             points.
         """
-        if isinstance(hsp, basestring):
+        if isinstance(hsp, string_types):
             hsp = read_hsp(hsp)
 
         n_pts = len(hsp)
@@ -394,10 +401,15 @@ class RawKIT(Raw):
             msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
             logger.warning(msg)
 
-        if isinstance(elp, basestring):
-            elp = read_elp(elp)[:8]
+        if isinstance(elp, string_types):
+            elp_points = read_elp(elp)[:8]
+            if len(elp_points) < 8:
+                err = ("File %r contains fewer than 8 points; got shape "
+                       "%s." % (elp, elp_points.shape))
+                raise ValueError(err)
+            elp = elp_points
 
-        if isinstance(mrk, basestring):
+        if isinstance(mrk, string_types):
             mrk = read_mrk(mrk)
 
         hsp = apply_trans(als_ras_trans_mm, hsp)
@@ -429,7 +441,11 @@ class RawKIT(Raw):
             Device head transformation.
         """
         trans = np.asarray(trans)
-        if not trans.shape == (4, 4):
+        if fid.shape != (3, 3):
+            raise ValueError("fid needs to be a 3 by 3 array")
+        if elp.shape != (5, 3):
+            raise ValueError("elp needs to be a 5 by 3 array")
+        if trans.shape != (4, 4):
             raise ValueError("trans needs to be 4 by 4 array")
 
         nasion, lpa, rpa = fid
@@ -481,7 +497,7 @@ class RawKIT(Raw):
             '-' means a negative slope (high-to-low) on the event channel(s)
             is used to trigger an event.
         """
-        if self._preloaded:
+        if self.preload:
             err = "Can't change stim channel after preloading data"
             raise NotImplementedError(err)
 
@@ -497,11 +513,14 @@ class RawKIT(Raw):
             else:
                 raise ValueError("stim needs to be list of int, '>' or "
                                  "'<', not %r" % str(stim))
+        elif np.max(stim) >= self._sqd_params['nchan']:
+            msg = ("Tried to set stim channel %i, but squid file only has %i"
+                   " channels" % (np.max(stim), self._sqd_params['nchan']))
+            raise ValueError(msg)
 
         self._sqd_params['stim'] = stim
 
 
-
 def get_sqd_params(rawfile):
     """Extracts all the information from the sqd file.
 
@@ -517,7 +536,7 @@ def get_sqd_params(rawfile):
     """
     sqd = dict()
     sqd['rawfile'] = rawfile
-    with open(rawfile, 'rb') as fid:
+    with open(rawfile, 'rb', buffering=0) as fid:  # buffering=0 for np bug
         fid.seek(KIT.BASIC_INFO)
         basic_offset = unpack('i', fid.read(KIT.INT))[0]
         fid.seek(basic_offset)
@@ -525,7 +544,7 @@ def get_sqd_params(rawfile):
         fid.seek(KIT.INT * 3, SEEK_CUR)
         # basic info
         sysname = unpack('128s', fid.read(KIT.STRING))
-        sysname = sysname[0].split('\n')[0]
+        sysname = sysname[0].decode().split('\n')[0]
         fid.seek(KIT.STRING, SEEK_CUR)  # skips modelname
         sqd['nchan'] = unpack('i', fid.read(KIT.INT))[0]
 
@@ -543,7 +562,7 @@ def get_sqd_params(rawfile):
 
         fid.seek(chan_offset)
         sensors = []
-        for i in xrange(KIT_SYS.N_SENS):
+        for i in range(KIT_SYS.N_SENS):
             fid.seek(chan_offset + chan_size * i)
             sens_type = unpack('i', fid.read(KIT.INT))[0]
             if sens_type == 1:
@@ -576,7 +595,7 @@ def get_sqd_params(rawfile):
                               >> KIT_SYS.GAIN2_BIT]
         if KIT_SYS.GAIN3_BIT:
             gain3 = KIT_SYS.GAINS[(KIT_SYS.GAIN3_MASK & amp_data)
-                                     >> KIT_SYS.GAIN3_BIT]
+                                  >> KIT_SYS.GAIN3_BIT]
             sqd['amp_gain'] = gain1 * gain2 * gain3
         else:
             sqd['amp_gain'] = gain1 * gain2
@@ -629,9 +648,10 @@ def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
     ----------
     input_fname : str
         Path to the sqd file.
-    mrk : None | str | array_like, shape = (5, 3)
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
         Marker points representing the location of the marker coils with
         respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
     elp : None | str | array_like, shape = (8, 3)
         Digitizer points representing the location of the fiducials and the
         marker coils with respect to the digitized head shape, or path to a
@@ -653,12 +673,12 @@ def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
     stimthresh : float
         The threshold level for accepting voltage changes in KIT trigger
         channels as a trigger event.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
     preload : bool
         If True, all data are loaded at initialization.
         If False, data are not read until save.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
     """
     return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
                   stim=stim, slope=slope, stimthresh=stimthresh,
-                  verbose=verbose, preload=preload)
+                  preload=preload, verbose=verbose)
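# Minimal call sketch for the updated public wrapper (hypothetical file
# names), including the new list-of-markers option:
from mne.io import read_raw_kit
raw = read_raw_kit('data.sqd', mrk=['pre_mrk.sqd', 'post_mrk.sqd'],
                   elp='points_elp.txt', hsp='points_hsp.txt', preload=True)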
diff --git a/mne/fiff/kit/tests/__init__.py b/mne/io/kit/tests/__init__.py
similarity index 100%
rename from mne/fiff/kit/tests/__init__.py
rename to mne/io/kit/tests/__init__.py
diff --git a/mne/fiff/kit/tests/data/sns.txt b/mne/io/kit/tests/data/sns.txt
similarity index 100%
rename from mne/fiff/kit/tests/data/sns.txt
rename to mne/io/kit/tests/data/sns.txt
diff --git a/mne/fiff/kit/tests/data/test.sqd b/mne/io/kit/tests/data/test.sqd
similarity index 100%
rename from mne/fiff/kit/tests/data/test.sqd
rename to mne/io/kit/tests/data/test.sqd
diff --git a/mne/fiff/kit/tests/data/test_Ykgw.mat b/mne/io/kit/tests/data/test_Ykgw.mat
similarity index 100%
rename from mne/fiff/kit/tests/data/test_Ykgw.mat
rename to mne/io/kit/tests/data/test_Ykgw.mat
diff --git a/mne/fiff/kit/tests/data/test_bin.fif b/mne/io/kit/tests/data/test_bin_raw.fif
similarity index 100%
rename from mne/fiff/kit/tests/data/test_bin.fif
rename to mne/io/kit/tests/data/test_bin_raw.fif
diff --git a/mne/fiff/kit/tests/data/test_elp.txt b/mne/io/kit/tests/data/test_elp.txt
similarity index 100%
rename from mne/fiff/kit/tests/data/test_elp.txt
rename to mne/io/kit/tests/data/test_elp.txt
diff --git a/mne/fiff/kit/tests/data/test_hsp.txt b/mne/io/kit/tests/data/test_hsp.txt
similarity index 100%
rename from mne/fiff/kit/tests/data/test_hsp.txt
rename to mne/io/kit/tests/data/test_hsp.txt
diff --git a/mne/fiff/kit/tests/data/test_mrk.sqd b/mne/io/kit/tests/data/test_mrk.sqd
similarity index 100%
rename from mne/fiff/kit/tests/data/test_mrk.sqd
rename to mne/io/kit/tests/data/test_mrk.sqd
diff --git a/mne/fiff/kit/tests/data/test_mrk_post.sqd b/mne/io/kit/tests/data/test_mrk_post.sqd
similarity index 100%
rename from mne/fiff/kit/tests/data/test_mrk_post.sqd
rename to mne/io/kit/tests/data/test_mrk_post.sqd
diff --git a/mne/fiff/kit/tests/data/test_mrk_pre.sqd b/mne/io/kit/tests/data/test_mrk_pre.sqd
similarity index 100%
rename from mne/fiff/kit/tests/data/test_mrk_pre.sqd
rename to mne/io/kit/tests/data/test_mrk_pre.sqd
diff --git a/mne/fiff/kit/tests/data/trans-sample.fif b/mne/io/kit/tests/data/trans-sample.fif
similarity index 100%
rename from mne/fiff/kit/tests/data/trans-sample.fif
rename to mne/io/kit/tests/data/trans-sample.fif
diff --git a/mne/fiff/kit/tests/test_coreg.py b/mne/io/kit/tests/test_coreg.py
similarity index 90%
rename from mne/fiff/kit/tests/test_coreg.py
rename to mne/io/kit/tests/test_coreg.py
index 3a140d5..46f189f 100644
--- a/mne/fiff/kit/tests/test_coreg.py
+++ b/mne/io/kit/tests/test_coreg.py
@@ -8,7 +8,7 @@ import os
 import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 
-from mne.fiff.kit import read_hsp, write_hsp, read_mrk, write_mrk
+from mne.io.kit import read_hsp, write_hsp, read_mrk, write_mrk
 from mne.coreg import get_ras_to_neuromag_trans
 from mne.transforms import apply_trans, rotation, translation
 from mne.utils import _TempDir
@@ -19,7 +19,6 @@ parent_dir = os.path.dirname(os.path.abspath(FILE))
 data_dir = os.path.join(parent_dir, 'data')
 hsp_fname = os.path.join(data_dir, 'test_hsp.txt')
 mrk_fname = os.path.join(data_dir, 'test_mrk.sqd')
-
 tempdir = _TempDir()
 
 
@@ -30,8 +29,8 @@ def test_io_hsp():
     dest = os.path.join(tempdir, 'test.txt')
     write_hsp(dest, pts)
     pts1 = read_hsp(dest)
-
-    assert_array_equal(pts, pts1)
+    assert_array_equal(pts, pts1, "Hsp points diverged after writing and "
+                       "reading.")
 
 
 def test_io_mrk():
@@ -39,7 +38,7 @@ def test_io_mrk():
     pts = read_mrk(mrk_fname)
 
     # pickle
-    path = os.path.join(tempdir, 'mrk.pickled')
+    path = os.path.join(tempdir, "mrk.pickled")
     write_mrk(path, pts)
     pts_2 = read_mrk(path)
     assert_array_equal(pts, pts_2, "read/write with pickle")
diff --git a/mne/fiff/kit/tests/test_kit.py b/mne/io/kit/tests/test_kit.py
similarity index 78%
rename from mne/fiff/kit/tests/test_kit.py
rename to mne/io/kit/tests/test_kit.py
index b82e9be..1d11cf1 100644
--- a/mne/fiff/kit/tests/test_kit.py
+++ b/mne/io/kit/tests/test_kit.py
@@ -1,4 +1,5 @@
 """Data and Channel Location Equivalence Tests"""
+from __future__ import print_function
 
 # Author: Teon Brooks <teon at nyu.edu>
 #
@@ -10,15 +11,18 @@ import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 import scipy.io
 from mne.utils import _TempDir
-from mne.fiff import Raw, pick_types
-from mne.fiff.kit import read_raw_kit, read_hsp, write_hsp
-from mne.fiff.kit.coreg import read_sns
+from mne import pick_types
+from mne.io import Raw
+from mne.io import read_raw_kit
+from mne.io.kit.coreg import read_sns
 
 FILE = inspect.getfile(inspect.currentframe())
 parent_dir = op.dirname(op.abspath(FILE))
 data_dir = op.join(parent_dir, 'data')
 sqd_path = op.join(data_dir, 'test.sqd')
 mrk_path = op.join(data_dir, 'test_mrk.sqd')
+mrk2_path = op.join(data_dir, 'test_mrk_pre.sqd')
+mrk3_path = op.join(data_dir, 'test_mrk_post.sqd')
 elp_path = op.join(data_dir, 'test_elp.txt')
 hsp_path = op.join(data_dir, 'test_hsp.txt')
 
@@ -29,13 +33,13 @@ def test_data():
     """Test reading raw kit files
     """
     raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path,
-                          stim=range(167, 159, -1), slope='+', stimthresh=1,
-                          preload=True)
-    print repr(raw_py)
+                          stim=list(range(167, 159, -1)), slope='+',
+                          stimthresh=1, preload=True)
+    print(repr(raw_py))
 
     # Binary file only stores the sensor channels
     py_picks = pick_types(raw_py.info, exclude='bads')
-    raw_bin = op.join(data_dir, 'test_bin.fif')
+    raw_bin = op.join(data_dir, 'test_bin_raw.fif')
     raw_bin = Raw(raw_bin, preload=True)
     bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
     data_bin, _ = raw_bin[bin_picks]
@@ -59,11 +63,11 @@ def test_read_segment():
     """
     raw1 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                         preload=False)
-    raw1_file = op.join(tempdir, 'raw1.fif')
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
     raw1.save(raw1_file, buffer_size_sec=.1, overwrite=True)
     raw2 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                         preload=True)
-    raw2_file = op.join(tempdir, 'raw2.fif')
+    raw2_file = op.join(tempdir, 'test2-raw.fif')
     raw2.save(raw2_file, buffer_size_sec=.1, overwrite=True)
     raw1 = Raw(raw1_file, preload=True)
     raw2 = Raw(raw2_file, preload=True)
@@ -77,7 +81,7 @@ def test_ch_loc():
     """Test raw kit loc
     """
     raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<')
-    raw_bin = Raw(op.join(data_dir, 'test_bin.fif'))
+    raw_bin = Raw(op.join(data_dir, 'test_bin_raw.fif'))
 
     ch_py = raw_py._sqd_params['sensor_locs'][:, :5]
     # ch locs stored as m, not mm
@@ -85,6 +89,8 @@ def test_ch_loc():
     ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
     assert_array_almost_equal(ch_py, ch_sns, 2)
 
+    assert_array_almost_equal(raw_py.info['dev_head_t']['trans'],
+                              raw_bin.info['dev_head_t']['trans'], 4)
     for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
         if bin_ch['ch_name'].startswith('MEG'):
             # the stored ch locs have more precision than the sns.txt
@@ -93,6 +99,10 @@ def test_ch_loc():
                                       bin_ch['coil_trans'],
                                       decimal=2)
 
+    # test when more than one marker file provided
+    mrks = [mrk_path, mrk2_path, mrk3_path]
+    _ = read_raw_kit(sqd_path, mrks, elp_path, hsp_path, preload=False)
+
 
 def test_stim_ch():
     """Test raw kit stim ch
@@ -104,13 +114,3 @@ def test_stim_ch():
     stim1, _ = raw[stim_pick]
     stim2 = np.array(raw.read_stim_ch(), ndmin=2)
     assert_array_equal(stim1, stim2)
-
-
-def test_hsp_io():
-    """Test reading and writing hsp files"""
-    pts = read_hsp(hsp_path)
-    temp_fname = op.join(tempdir, 'temp_hsp.txt')
-    write_hsp(temp_fname, pts)
-    pts2 = read_hsp(temp_fname)
-    assert_array_equal(pts, pts2, "Hsp points diverged after writing and "
-                       "reading.")
diff --git a/mne/fiff/matrix.py b/mne/io/matrix.py
similarity index 98%
rename from mne/fiff/matrix.py
rename to mne/io/matrix.py
index 3f89acd..caecafa 100644
--- a/mne/fiff/matrix.py
+++ b/mne/io/matrix.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
diff --git a/mne/fiff/meas_info.py b/mne/io/meas_info.py
similarity index 54%
rename from mne/fiff/meas_info.py
rename to mne/io/meas_info.py
index b192e96..802f633 100644
--- a/mne/fiff/meas_info.py
+++ b/mne/io/meas_info.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
@@ -8,21 +8,31 @@ from copy import deepcopy
 import os.path as op
 import numpy as np
 from scipy import linalg
-from StringIO import StringIO
+from ..externals.six import BytesIO, string_types
 from datetime import datetime as dt
 
+from .constants import FIFF
 from .open import fiff_open
 from .tree import dir_tree_find, copy_tree
-from .constants import FIFF
-from .tag import read_tag
-from .proj import read_proj, write_proj
+from .tag import read_tag, find_tag
+from .proj import _read_proj, _write_proj, _uniquify_projs
 from .ctf import read_ctf_comp, write_ctf_comp
-from .channels import read_bad_channels
 from .write import (start_file, end_file, start_block, end_block,
                     write_string, write_dig_point, write_float, write_int,
-                    write_coord_trans, write_ch_info, write_name_list)
+                    write_coord_trans, write_ch_info, write_name_list,
+                    write_julian)
 from ..utils import logger, verbose
 
+_kind_dict = dict(
+    eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T),
+    grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M),
+    misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE),
+    stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+)
+
 
 def _summarize_str(st):
     """Aux function"""
@@ -40,7 +50,7 @@ class Info(dict):
         for k, v in self.items():
             if k in ['bads', 'ch_names']:
                 entr = (', '.join(b for ii, b in enumerate(v) if ii < 10)
-                           if v else '0 items')
+                        if v else '0 items')
                 if len(entr) >= 56:
                     # get rid of of half printed ch names
                     entr = _summarize_str(entr)
@@ -59,7 +69,7 @@ class Info(dict):
                 this_len = (len(v) if hasattr(v, '__len__') else
                            ('%s' % v if v is not None else None))
                 entr = (('%d items' % this_len) if isinstance(this_len, int)
-                           else ('%s' % this_len if this_len else ''))
+                        else ('%s' % this_len if this_len else ''))
             if entr:
                 non_empty += 1
                 entr = ' | ' + entr
@@ -71,6 +81,10 @@ class Info(dict):
         st %= non_empty
         return st
 
+    def _anonymize(self):
+        if self.get('subject_info') is not None:
+            del self['subject_info']
+
 
 def read_fiducials(fname):
     """Read fiducials from a fiff file
@@ -81,7 +95,7 @@ def read_fiducials(fname):
         List of digitizer points (each point in a dict).
     coord_frame : int
         The coordinate frame of the points (one of
-        mne.fiff.FIFF.FIFFV_COORD_...)
+        mne.io.constants.FIFF.FIFFV_COORD_...)
     """
     fid, tree, _ = fiff_open(fname)
     with fid:
@@ -123,7 +137,7 @@ def write_fiducials(fname, pts, coord_frame=0):
         the keys 'kind', 'ident' and 'r'.
     coord_frame : int
         The coordinate frame of the points (one of
-        mne.fiff.FIFF.FIFFV_COORD_...)
+        mne.io.constants.FIFF.FIFFV_COORD_...)
     """
     pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))
     bad_frames = pts_frames - set((coord_frame,))
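# Round-trip sketch for the pair of functions above (hypothetical paths):
from mne.io.meas_info import read_fiducials, write_fiducials
pts, coord_frame = read_fiducials('fiducials.fif')
write_fiducials('fiducials_copy.fif', pts, coord_frame)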
@@ -155,7 +169,7 @@ def read_info(fname, verbose=None):
 
     Returns
     -------
-    info : instance of mne.fiff.meas_info.Info
+    info : instance of mne.io.meas_info.Info
        Info on dataset.
     """
     f, tree, _ = fiff_open(fname)
@@ -164,6 +178,33 @@ def read_info(fname, verbose=None):
     return info
 
 
+def read_bad_channels(fid, node):
+    """Read bad channels
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+
+    node : dict
+        The node of the FIF tree that contains info on the bad channels.
+
+    Returns
+    -------
+    bads : list
+        A list of bad channel's names.
+    """
+    nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    bads = []
+    if len(nodes) > 0:
+        for node in nodes:
+            tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)
+            if tag is not None and tag.data is not None:
+                bads = tag.data.split(':')
+    return bads
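# Driving sketch for this helper (hypothetical file): fiff_open supplies the
# open descriptor and tree that read_bad_channels searches recursively.
fid, tree, _ = fiff_open('sample_raw.fif')
with fid:
    bads = read_bad_channels(fid, tree)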
+
+
 @verbose
 def read_meas_info(fid, tree, verbose=None):
     """Read the measurement info
@@ -179,7 +220,7 @@ def read_meas_info(fid, tree, verbose=None):
 
     Returns
     -------
-    info : instance of mne.fiff.meas_info.Info
+    info : instance of mne.io.meas_info.Info
        Info on dataset.
     meas : dict
         Node in tree that contains the info.
@@ -212,6 +253,7 @@ def read_meas_info(fid, tree, verbose=None):
     description = None
     proj_id = None
     proj_name = None
+    line_freq = None
     p = 0
     for k in range(meas_info['nent']):
         kind = meas_info['directory'][k].kind
@@ -256,6 +298,9 @@ def read_meas_info(fid, tree, verbose=None):
         elif kind == FIFF.FIFF_PROJ_NAME:
             tag = read_tag(fid, pos)
             proj_name = tag.data
+        elif kind == FIFF.FIFF_LINE_FREQ:
+            tag = read_tag(fid, pos)
+            line_freq = float(tag.data)
 
     # Check that we have everything we need
     if nchan is None:
@@ -280,13 +325,14 @@ def read_meas_info(fid, tree, verbose=None):
                 if kind == FIFF.FIFF_COORD_TRANS:
                     tag = read_tag(fid, pos)
                     cand = tag.data
-                    if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
-                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                    if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and
+                        cand['to'] == FIFF.FIFFV_COORD_HEAD and
+                        dev_head_t is None):
                         dev_head_t = cand
-                    elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
-                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                    elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and
+                          cand['to'] == FIFF.FIFFV_COORD_HEAD and
+                          ctf_head_t is None):
                         ctf_head_t = cand
-
     #   Locate the Polhemus data
     isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
     dig = None
@@ -322,7 +368,7 @@ def read_meas_info(fid, tree, verbose=None):
                 acq_stim = tag.data
 
     #   Load the SSP data
-    projs = read_proj(fid, meas_info)
+    projs = _read_proj(fid, meas_info)
 
     #   Load the CTF compensation data
     comps = read_ctf_comp(fid, meas_info, chs)
@@ -338,6 +384,38 @@ def read_meas_info(fid, tree, verbose=None):
     else:
         info = Info(file_id=None)
 
+    subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
+    if len(subject_info) == 1:
+        subject_info = subject_info[0]
+        si = dict()
+        for k in range(subject_info['nent']):
+            kind = subject_info['directory'][k].kind
+            pos = subject_info['directory'][k].pos
+            if kind == FIFF.FIFF_SUBJ_ID:
+                tag = read_tag(fid, pos)
+                si['id'] = int(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_HIS_ID:
+                tag = read_tag(fid, pos)
+                si['his_id'] = str(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_LAST_NAME:
+                tag = read_tag(fid, pos)
+                si['last_name'] = str(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:
+                tag = read_tag(fid, pos)
+                si['first_name'] = str(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:
+                tag = read_tag(fid, pos)
+                si['birthday'] = tag.data
+            elif kind == FIFF.FIFF_SUBJ_SEX:
+                tag = read_tag(fid, pos)
+                si['sex'] = int(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_HAND:
+                tag = read_tag(fid, pos)
+                si['hand'] = int(tag.data)
+    else:
+        si = None
+    info['subject_info'] = si
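# Illustrative shape of the parsed block (values hypothetical):
# info['subject_info'] == {'id': 1, 'his_id': 'S001', 'last_name': 'Doe',
#                          'first_name': 'Jane', 'birthday': ..., 'sex': 2,
#                          'hand': 1}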
+
     #   Load extra information blocks
     read_extra_meas_info(fid, tree, info)
 
@@ -370,6 +448,7 @@ def read_meas_info(fid, tree, verbose=None):
     info['sfreq'] = sfreq
     info['highpass'] = highpass if highpass is not None else 0
     info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0
+    info['line_freq'] = line_freq
 
     #   Add the channel information and make a list of channel names
     #   for convenience
@@ -403,55 +482,53 @@ def read_meas_info(fid, tree, verbose=None):
 
 def read_extra_meas_info(fid, tree, info):
     """Read extra blocks from fid"""
-    # current method saves them into a cStringIO file instance for simplicity
+    # current method saves them into a BytesIO file instance for simplicity
     # this and its partner, write_extra_meas_info, could be made more
     # comprehensive (i.e.., actually parse and read the data instead of
     # just storing it for later)
-    blocks = [FIFF.FIFFB_SUBJECT, FIFF.FIFFB_EVENTS,
-              FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS,
+    blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS,
               FIFF.FIFFB_PROCESSING_HISTORY]
-    info['orig_blocks'] = blocks
-
-    fid_str = StringIO()
-    fid_str = start_file(fid_str)
-    start_block(fid_str, FIFF.FIFFB_MEAS_INFO)
-    for block in blocks:
+    info['orig_blocks'] = dict(blocks=blocks)
+    fid_bytes = BytesIO()
+    start_file(fid_bytes, tree['id'])
+    start_block(fid_bytes, FIFF.FIFFB_MEAS_INFO)
+    for block in info['orig_blocks']['blocks']:
         nodes = dir_tree_find(tree, block)
-        copy_tree(fid, tree['id'], nodes, fid_str)
-    info['orig_fid_str'] = fid_str
+        copy_tree(fid, tree['id'], nodes, fid_bytes)
+    end_block(fid_bytes, FIFF.FIFFB_MEAS_INFO)
+    info['orig_blocks']['bytes'] = fid_bytes.getvalue()
 
 
 def write_extra_meas_info(fid, info):
     """Write otherwise left out blocks of data"""
-    # uses cStringIO fake file to read the appropriate blocks
+    # uses BytesIO fake file to read the appropriate blocks
     if 'orig_blocks' in info and info['orig_blocks'] is not None:
         # Blocks from the original
-        blocks = info['orig_blocks']
-        fid_str, tree, _ = fiff_open(info['orig_fid_str'])
-        for block in blocks:
+        fid_bytes, tree, _ = fiff_open(BytesIO(info['orig_blocks']['bytes']))
+        for block in info['orig_blocks']['blocks']:
             nodes = dir_tree_find(tree, block)
-            copy_tree(fid_str, tree['id'], nodes, fid)
+            copy_tree(fid_bytes, tree['id'], nodes, fid)
 
 
 def write_meas_info(fid, info, data_type=None, reset_range=True):
-    """Write measurement info in fif file.
+    """Write measurement info into a file id (from a fif file)
 
     Parameters
     ----------
     fid : file
-        Open file descriptor
-    info : instance of mne.fiff.meas_info.Info
-        The measurement info structure
+        Open file descriptor.
+    info : instance of mne.io.meas_info.Info
+        The measurement info structure.
     data_type : int
         The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
-        5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for
+        5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
         raw data.
     reset_range : bool
         If True, info['chs'][k]['range'] will be set to unity.
 
-    Note
-    ----
-    Tags are written in a particular order for compatibility with maxfilter
+    Notes
+    -----
+    Tags are written in a particular order for compatibility with maxfilter.
     """
 
     # Measurement info
@@ -487,7 +564,7 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
         write_coord_trans(fid, info['ctf_head_t'])
 
     #   Projectors
-    write_proj(fid, info['projs'])
+    _write_proj(fid, info['projs'])
 
     #   CTF compensation info
     write_ctf_comp(fid, info['comps'])
@@ -513,6 +590,8 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
     write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
     write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
     write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
+    if info.get('line_freq') is not None:
+        write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
     if data_type is not None:
         write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
 
@@ -526,4 +605,245 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
             c['range'] = 1.0
         write_ch_info(fid, c)
 
+    # Subject information
+    if info.get('subject_info') is not None:
+        start_block(fid, FIFF.FIFFB_SUBJECT)
+        si = info['subject_info']
+        if si.get('id') is not None:
+            write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
+        if si.get('his_id') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
+        if si.get('last_name') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
+        if si.get('first_name') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
+        if si.get('birthday') is not None:
+            write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
+        if si.get('sex') is not None:
+            write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
+        if si.get('hand') is not None:
+            write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
+        end_block(fid, FIFF.FIFFB_SUBJECT)
+
     end_block(fid, FIFF.FIFFB_MEAS_INFO)
+
+
+def write_info(fname, info, data_type=None, reset_range=True):
+    """Write measurement info in fif file.
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file. Should end by -info.fif.
+    info : instance of mne.io.meas_info.Info
+        The measurement info structure
+    data_type : int
+        The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
+        5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
+        raw data.
+    reset_range : bool
+        If True, info['chs'][k]['range'] will be set to unity.
+    """
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MEAS)
+    write_meas_info(fid, info, data_type, reset_range)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
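# Usage sketch (hypothetical file names; '-info.fif' suffix per the docstring):
from mne.io.meas_info import read_info, write_info
info = read_info('sample_raw.fif')
write_info('sample-info.fif', info)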
+
+
+def _is_equal_dict(dicts):
+    """Aux function"""
+    tests = zip(*[d.items() for d in dicts])
+    is_equal = []
+    for d in tests:
+        k0, v0 = d[0]
+        is_equal.append(all([np.all(k == k0) and
+                             np.all(v == v0) for k, v in d]))
+    return all(is_equal)
+
+
+@verbose
+def _merge_dict_values(dicts, key, verbose=None):
+    """Merge things together
+
+    Fork for {'dict', 'list', 'array', 'other'}
+    and consider cases where one or all are of the same type.
+    """
+    values = [d[key] for d in dicts]
+    msg = ("Don't know how to merge '%s'. Make sure values are "
+           "compatible." % key)
+
+    def _flatten(lists):
+        return [item for sublist in lists for item in sublist]
+
+    def _check_isinstance(values, kind, func):
+        return func([isinstance(v, kind) for v in values])
+
+    def _where_isinstance(values, kind):
+        """Aux function"""
+        return np.where([isinstance(v, kind) for v in values])[0]
+
+    # list
+    if _check_isinstance(values, list, all):
+        lists = (d[key] for d in dicts)
+        return (_uniquify_projs(_flatten(lists)) if key == 'projs'
+                else _flatten(lists))
+    elif _check_isinstance(values, list, any):
+        idx = _where_isinstance(values, list)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            lists = (d[key] for d in dicts if isinstance(d[key], list))
+            return _flatten(lists)
+    # dict
+    elif _check_isinstance(values, dict, all):
+        is_equal = _is_equal_dict(values)
+        if is_equal:
+            return values[0]
+        else:
+            raise RuntimeError(msg)
+    elif _check_isinstance(values, dict, any):
+        idx = _where_isinstance(values, dict)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            raise RuntimeError(msg)
+    # ndarray
+    elif _check_isinstance(values, np.ndarray, all):
+        is_equal = all([np.all(values[0] == x) for x in values[1:]])
+        if is_equal:
+            return values[0]
+        elif key == 'meas_date':
+            logger.info('Found multiple entries for %s. '
+                        'Setting value to `None`' % key)
+            return None
+        else:
+            raise RuntimeError(msg)
+    elif _check_isinstance(values, np.ndarray, any):
+        idx = _where_isinstance(values, np.ndarray)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            raise RuntimeError(msg)
+    # other
+    else:
+        unique_values = set(values)
+        if len(unique_values) == 1:
+            return list(values)[0]
+        elif isinstance(list(unique_values)[0], BytesIO):
+            logger.info('Found multiple BytesIO instances. '
+                        'Setting value to `None`')
+            return None
+        elif isinstance(list(unique_values)[0], string_types):
+            logger.info('Found multiple filenames. '
+                        'Setting value to `None`')
+            return None
+        else:
+            raise RuntimeError(msg)
+
+
+@verbose
+def _merge_info(infos, verbose=None):
+    """Merge two measurement info dictionaries"""
+
+    info = Info()
+    ch_names = _merge_dict_values(infos, 'ch_names')
+    duplicates = set([ch for ch in ch_names if ch_names.count(ch) > 1])
+    if len(duplicates) > 0:
+        err = ("The following channels are present in more than one input "
+               "measurement info objects: %s" % list(duplicates))
+        raise ValueError(err)
+    info['nchan'] = len(ch_names)
+    info['ch_names'] = ch_names
+    info['chs'] = []
+    for this_info in infos:
+        info['chs'].extend(this_info['chs'])
+
+    transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t']
+    for trans_name in transforms:
+        trans = [i[trans_name] for i in infos if i[trans_name]]
+        if len(trans) == 0:
+            info[trans_name] = None
+        elif len(trans) == 1:
+            info[trans_name] = trans[0]
+        elif all([np.all(trans[0]['trans'] == x['trans']) and
+                  trans[0]['from'] == x['from'] and
+                  trans[0]['to'] == x['to']
+                  for x in trans[1:]]):
+            info[trans_name] = trans[0]
+        else:
+            err = ("Measurement infos provide mutually inconsistent %s" %
+                   trans_name)
+            raise ValueError(err)
+    other_fields = ['acq_pars', 'acq_stim', 'bads', 'buffer_size_sec',
+                    'comps', 'description', 'dig', 'experimenter', 'file_id',
+                    'filename', 'highpass', 'line_freq', 'lowpass',
+                    'meas_date', 'meas_id', 'orig_blocks', 'proj_id',
+                    'proj_name', 'projs', 'sfreq', 'subject_info']
+
+    for k in other_fields:
+        info[k] = _merge_dict_values(infos, k)
+
+    return info
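# Sketch: merging infos with disjoint channel sets (private helper; the two
# inputs here are hypothetical recordings from the same session).
info_a = read_info('meg_raw.fif')
info_b = read_info('eeg_raw.fif')  # must share no channel names with info_a
merged = _merge_info([info_a, info_b])
assert merged['nchan'] == info_a['nchan'] + info_b['nchan']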
+
+
+def create_info(ch_names, sfreq, ch_types=None):
+    """Create a basic Info instance suitable for use with create_raw
+
+    Parameters
+    ----------
+    ch_names : list of str
+        Channel names.
+    sfreq : float
+        Sample rate of the data.
+    ch_types : list of str
+        Channel types. If None, data are assumed to be misc.
+        Currently supported fields are "mag", "grad", "eeg", "stim", "eog",
+        "ecg", and "misc".
+
+    Notes
+    -----
+    The info dictionary will be sparsely populated to enable functionality
+    within the rest of the package. Advanced functionality such as source
+    localization can only be obtained through substantial, proper
+    modifications of the info structure (not recommended).
+    """
+    if not isinstance(ch_names, (list, tuple)):
+        raise TypeError('ch_names must be a list or tuple')
+    sfreq = float(sfreq)
+    if sfreq <= 0:
+        raise ValueError('sfreq must be positive')
+    nchan = len(ch_names)
+    if ch_types is None:
+        ch_types = ['misc'] * nchan
+    if len(ch_types) != nchan:
+        raise ValueError('ch_types and ch_names must be the same length')
+    info = Info()
+    info['meas_date'] = [0, 0]
+    info['sfreq'] = sfreq
+    for key in ['bads', 'projs', 'comps']:
+        info[key] = list()
+    for key in ['meas_id', 'file_id', 'highpass', 'lowpass', 'acq_pars',
+                'acq_stim', 'filename', 'dig']:
+        info[key] = None
+    info['ch_names'] = ch_names
+    info['nchan'] = nchan
+    info['chs'] = list()
+    loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32)
+    for ci, (name, kind) in enumerate(zip(ch_names, ch_types)):
+        if not isinstance(name, string_types):
+            raise TypeError('each entry in ch_names must be a string')
+        if not isinstance(kind, string_types):
+            raise TypeError('each entry in ch_types must be a string')
+        if kind not in _kind_dict:
+            raise KeyError('kind must be one of %s, not %s'
+                           % (list(_kind_dict.keys()), kind))
+        kind = _kind_dict[kind]
+        chan_info = dict(loc=loc, eeg_loc=None, unit_mul=0, range=1., cal=1.,
+                         coil_trans=None, kind=kind[0], coil_type=kind[1],
+                         unit=kind[2], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
+                         ch_name=name, scanno=ci + 1, logno=ci + 1)
+        info['chs'].append(chan_info)
+    info['dev_head_t'] = None
+    info['dev_ctf_t'] = None
+    info['ctf_head_t'] = None
+    return info
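# Usage sketch grounded in the docstring and _kind_dict above:
info = create_info(ch_names=['MEG 001', 'EEG 001', 'STI 014'], sfreq=1000.,
                   ch_types=['grad', 'eeg', 'stim'])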
diff --git a/mne/fiff/open.py b/mne/io/open.py
similarity index 88%
rename from mne/fiff/open.py
rename to mne/io/open.py
index d5b1b2b..f3b8c71 100644
--- a/mne/fiff/open.py
+++ b/mne/io/open.py
@@ -1,17 +1,34 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
+from ..externals.six import string_types
 import numpy as np
 import os.path as op
-import gzip
-import cStringIO
+from io import BytesIO
 
 from .tag import read_tag_info, read_tag, read_big, Tag
 from .tree import make_dir_tree
 from .constants import FIFF
 from ..utils import logger, verbose
+from ..externals import six
+from ..fixes import gzip_open
+
+
+def _fiff_get_fid(fname):
+    """Helper to open a FIF file with no additional parsing"""
+    if isinstance(fname, string_types):
+        if op.splitext(fname)[1].lower() == '.gz':
+            logger.debug('Using gzip')
+            fid = gzip_open(fname, "rb")  # Open in binary mode
+        else:
+            logger.debug('Using normal I/O')
+            fid = open(fname, "rb")  # Open in binary mode
+    else:
+        fid = fname
+        fid.seek(0)
+    return fid
 
 
 @verbose
@@ -37,25 +54,15 @@ def fiff_open(fname, preload=False, verbose=None):
         The tree is a complex structure filled with dictionaries,
         lists and tags.
     directory : list
-        list of nodes.
+        A list of tags.
     """
-    if isinstance(fname, basestring):
-        if op.splitext(fname)[1].lower() == '.gz':
-            logger.debug('Using gzip')
-            fid = gzip.open(fname, "rb")  # Open in binary mode
-        else:
-            logger.debug('Using normal I/O')
-            fid = open(fname, "rb")  # Open in binary mode
-    else:
-        fid = fname
-        fid.seek(0)
-
+    fid = _fiff_get_fid(fname)
     # do preloading of entire file
     if preload:
-        # note that cStringIO objects instantiated this way are read-only,
+        # note that the BytesIO object created this way is effectively read-only,
         # but that's okay here since we are using mode "rb" anyway
         fid_old = fid
-        fid = cStringIO.StringIO(read_big(fid_old))
+        fid = BytesIO(read_big(fid_old))
         fid_old.close()
 
     tag = read_tag_info(fid)
@@ -141,7 +148,7 @@ def show_fiff(fname, indent='    ', read_limit=np.inf, max_str=30,
 
 def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']):
     """Helper to find matching values"""
-    vals = [k for k, v in FIFF.iteritems()
+    vals = [k for k, v in six.iteritems(FIFF)
             if v == value and any([fmt in k for fmt in fmts])
             and not any(exc in k for exc in exclude)]
     return vals
@@ -185,8 +192,10 @@ def _show_tree(fid, tree, indent, level, read_limit, max_str):
                             postpend += ' ... array size=' + str(tag.data.size)
                     elif isinstance(tag.data, dict):
                         postpend += ' ... dict len=' + str(len(tag.data))
-                    elif isinstance(tag.data, basestring):
+                    elif isinstance(tag.data, string_types):
                         postpend += ' ... str len=' + str(len(tag.data))
+                    elif isinstance(tag.data, (list, tuple)):
+                        postpend += ' ... list len=' + str(len(tag.data))
                     else:
                         postpend += ' ... (unknown type)'
                 postpend = '>' * 20 + 'BAD' if not good else postpend
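
For reference, the public entry point keeps its signature after the rename; a
minimal sketch of opening a FIF file with preloading (file name hypothetical):

    from mne.io.open import fiff_open

    # preload=True reads the whole file into a BytesIO before parsing
    fid, tree, directory = fiff_open('test_raw.fif', preload=True)
    fid.close()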
diff --git a/mne/fiff/pick.py b/mne/io/pick.py
similarity index 96%
rename from mne/fiff/pick.py
rename to mne/io/pick.py
index 9c2b6ae..c6347a4 100644
--- a/mne/fiff/pick.py
+++ b/mne/io/pick.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
@@ -6,11 +6,12 @@
 
 from copy import deepcopy
 import re
-from warnings import warn
 
 import numpy as np
+
 from .constants import FIFF
 from ..utils import logger, verbose
+from ..externals.six import string_types
 
 
 def channel_type(info, idx):
@@ -84,6 +85,8 @@ def pick_channels(ch_names, include, exclude=[]):
     sel : array of int
         Indices of good channels.
     """
+    if len(np.unique(ch_names)) != len(ch_names):
+        raise RuntimeError('ch_names is not a unique list, picking is unsafe')
     sel = []
     for k, name in enumerate(ch_names):
         if (len(include) == 0 or name in include) and name not in exclude:
@@ -189,10 +192,12 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
                          ' If only one channel is to be excluded, use '
                          '[ch_name] instead of passing ch_name.')
 
-    if isinstance(ref_meg, basestring):
+    if isinstance(ref_meg, string_types):
         if ref_meg != 'auto':
             raise ValueError('ref_meg has to be either a bool or \'auto\'')
-        ref_meg = info['comps'] is not None and len(info['comps']) > 0
+
+        ref_meg = ('comps' in info and info['comps'] is not None and
+                   len(info['comps']) > 0)
 
     for k in range(nchan):
         kind = info['chs'][k]['kind']
@@ -258,7 +263,7 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
     return sel
 
 
-def pick_info(info, sel=[]):
+def pick_info(info, sel=[], copy=True):
     """Restrict an info structure to a selection of channels
 
     Parameters
@@ -267,21 +272,24 @@ def pick_info(info, sel=[]):
         Info structure from evoked or raw data.
     sel : list of int
         Indices of channels to include.
+    copy : bool
+        If copy is False, info is modified inplace.
 
     Returns
     -------
     res : dict
         Info structure restricted to a selection of channels.
     """
+    if copy:
+        info = deepcopy(info)
 
-    res = deepcopy(info)
     if len(sel) == 0:
         raise ValueError('No channels match the selection.')
 
-    res['chs'] = [res['chs'][k] for k in sel]
-    res['ch_names'] = [res['ch_names'][k] for k in sel]
-    res['nchan'] = len(sel)
-    return res
+    info['chs'] = [info['chs'][k] for k in sel]
+    info['ch_names'] = [info['ch_names'][k] for k in sel]
+    info['nchan'] = len(sel)
+    return info
 
 
 def _has_kit_refs(info, picks):
@@ -308,6 +316,7 @@ def pick_channels_evoked(orig, include=[], exclude='bads'):
     exclude : list of string, (optional) | 'bads'
         Channels to exclude (if empty, do not exclude any).
          Defaults to 'bads'.
+
     Returns
     -------
     res : instance of Evoked
@@ -392,6 +401,7 @@ def pick_types_evoked(orig, meg=True, eeg=False, stim=False, eog=False,
                      ecg=ecg, emg=emg, ref_meg=ref_meg, misc=misc,
                      resp=resp, chpi=chpi, exci=exci, ias=ias, syst=syst,
                      include=include, exclude=exclude)
+
     include_ch_names = [orig.ch_names[k] for k in sel]
     return pick_channels_evoked(orig, include_ch_names)
 
diff --git a/mne/fiff/proj.py b/mne/io/proj.py
similarity index 91%
rename from mne/fiff/proj.py
rename to mne/io/proj.py
index 754d2a8..dd92f03 100644
--- a/mne/fiff/proj.py
+++ b/mne/io/proj.py
@@ -1,6 +1,6 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -8,10 +8,11 @@ from copy import deepcopy
 from math import sqrt
 import numpy as np
 from scipy import linalg
+from itertools import count
 
 from .tree import dir_tree_find
-from .constants import FIFF
 from .tag import find_tag
+from .constants import FIFF
 from .pick import pick_types
 from ..utils import logger, verbose
 
@@ -50,7 +51,7 @@ class ProjMixin(object):
             projs = [projs]
 
         if (not isinstance(projs, list) and
-            not all([isinstance(p, Projection) for p in projs])):
+                not all([isinstance(p, Projection) for p in projs])):
             raise ValueError('Only projs can be added. You supplied '
                              'something else.')
 
@@ -98,12 +99,12 @@ class ProjMixin(object):
         if self.info['projs'] is None:
             logger.info('No projector specified for this dataset. '
                         'Please consider the method self.add_proj.')
-            return
+            return self
 
         if all([p['active'] for p in self.info['projs']]):
             logger.info('Projections have already been applied. Doing '
-                         'nothing.')
-            return
+                        'nothing.')
+            return self
 
         _projector, info = setup_proj(deepcopy(self.info), activate=True,
                                       verbose=self.verbose)
@@ -126,7 +127,7 @@ class ProjMixin(object):
                     data = np.empty_like(self._data)
                     for ii, e in enumerate(self._data):
                         data[ii] = self._preprocess(np.dot(self._projector, e),
-                            self.verbose)
+                                                    self.verbose)
                 else:  # get data knows what to do.
                     data = data()
             else:
@@ -167,20 +168,19 @@ class ProjMixin(object):
 def proj_equal(a, b):
     """ Test if two projectors are equal """
 
-    equal = a['active'] == b['active']\
-            and a['kind'] == b['kind']\
-            and a['desc'] == b['desc']\
-            and a['data']['col_names'] == b['data']['col_names']\
-            and a['data']['row_names'] == b['data']['row_names']\
-            and a['data']['ncol'] == b['data']['ncol']\
-            and a['data']['nrow'] == b['data']['nrow']\
-            and np.all(a['data']['data'] == b['data']['data'])
-
+    equal = (a['active'] == b['active'] and
+             a['kind'] == b['kind'] and
+             a['desc'] == b['desc'] and
+             a['data']['col_names'] == b['data']['col_names'] and
+             a['data']['row_names'] == b['data']['row_names'] and
+             a['data']['ncol'] == b['data']['ncol'] and
+             a['data']['nrow'] == b['data']['nrow'] and
+             np.all(a['data']['data'] == b['data']['data']))
     return equal
 
 
 @verbose
-def read_proj(fid, node, verbose=None):
+def _read_proj(fid, node, verbose=None):
     """Read spatial projections from a FIF file.
 
     Parameters
@@ -301,7 +301,7 @@ from .write import (write_int, write_float, write_string, write_name_list,
                     write_float_matrix, end_block, start_block)
 
 
-def write_proj(fid, projs):
+def _write_proj(fid, projs):
     """Write a projection operator to a file.
 
     Parameters
@@ -317,7 +317,7 @@ def write_proj(fid, projs):
         start_block(fid, FIFF.FIFFB_PROJ_ITEM)
         write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])
         write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
-                             proj['data']['col_names'])
+                        proj['data']['col_names'])
         write_string(fid, FIFF.FIFF_NAME, proj['desc'])
         write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])
         if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:
@@ -387,8 +387,8 @@ def make_projector(projs, ch_names, bads=[], include_active=True):
     nonzero = 0
     for k, p in enumerate(projs):
         if not p['active'] or include_active:
-            if len(p['data']['col_names']) != \
-                        len(np.unique(p['data']['col_names'])):
+            if (len(p['data']['col_names']) !=
+                    len(np.unique(p['data']['col_names']))):
                 raise ValueError('Channel name list in projection item %d'
                                  ' contains duplicate items' % k)
 
@@ -551,8 +551,8 @@ def make_eeg_average_ref_proj(info, activate=True, verbose=None):
     eeg_proj_data = dict(col_names=eeg_names, row_names=None,
                          data=vec, nrow=1, ncol=n_eeg)
     eeg_proj = Projection(active=activate, data=eeg_proj_data,
-                    desc='Average EEG reference',
-                    kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF)
+                          desc='Average EEG reference',
+                          kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF)
     return eeg_proj
 
 
@@ -567,7 +567,7 @@ def _has_eeg_average_ref_proj(projs):
 
 @verbose
 def setup_proj(info, add_eeg_ref=True, activate=True,
-              verbose=None):
+               verbose=None):
     """Set up projection for Raw and Epochs
 
     Parameters
@@ -606,10 +606,31 @@ def setup_proj(info, add_eeg_ref=True, activate=True,
         projector = None
     else:
         logger.info('Created an SSP operator (subspace dimension = %d)'
-                                                               % nproj)
+                    % nproj)
 
     #   The projection items have been activated
     if activate:
         info['projs'] = activate_proj(info['projs'], copy=False)
 
     return projector, info
+
+
+def _uniquify_projs(projs):
+    """Aux function"""
+    final_projs = []
+    for proj in projs:  # flatten
+        if not any([proj_equal(p, proj) for p in final_projs]):
+            final_projs.append(proj)
+
+    my_count = count(len(final_projs))
+
+    def sorter(x):
+        """sort in a nice way"""
+        digits = [s for s in x['desc'] if s.isdigit()]
+        if digits:
+            sort_idx = int(digits[-1])
+        else:
+            sort_idx = next(my_count)
+        return (sort_idx, x['desc'])
+
+    return sorted(final_projs, key=sorter)
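
Since ``apply_proj`` now returns ``self`` on every path, projection handling
can be chained; a sketch (``raw`` assumed to be a loaded Raw instance):

    from mne.io.proj import make_eeg_average_ref_proj

    eeg_ref = make_eeg_average_ref_proj(raw.info)
    raw.add_proj(eeg_ref).apply_proj()   # chaining works even on early returns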
diff --git a/mne/fiff/tag.py b/mne/io/tag.py
similarity index 94%
rename from mne/fiff/tag.py
rename to mne/io/tag.py
index c814fb1..8b983da 100644
--- a/mne/fiff/tag.py
+++ b/mne/io/tag.py
@@ -1,16 +1,19 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
 import struct
-import numpy as np
-from scipy import linalg
 import os
 import gzip
+import numpy as np
+from scipy import linalg
 
 from .constants import FIFF
 
+from ..externals.six import text_type
+from ..externals.jdcal import jd2jcal
+
 
 class Tag(object):
     """Tag in FIF tree structure
@@ -71,7 +74,7 @@ def read_big(fid, size=None):
 
     Returns
     -------
-    buf : str
+    buf : bytes
         The data.
 
     Notes
@@ -114,21 +117,21 @@ def read_big(fid, size=None):
     if size is not None:
         # Use pre-buffering method
         segments = np.r_[np.arange(0, size, buf_size), size]
-        buf = bytearray(' ' * size)
+        buf = bytearray(b' ' * size)
         for start, end in zip(segments[:-1], segments[1:]):
-            data = fid.read(end - start)
+            data = fid.read(int(end - start))
             if len(data) != end - start:
                 raise ValueError('Read error')
             buf[start:end] = data
-        buf = str(buf)
+        buf = bytes(buf)
     else:
         # Use presumably less efficient concatenating method
-        buf = ['']
+        buf = [b'']
         new = fid.read(buf_size)
         while len(new) > 0:
             buf.append(new)
             new = fid.read(buf_size)
-        buf = ''.join(buf)
+        buf = b''.join(buf)
 
     return buf
 
@@ -162,9 +165,9 @@ def _fromstring_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
             raise ValueError('rlims must yield at least one output')
         row_size = item_size * shape[1]
         # # of bytes to skip at the beginning, # to read, where to end
-        start_skip = rlims[0] * row_size
-        read_size = n_row_out * row_size
-        end_pos = fid.tell() + tag_size
+        start_skip = int(rlims[0] * row_size)
+        read_size = int(n_row_out * row_size)
+        end_pos = int(fid.tell() + tag_size)
         # Move the pointer ahead to the read point
         fid.seek(start_skip, 1)
         # Do the reading
@@ -278,8 +281,7 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
                     raise Exception('Cannot handle matrix of type %d yet'
                                     % matrix_type)
 
-            elif matrix_coding == matrix_coding_CCS or \
-                                    matrix_coding == matrix_coding_RCS:
+            elif matrix_coding in (matrix_coding_CCS, matrix_coding_RCS):
                 from scipy import sparse
                 # Find dimensions and return to the beginning of tag data
                 pos = fid.tell()
@@ -293,9 +295,9 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
 
                 # Back to where the data start
                 fid.seek(pos, 0)
-                nnz = dims[0]
-                nrow = dims[1]
-                ncol = dims[2]
+                nnz = int(dims[0])
+                nrow = int(dims[1])
+                ncol = int(dims[2])
                 sparse_data = np.fromstring(fid.read(4 * nnz), dtype='>f4')
                 shape = (dims[1], dims[2])
                 if matrix_coding == matrix_coding_CCS:
@@ -346,7 +348,11 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
             elif tag.type == FIFF.FIFFT_STRING:
                 tag.data = _fromstring_rows(fid, tag.size, dtype=">c",
                                             shape=shape, rlims=rlims)
-                tag.data = ''.join(tag.data)
+
+                # Always decode to unicode.
+                td = tag.data.tostring().decode('utf-8', 'ignore')
+                tag.data = text_type(td)
+
             elif tag.type == FIFF.FIFFT_DAU_PACK16:
                 tag.data = _fromstring_rows(fid, tag.size, dtype=">i2",
                                             shape=shape, rlims=rlims)
@@ -439,11 +445,9 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
                 #   Handle the channel name
                 #
                 ch_name = np.fromstring(fid.read(16), dtype=">c")
-                #
-                # Omit nulls
-                #
-                tag.data['ch_name'] = \
-                    ''.join(ch_name[:np.where(ch_name == '')[0][0]])
+                ch_name = ch_name[:np.argmax(ch_name == b'')].tostring()
+                # Use unicode or bytes depending on Py2/3
+                tag.data['ch_name'] = str(ch_name.decode())
 
             elif tag.type == FIFF.FIFFT_OLD_PACK:
                 offset = float(np.fromstring(fid.read(4), dtype=">f4"))
@@ -452,9 +456,12 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
                 tag.data = scale * tag.data + offset
             elif tag.type == FIFF.FIFFT_DIR_ENTRY_STRUCT:
                 tag.data = list()
-                for _ in range(tag.size / 16 - 1):
+                for _ in range(tag.size // 16 - 1):
                     s = fid.read(4 * 4)
                     tag.data.append(Tag(*struct.unpack(">iIii", s)))
+            elif tag.type == FIFF.FIFFT_JULIAN:
+                tag.data = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data = jd2jcal(tag.data)
             else:
                 raise Exception('Unimplemented tag data type %s' % tag.type)
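
The null-byte trimming of channel names above replaces a join that broke on
Python 3; the same technique as a standalone sketch:

    import numpy as np

    raw_name = np.fromstring(b'MEG 0113' + b'\0' * 8, dtype='>c')
    # np.argmax finds the first null terminator; keep everything before it
    name = raw_name[:np.argmax(raw_name == b'')].tostring().decode()
    print(name)  # -> 'MEG 0113'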
 
diff --git a/mne/fiff/tests/__init__.py b/mne/io/tests/__init__.py
similarity index 100%
rename from mne/fiff/tests/__init__.py
rename to mne/io/tests/__init__.py
diff --git a/mne/fiff/tests/data/fsaverage-fiducials.fif b/mne/io/tests/data/fsaverage-fiducials.fif
similarity index 100%
rename from mne/fiff/tests/data/fsaverage-fiducials.fif
rename to mne/io/tests/data/fsaverage-fiducials.fif
diff --git a/mne/fiff/tests/data/process_raw.sh b/mne/io/tests/data/process_raw.sh
similarity index 100%
rename from mne/fiff/tests/data/process_raw.sh
rename to mne/io/tests/data/process_raw.sh
diff --git a/mne/fiff/tests/data/sample-audvis-raw-trans.txt b/mne/io/tests/data/sample-audvis-raw-trans.txt
similarity index 100%
rename from mne/fiff/tests/data/sample-audvis-raw-trans.txt
rename to mne/io/tests/data/sample-audvis-raw-trans.txt
diff --git a/mne/io/tests/data/small-src.fif.gz b/mne/io/tests/data/small-src.fif.gz
new file mode 100644
index 0000000..b8d0183
Binary files /dev/null and b/mne/io/tests/data/small-src.fif.gz differ
diff --git a/mne/fiff/tests/data/test-eve-1.fif b/mne/io/tests/data/test-1-eve.fif
similarity index 100%
rename from mne/fiff/tests/data/test-eve-1.fif
rename to mne/io/tests/data/test-1-eve.fif
diff --git a/mne/fiff/tests/data/test-ave-2.log b/mne/io/tests/data/test-ave-2.log
similarity index 100%
rename from mne/fiff/tests/data/test-ave-2.log
rename to mne/io/tests/data/test-ave-2.log
diff --git a/mne/fiff/tests/data/test-ave.fif b/mne/io/tests/data/test-ave.fif
similarity index 100%
rename from mne/fiff/tests/data/test-ave.fif
rename to mne/io/tests/data/test-ave.fif
diff --git a/mne/fiff/tests/data/test-ave.fif.gz b/mne/io/tests/data/test-ave.fif.gz
similarity index 100%
rename from mne/fiff/tests/data/test-ave.fif.gz
rename to mne/io/tests/data/test-ave.fif.gz
diff --git a/mne/fiff/tests/data/test-ave.log b/mne/io/tests/data/test-ave.log
similarity index 100%
rename from mne/fiff/tests/data/test-ave.log
rename to mne/io/tests/data/test-ave.log
diff --git a/mne/fiff/tests/data/test-cov.fif b/mne/io/tests/data/test-cov.fif
similarity index 100%
rename from mne/fiff/tests/data/test-cov.fif
rename to mne/io/tests/data/test-cov.fif
diff --git a/mne/fiff/tests/data/test-cov.fif.gz b/mne/io/tests/data/test-cov.fif.gz
similarity index 100%
rename from mne/fiff/tests/data/test-cov.fif.gz
rename to mne/io/tests/data/test-cov.fif.gz
diff --git a/mne/fiff/tests/data/test-eve-1.eve b/mne/io/tests/data/test-eve-1.eve
similarity index 100%
rename from mne/fiff/tests/data/test-eve-1.eve
rename to mne/io/tests/data/test-eve-1.eve
diff --git a/mne/fiff/tests/data/test-eve-old-style.eve b/mne/io/tests/data/test-eve-old-style.eve
similarity index 100%
rename from mne/fiff/tests/data/test-eve-old-style.eve
rename to mne/io/tests/data/test-eve-old-style.eve
diff --git a/mne/fiff/tests/data/test-eve.eve b/mne/io/tests/data/test-eve.eve
similarity index 100%
rename from mne/fiff/tests/data/test-eve.eve
rename to mne/io/tests/data/test-eve.eve
diff --git a/mne/fiff/tests/data/test-eve.fif b/mne/io/tests/data/test-eve.fif
similarity index 100%
rename from mne/fiff/tests/data/test-eve.fif
rename to mne/io/tests/data/test-eve.fif
diff --git a/mne/fiff/tests/data/test-eve.fif.gz b/mne/io/tests/data/test-eve.fif.gz
similarity index 100%
rename from mne/fiff/tests/data/test-eve.fif.gz
rename to mne/io/tests/data/test-eve.fif.gz
diff --git a/mne/fiff/tests/data/test-km-cov.fif b/mne/io/tests/data/test-km-cov.fif
similarity index 100%
rename from mne/fiff/tests/data/test-km-cov.fif
rename to mne/io/tests/data/test-km-cov.fif
diff --git a/mne/fiff/tests/data/test-lh.label b/mne/io/tests/data/test-lh.label
similarity index 100%
rename from mne/fiff/tests/data/test-lh.label
rename to mne/io/tests/data/test-lh.label
diff --git a/mne/fiff/tests/data/test-mpr-eve.eve b/mne/io/tests/data/test-mpr-eve.eve
similarity index 100%
rename from mne/fiff/tests/data/test-mpr-eve.eve
rename to mne/io/tests/data/test-mpr-eve.eve
diff --git a/mne/fiff/tests/data/test-nf-ave.fif b/mne/io/tests/data/test-nf-ave.fif
similarity index 100%
rename from mne/fiff/tests/data/test-nf-ave.fif
rename to mne/io/tests/data/test-nf-ave.fif
diff --git a/mne/fiff/tests/data/test-no-reject.ave b/mne/io/tests/data/test-no-reject.ave
similarity index 100%
rename from mne/fiff/tests/data/test-no-reject.ave
rename to mne/io/tests/data/test-no-reject.ave
diff --git a/mne/fiff/tests/data/test_proj.fif b/mne/io/tests/data/test-proj.fif
similarity index 100%
rename from mne/fiff/tests/data/test_proj.fif
rename to mne/io/tests/data/test-proj.fif
diff --git a/mne/fiff/tests/data/test_proj.fif.gz b/mne/io/tests/data/test-proj.fif.gz
similarity index 100%
rename from mne/fiff/tests/data/test_proj.fif.gz
rename to mne/io/tests/data/test-proj.fif.gz
diff --git a/mne/fiff/tests/data/test-rh.label b/mne/io/tests/data/test-rh.label
similarity index 100%
rename from mne/fiff/tests/data/test-rh.label
rename to mne/io/tests/data/test-rh.label
diff --git a/mne/fiff/tests/data/test.ave b/mne/io/tests/data/test.ave
similarity index 100%
rename from mne/fiff/tests/data/test.ave
rename to mne/io/tests/data/test.ave
diff --git a/mne/fiff/tests/data/test.cov b/mne/io/tests/data/test.cov
similarity index 100%
rename from mne/fiff/tests/data/test.cov
rename to mne/io/tests/data/test.cov
diff --git a/mne/fiff/tests/data/test_bads.txt b/mne/io/tests/data/test_bads.txt
similarity index 100%
rename from mne/fiff/tests/data/test_bads.txt
rename to mne/io/tests/data/test_bads.txt
diff --git a/mne/fiff/tests/data/test_chpi_raw_hp.txt b/mne/io/tests/data/test_chpi_raw_hp.txt
similarity index 100%
rename from mne/fiff/tests/data/test_chpi_raw_hp.txt
rename to mne/io/tests/data/test_chpi_raw_hp.txt
diff --git a/mne/fiff/tests/data/test_chpi_raw_sss.fif b/mne/io/tests/data/test_chpi_raw_sss.fif
similarity index 100%
rename from mne/fiff/tests/data/test_chpi_raw_sss.fif
rename to mne/io/tests/data/test_chpi_raw_sss.fif
diff --git a/mne/fiff/tests/data/test_ctf_comp_raw.fif b/mne/io/tests/data/test_ctf_comp_raw.fif
similarity index 100%
rename from mne/fiff/tests/data/test_ctf_comp_raw.fif
rename to mne/io/tests/data/test_ctf_comp_raw.fif
diff --git a/mne/fiff/tests/data/test_ctf_raw.fif b/mne/io/tests/data/test_ctf_raw.fif
similarity index 100%
rename from mne/fiff/tests/data/test_ctf_raw.fif
rename to mne/io/tests/data/test_ctf_raw.fif
diff --git a/mne/fiff/tests/data/test_empty_room.cov b/mne/io/tests/data/test_empty_room.cov
similarity index 100%
rename from mne/fiff/tests/data/test_empty_room.cov
rename to mne/io/tests/data/test_empty_room.cov
diff --git a/mne/fiff/tests/data/test_erm-cov.fif b/mne/io/tests/data/test_erm-cov.fif
similarity index 100%
rename from mne/fiff/tests/data/test_erm-cov.fif
rename to mne/io/tests/data/test_erm-cov.fif
diff --git a/mne/fiff/tests/data/test_ica.lout b/mne/io/tests/data/test_ica.lout
similarity index 100%
rename from mne/fiff/tests/data/test_ica.lout
rename to mne/io/tests/data/test_ica.lout
diff --git a/mne/fiff/tests/data/test_keepmean.cov b/mne/io/tests/data/test_keepmean.cov
similarity index 100%
rename from mne/fiff/tests/data/test_keepmean.cov
rename to mne/io/tests/data/test_keepmean.cov
diff --git a/mne/fiff/tests/data/test_raw-eve.fif b/mne/io/tests/data/test_raw-eve.fif
similarity index 100%
rename from mne/fiff/tests/data/test_raw-eve.fif
rename to mne/io/tests/data/test_raw-eve.fif
diff --git a/mne/fiff/tests/data/test_raw.fif b/mne/io/tests/data/test_raw.fif
similarity index 100%
rename from mne/fiff/tests/data/test_raw.fif
rename to mne/io/tests/data/test_raw.fif
diff --git a/mne/fiff/tests/data/test_raw.fif.gz b/mne/io/tests/data/test_raw.fif.gz
similarity index 100%
rename from mne/fiff/tests/data/test_raw.fif.gz
rename to mne/io/tests/data/test_raw.fif.gz
diff --git a/mne/fiff/tests/data/test_raw.lout b/mne/io/tests/data/test_raw.lout
similarity index 100%
rename from mne/fiff/tests/data/test_raw.lout
rename to mne/io/tests/data/test_raw.lout
diff --git a/mne/fiff/tests/data/test_withbads_raw.fif b/mne/io/tests/data/test_withbads_raw.fif
similarity index 100%
rename from mne/fiff/tests/data/test_withbads_raw.fif
rename to mne/io/tests/data/test_withbads_raw.fif
diff --git a/mne/fiff/tests/data/test_wrong_bads.txt b/mne/io/tests/data/test_wrong_bads.txt
similarity index 100%
rename from mne/fiff/tests/data/test_wrong_bads.txt
rename to mne/io/tests/data/test_wrong_bads.txt
diff --git a/mne/fiff/tests/test_compensator.py b/mne/io/tests/test_compensator.py
similarity index 88%
rename from mne/fiff/tests/test_compensator.py
rename to mne/io/tests/test_compensator.py
index baca7f4..3620c86 100644
--- a/mne/fiff/tests/test_compensator.py
+++ b/mne/io/tests/test_compensator.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -7,9 +7,9 @@ from nose.tools import assert_true
 import numpy as np
 from numpy.testing import assert_allclose
 
-from mne import Epochs
-from mne.fiff.compensator import make_compensator, get_current_comp
-from mne.fiff import Raw, pick_types, read_evoked
+from mne import Epochs, read_evokeds, pick_types
+from mne.io.compensator import make_compensator, get_current_comp
+from mne.io import Raw
 from mne.utils import _TempDir, requires_mne, run_subprocess
 
 base_dir = op.join(op.dirname(__file__), 'data')
@@ -52,11 +52,11 @@ def test_compensation_mne():
         return evoked
 
     def compensate_mne(fname, comp):
-        tmp_fname = '%s-%d.fif' % (fname[:-4], comp)
+        tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp)
         cmd = ['mne_compensate_data', '--in', fname,
                '--out', tmp_fname, '--grad', str(comp)]
         run_subprocess(cmd)
-        return read_evoked(tmp_fname)
+        return read_evokeds(tmp_fname)[0]
 
     # save evoked response with default compensation
     fname_default = op.join(tempdir, 'ctf_default-ave.fif')
diff --git a/mne/fiff/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py
similarity index 76%
rename from mne/fiff/tests/test_meas_info.py
rename to mne/io/tests/test_meas_info.py
index 84b048e..4cdb190 100644
--- a/mne/fiff/tests/test_meas_info.py
+++ b/mne/io/tests/test_meas_info.py
@@ -1,11 +1,13 @@
 import os.path as op
 
 from nose.tools import assert_true, assert_equal, assert_raises
+import numpy as np
 from numpy.testing import assert_array_equal
 
-from mne import fiff, Epochs, read_events
-from mne.fiff import read_fiducials, write_fiducials, FIFF
-from mne.fiff.meas_info import Info
+from mne import io, Epochs, read_events
+from mne.io import read_fiducials, write_fiducials
+from mne.io.constants import FIFF
+from mne.io.meas_info import Info
 from mne.utils import _TempDir
 
 base_dir = op.join(op.dirname(__file__), 'data')
@@ -40,7 +42,7 @@ def test_fiducials_io():
 
 def test_info():
     """Test info object"""
-    raw = fiff.Raw(raw_fname)
+    raw = io.Raw(raw_fname)
     event_id, tmin, tmax = 1, -0.2, 0.5
     events = read_events(event_name)
     event_id = int(events[0, 2])
@@ -64,3 +66,19 @@ def test_info():
         info_str = '%s' % obj.info
         assert_equal(len(info_str.split('\n')), (len(obj.info.keys()) + 2))
         assert_true(all(k in info_str for k in obj.info.keys()))
+
+
+def test_read_write_info():
+    """Test IO of info
+    """
+    info = io.read_info(raw_fname)
+    temp_file = op.join(tempdir, 'info.fif')
+    # check for bug `#1198`
+    info['dev_head_t']['trans'] = np.eye(4)
+    t1 = info['dev_head_t']['trans']
+    io.write_info(temp_file, info)
+    info2 = io.read_info(temp_file)
+    t2 = info2['dev_head_t']['trans']
+    assert_true(len(info['chs']) == len(info2['chs']))
+    assert_array_equal(t1, t2)
+
diff --git a/mne/fiff/tests/test_pick.py b/mne/io/tests/test_pick.py
similarity index 89%
rename from mne/fiff/tests/test_pick.py
rename to mne/io/tests/test_pick.py
index 53fcc33..2fda567 100644
--- a/mne/fiff/tests/test_pick.py
+++ b/mne/io/tests/test_pick.py
@@ -1,5 +1,5 @@
 from numpy.testing import assert_array_equal
-from mne.fiff.pick import pick_channels_regexp
+from mne import pick_channels_regexp
 
 
 def test_pick_channels_regexp():
diff --git a/mne/fiff/tree.py b/mne/io/tree.py
similarity index 97%
rename from mne/fiff/tree.py
rename to mne/io/tree.py
index 8f65e3e..981dc20 100644
--- a/mne/fiff/tree.py
+++ b/mne/io/tree.py
@@ -1,9 +1,15 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
+import struct
+import numpy as np
+
+from .constants import FIFF
+from .tag import Tag
 from .tag import read_tag
+from .write import write_id, start_block, end_block, _write
 from ..utils import logger, verbose
 
 
@@ -103,13 +109,6 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=None):
 ###############################################################################
 # Writing
 
-import numpy as np
-import struct
-from .constants import FIFF
-from .tag import Tag
-from .write import write_id, start_block, end_block, _write
-
-
 def copy_tree(fidin, in_id, nodes, fidout):
     """Copies directory subtrees from fidin to fidout"""
 
@@ -125,7 +124,7 @@ def copy_tree(fidin, in_id, nodes, fidout):
             if in_id is not None:
                 write_id(fidout, FIFF.FIFF_PARENT_FILE_ID, in_id)
 
-            write_id(fidout, FIFF.FIFF_BLOCK_ID)
+            write_id(fidout, FIFF.FIFF_BLOCK_ID, in_id)
             write_id(fidout, FIFF.FIFF_PARENT_BLOCK_ID, node['id'])
 
         if node['directory'] is not None:
diff --git a/mne/fiff/write.py b/mne/io/write.py
similarity index 90%
rename from mne/fiff/write.py
rename to mne/io/write.py
index bb9be8c..11f0c4f 100644
--- a/mne/fiff/write.py
+++ b/mne/io/write.py
@@ -1,27 +1,29 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
+from ..externals.six import string_types, b
 import time
 import numpy as np
 from scipy import linalg
 import os.path as op
-import gzip
-import sys
-import os
 import re
 import uuid
 
 from .constants import FIFF
 from ..utils import logger
+from ..externals.jdcal import jcal2jd
+from ..fixes import gzip_open
 
 
 def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
     if isinstance(data, np.ndarray):
         data_size *= data.size
-    if isinstance(data, str):
-        data_size *= len(data)
+
+    # XXX for string types the data size is not adjusted here; it is
+    # computed in ``write_string``.
+
     fid.write(np.array(kind, dtype='>i4').tostring())
     fid.write(np.array(FIFFT_TYPE, dtype='>i4').tostring())
     fid.write(np.array(data_size, dtype='>i4').tostring())
@@ -71,10 +73,22 @@ def write_complex128(fid, kind, data):
     _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c16')
 
 
+def write_julian(fid, kind, data):
+    """Writes a Julian-formatted date to a FIF file"""
+    assert len(data) == 3
+    data_size = 4
+    jd = np.sum(jcal2jd(*data))
+    data = np.array(jd, dtype='>i4')
+    _write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, '>i4')
+
+
 def write_string(fid, kind, data):
     """Writes a string tag"""
-    data_size = 1
-    _write(fid, str(data), kind, data_size, FIFF.FIFFT_STRING, '>c')
+
+    str_data = data.encode('utf-8')  # Use unicode or bytes depending on Py2/3
+    data_size = len(str_data)  # therefore compute size here
+    my_dtype = '>a'  # py2/3 compatible on writing -- don't ask me why
+    _write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, my_dtype)
 
 
 def write_name_list(fid, kind, data):
@@ -153,11 +167,13 @@ def get_machid():
     ids : array (length 2, int32)
         The machine identifier used in MNE.
     """
-    mac = re.findall('..', '%012x' % uuid.getnode())
-    mac += ['00', '00']  # add two more fields
+    mac = b('%012x' % uuid.getnode())  # byte conversion for Py3
+    mac = re.findall(b'..', mac)  # split the hex string into byte pairs
+    mac += [b'00', b'00']  # add two more fields
 
     # Convert to integer in reverse-order (for some reason)
-    mac = ''.join([h.decode('hex') for h in mac[::-1]])
+    from codecs import encode
+    mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]])
     ids = np.flipud(np.fromstring(mac, np.int32, count=2))
     return ids
 
@@ -195,7 +211,7 @@ def end_block(fid, kind):
     write_int(fid, FIFF.FIFF_BLOCK_END, kind)
 
 
-def start_file(fname):
+def start_file(fname, id_=None):
     """Opens a fif file for writing and writes the compulsory header tags
 
     Parameters
@@ -204,13 +220,15 @@ def start_file(fname):
         The name of the file to open. It is recommended
         that the name ends with .fif or .fif.gz. Can also be an
         already opened file.
+    id_ : dict | None
+        ID to use for the FIFF_FILE_ID.
     """
-    if isinstance(fname, basestring):
+    if isinstance(fname, string_types):
         if op.splitext(fname)[1].lower() == '.gz':
             logger.debug('Writing using gzip')
             # defaults to compression level 9, which is barely smaller but much
             # slower. 2 offers a good compromise.
-            fid = gzip.open(fname, "wb", compresslevel=2)
+            fid = gzip_open(fname, "wb", compresslevel=2)
         else:
             logger.debug('Writing using normal I/O')
             fid = open(fname, "wb")
@@ -219,7 +237,7 @@ def start_file(fname):
         fid = fname
         fid.seek(0)
     #   Write the compulsory items
-    write_id(fid, FIFF.FIFF_FILE_ID)
+    write_id(fid, FIFF.FIFF_FILE_ID, id_)
     write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
     write_int(fid, FIFF.FIFF_FREE_LIST, -1)
     return fid
@@ -320,7 +338,7 @@ def write_ch_info(fid, ch):
 
     fid.write(np.array(ch_name, dtype='>c').tostring())
     if len(ch_name) < 16:
-        fid.write('\0' * (16 - len(ch_name)))
+        fid.write(b('\0') * (16 - len(ch_name)))
 
 
 def write_dig_point(fid, dig):
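
With ``write_string`` now encoding to UTF-8 and reporting the true byte size,
writing a tagged string looks like this (sketch; ``end_file`` is the closer
assumed available in this module, file name hypothetical):

    from mne.io.write import start_file, write_string, end_file
    from mne.io.constants import FIFF

    fid = start_file('example.fif')   # writes the compulsory header tags
    write_string(fid, FIFF.FIFF_COMMENT, u'UTF-8 encoded comment')
    end_file(fid)                     # finalizes and closes the file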
diff --git a/mne/label.py b/mne/label.py
index 45398a7..ae17f22 100644
--- a/mne/label.py
+++ b/mne/label.py
@@ -1,22 +1,143 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
+from collections import defaultdict
+from colorsys import hsv_to_rgb, rgb_to_hsv
 from os import path as op
 import os
 import copy as cp
-import numpy as np
 import re
+from warnings import warn
+
+import numpy as np
 from scipy import linalg, sparse
 
-from .utils import get_subjects_dir, _check_subject, logger, verbose
+from .fixes import digitize, in1d
+from .utils import (get_subjects_dir, _check_subject, logger, verbose,
+                    deprecated)
 from .source_estimate import (_read_stc, mesh_edges, mesh_dist, morph_data,
                               SourceEstimate, spatial_src_connectivity)
-from .surface import read_surface
+from .source_space import add_source_space_distances
+from .surface import read_surface, fast_cross_3d
+from .source_space import SourceSpaces
 from .parallel import parallel_func, check_n_jobs
 from .stats.cluster_level import _find_clusters
+from .externals.six import b, string_types
+from .externals.six.moves import zip, xrange
+
+
+def _blend_colors(color_1, color_2):
+    """Blend two colors in HSV space
+
+    Parameters
+    ----------
+    color_1, color_2 : None | tuple
+        RGBA tuples with values between 0 and 1. None if no color is available.
+        If both colors are None, the output is None. If only one is None, the
+        output is the other color.
+
+    Returns
+    -------
+    color : None | tuple
+        RGBA tuple of the combined color. Saturation, value and alpha are
+        averaged, whereas the new hue is the angle halfway between the two
+        input colors' hues.
+    """
+    if color_1 is None and color_2 is None:
+        return None
+    elif color_1 is None:
+        return color_2
+    elif color_2 is None:
+        return color_1
+
+    r_1, g_1, b_1, a_1 = color_1
+    h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1)
+    r_2, g_2, b_2, a_2 = color_2
+    h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2)
+    hue_diff = abs(h_1 - h_2)
+    if hue_diff < 0.5:
+        h = min(h_1, h_2) + hue_diff / 2.
+    else:
+        h = max(h_1, h_2) + (1. - hue_diff) / 2.
+        h %= 1.
+    s = (s_1 + s_2) / 2.
+    v = (v_1 + v_2) / 2.
+    r, g, b = hsv_to_rgb(h, s, v)
+    a = (a_1 + a_2) / 2.
+    color = (r, g, b, a)
+    return color
+
+
+def _split_colors(color, n):
+    """Create n colors in HSV space that occupy a gradient in value
+
+    Parameters
+    ----------
+    color : tuple
+        RGBA tuple with values between 0 and 1.
+    n : int >= 2
+        Number of colors on the gradient.
+
+    Returns
+    -------
+    colors : tuple of tuples, len = n
+        N RGBA tuples that occupy a gradient in value (low to high) but share
+        saturation and hue with the input color.
+    """
+    r, g, b, a = color
+    h, s, v = rgb_to_hsv(r, g, b)
+    gradient_range = np.sqrt(n / 10.)
+    if v > 0.5:
+        v_max = min(0.95, v + gradient_range / 2)
+        v_min = max(0.05, v_max - gradient_range)
+    else:
+        v_min = max(0.05, v - gradient_range / 2)
+        v_max = min(0.95, v_min + gradient_range)
+
+    hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n))
+    rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors)
+    rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors)
+    return tuple(rgba_colors)
+
+
+def _n_colors(n, bytes_=False, cmap='hsv'):
+    """Produce a list of n unique RGBA color tuples based on a colormap
+
+    Parameters
+    ----------
+    n : int
+        Number of colors.
+    bytes_ : bool
+        Return colors as integer values between 0 and 255 (instead of floats
+        between 0 and 1).
+    cmap : str
+        Which colormap to use.
+
+    Returns
+    -------
+    colors : array, shape (n, 4)
+        RGBA color values.
+    """
+    n_max = 2 ** 10
+    if n > n_max:
+        err = "Can't produce more than %i unique colors" % n_max
+        raise NotImplementedError(err)
+
+    from matplotlib.cm import get_cmap
+    cm = get_cmap(cmap, n_max)
+    pos = np.linspace(0, 1, n, False)
+    colors = cm(pos, bytes=bytes_)
+    if bytes_:
+        # make sure colors are unique
+        for ii, c in enumerate(colors):
+            if np.any(np.all(colors[:ii] == c, 1)):
+                raise RuntimeError('Could not get %d unique colors from %s '
+                                   'colormap. Try using a different colormap.'
+                                   % (n, cmap))
+    return colors
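
A sketch of the blending helper above (private, so imported from
``mne.label`` directly); red and blue meet halfway around the hue circle at
magenta:

    from mne.label import _blend_colors

    red = (1., 0., 0., 1.)
    blue = (0., 0., 1., 1.)
    print(_blend_colors(red, blue))  # -> (1.0, 0.0, 1.0, 1.0), i.e. magenta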
 
 
 class Label(object):
@@ -42,11 +163,16 @@ class Label(object):
         Kept as information but not used by the object itself.
     subject : str | None
         Name of the subject the label is from.
+    color : None | matplotlib color
+        Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
     Attributes
     ----------
+    color : None | tuple
+        Default label color, represented as RGBA tuple with values between 0
+        and 1.
     comment : str
         Comment from the first line of the label file.
     hemi : 'lh' | 'rh'
@@ -67,20 +193,29 @@ class Label(object):
     """
     @verbose
     def __init__(self, vertices, pos=None, values=None, hemi=None, comment="",
-                 name=None, filename=None, subject=None, verbose=None):
-        if not isinstance(hemi, basestring):
+                 name=None, filename=None, subject=None, color=None,
+                 verbose=None):
+        # check parameters
+        if not isinstance(hemi, string_types):
             raise ValueError('hemi must be a string, not %s' % type(hemi))
         vertices = np.asarray(vertices)
         if np.any(np.diff(vertices.astype(int)) <= 0):
-            raise ValueError('Vertices must be ordered in increasing '
-                             'order.')
+            raise ValueError('Vertices must be ordered in increasing order.')
+
+        if color is not None:
+            from matplotlib.colors import colorConverter
+            color = colorConverter.to_rgba(color)
 
         if values is None:
             values = np.ones(len(vertices))
+        else:
+            values = np.asarray(values)
+
         if pos is None:
             pos = np.zeros((len(vertices), 3))
-        values = np.asarray(values)
-        pos = np.asarray(pos)
+        else:
+            pos = np.asarray(pos)
+
         if not (len(vertices) == len(values) == len(pos)):
             err = ("vertices, values and pos need to have same length (number "
                    "of vertices)")
@@ -97,6 +232,7 @@ class Label(object):
         self.comment = comment
         self.verbose = verbose
         self.subject = _check_subject(None, subject, False)
+        self.color = color
         self.name = name
         self.filename = filename
 
@@ -108,6 +244,7 @@ class Label(object):
         self.comment = state['comment']
         self.verbose = state['verbose']
         self.subject = state.get('subject', None)
+        self.color = state.get('color', None)
         self.name = state['name']
         self.filename = state['filename']
 
@@ -119,6 +256,7 @@ class Label(object):
                    comment=self.comment,
                    verbose=self.verbose,
                    subject=self.subject,
+                   color=self.color,
                    name=self.name,
                    filename=self.filename)
         return out
@@ -149,7 +287,8 @@ class Label(object):
                     lh, rh = self.copy(), other.copy()
                 else:
                     lh, rh = other.copy(), self.copy()
-                return BiHemiLabel(lh, rh, name=name)
+                color = _blend_colors(self.color, other.color)
+                return BiHemiLabel(lh, rh, name, color)
         else:
             raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
 
@@ -184,19 +323,35 @@ class Label(object):
             pos = np.vstack((self.pos, other.pos))
             values = np.hstack((self.values, other.values))
 
+        indcs = np.argsort(vertices)
+        vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs]
+
+        comment = "%s + %s" % (self.comment, other.comment)
+
         name0 = self.name if self.name else 'unnamed'
         name1 = other.name if other.name else 'unnamed'
+        name = "%s + %s" % (name0, name1)
 
-        indcs = np.argsort(vertices)
-        vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs]
+        color = _blend_colors(self.color, other.color)
+        verbose = self.verbose or other.verbose
 
-        label = Label(vertices, pos=pos, values=values, hemi=self.hemi,
-                      comment="%s + %s" % (self.comment, other.comment),
-                      name="%s + %s" % (name0, name1))
+        label = Label(vertices, pos, values, self.hemi, comment, name, None,
+                      self.subject, color, verbose)
         return label
 
     def save(self, filename):
-        "calls write_label to write the label to disk"
+        """Write to disk as FreeSurfer *.label file
+
+        Parameters
+        ----------
+        filename : string
+            Path to label file to produce.
+
+        Notes
+        -----
+        Note that due to file specification limitations, the Label's subject
+        and color attributes are not saved to disk.
+        """
         write_label(filename, self)
 
     def copy(self):
@@ -209,6 +364,63 @@ class Label(object):
         """
         return cp.deepcopy(self)
 
+    def fill(self, src, name=None):
+        """Fill the surface between sources for a label defined in source space
+
+        Parameters
+        ----------
+        src : SourceSpaces
+            Source space in which the label was defined. If a source space is
+            provided, the label is expanded to fill in surface vertices that
+            lie between the vertices included in the source space. For the
+            added vertices, ``pos`` is filled in with positions from the
+            source space, and ``values`` is filled in from the closest source
+            space vertex.
+        name : None | str
+            Name for the new Label (default is self.name).
+
+        Returns
+        -------
+        label : Label
+            The label covering the same vertices in source space but also
+            including intermediate surface vertices.
+        """
+        # find source space patch info
+        if self.hemi == 'lh':
+            hemi_src = src[0]
+        elif self.hemi == 'rh':
+            hemi_src = src[1]
+
+        if not np.all(in1d(self.vertices, hemi_src['vertno'])):
+            msg = "Source space does not contain all of the label's vertices"
+            raise ValueError(msg)
+
+        nearest = hemi_src['nearest']
+        if nearest is None:
+            msg = ("Computing patch info for source space, this can take "
+                   "a while. In order to avoid this in the future, run "
+                   "mne.add_source_space_distances() on the source space "
+                   "and save it.")
+            logger.warn(msg)
+            add_source_space_distances(src)
+            nearest = hemi_src['nearest']
+
+        # find new vertices
+        include = in1d(nearest, self.vertices, False)
+        vertices = np.nonzero(include)[0]
+
+        # values
+        nearest_in_label = digitize(nearest[vertices], self.vertices, True)
+        values = self.values[nearest_in_label]
+        # pos
+        pos = hemi_src['rr'][vertices]
+
+        if name is None:
+            name = self.name
+        label = Label(vertices, pos, values, self.hemi, self.comment, name,
+                      None, self.subject, self.color)
+        return label
+
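
A sketch of using ``fill`` (source space file name hypothetical; the label
must be restricted to vertices present in the source space):

    import mne

    src = mne.read_source_spaces('sample-oct-6-src.fif')
    filled = label.fill(src, name=label.name + '-filled')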
     @verbose
     def smooth(self, subject=None, smooth=2, grade=None,
                subjects_dir=None, n_jobs=1, copy=True, verbose=None):
@@ -313,7 +525,7 @@ class Label(object):
         with label.vertices.
         """
         subject_from = _check_subject(self.subject, subject_from)
-        if not isinstance(subject_to, basestring):
+        if not isinstance(subject_to, string_types):
             raise TypeError('"subject_to" must be entered as a string')
         if not isinstance(smooth, int):
             raise ValueError('smooth must be an integer')
@@ -350,6 +562,42 @@ class Label(object):
         label.subject = subject_to
         return label
 
+    def split(self, parts=2, subject=None, subjects_dir=None,
+              freesurfer=False):
+        """Split the Label into two or more parts
+
+        Parameters
+        ----------
+        parts : int >= 2 | tuple of str
+            A sequence of strings specifying label names for the new labels
+            (from posterior to anterior), or the number of new labels to create
+            (default is 2). If a number is specified, names of the new labels
+            will be the input label's name with div1, div2 etc. appended.
+        subject : None | str
+            Subject which this label belongs to (needed to locate surface file;
+            should only be specified if it is not specified in the label).
+        subjects_dir : None | str
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        freesurfer : bool
+            By default (``False``) ``split_label`` uses an algorithm that is
+            slightly optimized for performance and numerical precision. Set
+            ``freesurfer`` to ``True`` in order to replicate label splits from
+            FreeSurfer's ``mris_divide_parcellation``.
+
+        Returns
+        -------
+        labels : list of Label (len = n_parts)
+            The labels, starting from the lowest to the highest end of the
+            projection axis.
+
+        Notes
+        -----
+        Works by finding the label's principal eigen-axis on the spherical
+        surface, projecting all label vertex coordinates onto this axis and
+        dividing them at regular spatial intervals.
+        """
+        return split_label(self, parts, subject, subjects_dir, freesurfer)
+
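
The new ``split`` method delegates to ``split_label`` below; a usage sketch
(the label's subject and SUBJECTS_DIR assumed set):

    # two divisions along the label's principal axis
    post, ant = label.split(parts=2)
    # or with explicit names instead of the div1/div2 suffixes
    post, ant = label.split(parts=('V1-post', 'V1-ant'))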
 
 class BiHemiLabel(object):
     """A freesurfer/MNE label with vertices in both hemispheres
@@ -373,7 +621,7 @@ class BiHemiLabel(object):
     """
     hemi = 'both'
 
-    def __init__(self, lh, rh, name=None):
+    def __init__(self, lh, rh, name=None, color=None):
         if lh.subject != rh.subject:
             raise ValueError('lh.subject (%s) and rh.subject (%s) must '
                              'agree' % (lh.subject, rh.subject))
@@ -381,6 +629,7 @@ class BiHemiLabel(object):
         self.rh = rh
         self.name = name
         self.subject = lh.subject
+        self.color = color
 
     def __repr__(self):
         temp = "<BiHemiLabel  |  %s, lh : %i vertices,  rh : %i vertices>"
@@ -406,10 +655,11 @@ class BiHemiLabel(object):
             raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
 
         name = '%s + %s' % (self.name, other.name)
-        return BiHemiLabel(lh, rh, name=name)
+        color = _blend_colors(self.color, other.color)
+        return BiHemiLabel(lh, rh, name, color)
 
 
-def read_label(filename, subject=None):
+def read_label(filename, subject=None, color=None):
     """Read FreeSurfer Label file
 
     Parameters
@@ -422,6 +672,10 @@ def read_label(filename, subject=None):
         incompatible labels and SourceEstimates (e.g., ones from other
         subjects). Note that due to file specification limitations, the
         subject name isn't saved to or loaded from files written to disk.
+    color : None | matplotlib color
+        Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
+        Note that due to file specification limitations, the color isn't saved
+        to or loaded from files written to disk.
 
     Returns
     -------
@@ -431,17 +685,15 @@ def read_label(filename, subject=None):
             vertices       vertex indices (0 based, column 1)
             pos            locations in meters (columns 2 - 4 divided by 1000)
             values         values at the vertices (column 5)
+
+    See Also
+    --------
+    read_labels_from_annot
     """
-    fid = open(filename, 'r')
-    comment = fid.readline().replace('\n', '')[1:]
-    if subject is not None and not isinstance(subject, basestring):
+    if subject is not None and not isinstance(subject, string_types):
         raise TypeError('subject must be a string')
 
-    nv = int(fid.readline())
-    data = np.empty((5, nv))
-    for i, line in enumerate(fid):
-        data[:, i] = line.split()
-
+    # find hemi
     basename = op.basename(filename)
     if basename.endswith('lh.label') or basename.startswith('lh.'):
         hemi = 'lh'
@@ -450,7 +702,24 @@ def read_label(filename, subject=None):
     else:
         raise ValueError('Cannot find which hemisphere it is. File should end'
                          ' with lh.label or rh.label')
-    fid.close()
+
+    # find name
+    if basename.startswith(('lh.', 'rh.')):
+        if basename.endswith('.label'):
+            basename_ = basename[3:-6]
+        else:
+            basename_ = basename[3:]
+    else:
+        basename_ = basename[:-9]
+    name = "%s-%s" % (basename_, hemi)
+
+    # read the file
+    with open(filename, 'r') as fid:
+        comment = fid.readline().replace('\n', '')[1:]
+        nv = int(fid.readline())
+        data = np.empty((5, nv))
+        for i, line in enumerate(fid):
+            data[:, i] = line.split()
 
     # let's make sure everything is ordered correctly
     vertices = np.array(data[0], dtype=np.int32)
@@ -461,8 +730,8 @@ def read_label(filename, subject=None):
     pos = pos[order]
     values = values[order]
 
-    label = Label(vertices=vertices, pos=pos, values=values, hemi=hemi,
-                  comment=comment, filename=filename, subject=subject)
+    label = Label(vertices, pos, values, hemi, comment, name, filename,
+                  subject, color)
 
     return label
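
Reading a label with the new ``color`` argument (path hypothetical); note that
the name is now derived from the file name as shown above:

    from mne import read_label

    label = read_label('lh.V1.label', subject='sample', color=(1., 0., 0., 1.))
    print(label.name)  # -> 'V1-lh'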
 
@@ -479,6 +748,15 @@ def write_label(filename, label, verbose=None):
         The label object to save.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    Note that due to file specification limitations, the Label's subject and
+    color attributes are not saved to disk.
+
+    See Also
+    --------
+    write_labels_to_annot
     """
     hemi = label.hemi
     path_head, name = op.split(filename)
@@ -490,20 +768,166 @@ def write_label(filename, label, verbose=None):
 
     logger.info('Saving label to : %s' % filename)
 
-    fid = open(filename, 'wb')
-    n_vertices = len(label.vertices)
-    data = np.zeros((n_vertices, 5), dtype=np.float)
-    data[:, 0] = label.vertices
-    data[:, 1:4] = 1e3 * label.pos
-    data[:, 4] = label.values
-    fid.write("#%s\n" % label.comment)
-    fid.write("%d\n" % n_vertices)
-    for d in data:
-        fid.write("%d %f %f %f %f\n" % tuple(d))
-
+    with open(filename, 'wb') as fid:
+        n_vertices = len(label.vertices)
+        data = np.zeros((n_vertices, 5), dtype=np.float)
+        data[:, 0] = label.vertices
+        data[:, 1:4] = 1e3 * label.pos
+        data[:, 4] = label.values
+        fid.write(b("#%s\n" % label.comment))
+        fid.write(b("%d\n" % n_vertices))
+        for d in data:
+            fid.write(b("%d %f %f %f %f\n" % tuple(d)))
     return label
 
 
+def split_label(label, parts=2, subject=None, subjects_dir=None,
+                freesurfer=False):
+    """Split a Label into two or more parts
+
+    Parameters
+    ----------
+    label : Label | str
+        Label which is to be split (Label object or path to a label file).
+    parts : int >= 2 | tuple of str
+        A sequence of strings specifying label names for the new labels (from
+        posterior to anterior), or the number of new labels to create (default
+        is 2). If a number is specified, names of the new labels will be the
+        input label's name with div1, div2 etc. appended.
+    subject : None | str
+        Subject which this label belongs to (needed to locate surface file;
+        should only be specified if it is not specified in the label).
+    subjects_dir : None | str
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    freesurfer : bool
+        By default (``False``) ``split_label`` uses an algorithm that is
+        slightly optimized for performance and numerical precision. Set
+        ``freesurfer`` to ``True`` in order to replicate label splits from
+        FreeSurfer's ``mris_divide_parcellation``.
+
+    Returns
+    -------
+    labels : list of Label (len = n_parts)
+        The labels, starting from the lowest to the highest end of the
+        projection axis.
+
+    Notes
+    -----
+    Works by finding the label's principal eigen-axis on the spherical surface,
+    projecting all label vertex coordinates onto this axis and dividing them at
+    regular spatial intervals.
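+
+    Examples
+    --------
+    Illustrative only; assumes ``label`` is a Label for a subject whose
+    spherical surface is available::
+
+        >>> labels = split_label(label, 2, subject='fsaverage')  # doctest: +SKIP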
+    """
+    # find the label
+    if isinstance(label, BiHemiLabel):
+        raise TypeError("Can only split labels restricted to one hemisphere.")
+    elif isinstance(label, string_types):
+        label = read_label(label)
+
+    # find the parts
+    if np.isscalar(parts):
+        n_parts = int(parts)
+        if label.name.endswith(('lh', 'rh')):
+            basename = label.name[:-3]
+            name_ext = label.name[-3:]
+        else:
+            basename = label.name
+            name_ext = ''
+        name_pattern = "%s_div%%i%s" % (basename, name_ext)
+        names = tuple(name_pattern % i for i in range(1, n_parts + 1))
+    else:
+        names = parts
+        n_parts = len(names)
+
+    if n_parts < 2:
+        raise ValueError("Can't split label into %i parts" % n_parts)
+
+    # find the subject
+    subjects_dir = get_subjects_dir(subjects_dir)
+    if label.subject is None and subject is None:
+        raise ValueError("The subject needs to be specified.")
+    elif subject is None:
+        subject = label.subject
+    elif label.subject is None:
+        pass
+    elif subject != label.subject:
+        err = ("The label specifies a different subject (%r) from the subject "
+               "parameter (%r)." % label.subject, subject)
+        raise ValueError(err)
+
+    # find the spherical surface
+    surf_fname = '.'.join((label.hemi, 'sphere'))
+    surf_path = os.path.join(subjects_dir, subject, "surf", surf_fname)
+    surface_points, surface_tris = read_surface(surf_path)
+    # find the label coordinates on the surface
+    points = surface_points[label.vertices]
+    center = np.mean(points, axis=0)
+    centered_points = points - center
+
+    # find the label's normal
+    if freesurfer:
+        # find the Freesurfer vertex closest to the center
+        distance = np.sqrt(np.sum(centered_points ** 2, axis=1))
+        i_closest = np.argmin(distance)
+        closest_vertex = label.vertices[i_closest]
+        # find the normal according to freesurfer convention
+        idx = np.any(surface_tris == closest_vertex, axis=1)
+        tris_for_normal = surface_tris[idx]
+        r1 = surface_points[tris_for_normal[:, 0], :]
+        r2 = surface_points[tris_for_normal[:, 1], :]
+        r3 = surface_points[tris_for_normal[:, 2], :]
+        tri_normals = fast_cross_3d((r2 - r1), (r3 - r1))
+        normal = np.mean(tri_normals, axis=0)
+        normal /= linalg.norm(normal)
+    else:
+        # Normal of the center
+        normal = center / linalg.norm(center)
+
+    # project all vertex coordinates on the tangential plane for this point
+    q, _ = linalg.qr(normal[:, np.newaxis])
+    tangent_u = q[:, 1:]
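+    # (the remaining columns of q form an orthonormal basis of the plane
+    #  orthogonal to the normal)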
+    m_obs = np.dot(centered_points, tangent_u)
+    # find principal eigendirection
+    m_cov = np.dot(m_obs.T, m_obs)
+    w, vr = linalg.eig(m_cov)
+    i = np.argmax(w)
+    eigendir = vr[:, i]
+    # project back into 3d space
+    axis = np.dot(tangent_u, eigendir)
+    # orient the axis from posterior to anterior
+    if axis[1] < 0:
+        axis *= -1
+
+    # project the label on the axis
+    proj = np.dot(points, axis)
+
+    # assign mark (new label index)
+    proj -= proj.min()
+    proj /= (proj.max() / n_parts)
+    mark = proj // 1
+    mark[mark == n_parts] = n_parts - 1
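+    # e.g. with parts=2, proj is rescaled to [0, 2] and mark becomes 0 or 1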
+
+    # colors
+    if label.color is None:
+        colors = (None,) * n_parts
+    else:
+        colors = _split_colors(label.color, n_parts)
+
+    # construct new labels
+    labels = []
+    for i, name, color in zip(range(n_parts), names, colors):
+        idx = (mark == i)
+        vert = label.vertices[idx]
+        pos = label.pos[idx]
+        values = label.values[idx]
+        hemi = label.hemi
+        comment = label.comment
+        lbl = Label(vert, pos, values, hemi, comment, name, None, subject,
+                    color)
+        labels.append(lbl)
+
+    return labels
+
+
 def label_time_courses(labelfile, stcfile):
     """Extract the time courses corresponding to a label file from an stc file
 
@@ -582,32 +1006,38 @@ def label_sign_flip(label, src):
     return flip
 
 
-def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
+def stc_to_label(stc, src=None, smooth=None, connected=False,
+                 subjects_dir=None):
     """Compute a label from the non-zero sources in an stc object.
 
     Parameters
     ----------
     stc : SourceEstimate
         The source estimates.
-    src : list of dict | string | None
+    src : SourceSpaces | str | None
         The source space over which the source estimates are defined.
         If it's a string it should be the subject name (e.g. fsaverage).
         Can be None if stc.subject is not None.
-    smooth : int
-        Number of smoothing steps to use.
+    smooth : bool
+        Fill in vertices on the cortical surface that are not in the source
+        space based on the closest source space vertex (requires src to be
+        a SourceSpaces instance). The default currently smooths with a
+        deprecated method; this will change to True in v0.9, so pass an
+        explicit boolean until then to avoid a deprecation warning.
     connected : bool
         If True a list of connected labels will be returned in each
         hemisphere. The labels are ordered in decreasing order of the
         maximum value in the stc.
-    subjects_dir : string, or None
+    subjects_dir : str | None
         Path to SUBJECTS_DIR if it is not set in the environment.
 
     Returns
     -------
     labels : list of Labels | list of list of Labels
         The generated labels. If connected is False, it returns
-        a list of Labels (One per hemisphere). If no Label is available
-        in an hemisphere, None is returned. If connected is True,
+        a list of Labels (one per hemisphere). If no Label is available
+        in a hemisphere, None is returned. If connected is True,
         it returns for each hemisphere a list of connected labels
         ordered in decreasing order of the maximum value in the stc.
         If no Label is available in a hemisphere, an empty list is returned.
@@ -615,7 +1045,7 @@ def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
     src = stc.subject if src is None else src
     if src is None:
         raise ValueError('src cannot be None if stc.subject is None')
-    if isinstance(src, basestring):
+    if isinstance(src, string_types):
         subject = src
     else:
         subject = stc.subject
@@ -623,7 +1053,29 @@ def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
     if not isinstance(stc, SourceEstimate):
         raise ValueError('SourceEstimate should be surface source estimates')
 
-    if isinstance(src, basestring):
+    if not isinstance(smooth, bool):
+        if smooth is None:
+            msg = ("The smooth parameter was not explicitly specified. The "
+                   "default behavior of stc_to_label() will change in v0.9 "
+                   "to filling the label using source space patch "
+                   "information. In order to avoid this warning, set smooth "
+                   "to a boolean explicitly.")
+            smooth = 5
+        else:
+            msg = ("The smooth parameter of stc_to_label() was specified as "
+                   "int. This value is deprecated and will raise an error in "
+                   "v0.9. In order to avoid this warning, set smooth to a "
+                   "boolean.")
+        warn(msg, DeprecationWarning)
+
+    if isinstance(src, string_types):
+        if connected:
+            raise ValueError('The option to return only connected labels is '
+                             'only available if source spaces are provided.')
+        if isinstance(smooth, bool) and smooth:
+            msg = ("stc_to_label with smooth='patch' requires src to be an "
+                   "instance of SourceSpace")
+            raise ValueError(msg)
         subjects_dir = get_subjects_dir(subjects_dir)
         surf_path_from = op.join(subjects_dir, src, 'surf')
         rr_lh, tris_lh = read_surface(op.join(surf_path_from,
@@ -632,11 +1084,9 @@ def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
                                       'rh.white'))
         rr = [rr_lh, rr_rh]
         tris = [tris_lh, tris_rh]
-        if connected:
-            raise ValueError('The option to return only connected labels'
-                             ' is only available if a source space is passed'
-                             ' as parameter.')
     else:
+        if not isinstance(src, SourceSpaces):
+            raise TypeError('src must be a string or an instance of '
+                            'SourceSpaces')
         if len(src) != 2:
             raise ValueError('source space should contain the 2 hemispheres')
         rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
@@ -647,22 +1097,22 @@ def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
     cnt = 0
     cnt_full = 0
     for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
-                                    zip(['lh', 'rh'], stc.vertno, tris, rr)):
-
+            zip(['lh', 'rh'], stc.vertno, tris, rr)):
         this_data = stc.data[cnt:cnt + len(this_vertno)]
-
         e = mesh_edges(this_tris)
         e.data[e.data == 2] = 1
         n_vertices = e.shape[0]
         e = e + sparse.eye(n_vertices, n_vertices)
 
-        if connected:
-            if not isinstance(src, basestring):  # XXX : ugly
-                inuse = np.where(src[hemi_idx]['inuse'])[0]
-                tmp = np.zeros((len(inuse), this_data.shape[1]))
-                this_vertno_idx = np.searchsorted(inuse, this_vertno)
-                tmp[this_vertno_idx] = this_data
-                this_data = tmp
+        if connected:  # we know src *must* be a SourceSpaces now
+            vertno = np.where(src[hemi_idx]['inuse'])[0]
+            if len(np.setdiff1d(this_vertno, vertno)) > 0:
+                raise RuntimeError('stc contains vertices not present '
+                                   'in source space, did you morph?')
+            tmp = np.zeros((len(vertno), this_data.shape[1]))
+            this_vertno_idx = np.searchsorted(vertno, this_vertno)
+            tmp[this_vertno_idx] = this_data
+            this_data = tmp
             offset = cnt_full + len(this_data)
             this_src_conn = src_conn[cnt_full:offset, cnt_full:offset].tocoo()
             this_data_abs_max = np.abs(this_data).max(axis=1)
@@ -673,7 +1123,7 @@ def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
             clusters_max = np.argsort([np.max(this_data_abs_max[c])
                                        for c in clusters])[::-1]
             clusters = [clusters[k] for k in clusters_max]
-            clusters = [inuse[c] for c in clusters]
+            clusters = [vertno[c] for c in clusters]
         else:
             clusters = [this_vertno[np.any(this_data, axis=1)]]
 
@@ -688,19 +1138,23 @@ def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
                 this_labels = []
         else:
             this_labels = []
-            for c in clusters:
+            colors = _n_colors(len(clusters))
+            for c, color in zip(clusters, colors):
                 idx_use = c
-                for k in range(smooth):
-                    e_use = e[:, idx_use]
-                    data1 = e_use * np.ones(len(idx_use))
-                    idx_use = np.where(data1)[0]
-
-                label = Label(vertices=idx_use,
-                              pos=this_rr[idx_use],
-                              values=np.ones(len(idx_use)),
-                              hemi=hemi,
-                              comment='Label from stc',
-                              subject=subject)
+                if isinstance(smooth, bool) and smooth:
+                    label = Label(idx_use, this_rr[idx_use], None, hemi,
+                                  'Label from stc', subject=subject,
+                                  color=color).fill(src)
+                else:
+                    for k in range(smooth):
+                        e_use = e[:, idx_use]
+                        data1 = e_use * np.ones(len(idx_use))
+                        idx_use = np.where(data1)[0]
+
+                    label = Label(idx_use, this_rr[idx_use], None, hemi,
+                                  'Label from stc', subject=subject,
+                                  color=color)
+
                 this_labels.append(label)
 
             if not connected:
@@ -711,28 +1165,31 @@ def stc_to_label(stc, src=None, smooth=5, connected=False, subjects_dir=None):
     return labels
 
 
-def _verts_within_dist(graph, source, max_dist):
+def _verts_within_dist(graph, sources, max_dist):
     """Find all vertices wihin a maximum geodesic distance from source
 
     Parameters
     ----------
     graph : scipy.sparse.csr_matrix
-        Sparse matrix with distances between adjacent vertices
-    source : int
-        Source vertex
-    max_dist: float
-        Maximum geodesic distance
+        Sparse matrix with distances between adjacent vertices.
+    sources : list of int
+        Source vertices.
+    max_dist : float
+        Maximum geodesic distance.
 
     Returns
     -------
     verts : array
-        Vertices within max_dist
+        Vertices within max_dist.
     dist : array
-        Distances from source vertex
+        Distances from the nearest source vertex.
     """
     dist_map = {}
-    dist_map[source] = 0
-    verts_added_last = [source]
+    verts_added_last = []
+    for source in sources:
+        dist_map[source] = 0
+        verts_added_last.append(source)
+
     # add neighbors until no more neighbors within max_dist can be found
     while len(verts_added_last) > 0:
         verts_added = []
@@ -753,32 +1210,39 @@ def _verts_within_dist(graph, source, max_dist):
                         verts_added.append(j)
         verts_added_last = verts_added
 
-    verts = np.sort(np.array(dist_map.keys(), dtype=np.int))
+    verts = np.sort(np.array(list(dist_map.keys()), dtype=np.int))
     dist = np.array([dist_map[v] for v in verts])
 
     return verts, dist
 
 
-def _grow_labels(seeds, extents, hemis, dist, vert):
+def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
     """Helper for parallelization of grow_labels
     """
     labels = []
-    for seed, extent, hemi in zip(seeds, extents, hemis):
+    for seed, extent, hemi, name in zip(seeds, extents, hemis, names):
         label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent)
 
         # create a label
-        comment = 'Circular label: seed=%d, extent=%0.1fmm' % (seed, extent)
+        if len(seed) == 1:
+            seed_repr = str(seed[0])
+        else:
+            seed_repr = ','.join(map(str, seed))
+        comment = 'Circular label: seed=%s, extent=%0.1fmm' % (seed_repr,
+                                                               extent)
         label = Label(vertices=label_verts,
                       pos=vert[hemi][label_verts],
                       values=label_dist,
                       hemi=hemi,
-                      comment=comment)
+                      comment=comment,
+                      name=str(name),
+                      subject=subject)
         labels.append(label)
     return labels
 
 
-def grow_labels(subject, seeds, extents, hemis, subjects_dir=None,
-                n_jobs=1):
+def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
+                overlap=True, names=None):
     """Generate circular labels in source space with region growing
 
     This function generates a number of labels in source space by growing
@@ -795,33 +1259,43 @@ def grow_labels(subject, seeds, extents, hemis, subjects_dir=None,
     ----------
     subject : string
         Name of the subject as in SUBJECTS_DIR.
-    seeds : array or int
-        Seed vertex numbers.
-    extents : array or float
+    seeds : int | list
+        Seed, or list of seeds. Each seed can be either a vertex number or
+        a list of vertex numbers.
+    extents : array | float
         Extents (radius in mm) of the labels.
-    hemis : array or int
+    hemis : array | int
         Hemispheres to use for the labels (0: left, 1: right).
     subjects_dir : string
         Path to SUBJECTS_DIR if not set in the environment.
     n_jobs : int
         Number of jobs to run in parallel. Likely only useful if tens
-        or hundreds of labels are being expanded simultaneously.
+        or hundreds of labels are being expanded simultaneously. Does not
+        apply with ``overlap=False``.
+    overlap : bool
+        Produce overlapping labels. If True (default), the resulting labels
+        can be overlapping. If False, each label will be grown one step at a
+        time, and occupied territory will not be invaded.
+    names : None | list of str
+        Assign names to the new labels (list needs to have the same length as
+        seeds).
 
     Returns
     -------
-    labels : list of Labels. The labels' ``comment`` attribute contains
-        information on the seed vertex and extent; the ``values``  attribute
-        contains distance from the seed in millimeters
-
+    labels : list of Label
+        The labels' ``comment`` attribute contains information on the seed
+        vertex and extent; the ``values`` attribute contains the distance
+        from the seed in millimeters.
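+
+    Examples
+    --------
+    Illustrative only; the seed vertex numbers here are made up::
+
+        >>> labels = grow_labels('fsaverage', seeds=[1440, 8769],
+        ...                      extents=5., hemis=[0, 1])  # doctest: +SKIP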
     """
     subjects_dir = get_subjects_dir(subjects_dir)
     n_jobs = check_n_jobs(n_jobs)
 
     # make sure the inputs are arrays
-    seeds = np.atleast_1d(seeds)
+    if np.isscalar(seeds):
+        seeds = [seeds]
+    seeds = np.atleast_1d([np.atleast_1d(seed) for seed in seeds])
     extents = np.atleast_1d(extents)
     hemis = np.atleast_1d(hemis)
-
     n_seeds = len(seeds)
 
     if len(extents) != 1 and len(extents) != n_seeds:
@@ -839,7 +1313,21 @@ def grow_labels(subject, seeds, extents, hemis, subjects_dir=None,
     if len(hemis) == 1:
         hemis = np.tile(hemis, n_seeds)
 
-    hemis = ['lh' if h == 0 else 'rh' for h in hemis]
+    hemis = np.array(['lh' if h == 0 else 'rh' for h in hemis])
+
+    # names
+    if names is None:
+        names = ["Label_%i-%s" % items for items in enumerate(hemis)]
+    else:
+        if np.isscalar(names):
+            names = [names]
+        if len(names) != n_seeds:
+            raise ValueError('The names parameter has to be None or have '
+                             'length len(seeds)')
+        for i, hemi in enumerate(hemis):
+            if not names[i].endswith(hemi):
+                names[i] = '-'.join((names[i], hemi))
+    names = np.array(names)
 
     # load the surfaces and create the distance graphs
     tris, vert, dist = {}, {}, {}
@@ -848,13 +1336,93 @@ def grow_labels(subject, seeds, extents, hemis, subjects_dir=None,
         vert[hemi], tris[hemi] = read_surface(surf_fname)
         dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
 
-    # create the patches
-    parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs)
-    seeds = np.array_split(seeds, n_jobs)
-    extents = np.array_split(extents, n_jobs)
-    hemis = np.array_split(hemis, n_jobs)
-    labels = sum(parallel(my_grow_labels(s, e, h, dist, vert)
-                          for s, e, h in zip(seeds, extents, hemis)), [])
+    if overlap:
+        # create the patches
+        parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs)
+        seeds = np.array_split(seeds, n_jobs)
+        extents = np.array_split(extents, n_jobs)
+        hemis = np.array_split(hemis, n_jobs)
+        names = np.array_split(names, n_jobs)
+        labels = sum(parallel(my_grow_labels(s, e, h, n, dist, vert, subject)
+                              for s, e, h, n
+                              in zip(seeds, extents, hemis, names)), [])
+    else:
+        # special procedure for non-overlapping labels
+        labels = _grow_nonoverlapping_labels(subject, seeds, extents, hemis,
+                                             vert, dist, names)
+
+    # add a unique color to each label
+    colors = _n_colors(len(labels))
+    for label, color in zip(labels, colors):
+        label.color = color
+
+    return labels
+
+
+def _grow_nonoverlapping_labels(subject, seeds_, extents_, hemis, vertices_,
+                                graphs, names_):
+    """Grow labels while ensuring that they don't overlap
+    """
+    labels = []
+    for hemi in set(hemis):
+        hemi_index = (hemis == hemi)
+        seeds = seeds_[hemi_index]
+        extents = extents_[hemi_index]
+        names = names_[hemi_index]
+        graph = graphs[hemi]  # distance graph
+        n_vertices = len(vertices_[hemi])
+        n_labels = len(seeds)
+
+        # prepare parcellation
+        parc = np.empty(n_vertices, dtype='int32')
+        parc[:] = -1
+
+        # initialize active sources
+        sources = {}  # vert -> (label, dist_from_seed)
+        edge = []  # queue of vertices to process
+        for label, seed in enumerate(seeds):
+            if np.any(parc[seed] >= 0):
+                raise ValueError("Overlapping seeds")
+            parc[seed] = label
+            for s in np.atleast_1d(seed):
+                sources[s] = (label, 0.)
+                edge.append(s)
+
+        # grow from sources
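+        # (breadth-first expansion from all seeds; a claimed vertex is only
+        #  re-assigned when another seed reaches it over a shorter distance)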
+        while edge:
+            vert_from = edge.pop(0)
+            label, old_dist = sources[vert_from]
+
+            # add neighbors within allowable distance
+            row = graph[vert_from, :]
+            for vert_to, dist in zip(row.indices, row.data):
+                new_dist = old_dist + dist
+
+                # abort if outside of extent
+                if new_dist > extents[label]:
+                    continue
+
+                vert_to_label = parc[vert_to]
+                if vert_to_label >= 0:
+                    _, vert_to_dist = sources[vert_to]
+                    # abort if the vertex is occupied by a closer seed
+                    if new_dist > vert_to_dist:
+                        continue
+                    elif vert_to in edge:
+                        edge.remove(vert_to)
+
+                # assign label value
+                parc[vert_to] = label
+                sources[vert_to] = (label, new_dist)
+                edge.append(vert_to)
+
+        # convert parc to labels
+        for i in range(n_labels):
+            vertices = np.nonzero(parc == i)[0]
+            name = str(names[i])
+            label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
+            labels.append(label_)
+
     return labels
 
 
@@ -906,7 +1474,7 @@ def _read_annot(fname):
 
             names = list()
             ctab = np.zeros((n_entries, 5), np.int)
-            for i in xrange(n_entries):
+            for i in range(n_entries):
                 name_length = np.fromfile(fid, '>i4', 1)[0]
                 name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
                 names.append(name)
@@ -924,7 +1492,7 @@ def _read_annot(fname):
             _ = np.fromfile(fid, "|S%d" % length, 1)[0]  # Orig table path
             entries_to_read = np.fromfile(fid, '>i4', 1)[0]
             names = list()
-            for i in xrange(entries_to_read):
+            for i in range(entries_to_read):
                 _ = np.fromfile(fid, '>i4', 1)[0]  # Structure
                 name_length = np.fromfile(fid, '>i4', 1)[0]
                 name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
@@ -967,10 +1535,25 @@ def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir):
 
 
 @verbose
+ at deprecated("labels_from_parc() will be removed in release 0.9. Use "
+            "read_labels_from_annot() instead (note the change in return "
+            "values).")
 def labels_from_parc(subject, parc='aparc', hemi='both', surf_name='white',
                      annot_fname=None, regexp=None, subjects_dir=None,
                      verbose=None):
-    """Read labels from FreeSurfer parcellation
+    """Deprecated (will be removed in mne 0.9). Use read_labels_from_annot()
+    instead"""
+    labels = read_labels_from_annot(subject, parc, hemi, surf_name,
+                                    annot_fname, regexp, subjects_dir, verbose)
+    label_colors = [l.color for l in labels]
+    return labels, label_colors
+
+
+@verbose
+def read_labels_from_annot(subject, parc='aparc', hemi='both',
+                           surf_name='white', annot_fname=None, regexp=None,
+                           subjects_dir=None, verbose=None):
+    """Read labels from a FreeSurfer annotation file
 
     Note: Only cortical labels will be returned.
 
@@ -996,12 +1579,11 @@ def labels_from_parc(subject, parc='aparc', hemi='both', surf_name='white',
         Path to SUBJECTS_DIR if it is not set in the environment.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
     Returns
     -------
     labels : list of Label
         The labels, sorted by label name (ascending).
-    colors : list of tuples
-        RGBA color for obtained from the parc color table for each label.
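+
+    Examples
+    --------
+    Illustrative only; assumes the FreeSurfer fsaverage subject is
+    available::
+
+        >>> labels = read_labels_from_annot('fsaverage', 'aparc')  # doctest: +SKIP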
     """
     logger.info('Reading labels from parcellation..')
 
@@ -1011,10 +1593,14 @@ def labels_from_parc(subject, parc='aparc', hemi='both', surf_name='white',
     annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
                                           subjects_dir)
 
+    if regexp is not None:
+        # allow for convenient substring match
+        r_ = re.compile('.*%s.*' % regexp
+                        if regexp.replace('_', '').isalnum() else regexp)
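+        # e.g. regexp='temporal' matches 'middletemporal-lh' and
+        # 'superiortemporal-rh'; a pattern containing regexp syntax is
+        # used as-is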
+
     # now we are ready to create the labels
     n_read = 0
     labels = list()
-    label_colors = list()
     for fname, hemi in zip(annot_fname, hemis):
         # read annotation
         annot, ctab, label_names = _read_annot(fname)
@@ -1032,40 +1618,30 @@ def labels_from_parc(subject, parc='aparc', hemi='both', surf_name='white',
             if len(vertices) == 0:
                 # label is not part of cortical surface
                 continue
+            name = label_name.decode() + '-' + hemi
+            if (regexp is not None) and not r_.match(name):
+                continue
             pos = vert_pos[vertices, :]
             values = np.zeros(len(vertices))
-            name = label_name + '-' + hemi
+            label_rgba = tuple(label_rgba / 255.)
             label = Label(vertices, pos, values, hemi, name=name,
-                          subject=subject)
+                          subject=subject, color=label_rgba)
             labels.append(label)
 
-            # store the color
-            label_rgba = tuple(label_rgba / 255.)
-            label_colors.append(label_rgba)
-
         n_read = len(labels) - n_read
         logger.info('   read %d labels from %s' % (n_read, fname))
 
-    if regexp is not None:
-        # allow for convenient substring match
-        r_ = (re.compile('.*%s.*' % regexp if regexp.replace('_', '').isalnum()
-              else regexp))
+    # sort the labels by label name
+    labels = sorted(labels, key=lambda l: l.name)
 
-    # sort the labels and colors by label name
-    names = [label.name for label in labels]
-    labels_ = zip(*((label, color) for (name, label, color) in sorted(
-                    zip(names, labels, label_colors))
-                        if (r_.match(name) if regexp else True)))
-    if labels_:
-        labels, label_colors = labels_
-    else:
-        raise RuntimeError('The regular expression supplied did not match.')
-    # convert tuples to lists
-    labels = list(labels)
-    label_colors = list(label_colors)
-    logger.info('[done]')
+    if len(labels) == 0:
+        msg = 'No labels found.'
+        if regexp is not None:
+            msg += ' Maybe the regular expression %r did not match?' % regexp
+        raise RuntimeError(msg)
 
-    return labels, label_colors
+    logger.info('[done]')
+    return labels
 
 
 def _write_annot(fname, annot, ctab, names):
@@ -1120,46 +1696,62 @@ def _write_annot(fname, annot, ctab, names):
 
 
 @verbose
-def parc_from_labels(labels, colors, subject=None, parc=None,
+ at deprecated("parc_from_labels() will be removed in release 0.9. Use "
+            "write_labels_to_annot() instead (note the change in the function "
+            "signature).")
+def parc_from_labels(labels, colors=None, subject=None, parc=None,
                      annot_fname=None, overwrite=False, subjects_dir=None,
                      verbose=None):
-    """Create a FreeSurfer parcellation from labels
+    """Deprecated (will be removed in mne 0.9). Use write_labels_to_annot()
+    instead"""
+    if colors is not None:
+        # do some input checking
+        colors = np.asarray(colors)
+        if colors.shape[1] != 4:
+            raise ValueError('Each color must have 4 values')
+        if len(colors) != len(labels):
+            raise ValueError('colors must have the same length as labels')
+        if np.any(colors < 0) or np.any(colors > 1):
+            raise ValueError('color values must be between 0 and 1')
+
+        # assign colors to labels
+        labels = [label.copy() for label in labels]
+        for label, color in zip(labels, colors):
+            label.color = color
+
+    write_labels_to_annot(labels, subject, parc, overwrite, subjects_dir,
+                          annot_fname, verbose=verbose)
+
+
+@verbose
+def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
+                          subjects_dir=None, annot_fname=None,
+                          colormap='hsv', verbose=None):
+    """Create a FreeSurfer annotation from a list of labels
 
     Parameters
     ----------
     labels : list with instances of mne.Label
         The labels to create a parcellation from.
-    colors : list of tuples | None
-        RGBA color to write into the colortable for each label. If None,
-        the colors are created based on the alphabetical order of the label
-        names. Note: Per hemisphere, each label must have a unique color,
-        otherwise the stored parcellation will be invalid.
     subject : str | None
         The subject for which to write the parcellation for.
     parc : str | None
         The parcellation name to use.
-    annot_fname : str | None
-        Filename of the .annot file. If not None, only this file is written
-        and 'parc' and 'subject' are ignored.
     overwrite : bool
         Overwrite files if they already exist.
     subjects_dir : string, or None
         Path to SUBJECTS_DIR if it is not set in the environment.
+    annot_fname : str | None
+        Filename of the .annot file. If not None, only this file is written
+        and 'parc' and 'subject' are ignored.
+    colormap : str
+        Colormap to use to generate label colors for labels that do not
+        have a color specified.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
     logger.info('Writing labels to parcellation..')
 
-    # do some input checking
-    if colors is not None:
-        colors = np.asarray(colors)
-        if colors.shape[1] != 4:
-            raise ValueError('Each color must have 4 values')
-        if len(colors) != len(labels):
-            raise ValueError('colors must have the same length as labels')
-        if np.any(colors < 0) or np.any(colors > 1):
-            raise ValueError('color values must be between 0 and 1')
-
     subjects_dir = get_subjects_dir(subjects_dir)
 
     # get the .annot filenames and hemispheres
@@ -1172,8 +1764,14 @@ def parc_from_labels(labels, colors, subject=None, parc=None,
                 raise ValueError('File %s exists. Use "overwrite=True" to '
                                  'overwrite it' % fname)
 
-    names = ['%s-%s' % (label.name, label.hemi) for label in labels]
-
+    # prepare container for data to save:
+    to_save = []
+    # keep track of issues found in the labels
+    duplicate_colors = []
+    invalid_colors = []
+    overlap = []
+    no_color = (-1, -1, -1, -1)
+    no_color_rgb = (-1, -1, -1)
     for hemi, fname in zip(hemis, annot_fname):
         hemi_labels = [label for label in labels if label.hemi == hemi]
         n_hemi_labels = len(hemi_labels)
@@ -1181,48 +1779,150 @@ def parc_from_labels(labels, colors, subject=None, parc=None,
             # no labels for this hemisphere
             continue
         hemi_labels.sort(key=lambda label: label.name)
-        if colors is not None:
-            hemi_colors = [colors[names.index('%s-%s' % (label.name, hemi))]
-                           for label in hemi_labels]
-        else:
-            import matplotlib.pyplot as plt
-            hemi_colors = plt.cm.spectral(np.linspace(0, 1, n_hemi_labels))
-
-        # Creat annot and color table array to write
-        max_vert = 0
-        for label in hemi_labels:
-            max_vert = max(max_vert, np.max(label.vertices))
-        n_vertices = max_vert + 1
-        annot = np.zeros(n_vertices, dtype=np.int)
-        ctab = np.zeros((n_hemi_labels, 4), dtype=np.int32)
-        for ii, (label, color) in enumerate(zip(hemi_labels, hemi_colors)):
-            ctab[ii] = np.round(255 * np.asarray(color))
-            if np.all(ctab[ii, :3] == 0):
+
+        # convert colors to 0-255 RGBA tuples
+        hemi_colors = [no_color if label.color is None else
+                       tuple(int(round(255 * i)) for i in label.color)
+                       for label in hemi_labels]
+        ctab = np.array(hemi_colors, dtype=np.int32)
+        ctab_rgb = ctab[:, :3]
+
+        # make dict to check label colors (for annot ID only R, G and B count)
+        labels_by_color = defaultdict(list)
+        for label, color in zip(hemi_labels, ctab_rgb):
+            labels_by_color[tuple(color)].append(label.name)
+
+        # check label colors
+        for color, names in labels_by_color.items():
+            if color == no_color_rgb:
+                continue
+
+            if color == (0, 0, 0):
                 # we cannot have an all-zero color, otherwise e.g. tksurfer
                 # refuses to read the parcellation
-                if colors is not None:
-                    logger.warning('    Colormap contains color with, "r=0, '
-                                   'g=0, b=0" value. Some FreeSurfer tools '
-                                   'may fail to read the parcellation')
-                else:
-                    ctab[ii, :3] = 1
-
-            # create the annotation id from the color
-            annot_id = (ctab[ii, 0] + ctab[ii, 1] * 2 ** 8
-                        + ctab[ii, 2] * 2 ** 16)
+                msg = ('    At least one label contains a color with "r=0, '
+                       'g=0, b=0" value. Some FreeSurfer tools may fail to '
+                       'read the parcellation')
+                logger.warning(msg)
+
+            if any(i > 255 for i in color):
+                msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
+                invalid_colors.append(msg)
+
+            if len(names) > 1:
+                msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
+                duplicate_colors.append(msg)
+
+        # replace None values (labels with unspecified color)
+        if labels_by_color[no_color_rgb]:
+            default_colors = _n_colors(n_hemi_labels, bytes_=True,
+                                       cmap=colormap)
+            safe_color_i = 0  # keep track of colors known to be in hemi_colors
+            for i in range(n_hemi_labels):
+                if ctab[i, 0] == -1:
+                    color = default_colors[i]
+                    # make sure not to add a duplicate color
+                    while np.any(np.all(color[:3] == ctab_rgb, 1)):
+                        color = default_colors[safe_color_i]
+                        safe_color_i += 1
+                    # assign the color
+                    ctab[i] = color
+
+        # find number of vertices in surface
+        if subject is not None and subjects_dir is not None:
+            fpath = os.path.join(subjects_dir, subject, 'surf',
+                                 '%s.white' % hemi)
+            points, _ = read_surface(fpath)
+            n_vertices = len(points)
+        else:
+            max_vert = max(np.max(label.vertices) for label in hemi_labels)
+            n_vertices = max_vert + 1
+            msg = ('    Number of vertices in the surface could not be '
+                   'verified because the surface file could not be found; '
+                   'specify subject and subjects_dir parameters.')
+            logger.warning(msg)
+
+        # Create annot and color table array to write
+        annot = np.empty(n_vertices, dtype=np.int)
+        annot[:] = -1
+        # create the annotation ids from the colors
+        annot_id_coding = np.array((1, 2 ** 8, 2 ** 16))
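+        # each RGB color maps to id R + G * 2 ** 8 + B * 2 ** 16,
+        # e.g. (1, 0, 0) -> 1, (0, 1, 0) -> 256, (0, 0, 1) -> 65536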
+        annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
+        for label, annot_id in zip(hemi_labels, annot_ids):
+            # make sure the label is not overwriting another label
+            if np.any(annot[label.vertices] != -1):
+                other_ids = set(annot[label.vertices])
+                other_ids.discard(-1)
+                other_indices = (annot_ids.index(i) for i in other_ids)
+                other_names = (hemi_labels[i].name for i in other_indices)
+                other_repr = ', '.join(other_names)
+                msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
+                overlap.append(msg)
 
             annot[label.vertices] = annot_id
 
+        hemi_names = [label.name for label in hemi_labels]
+
+        # Assign unlabeled vertices to an "unknown" label
+        unlabeled = (annot == -1)
+        if np.any(unlabeled):
+            msg = ("Assigning %i unlabeled vertices to "
+                   "'unknown-%s'" % (unlabeled.sum(), hemi))
+            logger.info(msg)
+
+            # find an unused color (try shades of gray first)
+            for i in range(1, 257):
+                if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
+                    break
+            if i < 256:
+                color = (i, i, i, 0)
+            else:
+                err = ("Need one free shade of gray for 'unknown' label. "
+                       "Please modify your label colors, or assign the "
+                       "unlabeled vertices to another label.")
+                raise ValueError(err)
+
+            # find the id
+            annot_id = np.sum(annot_id_coding * color[:3])
+
+            # update data to write
+            annot[unlabeled] = annot_id
+            ctab = np.vstack((ctab, color))
+            hemi_names.append("unknown")
+
         # convert to FreeSurfer alpha values
         ctab[:, 3] = 255 - ctab[:, 3]
 
-        hemi_names = [label.name for label in hemi_labels]
-
         # remove hemi ending in names
         hemi_names = [name[:-3] if name.endswith(hemi) else name
                       for name in hemi_names]
-        # write it
-        logger.info('   writing %d labels to %s' % (n_hemi_labels, fname))
+
+        to_save.append((fname, annot, ctab, hemi_names))
+
+    issues = []
+    if duplicate_colors:
+        msg = ("Some labels have the same color values (all labels in one "
+               "hemisphere must have a unique color):")
+        duplicate_colors.insert(0, msg)
+        issues.append(os.linesep.join(duplicate_colors))
+    if invalid_colors:
+        msg = ("Some labels have invalid color values (all colors should be "
+               "RGBA tuples with values between 0 and 1)")
+        invalid_colors.insert(0, msg)
+        issues.append(os.linesep.join(invalid_colors))
+    if overlap:
+        msg = ("Some labels occupy vertices that are also occupied by one or "
+               "more other labels. Each vertex can only be occupied by a "
+               "single label in *.annot files.")
+        overlap.insert(0, msg)
+        issues.append(os.linesep.join(overlap))
+
+    if issues:
+        raise ValueError('\n\n'.join(issues))
+
+    # write it
+    for fname, annot, ctab, hemi_names in to_save:
+        logger.info('   writing %d labels to %s' % (len(hemi_names), fname))
         _write_annot(fname, annot, ctab, hemi_names)
 
     logger.info('[done]')
diff --git a/mne/layouts/EEG1005.lay b/mne/layouts/EEG1005.lay
new file mode 100644
index 0000000..a600468
--- /dev/null
+++ b/mne/layouts/EEG1005.lay
@@ -0,0 +1,337 @@
+1	-0.485328	1.493835	0.069221	0.051916	Fp1
+2	0.000000	1.570696	0.069221	0.051916	Fpz
+3	0.485501	1.493884	0.069221	0.051916	Fp2
+4	-1.154207	1.588656	0.069221	0.051916	AF9
+5	-0.923319	1.270781	0.069221	0.051916	AF7
+6	-0.706117	1.226029	0.069221	0.051916	AF5
+7	-0.477022	1.197254	0.069221	0.051916	AF3
+8	-0.240008	1.182594	0.069221	0.051916	AF1
+9	0.000000	1.178022	0.069221	0.051916	AFz
+10	0.240008	1.182594	0.069221	0.051916	AF2
+11	0.476904	1.197159	0.069221	0.051916	AF4
+12	0.706117	1.226029	0.069221	0.051916	AF6
+13	0.923319	1.270781	0.069221	0.051916	AF8
+14	1.154207	1.588656	0.069221	0.051916	AF10
+15	-1.588376	1.154294	0.069221	0.051916	F9
+16	-1.270781	0.923319	0.069221	0.051916	F7
+17	-0.968950	0.852434	0.069221	0.051916	F5
+18	-0.652084	0.812357	0.069221	0.051916	F3
+19	-0.327689	0.791876	0.069221	0.051916	F1
+20	0.000000	0.785398	0.069221	0.051916	Fz
+21	0.327689	0.791876	0.069221	0.051916	F2
+22	0.652084	0.812357	0.069221	0.051916	F4
+23	0.968950	0.852434	0.069221	0.051916	F6
+24	1.270781	0.923319	0.069221	0.051916	F8
+25	1.588496	1.154168	0.069221	0.051916	F10
+26	-1.867677	0.606883	0.069221	0.051916	FT9
+27	-1.493930	0.485359	0.069221	0.051916	FT7
+28	-1.126134	0.436152	0.069221	0.051916	FC5
+29	-0.752811	0.409634	0.069221	0.051916	FC3
+30	-0.376942	0.396836	0.069221	0.051916	FC1
+31	0.000000	0.392844	0.069221	0.051916	FCz
+32	0.376942	0.396836	0.069221	0.051916	FC2
+33	0.752811	0.409634	0.069221	0.051916	FC4
+34	1.126134	0.436152	0.069221	0.051916	FC6
+35	1.493930	0.485359	0.069221	0.051916	FT8
+36	1.867677	0.606883	0.069221	0.051916	FT10
+37	-1.963487	-0.000213	0.069221	0.051916	T9
+38	-1.570796	0.000000	0.069221	0.051916	T7
+39	-1.178106	0.000128	0.069221	0.051916	C5
+40	-0.785398	0.000111	0.069221	0.051916	C3
+41	-0.392736	0.000205	0.069221	0.051916	C1
+42	0.000000	0.000200	0.069221	0.051916	Cz
+43	0.392736	0.000103	0.069221	0.051916	C2
+44	0.785398	0.000111	0.069221	0.051916	C4
+45	1.178106	0.000128	0.069221	0.051916	C6
+46	1.570796	-0.000000	0.069221	0.051916	T8
+47	1.963487	-0.000000	0.069221	0.051916	T10
+48	-1.867677	-0.606883	0.069221	0.051916	TP9
+49	-1.494026	-0.485389	0.069221	0.051916	TP7
+50	-1.126048	-0.435839	0.069221	0.051916	CP5
+51	-0.752775	-0.409460	0.069221	0.051916	CP3
+52	-0.376804	-0.396486	0.069221	0.051916	CP1
+53	-0.000000	-0.392551	0.069221	0.051916	CPz
+54	0.376804	-0.396486	0.069221	0.051916	CP2
+55	0.752795	-0.409357	0.069221	0.051916	CP4
+56	1.126048	-0.435839	0.069221	0.051916	CP6
+57	1.494026	-0.485389	0.069221	0.051916	TP8
+58	1.867603	-0.607072	0.069221	0.051916	TP10
+59	-1.588496	-1.154168	0.069221	0.051916	P9
+60	-1.270862	-0.923378	0.069221	0.051916	P7
+61	-0.969077	-0.852293	0.069221	0.051916	P5
+62	-0.652231	-0.811998	0.069221	0.051916	P3
+63	-0.327776	-0.791360	0.069221	0.051916	P1
+64	-0.000000	-0.785257	0.069221	0.051916	Pz
+65	0.327776	-0.791360	0.069221	0.051916	P2
+66	0.652231	-0.811998	0.069221	0.051916	P4
+67	0.969077	-0.852293	0.069221	0.051916	P6
+68	1.270862	-0.923378	0.069221	0.051916	P8
+69	1.588496	-1.154168	0.069221	0.051916	P10
+70	-1.154207	-1.588656	0.069221	0.051916	PO9
+71	-0.923319	-1.270781	0.069221	0.051916	PO7
+72	-0.706303	-1.225606	0.069221	0.051916	PO5
+73	-0.476710	-1.197888	0.069221	0.051916	PO3
+74	-0.240097	-1.182523	0.069221	0.051916	PO1
+75	-0.000000	-1.178022	0.069221	0.051916	POz
+76	0.240223	-1.182505	0.069221	0.051916	PO2
+77	0.476710	-1.197888	0.069221	0.051916	PO4
+78	0.706303	-1.225606	0.069221	0.051916	PO6
+79	0.923319	-1.270781	0.069221	0.051916	PO8
+80	1.154207	-1.588656	0.069221	0.051916	PO10
+81	-0.485359	-1.493930	0.069221	0.051916	O1
+82	-0.000000	-1.570796	0.069221	0.051916	Oz
+83	0.485359	-1.493930	0.069221	0.051916	O2
+84	-0.606613	-1.867239	0.069221	0.051916	I1
+85	-0.000000	-1.963478	0.069221	0.051916	Iz
+86	0.606613	-1.867239	0.069221	0.051916	I2
+87	-0.802226	1.574520	0.069221	0.051916	AFp9h
+88	-0.626475	1.393612	0.069221	0.051916	AFp7h
+89	-0.451133	1.382849	0.069221	0.051916	AFp5h
+90	-0.271959	1.376738	0.069221	0.051916	AFp3h
+91	-0.090887	1.374548	0.069221	0.051916	AFp1h
+92	0.090887	1.374548	0.069221	0.051916	AFp2h
+93	0.271959	1.376738	0.069221	0.051916	AFp4h
+94	0.451133	1.382849	0.069221	0.051916	AFp6h
+95	0.626475	1.393612	0.069221	0.051916	AFp8h
+96	0.802226	1.574520	0.069221	0.051916	AFp10h
+97	-1.249550	1.249550	0.069221	0.051916	AFF9h
+98	-0.982948	1.075122	0.069221	0.051916	AFF7h
+99	-0.713694	1.024626	0.069221	0.051916	AFF5h
+100	-0.432315	0.996167	0.069221	0.051916	AFF3h
+101	-0.144727	0.983315	0.069221	0.051916	AFF1h
+102	0.144727	0.983315	0.069221	0.051916	AFF2h
+103	0.432315	0.996167	0.069221	0.051916	AFF4h
+104	0.713694	1.024626	0.069221	0.051916	AFF6h
+105	0.982881	1.075049	0.069221	0.051916	AFF8h
+106	1.249550	1.249550	0.069221	0.051916	AFF10h
+107	-1.574645	0.802293	0.069221	0.051916	FFT9h
+108	-1.232019	0.675885	0.069221	0.051916	FFT7h
+109	-0.886990	0.627578	0.069221	0.051916	FFC5h
+110	-0.534535	0.601827	0.069221	0.051916	FFC3h
+111	-0.178478	0.590622	0.069221	0.051916	FFC1h
+112	0.178478	0.590622	0.069221	0.051916	FFC2h
+113	0.534535	0.601827	0.069221	0.051916	FFC4h
+114	0.886990	0.627578	0.069221	0.051916	FFC6h
+115	1.232019	0.675885	0.069221	0.051916	FFT8h
+116	1.574645	0.802293	0.069221	0.051916	FFT10h
+117	-1.745475	0.276484	0.069221	0.051916	FTT9h
+118	-1.358553	0.230430	0.069221	0.051916	FTT7h
+119	-0.971386	0.211155	0.069221	0.051916	FCC5h
+120	-0.583084	0.201295	0.069221	0.051916	FCC3h
+121	-0.194460	0.196994	0.069221	0.051916	FCC1h
+122	0.194460	0.196994	0.069221	0.051916	FCC2h
+123	0.583084	0.201295	0.069221	0.051916	FCC4h
+124	0.971386	0.211155	0.069221	0.051916	FCC6h
+125	1.358553	0.230430	0.069221	0.051916	FTT8h
+126	1.745475	0.276484	0.069221	0.051916	FTT10h
+127	-1.745506	-0.276309	0.069221	0.051916	TTP9h
+128	-1.358573	-0.230293	0.069221	0.051916	TTP7h
+129	-0.971375	-0.211008	0.069221	0.051916	CCP5h
+130	-0.583085	-0.200906	0.069221	0.051916	CCP3h
+131	-0.194448	-0.196679	0.069221	0.051916	CCP1h
+132	0.194448	-0.196679	0.069221	0.051916	CCP2h
+133	0.583078	-0.201010	0.069221	0.051916	CCP4h
+134	0.971375	-0.211008	0.069221	0.051916	CCP6h
+135	1.358573	-0.230293	0.069221	0.051916	TTP8h
+136	1.745475	-0.276484	0.069221	0.051916	TTP10h
+137	-1.574667	-0.802213	0.069221	0.051916	TPP9h
+138	-1.232021	-0.675979	0.069221	0.051916	TPP7h
+139	-0.887025	-0.627306	0.069221	0.051916	CPP5h
+140	-0.534524	-0.601312	0.069221	0.051916	CPP3h
+141	-0.178473	-0.590144	0.069221	0.051916	CPP1h
+142	0.178473	-0.590144	0.069221	0.051916	CPP2h
+143	0.534524	-0.601312	0.069221	0.051916	CPP4h
+144	0.887025	-0.627306	0.069221	0.051916	CPP6h
+145	1.231976	-0.676032	0.069221	0.051916	TPP8h
+146	1.574586	-0.802352	0.069221	0.051916	TPP10h
+147	-1.249639	-1.249639	0.069221	0.051916	PPO9h
+148	-0.983137	-1.074700	0.069221	0.051916	PPO7h
+149	-0.713821	-1.024109	0.069221	0.051916	PPO5h
+150	-0.432363	-0.995909	0.069221	0.051916	PPO3h
+151	-0.144761	-0.982953	0.069221	0.051916	PPO1h
+152	0.144761	-0.982953	0.069221	0.051916	PPO2h
+153	0.432253	-0.995937	0.069221	0.051916	PPO4h
+154	0.713967	-1.023998	0.069221	0.051916	PPO6h
+155	0.983137	-1.074700	0.069221	0.051916	PPO8h
+156	1.249639	-1.249639	0.069221	0.051916	PPO10h
+157	-0.802293	-1.574645	0.069221	0.051916	POO9h
+158	-0.626849	-1.393237	0.069221	0.051916	POO7h
+159	-0.451236	-1.382715	0.069221	0.051916	POO5h
+160	-0.271951	-1.377572	0.069221	0.051916	POO3h
+161	-0.090910	-1.374606	0.069221	0.051916	POO1h
+162	0.090910	-1.374606	0.069221	0.051916	POO2h
+163	0.271951	-1.377572	0.069221	0.051916	POO4h
+164	0.451236	-1.382715	0.069221	0.051916	POO6h
+165	0.626849	-1.393237	0.069221	0.051916	POO8h
+166	0.802293	-1.574645	0.069221	0.051916	POO10h
+167	-0.276453	-1.745460	0.069221	0.051916	OI1h
+168	0.276453	-1.745460	0.069221	0.051916	OI2h
+169	-0.245655	1.551367	0.069221	0.051916	Fp1h
+170	0.245655	1.551367	0.069221	0.051916	Fp2h
+171	-1.038573	1.429729	0.069221	0.051916	AF9h
+172	-0.816811	1.245775	0.069221	0.051916	AF7h
+173	-0.592502	1.210176	0.069221	0.051916	AF5h
+174	-0.359066	1.188527	0.069221	0.051916	AF3h
+175	-0.120203	1.179114	0.069221	0.051916	AF1h
+176	0.120212	1.179076	0.069221	0.051916	AF2h
+177	0.359066	1.188527	0.069221	0.051916	AF4h
+178	0.592545	1.210263	0.069221	0.051916	AF6h
+179	0.816811	1.245775	0.069221	0.051916	AF8h
+180	1.038668	1.429679	0.069221	0.051916	AF10h
+181	-1.429588	1.038701	0.069221	0.051916	F9h
+182	-1.122287	0.883303	0.069221	0.051916	F7h
+183	-0.811863	0.829210	0.069221	0.051916	F5h
+184	-0.490601	0.800049	0.069221	0.051916	F3h
+185	-0.164017	0.787126	0.069221	0.051916	F1h
+186	0.164017	0.787126	0.069221	0.051916	F2h
+187	0.490601	0.800049	0.069221	0.051916	F4h
+188	0.811863	0.829210	0.069221	0.051916	F6h
+189	1.122287	0.883303	0.069221	0.051916	F8h
+190	1.429588	1.038701	0.069221	0.051916	F10h
+191	-1.680799	0.546075	0.069221	0.051916	FT9h
+192	-1.310995	0.457012	0.069221	0.051916	FT7h
+193	-0.939857	0.420814	0.069221	0.051916	FC5h
+194	-0.565142	0.401905	0.069221	0.051916	FC3h
+195	-0.188491	0.393826	0.069221	0.051916	FC1h
+196	0.188491	0.393826	0.069221	0.051916	FC2h
+197	0.565142	0.401905	0.069221	0.051916	FC4h
+198	0.939857	0.420814	0.069221	0.051916	FC6h
+199	1.310995	0.457012	0.069221	0.051916	FT8h
+200	1.680740	0.546236	0.069221	0.051916	FT10h
+201	-1.767191	0.000000	0.069221	0.051916	T9h
+202	-1.374500	0.000000	0.069221	0.051916	T7h
+203	-0.981850	0.000118	0.069221	0.051916	C5h
+204	-0.589058	0.000212	0.069221	0.051916	C3h
+205	-0.196395	0.000101	0.069221	0.051916	C1h
+206	0.196395	0.000201	0.069221	0.051916	C2h
+207	0.589058	0.000212	0.069221	0.051916	C4h
+208	0.981850	0.000118	0.069221	0.051916	C6h
+209	1.374500	-0.000000	0.069221	0.051916	T8h
+210	1.767191	-0.000000	0.069221	0.051916	T10h
+211	-1.680646	-0.546088	0.069221	0.051916	TP9h
+212	-1.310970	-0.456960	0.069221	0.051916	TP7h
+213	-0.939815	-0.420500	0.069221	0.051916	CP5h
+214	-0.565062	-0.401491	0.069221	0.051916	CP3h
+215	-0.188515	-0.393352	0.069221	0.051916	CP1h
+216	0.188515	-0.393352	0.069221	0.051916	CP2h
+217	0.565062	-0.401491	0.069221	0.051916	CP4h
+218	0.939815	-0.420500	0.069221	0.051916	CP6h
+219	1.310970	-0.456960	0.069221	0.051916	TP8h
+220	1.680646	-0.546088	0.069221	0.051916	TP10h
+221	-1.429668	-1.038758	0.069221	0.051916	P9h
+222	-1.122286	-0.883271	0.069221	0.051916	P7h
+223	-0.812037	-0.829137	0.069221	0.051916	P5h
+224	-0.490726	-0.799336	0.069221	0.051916	P3h
+225	-0.164146	-0.786762	0.069221	0.051916	P1h
+226	0.164146	-0.786762	0.069221	0.051916	P2h
+227	0.490600	-0.799436	0.069221	0.051916	P4h
+228	0.812037	-0.829137	0.069221	0.051916	P6h
+229	1.122286	-0.883271	0.069221	0.051916	P8h
+230	1.429668	-1.038758	0.069221	0.051916	P10h
+231	-1.038821	-1.429709	0.069221	0.051916	PO9h
+232	-0.816502	-1.246067	0.069221	0.051916	PO7h
+233	-0.593079	-1.209372	0.069221	0.051916	PO5h
+234	-0.359230	-1.188332	0.069221	0.051916	PO3h
+235	-0.120221	-1.179168	0.069221	0.051916	PO1h
+236	0.120348	-1.179159	0.069221	0.051916	PO2h
+237	0.359230	-1.188332	0.069221	0.051916	PO4h
+238	0.593079	-1.209372	0.069221	0.051916	PO6h
+239	0.816502	-1.246067	0.069221	0.051916	PO8h
+240	1.038710	-1.429804	0.069221	0.051916	PO10h
+241	-0.245671	-1.551466	0.069221	0.051916	O1h
+242	0.245671	-1.551466	0.069221	0.051916	O2h
+243	-0.307129	-1.939338	0.069221	0.051916	I1h
+244	0.307129	-1.939338	0.069221	0.051916	I2h
+245	-0.891328	1.749684	0.069221	0.051916	AFp9
+246	-0.713143	1.399582	0.069221	0.051916	AFp7
+247	-0.539182	1.387878	0.069221	0.051916	AFp5
+248	-0.361777	1.379743	0.069221	0.051916	AFp3
+249	-0.181624	1.374948	0.069221	0.051916	AFp1
+250	0.000000	1.374461	0.069221	0.051916	AFpz
+251	0.181624	1.374948	0.069221	0.051916	AFp2
+252	0.361802	1.379839	0.069221	0.051916	AFp4
+253	0.539182	1.387878	0.069221	0.051916	AFp6
+254	0.713143	1.399582	0.069221	0.051916	AFp8
+255	0.891489	1.749582	0.069221	0.051916	AFp10
+256	-1.388504	1.388504	0.069221	0.051916	AFF9
+257	-1.110721	1.110721	0.069221	0.051916	AFF7
+258	-0.850463	1.046170	0.069221	0.051916	AFF5
+259	-0.574170	1.008058	0.069221	0.051916	AFF3
+260	-0.288981	0.988233	0.069221	0.051916	AFF1
+261	0.000000	0.981739	0.069221	0.051916	AFFz
+262	0.288981	0.988233	0.069221	0.051916	AFF2
+263	0.574170	1.008058	0.069221	0.051916	AFF4
+264	0.850463	1.046170	0.069221	0.051916	AFF6
+265	1.110721	1.110721	0.069221	0.051916	AFF8
+266	1.388504	1.388504	0.069221	0.051916	AFF10
+267	-1.749576	0.891591	0.069221	0.051916	FFT9
+268	-1.399582	0.713143	0.069221	0.051916	FFT7
+269	-1.060830	0.648168	0.069221	0.051916	FFC5
+270	-0.711350	0.612390	0.069221	0.051916	FFC3
+271	-0.356750	0.594619	0.069221	0.051916	FFC1
+272	0.000000	0.589085	0.069221	0.051916	FFCz
+273	0.356750	0.594619	0.069221	0.051916	FFC2
+274	0.711350	0.612390	0.069221	0.051916	FFC4
+275	1.060749	0.648119	0.069221	0.051916	FFC6
+276	1.399582	0.713143	0.069221	0.051916	FFT8
+277	1.749576	0.891591	0.069221	0.051916	FFT10
+278	-1.939489	0.307119	0.069221	0.051916	FTT9
+279	-1.551442	0.245824	0.069221	0.051916	FTT7
+280	-1.165132	0.219351	0.069221	0.051916	FCC5
+281	-0.777319	0.205363	0.069221	0.051916	FCC3
+282	-0.388766	0.198515	0.069221	0.051916	FCC1
+283	0.000000	0.196434	0.069221	0.051916	FCCz
+284	0.388766	0.198515	0.069221	0.051916	FCC2
+285	0.777319	0.205363	0.069221	0.051916	FCC4
+286	1.165132	0.219351	0.069221	0.051916	FCC6
+287	1.551466	0.245671	0.069221	0.051916	FTT8
+288	1.939489	0.307119	0.069221	0.051916	FTT10
+289	-1.939553	-0.307197	0.069221	0.051916	TTP9
+290	-1.551565	-0.245687	0.069221	0.051916	TTP7
+291	-1.165206	-0.219084	0.069221	0.051916	CCP5
+292	-0.777275	-0.205069	0.069221	0.051916	CCP3
+293	-0.388806	-0.198175	0.069221	0.051916	CCP1
+294	-0.000000	-0.196218	0.069221	0.051916	CCPz
+295	0.388801	-0.198275	0.069221	0.051916	CCP2
+296	0.777275	-0.205069	0.069221	0.051916	CCP4
+297	1.165206	-0.219084	0.069221	0.051916	CCP6
+298	1.551565	-0.245687	0.069221	0.051916	TTP8
+299	1.939553	-0.307197	0.069221	0.051916	TTP10
+300	-1.749664	-0.891531	0.069221	0.051916	TPP9
+301	-1.399671	-0.713188	0.069221	0.051916	TPP7
+302	-1.060852	-0.647970	0.069221	0.051916	CPP5
+303	-0.711356	-0.612379	0.069221	0.051916	CPP3
+304	-0.356663	-0.594548	0.069221	0.051916	CPP1
+305	-0.000000	-0.588863	0.069221	0.051916	CPPz
+306	0.356778	-0.594448	0.069221	0.051916	CPP2
+307	0.711384	-0.612287	0.069221	0.051916	CPP4
+308	1.060852	-0.647970	0.069221	0.051916	CPP6
+309	1.399671	-0.713188	0.069221	0.051916	TPP8
+310	1.749664	-0.891531	0.069221	0.051916	TPP10
+311	-1.388427	-1.388427	0.069221	0.051916	PPO9
+312	-1.110721	-1.110721	0.069221	0.051916	PPO7
+313	-0.850511	-1.046155	0.069221	0.051916	PPO5
+314	-0.574228	-1.007462	0.069221	0.051916	PPO3
+315	-0.289055	-0.987715	0.069221	0.051916	PPO1
+316	-0.000000	-0.981655	0.069221	0.051916	PPOz
+317	0.289055	-0.987715	0.069221	0.051916	PPO2
+318	0.574228	-1.007462	0.069221	0.051916	PPO4
+319	0.850454	-1.046223	0.069221	0.051916	PPO6
+320	1.110721	-1.110721	0.069221	0.051916	PPO8
+321	1.388427	-1.388427	0.069221	0.051916	PPO10
+322	-0.891143	-1.749540	0.069221	0.051916	POO9
+323	-0.713143	-1.399582	0.069221	0.051916	POO7
+324	-0.539360	-1.387717	0.069221	0.051916	POO5
+325	-0.362020	-1.379310	0.069221	0.051916	POO3
+326	-0.181486	-1.375484	0.069221	0.051916	POO1
+327	-0.000000	-1.374422	0.069221	0.051916	POOz
+328	0.181626	-1.375468	0.069221	0.051916	POO2
+329	0.362020	-1.379310	0.069221	0.051916	POO4
+330	0.539360	-1.387717	0.069221	0.051916	POO6
+331	0.713143	-1.399582	0.069221	0.051916	POO8
+332	0.891143	-1.749540	0.069221	0.051916	POO10
+333	-0.546073	-1.680586	0.069221	0.051916	OI1
+334	-0.000000	-1.767132	0.069221	0.051916	OIz
+335	0.546073	-1.680586	0.069221	0.051916	OI2
+336	-1.963487	1.749684	0.069221	0.051916	COMNT
+337	1.963487	1.749684	0.069221	0.051916	SCALE
diff --git a/mne/layouts/EGI256.lout b/mne/layouts/EGI256.lout
new file mode 100644
index 0000000..bc9076a
--- /dev/null
+++ b/mne/layouts/EGI256.lout
@@ -0,0 +1,259 @@
+-42.19    43.52   -41.70    28.71
+001	0.235020883	0.231411875	0.023840595	0.024283894	EEG 001
+002	0.180062322	0.24066255	0.023840595	0.024283894	EEG 002
+003	0.134498312	0.239722125	0.023840595	0.024283894	EEG 003
+004	0.098183698	0.230899463	0.023840595	0.024283894	EEG 004
+005	0.066117291	0.206774428	0.023840595	0.024283894	EEG 005
+006	0.038417416	0.175224454	0.023840595	0.024283894	EEG 006
+007	0.019093339	0.142334211	0.023840595	0.024283894	EEG 007
+008	0	0.106825455	0.023840595	0.024283894	EEG 008
+009	-0.017539353	0.062826857	0.023840595	0.024283894	EEG 009
+010	0.181942866	0.296413546	0.023840595	0.024283894	EEG 010
+011	0.13038807	0.293232492	0.023840595	0.024283894	EEG 011
+012	0.084273706	0.277147412	0.023840595	0.024283894	EEG 012
+013	0.050175359	0.251802841	0.023840595	0.024283894	EEG 013
+014	0.021773201	0.21699757	0.023840595	0.024283894	EEG 014
+015	0	0.180469732	0.023840595	0.024283894	EEG 015
+016	-0.019093339	0.142334211	0.023840595	0.024283894	EEG 016
+017	-0.036255497	0.09269913	0.023840595	0.024283894	EEG 017
+018	0.113098849	0.348229946	0.023840595	0.024283894	EEG 018
+019	0.069000992	0.329792276	0.023840595	0.024283894	EEG 019
+020	0.029776066	0.297506089	0.023840595	0.024283894	EEG 020
+021	0	0.258687873	0.023840595	0.024283894	EEG 021
+022	-0.021773201	0.21699757	0.023840595	0.024283894	EEG 022
+023	-0.038417416	0.175224454	0.023840595	0.024283894	EEG 023
+024	-0.055153266	0.126645408	0.023840595	0.024283894	EEG 024
+025	0.036940443	0.37703699	0.023840595	0.024283894	EEG 025
+026	0	0.343720309	0.023840595	0.024283894	EEG 026
+027	-0.029776066	0.297506089	0.023840595	0.024283894	EEG 027
+028	-0.050175359	0.251802841	0.023840595	0.024283894	EEG 028
+029	-0.066117291	0.206774428	0.023840595	0.024283894	EEG 029
+030	-0.079525249	0.158534511	0.023840595	0.024283894	EEG 030
+031	0	0.415202995	0.023840595	0.024283894	EEG 031
+032	-0.036940443	0.37703699	0.023840595	0.024283894	EEG 032
+033	-0.069000992	0.329792276	0.023840595	0.024283894	EEG 033
+034	-0.084273706	0.277147412	0.023840595	0.024283894	EEG 034
+035	-0.098183698	0.230899463	0.023840595	0.024283894	EEG 035
+036	-0.098479668	0.187945851	0.023840595	0.024283894	EEG 036
+037	-0.113098849	0.348229946	0.023840595	0.024283894	EEG 037
+038	-0.13038807	0.293232492	0.023840595	0.024283894	EEG 038
+039	-0.134498312	0.239722125	0.023840595	0.024283894	EEG 039
+040	-0.130890927	0.191286703	0.023840595	0.024283894	EEG 040
+041	-0.116009122	0.150111634	0.023840595	0.024283894	EEG 041
+042	-0.094840856	0.116834626	0.023840595	0.024283894	EEG 042
+043	-0.076990927	0.086006856	0.023840595	0.024283894	EEG 043
+044	-0.055587556	0.053147386	0.023840595	0.024283894	EEG 044
+045	-0.029699902	0.019405615	0.023840595	0.024283894	EEG 045
+046	-0.181942866	0.296413546	0.023840595	0.024283894	EEG 046
+047	-0.180062322	0.24066255	0.023840595	0.024283894	EEG 047
+048	-0.17285275	0.187572361	0.023840595	0.024283894	EEG 048
+049	-0.156410469	0.141423921	0.023840595	0.024283894	EEG 049
+050	-0.132742164	0.104084677	0.023840595	0.024283894	EEG 050
+051	-0.108362109	0.07207399	0.023840595	0.024283894	EEG 051
+052	-0.087032894	0.041560718	0.023840595	0.024283894	EEG 052
+053	-0.057033727	0.006635523	0.023840595	0.024283894	EEG 053
+054	-0.235020883	0.231411875	0.023840595	0.024283894	EEG 054
+055	-0.21721779	0.1735557	0.023840595	0.024283894	EEG 055
+056	-0.196096643	0.121848964	0.023840595	0.024283894	EEG 056
+057	-0.169122926	0.084563661	0.023840595	0.024283894	EEG 057
+058	-0.142622009	0.056366314	0.023840595	0.024283894	EEG 058
+059	-0.11607512	0.026701856	0.023840595	0.024283894	EEG 059
+060	-0.086703907	-0.006962228	0.023840595	0.024283894	EEG 060
+061	-0.271241865	0.131933691	0.023840595	0.024283894	EEG 061
+062	-0.237546771	0.082946276	0.023840595	0.024283894	EEG 062
+063	-0.20434592	0.049982898	0.023840595	0.024283894	EEG 063
+064	-0.175001011	0.027246728	0.023840595	0.024283894	EEG 064
+065	-0.144183544	0.006552794	0.023840595	0.024283894	EEG 065
+066	-0.117629392	-0.020953359	0.023840595	0.024283894	EEG 066
+067	-0.32017538	0.064356008	0.023840595	0.024283894	EEG 067
+068	-0.277394242	0.035815905	0.023840595	0.024283894	EEG 068
+069	-0.241320281	0.000293927	0.023840595	0.024283894	EEG 069
+070	-0.202988841	-0.017932839	0.023840595	0.024283894	EEG 070
+071	-0.170816713	-0.027588171	0.023840595	0.024283894	EEG 071
+072	-0.142940198	-0.038849379	0.023840595	0.024283894	EEG 072
+073	-0.364333595	-0.009526546	0.023840595	0.024283894	EEG 073
+074	-0.227828247	-0.074709585	0.023840595	0.024283894	EEG 074
+075	-0.186334435	-0.079063391	0.023840595	0.024283894	EEG 075
+076	-0.152612576	-0.080357072	0.023840595	0.024283894	EEG 076
+077	-0.122986168	-0.070147895	0.023840595	0.024283894	EEG 077
+078	-0.092860036	-0.059724481	0.023840595	0.024283894	EEG 078
+079	-0.063373134	-0.044961361	0.023840595	0.024283894	EEG 079
+080	-0.033138055	-0.028518783	0.023840595	0.024283894	EEG 080
+081	0	-0.006448832	0.023840595	0.024283894	EEG 081
+082	-0.384631539	-0.115563191	0.023840595	0.024283894	EEG 082
+083	-0.230231782	-0.157310034	0.023840595	0.024283894	EEG 083
+084	-0.201004697	-0.132397774	0.023840595	0.024283894	EEG 084
+085	-0.158874627	-0.130476761	0.023840595	0.024283894	EEG 085
+086	-0.125435162	-0.117006671	0.023840595	0.024283894	EEG 086
+087	-0.093818787	-0.102184911	0.023840595	0.024283894	EEG 087
+088	-0.063690231	-0.085009427	0.023840595	0.024283894	EEG 088
+089	-0.034226984	-0.069230419	0.023840595	0.024283894	EEG 089
+090	0	-0.043222928	0.023840595	0.024283894	EEG 090
+091	-0.376606255	-0.236283155	0.023840595	0.024283894	EEG 091
+092	-0.320841548	-0.246056831	0.023840595	0.024283894	EEG 092
+093	-0.264511728	-0.247963981	0.023840595	0.024283894	EEG 093
+094	-0.235119884	-0.22133859	0.023840595	0.024283894	EEG 094
+095	-0.200260526	-0.201104991	0.023840595	0.024283894	EEG 095
+096	-0.16089296	-0.182074387	0.023840595	0.024283894	EEG 096
+097	-0.123315473	-0.169463521	0.023840595	0.024283894	EEG 097
+098	-0.093577895	-0.148219199	0.023840595	0.024283894	EEG 098
+099	-0.062757092	-0.127508907	0.023840595	0.024283894	EEG 099
+100	-0.033465994	-0.105718695	0.023840595	0.024283894	EEG 100
+101	0	-0.123212516	0.023840595	0.024283894	EEG 101
+102	-0.309236143	-0.330394078	0.023840595	0.024283894	EEG 102
+103	-0.264402365	-0.317489099	0.023840595	0.024283894	EEG 103
+104	-0.215607267	-0.297916345	0.023840595	0.024283894	EEG 104
+105	-0.194042397	-0.266008675	0.023840595	0.024283894	EEG 105
+106	-0.156365562	-0.241406814	0.023840595	0.024283894	EEG 106
+107	-0.117304936	-0.222733874	0.023840595	0.024283894	EEG 107
+108	-0.08375779	-0.200153314	0.023840595	0.024283894	EEG 108
+109	-0.056791169	-0.173578646	0.023840595	0.024283894	EEG 109
+110	-0.028490371	-0.146436894	0.023840595	0.024283894	EEG 110
+111	-0.235425173	-0.391140875	0.023840595	0.024283894	EEG 111
+112	-0.20031364	-0.367491502	0.023840595	0.024283894	EEG 112
+113	-0.160198907	-0.335751192	0.023840595	0.024283894	EEG 113
+114	-0.148968879	-0.297338854	0.023840595	0.024283894	EEG 114
+115	-0.09913078	-0.279612547	0.023840595	0.024283894	EEG 115
+116	-0.06561825	-0.2506161	0.023840595	0.024283894	EEG 116
+117	-0.036528871	-0.219887692	0.023840595	0.024283894	EEG 117
+118	-0.01914107	-0.187670154	0.023840595	0.024283894	EEG 118
+119	0	-0.159638357	0.023840595	0.024283894	EEG 119
+120	-0.178151028	-0.424680349	0.023840595	0.024283894	EEG 120
+121	-0.142872329	-0.395550026	0.023840595	0.024283894	EEG 121
+122	-0.106134228	-0.360226213	0.023840595	0.024283894	EEG 122
+123	-0.074015552	-0.317797572	0.023840595	0.024283894	EEG 123
+124	-0.049414286	-0.292978277	0.023840595	0.024283894	EEG 124
+125	-0.020856534	-0.260833466	0.023840595	0.024283894	EEG 125
+126	0	-0.223512279	0.023840595	0.024283894	EEG 126
+127	0.01914107	-0.187670154	0.023840595	0.024283894	EEG 127
+128	0.028490371	-0.146436894	0.023840595	0.024283894	EEG 128
+129	0.033465994	-0.105718695	0.023840595	0.024283894	EEG 129
+130	0.034226984	-0.069230419	0.023840595	0.024283894	EEG 130
+131	0.033138055	-0.028518783	0.023840595	0.024283894	EEG 131
+132	0.029699902	0.019405615	0.023840595	0.024283894	EEG 132
+133	-0.11640639	-0.433892117	0.023840595	0.024283894	EEG 133
+134	-0.085226238	-0.411234759	0.023840595	0.024283894	EEG 134
+135	-0.054701526	-0.36252645	0.023840595	0.024283894	EEG 135
+136	-0.02321088	-0.335534555	0.023840595	0.024283894	EEG 136
+137	0	-0.303018075	0.023840595	0.024283894	EEG 137
+138	0.020856534	-0.260833466	0.023840595	0.024283894	EEG 138
+139	0.036528871	-0.219887692	0.023840595	0.024283894	EEG 139
+140	0.056791169	-0.173578646	0.023840595	0.024283894	EEG 140
+141	0.062757092	-0.127508907	0.023840595	0.024283894	EEG 141
+142	0.063690231	-0.085009427	0.023840595	0.024283894	EEG 142
+143	0.063373134	-0.044961361	0.023840595	0.024283894	EEG 143
+144	0.057033727	0.006635523	0.023840595	0.024283894	EEG 144
+145	-0.061719572	-0.45	0.023840595	0.024283894	EEG 145
+146	-0.032116421	-0.419782634	0.023840595	0.024283894	EEG 146
+147	-9.99E-17	-0.379508917	0.023840595	0.024283894	EEG 147
+148	0.02321088	-0.335534555	0.023840595	0.024283894	EEG 148
+149	0.049414286	-0.292978277	0.023840595	0.024283894	EEG 149
+150	0.06561825	-0.2506161	0.023840595	0.024283894	EEG 150
+151	0.08375779	-0.200153314	0.023840595	0.024283894	EEG 151
+152	0.093577895	-0.148219199	0.023840595	0.024283894	EEG 152
+153	0.093818787	-0.102184911	0.023840595	0.024283894	EEG 153
+154	0.092860036	-0.059724481	0.023840595	0.024283894	EEG 154
+155	0.086703907	-0.006962228	0.023840595	0.024283894	EEG 155
+156	0.032116421	-0.419782634	0.023840595	0.024283894	EEG 156
+157	0.054701526	-0.36252645	0.023840595	0.024283894	EEG 157
+158	0.074015552	-0.317797572	0.023840595	0.024283894	EEG 158
+159	0.09913078	-0.279612547	0.023840595	0.024283894	EEG 159
+160	0.117304936	-0.222733874	0.023840595	0.024283894	EEG 160
+161	0.123315473	-0.169463521	0.023840595	0.024283894	EEG 161
+162	0.125435162	-0.117006671	0.023840595	0.024283894	EEG 162
+163	0.122986168	-0.070147895	0.023840595	0.024283894	EEG 163
+164	0.117629392	-0.020953359	0.023840595	0.024283894	EEG 164
+165	0.061719572	-0.45	0.023840595	0.024283894	EEG 165
+166	0.085226238	-0.411234759	0.023840595	0.024283894	EEG 166
+167	0.106134228	-0.360226213	0.023840595	0.024283894	EEG 167
+168	0.148968879	-0.297338854	0.023840595	0.024283894	EEG 168
+169	0.156365562	-0.241406814	0.023840595	0.024283894	EEG 169
+170	0.16089296	-0.182074387	0.023840595	0.024283894	EEG 170
+171	0.158874627	-0.130476761	0.023840595	0.024283894	EEG 171
+172	0.152612576	-0.080357072	0.023840595	0.024283894	EEG 172
+173	0.142940198	-0.038849379	0.023840595	0.024283894	EEG 173
+174	0.11640639	-0.433892117	0.023840595	0.024283894	EEG 174
+175	0.142872329	-0.395550026	0.023840595	0.024283894	EEG 175
+176	0.160198907	-0.335751192	0.023840595	0.024283894	EEG 176
+177	0.194042397	-0.266008675	0.023840595	0.024283894	EEG 177
+178	0.200260526	-0.201104991	0.023840595	0.024283894	EEG 178
+179	0.201004697	-0.132397774	0.023840595	0.024283894	EEG 179
+180	0.186334435	-0.079063391	0.023840595	0.024283894	EEG 180
+181	0.170816713	-0.027588171	0.023840595	0.024283894	EEG 181
+182	0.144183544	0.006552794	0.023840595	0.024283894	EEG 182
+183	0.11607512	0.026701856	0.023840595	0.024283894	EEG 183
+184	0.087032894	0.041560718	0.023840595	0.024283894	EEG 184
+185	0.055587556	0.053147386	0.023840595	0.024283894	EEG 185
+186	0.017539353	0.062826857	0.023840595	0.024283894	EEG 186
+187	0.178151028	-0.424680349	0.023840595	0.024283894	EEG 187
+188	0.20031364	-0.367491502	0.023840595	0.024283894	EEG 188
+189	0.215607267	-0.297916345	0.023840595	0.024283894	EEG 189
+190	0.235119884	-0.22133859	0.023840595	0.024283894	EEG 190
+191	0.230231782	-0.157310034	0.023840595	0.024283894	EEG 191
+192	0.227828247	-0.074709585	0.023840595	0.024283894	EEG 192
+193	0.202988841	-0.017932839	0.023840595	0.024283894	EEG 193
+194	0.175001011	0.027246728	0.023840595	0.024283894	EEG 194
+195	0.142622009	0.056366314	0.023840595	0.024283894	EEG 195
+196	0.108362109	0.07207399	0.023840595	0.024283894	EEG 196
+197	0.076990927	0.086006856	0.023840595	0.024283894	EEG 197
+198	0.036255497	0.09269913	0.023840595	0.024283894	EEG 198
+199	0.235425173	-0.391140875	0.023840595	0.024283894	EEG 199
+200	0.264402365	-0.317489099	0.023840595	0.024283894	EEG 200
+201	0.264511728	-0.247963981	0.023840595	0.024283894	EEG 201
+202	0.241320281	0.000293927	0.023840595	0.024283894	EEG 202
+203	0.20434592	0.049982898	0.023840595	0.024283894	EEG 203
+204	0.169122926	0.084563661	0.023840595	0.024283894	EEG 204
+205	0.132742164	0.104084677	0.023840595	0.024283894	EEG 205
+206	0.094840856	0.116834626	0.023840595	0.024283894	EEG 206
+207	0.055153266	0.126645408	0.023840595	0.024283894	EEG 207
+208	0.309236143	-0.330394078	0.023840595	0.024283894	EEG 208
+209	0.320841548	-0.246056831	0.023840595	0.024283894	EEG 209
+210	0.277394242	0.035815905	0.023840595	0.024283894	EEG 210
+211	0.237546771	0.082946276	0.023840595	0.024283894	EEG 211
+212	0.196096643	0.121848964	0.023840595	0.024283894	EEG 212
+213	0.156410469	0.141423921	0.023840595	0.024283894	EEG 213
+214	0.116009122	0.150111634	0.023840595	0.024283894	EEG 214
+215	0.079525249	0.158534511	0.023840595	0.024283894	EEG 215
+216	0.376606255	-0.236283155	0.023840595	0.024283894	EEG 216
+217	0.384631539	-0.115563191	0.023840595	0.024283894	EEG 217
+218	0.364333595	-0.009526546	0.023840595	0.024283894	EEG 218
+219	0.32017538	0.064356008	0.023840595	0.024283894	EEG 219
+220	0.271241865	0.131933691	0.023840595	0.024283894	EEG 220
+221	0.21721779	0.1735557	0.023840595	0.024283894	EEG 221
+222	0.17285275	0.187572361	0.023840595	0.024283894	EEG 222
+223	0.130890927	0.191286703	0.023840595	0.024283894	EEG 223
+224	0.098479668	0.187945851	0.023840595	0.024283894	EEG 224
+225	0.316289645	0.145736715	0.023840595	0.024283894	EEG 225
+226	0.302702771	0.230332844	0.023840595	0.024283894	EEG 226
+227	0.368412876	0.104246485	0.023840595	0.024283894	EEG 227
+228	0.409165374	0.012374488	0.023840595	0.024283894	EEG 228
+229	0.423731189	-0.12797492	0.023840595	0.024283894	EEG 229
+230	0.298254153	0.303894316	0.023840595	0.024283894	EEG 230
+231	0.362100214	0.20909316	0.023840595	0.024283894	EEG 231
+232	0.410199617	0.143137194	0.023840595	0.024283894	EEG 232
+233	0.447869069	0.013249996	0.023840595	0.024283894	EEG 233
+234	0.269381414	0.382730951	0.023840595	0.024283894	EEG 234
+235	0.342518502	0.308483235	0.023840595	0.024283894	EEG 235
+236	0.395968691	0.254174349	0.023840595	0.024283894	EEG 236
+237	0.45	0.157922288	0.023840595	0.024283894	EEG 237
+238	0.2187115	0.45	0.023840595	0.024283894	EEG 238
+239	0.327880174	0.384827106	0.023840595	0.024283894	EEG 239
+240	0.38583302	0.329449945	0.023840595	0.024283894	EEG 240
+241	-0.2187115	0.45	0.023840595	0.024283894	EEG 241
+242	-0.327880174	0.384827106	0.023840595	0.024283894	EEG 242
+243	-0.38583302	0.329449945	0.023840595	0.024283894	EEG 243
+244	-0.269381414	0.382730951	0.023840595	0.024283894	EEG 244
+245	-0.342518502	0.308483235	0.023840595	0.024283894	EEG 245
+246	-0.395968691	0.254174349	0.023840595	0.024283894	EEG 246
+247	-0.45	0.157922288	0.023840595	0.024283894	EEG 247
+248	-0.298254153	0.303894316	0.023840595	0.024283894	EEG 248
+249	-0.362100214	0.20909316	0.023840595	0.024283894	EEG 249
+250	-0.410199617	0.143137194	0.023840595	0.024283894	EEG 250
+251	-0.447869069	0.013249996	0.023840595	0.024283894	EEG 251
+252	-0.302702771	0.230332844	0.023840595	0.024283894	EEG 252
+253	-0.316289645	0.145736715	0.023840595	0.024283894	EEG 253
+254	-0.368412876	0.104246485	0.023840595	0.024283894	EEG 254
+255	-0.409165374	0.012374488	0.023840595	0.024283894	EEG 255
+256	-0.423731189	-0.12797492	0.023840595	0.024283894	EEG 256
+257	-0.45	-0.45	0.023840595	0.024283894	EEG 257
+258	0.45	-0.45	0.023840595	0.024283894	EEG 258
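
The two .lout files in this commit share a simple format: the first line
gives the plotting box as x_min, x_max, y_min, y_max, and each following
row lists index, x, y, width, height, and the channel name.
mne.layouts.read_layout is the supported reader; purely for illustration,
a minimal hand-rolled parser (the file path is hypothetical):

    import numpy as np

    def parse_lout(fname):
        """Parse a .lout file into (box, positions, names)."""
        with open(fname) as fid:
            lines = fid.read().splitlines()
        box = tuple(float(v) for v in lines[0].split())
        pos, names = [], []
        for line in lines[1:]:
            if not line.strip():
                continue  # skip blank trailing lines
            parts = line.split(None, 5)  # index, x, y, width, height, name
            pos.append([float(v) for v in parts[1:5]])
            names.append(parts[5])
        return box, np.array(pos), names

    # box, pos, names = parse_lout('EGI256.lout')
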
diff --git a/mne/layouts/KIT-157.lout b/mne/layouts/KIT-157.lout
new file mode 100644
index 0000000..39d7d6b
--- /dev/null
+++ b/mne/layouts/KIT-157.lout
@@ -0,0 +1,158 @@
+-42.19	43.52	-41.7	28.71	
+001     9.78   -14.18     4.00     3.00 MEG 001
+002     3.31   -16.56     4.00     3.00 MEG 002
+003    12.02   -19.42     4.00     3.00 MEG 003
+004     8.08   -21.05     4.00     3.00 MEG 004
+005     4.12   -22.01     4.00     3.00 MEG 005
+006    15.80   -16.63     4.00     3.00 MEG 006
+007    10.21   -12.01     4.00     3.00 MEG 007
+008     7.23   -13.67     4.00     3.00 MEG 008
+009   -22.12    -3.07     4.00     3.00 MEG 009
+010   -13.99   -13.09     4.00     3.00 MEG 010
+011   -21.05    -7.51     4.00     3.00 MEG 011
+012   -18.85   -12.06     4.00     3.00 MEG 012
+013    -0.14   -16.77     4.00     3.00 MEG 013
+014    -6.69   -15.41     4.00     3.00 MEG 014
+015   -10.69   -15.56     4.00     3.00 MEG 015
+016    -3.91   -10.00     4.00     3.00 MEG 016
+017     0.80    -6.66     4.00     3.00 MEG 017
+018     3.74   -20.66     4.00     3.00 MEG 018
+019    15.01   -15.63     4.00     3.00 MEG 019
+020     4.16   -14.75     4.00     3.00 MEG 020
+021    16.72    -0.60     4.00     3.00 MEG 021
+022    14.31    -7.30     4.00     3.00 MEG 022
+023     1.27   -13.23     4.00     3.00 MEG 023
+024     9.63   -10.10     4.00     3.00 MEG 024
+025    -1.74   -14.94     4.00     3.00 MEG 025
+026    -4.68   -14.12     4.00     3.00 MEG 026
+027    -1.65    -8.33     4.00     3.00 MEG 027
+028    -6.53    -8.53     4.00     3.00 MEG 028
+029    -8.52    -6.61     4.00     3.00 MEG 029
+030   -10.18    -4.27     4.00     3.00 MEG 030
+031   -11.14    -1.21     4.00     3.00 MEG 031
+032    -4.02   -18.39     4.00     3.00 MEG 032
+033    19.69     0.13     4.00     3.00 MEG 033
+034     4.03    -8.21     4.00     3.00 MEG 034
+035     3.56     0.14     4.00     3.00 MEG 035
+036     4.19   -12.79     4.00     3.00 MEG 036
+037    19.43    -3.03     4.00     3.00 MEG 037
+038    20.99    -9.54     4.00     3.00 MEG 038
+039    15.93   -11.27     4.00     3.00 MEG 039
+040    22.46    -5.52     4.00     3.00 MEG 040
+041    -9.37    -8.82     4.00     3.00 MEG 041
+042    -6.93   -10.92     4.00     3.00 MEG 042
+043    -1.56   -13.07     4.00     3.00 MEG 043
+044    -7.75   -20.89     4.00     3.00 MEG 044
+045   -11.74   -19.07     4.00     3.00 MEG 045
+046     0.31   -22.23     4.00     3.00 MEG 046
+047    -3.75   -21.89     4.00     3.00 MEG 047
+048    -3.89    -5.28     4.00     3.00 MEG 048
+049    23.23    -0.95     4.00     3.00 MEG 049
+050    13.94   -14.13     4.00     3.00 MEG 050
+051     7.41   -17.72     4.00     3.00 MEG 051
+052    19.50    -8.59     4.00     3.00 MEG 052
+053    18.26    -7.47     4.00     3.00 MEG 053
+054    18.19    -2.34     4.00     3.00 MEG 054
+055    14.76    -9.91     4.00     3.00 MEG 055
+056    21.32    -0.18     4.00     3.00 MEG 056
+057    -1.88    -3.98     4.00     3.00 MEG 057
+058     3.56    -3.73     4.00     3.00 MEG 058
+059   -12.57    -8.25     4.00     3.00 MEG 059
+060    -7.56   -12.70     4.00     3.00 MEG 060
+061   -15.02    -1.73     4.00     3.00 MEG 061
+062   -11.53   -17.47     4.00     3.00 MEG 062
+063    -0.18   -18.90     4.00     3.00 MEG 063
+064    -6.61    -0.05     4.00     3.00 MEG 064
+065     6.73    -9.47     4.00     3.00 MEG 065
+066     1.16    -8.63     4.00     3.00 MEG 066
+067    18.43     8.05     4.00     3.00 MEG 067
+068    16.27    12.00     4.00     3.00 MEG 068
+069    19.53     3.47     4.00     3.00 MEG 069
+070    11.49     5.68     4.00     3.00 MEG 070
+071    12.54    -0.07     4.00     3.00 MEG 071
+072    12.40     3.05     4.00     3.00 MEG 072
+073   -15.98    -9.55     4.00     3.00 MEG 073
+074   -18.65    -1.75     4.00     3.00 MEG 074
+075   -17.81    -5.83     4.00     3.00 MEG 075
+076     -1.09     0.06     4.00     3.00 MEG 076
+077    -1.11     2.07     4.00     3.00 MEG 077
+078   -17.59   -10.78     4.00     3.00 MEG 078
+079   -20.36    -2.47     4.00     3.00 MEG 079
+080   -16.06    10.29     4.00     3.00 MEG 080
+081    10.71    -5.93     4.00     3.00 MEG 081
+082    12.02    -3.35     4.00     3.00 MEG 082
+083    19.99     8.66     4.00     3.00 MEG 083
+084    15.61    15.53     4.00     3.00 MEG 084
+085     5.76    -4.95     4.00     3.00 MEG 085
+086    12.48    13.62     4.00     3.00 MEG 086
+087    18.03     3.69     4.00     3.00 MEG 087
+088    14.69    11.11     4.00     3.00 MEG 088
+089   -19.42     6.89     4.00     3.00 MEG 089
+090   -16.09    14.39     4.00     3.00 MEG 090
+091    -6.70    -5.77     4.00     3.00 MEG 091
+092   -12.37   -11.31     4.00     3.00 MEG 092
+093    -1.72     9.34     4.00     3.00 MEG 093
+094    -4.12     1.65     4.00     3.00 MEG 094
+095   -18.66     2.58     4.00     3.00 MEG 095
+096   -17.76     6.59     4.00     3.00 MEG 096
+097     8.82    -5.11     4.00     3.00 MEG 097
+098     8.79    -7.85     4.00     3.00 MEG 098
+099    15.43     6.10     4.00     3.00 MEG 099
+100    11.93    11.57     4.00     3.00 MEG 100
+101    16.58     7.80     4.00     3.00 MEG 101
+102     8.27     6.69     4.00     3.00 MEG 102
+103    11.62    -8.00     4.00     3.00 MEG 103
+104    13.11    -5.40     4.00     3.00 MEG 104
+105   -13.38     0.11     4.00     3.00 MEG 105
+106   -12.78    -3.22     4.00     3.00 MEG 106
+107   -12.98     3.35     4.00     3.00 MEG 107
+108   -11.84     6.58     4.00     3.00 MEG 108
+109   -10.08     9.11     4.00     3.00 MEG 109
+110   -16.27    -5.03     4.00     3.00 MEG 110
+111   -11.45    -6.21     4.00     3.00 MEG 111
+112    -0.59     5.83     4.00     3.00 MEG 112
+113    14.18    -2.06     4.00     3.00 MEG 113
+114    14.48     1.15     4.00     3.00 MEG 114
+115    12.68     7.37     4.00     3.00 MEG 115
+116    13.93     4.46     4.00     3.00 MEG 116
+117     8.98    11.57     4.00     3.00 MEG 117
+118     6.35    12.95     4.00     3.00 MEG 118
+119    11.01     9.71     4.00     3.00 MEG 119
+120     0.01    16.08     4.00     3.00 MEG 120
+121   -16.87     2.69     4.00     3.00 MEG 121
+122   -16.02     6.38     4.00     3.00 MEG 122
+123   -14.38     9.83     4.00     3.00 MEG 123
+124   -12.23    12.65     4.00     3.00 MEG 124
+125   -10.14     5.19     4.00     3.00 MEG 125
+126    -5.63    12.72     4.00     3.00 MEG 126
+127    -2.90    13.72     4.00     3.00 MEG 127
+128    -7.93    11.11     4.00     3.00 MEG 128
+129     6.83    14.86     4.00     3.00 MEG 129
+130     7.63     3.51     4.00     3.00 MEG 130
+131     8.56     0.40     4.00     3.00 MEG 131
+132    -2.70     7.01     4.00     3.00 MEG 132
+133     3.09    11.73     4.00     3.00 MEG 133
+134     8.14     9.62     4.00     3.00 MEG 134
+135     2.84     2.47     4.00     3.00 MEG 135
+136     4.05     6.89     4.00     3.00 MEG 136
+137    -6.16    14.64     4.00     3.00 MEG 137
+138   -11.02     2.49     4.00     3.00 MEG 138
+139    -6.78     6.65     4.00     3.00 MEG 139
+140    -6.24     3.18     4.00     3.00 MEG 140
+141    -6.83     9.47     4.00     3.00 MEG 141
+142    -2.48    11.64     4.00     3.00 MEG 142
+143   -17.59    14.92     4.00     3.00 MEG 143
+144   -22.23     2.07     4.00     3.00 MEG 144
+145     3.20    13.71     4.00     3.00 MEG 145
+146     2.06     5.84     4.00     3.00 MEG 146
+147     5.76     1.93     4.00     3.00 MEG 147
+148    23.08     3.86     4.00     3.00 MEG 148
+149    21.96     8.34     4.00     3.00 MEG 149
+150    20.00    12.43     4.00     3.00 MEG 150
+151    17.22    16.08     4.00     3.00 MEG 151
+152     3.91     9.37     4.00     3.00 MEG 152
+153   -21.58     6.32     4.00     3.00 MEG 153
+154   -20.17    10.61     4.00     3.00 MEG 154
+155   -11.01    10.95     4.00     3.00 MEG 155
+156   -14.51     5.43     4.00     3.00 MEG 156
+157     1.28     9.74     4.00     3.00 MEG 157
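
The layout.py diff just below teaches find_layout to detect KIT systems by
counting FIFFV_COIL_KIT_GRAD coils, returning this KIT-157 layout for
157-channel recordings. A short usage sketch mirroring the new test (the
.sqd path is hypothetical):

    from mne.io import read_raw_kit
    from mne.layouts import find_layout, read_layout

    layout = read_layout('KIT-157')      # load the bundled layout by name
    raw = read_raw_kit('test.sqd')       # hypothetical KIT recording
    assert find_layout(raw.info).kind == 'KIT-157'
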
diff --git a/mne/layouts/layout.py b/mne/layouts/layout.py
index 67eff81..4706576 100644
--- a/mne/layouts/layout.py
+++ b/mne/layouts/layout.py
@@ -1,5 +1,5 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
 #
@@ -11,23 +11,19 @@ import os.path as op
 import numpy as np
 from scipy.optimize import leastsq
 from ..preprocessing.maxfilter import fit_sphere_to_headshape
-from ..fiff import FIFF, pick_types
+from .. import pick_types
+from ..io.constants import FIFF
 from ..utils import _clean_names
+from ..externals.six.moves import map
 
 
 class Layout(object):
     """Sensor layouts
 
-    Parameters
-    ----------
-    kind : str
-        Type of layout (can also be custom for EEG). Valid layouts are
-        {'Vectorview-all', 'Vectorview-grad', 'Vectorview-mag',  'CTF-275',
-         'magnesWH3600'}
-    path : string
-        Path to folder where to find the layout file.
+    Layouts are typically loaded from a file using read_layout. Only use this
+    class directly if you're constructing a new layout.
 
-    Attributes
+    Parameters
     ----------
     box : tuple of length 4
         The box dimension (x_min, x_max, y_min, y_max)
@@ -245,7 +241,7 @@ def make_grid_layout(info, picks=None):
     info : dict
         Measurement info (e.g., raw.info). If None, default names will be
         employed.
-    picks : array-like | None
+    picks : array-like of int | None
        The indices of the channels to be included. If None, all misc channels
         will be included.
 
@@ -262,7 +258,7 @@ def make_grid_layout(info, picks=None):
     if not names:
         raise ValueError('No misc data channels found.')
 
-    ids = range(len(picks))
+    ids = list(range(len(picks)))
     size = len(picks)
 
     # prepare square-like layout
@@ -299,14 +295,14 @@ def find_layout(info=None, ch_type=None, chs=None):
 
     Parameters
     ----------
-    info : instance of mne.fiff.meas_info.Info | None
+    info : instance of mne.io.meas_info.Info | None
         The measurement info.
     ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
         The channel type for selecting single channel layouts.
         Defaults to None. Note, this argument will only be considered for
         VectorView type layout. Use `meg` to force using the full layout
        in situations where the info only contains one sensor type.
-    chs : instance of mne.fiff.meas_info.Info | None
+    chs : instance of mne.io.meas_info.Info | None
         The measurement info. Defaults to None. This keyword is deprecated and
         will be removed in MNE-Python 0.9. Use `info` instead.
 
@@ -337,8 +333,12 @@ def find_layout(info=None, ch_type=None, chs=None):
     coil_types = set([ch['coil_type'] for ch in chs])
     channel_types = set([ch['kind'] for ch in chs])
 
-    has_vv_mag = FIFF.FIFFV_COIL_VV_MAG_T3 in coil_types
-    has_vv_grad = FIFF.FIFFV_COIL_VV_PLANAR_T1 in coil_types
+    has_vv_mag = any([k in coil_types for k in [FIFF.FIFFV_COIL_VV_MAG_T1,
+                                                FIFF.FIFFV_COIL_VV_MAG_T2,
+                                                FIFF.FIFFV_COIL_VV_MAG_T3]])
+    has_vv_grad = any([k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
+                                                 FIFF.FIFFV_COIL_VV_PLANAR_T2,
+                                                 FIFF.FIFFV_COIL_VV_PLANAR_T3]])
     has_vv_meg = has_vv_mag and has_vv_grad
     has_vv_only_mag = has_vv_mag and not has_vv_grad
     has_vv_only_grad = has_vv_grad and not has_vv_mag
@@ -352,6 +352,8 @@ def find_layout(info=None, ch_type=None, chs=None):
                     (FIFF.FIFFV_MEG_CH in channel_types and
                      any([k in ctf_other_types for k in coil_types])))
                     # hack due to MNE-C bug in IO of CTF
+    n_kit_grads = len([ch for ch in chs
+                       if ch['coil_type'] == FIFF.FIFFV_COIL_KIT_GRAD])
 
     has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad])
     has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
@@ -382,6 +384,8 @@ def find_layout(info=None, ch_type=None, chs=None):
         layout_name = 'magnesWH3600'
     elif has_CTF_grad:
         layout_name = 'CTF-275'
+    elif n_kit_grads == 157:
+        layout_name = 'KIT-157'
     else:
         return None
 
@@ -395,7 +399,7 @@ def find_layout(info=None, ch_type=None, chs=None):
 
 
 def _find_topomap_coords(chs, layout=None):
-    """Try to guess the MEG system and return appropriate topomap coordinates
+    """Try to guess the E/MEG layout and return appropriate topomap coordinates
 
     Parameters
     ----------
@@ -423,6 +427,22 @@ def _find_topomap_coords(chs, layout=None):
     return pos
 
 
+def _cart_to_sph(x, y, z):
+    """Convert Cartesian coordinates to spherical (azimuth, elevation, radius)"""
+    hypotxy = np.hypot(x, y)
+    r = np.hypot(hypotxy, z)
+    elev = np.arctan2(z, hypotxy)
+    az = np.arctan2(y, x)
+    return az, elev, r
+
+
+def _pol_to_cart(th, r):
+    """Convert polar coordinates (angle, radius) to Cartesian x, y"""
+    x = r * np.cos(th)
+    y = r * np.sin(th)
+    return x, y
+
+
 def _auto_topomap_coords(chs):
     """Make a 2 dimensional sensor map from sensor positions in an info dict
 
@@ -436,33 +456,14 @@ def _auto_topomap_coords(chs):
     locs : array, shape = (n_sensors, 2)
         An array of positions of the 2 dimensional map.
     """
-    locs3d = np.array([ch['loc'][:3] for ch in chs])
-
-    # fit the 3d sensor locations to a sphere with center (cx, cy, cz)
-    # and radius r
-
-    # error function
-    def err(params):
-        r, cx, cy, cz = params
-        return   np.sum((locs3d - [cx, cy, cz]) ** 2, 1) - r ** 2
-
-    (r, cx, cy, cz), _ = leastsq(err, (1, 0, 0, 0))
-
-    # center the sensor locations based on the sphere and scale to
-    # radius 1
-    sphere_center = np.array((cx, cy, cz))
-    locs3d -= sphere_center
-    locs3d /= r
-
-    # implement projection
-    locs2d = np.copy(locs3d[:, :2])
-    z = max(locs3d[:, 2]) - locs3d[:, 2]  # distance form top
-    r = np.sqrt(z)  # desired 2d radius
-    r_xy = np.sqrt(np.sum(locs3d[:, :2] ** 2, 1))  # current radius in xy
-    idx = (r_xy != 0)  # avoid zero division
-    F = r[idx] / r_xy[idx]  # stretching factor accounting for current r
-    locs2d[idx, :] *= F[:, None]
-
+    locs3d = np.array([ch['loc'][:3] for ch in chs
+                       if ch['kind'] in [FIFF.FIFFV_MEG_CH,
+                                         FIFF.FIFFV_EEG_CH]])
+    if not np.any(locs3d):
+        raise RuntimeError('Cannot compute layout, no positions found')
+    x, y, z = locs3d[:, :3].T
+    az, el, r = _cart_to_sph(x, y, z)
+    locs2d = np.c_[_pol_to_cart(az, np.pi / 2 - el)]
     return locs2d
 
 
@@ -484,7 +485,7 @@ def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads'):
 
     Returns
     -------
-    picks : list of int
+    picks : array of int
         Picks for the grad channels, ordered in pairs.
     coords : array, shape = (n_grad_channels, 3)
         Coordinates for a topomap plot (optional, only returned if
@@ -512,7 +513,7 @@ def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads'):
     if topomap_coords:
         shape = (len(pairs), 2, -1)
         coords = (_find_topomap_coords(grad_chs, layout)
-                                      .reshape(shape).mean(axis=1))
+                  .reshape(shape).mean(axis=1))
         return picks, coords
     else:
         return picks
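
The _auto_topomap_coords rewrite above drops the least-squares sphere fit in
favor of a plain azimuthal-equidistant projection: each 3D sensor position is
converted to spherical coordinates and drawn at angle az with radius
pi/2 - elevation, so the vertex of the head lands at the origin. A
self-contained sketch of the same math (the sensor positions are made up):

    import numpy as np

    locs3d = np.array([[0., 0., 1.],   # vertex -> projects to the origin
                       [1., 0., 0.],   # right  -> radius pi/2 at az = 0
                       [0., 1., 0.]])  # front  -> radius pi/2 at az = pi/2
    x, y, z = locs3d.T
    az = np.arctan2(y, x)                  # azimuth in the xy plane
    elev = np.arctan2(z, np.hypot(x, y))   # elevation above the xy plane
    r = np.pi / 2 - elev                   # equidistant radius from the vertex
    locs2d = np.c_[r * np.cos(az), r * np.sin(az)]
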
diff --git a/mne/layouts/tests/test_layout.py b/mne/layouts/tests/test_layout.py
index a7b909e..afb415d 100644
--- a/mne/layouts/tests/test_layout.py
+++ b/mne/layouts/tests/test_layout.py
@@ -1,5 +1,6 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+from __future__ import print_function
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
 #
@@ -13,24 +14,30 @@ import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 from nose.tools import assert_true, assert_raises
 
-from mne.layouts import (make_eeg_layout, make_grid_layout, read_layout, 
+from mne.layouts import (make_eeg_layout, make_grid_layout, read_layout,
                          find_layout)
 
-from mne.fiff import Raw, pick_types, pick_info
+from mne import pick_types, pick_info
+from mne.io import Raw
+from mne.io import read_raw_kit
 from mne.utils import _TempDir
 
-fif_fname = op.join(op.dirname(__file__), '..', '..', 'fiff',
+warnings.simplefilter('always')
+
+fif_fname = op.join(op.dirname(__file__), '..', '..', 'io',
                    'tests', 'data', 'test_raw.fif')
 
-lout_path = op.join(op.dirname(__file__), '..', '..', 'fiff',
+lout_path = op.join(op.dirname(__file__), '..', '..', 'io',
                     'tests', 'data')
 
-bti_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'bti',
+bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
                   'tests', 'data')
 
-fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                         'data', 'test_ctf_comp_raw.fif')
 
+fname_kit_157 = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
+                        'tests', 'data', 'test.sqd')
 
 test_info = {'ch_names': ['ICA 001', 'ICA 002', 'EOG 061'],
  'chs': [{'cal': 1,
@@ -89,7 +96,7 @@ def test_io_layout_lout():
     assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
     assert_true(layout.names, layout_read.names)
 
-    print layout  # test repr
+    print(layout)  # test repr
 
 
 def test_io_layout_lay():
@@ -130,28 +137,30 @@ def test_make_grid_layout():
 
 def test_find_layout():
     """Test finding layout"""
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         find_layout(chs=test_info['chs'])
         assert_true(w[0].category == DeprecationWarning)
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         find_layout(test_info['chs'])
         assert_true(w[0].category == DeprecationWarning)
     assert_raises(ValueError, find_layout, dict())
     assert_raises(ValueError, find_layout, test_info, ch_type='meep')
-        
+
     sample_info = Raw(fif_fname).info
     grads = pick_types(sample_info, meg='grad')
     sample_info2 = pick_info(sample_info, grads)
-    
+
     mags = pick_types(sample_info, meg='mag')
     sample_info3 = pick_info(sample_info, mags)
-    
+
     # mock new convention
     sample_info4 = copy.deepcopy(sample_info)
     for ii, name in enumerate(sample_info4['ch_names']):
         new = name.replace(' ', '')
         sample_info4['ch_names'][ii] = new
-        sample_info4['chs'][ii]['ch_name'] = new 
+        sample_info4['chs'][ii]['ch_name'] = new
 
     mags = pick_types(sample_info, meg=False, eeg=True)
     sample_info5 = pick_info(sample_info, mags)
@@ -162,12 +171,12 @@ def test_find_layout():
 
     lout = find_layout(sample_info2, ch_type='meg')
     assert_true(lout.kind == 'Vectorview-all')
-    
+
     # test new vector-view
     lout = find_layout(sample_info4, ch_type=None)
     assert_true(lout.kind == 'Vectorview-all')
     assert_true(all(not ' ' in k for k in lout.names))
-    
+
     lout = find_layout(sample_info, ch_type='grad')
     assert_true(lout.kind == 'Vectorview-grad')
     lout = find_layout(sample_info2)
@@ -177,7 +186,7 @@ def test_find_layout():
     lout = find_layout(sample_info2, ch_type='meg')
     assert_true(lout.kind == 'Vectorview-all')
 
-    
+
     lout = find_layout(sample_info, ch_type='mag')
     assert_true(lout.kind == 'Vectorview-mag')
     lout = find_layout(sample_info3)
@@ -186,7 +195,7 @@ def test_find_layout():
     assert_true(lout.kind == 'Vectorview-mag')
     lout = find_layout(sample_info3, ch_type='meg')
     assert_true(lout.kind == 'Vectorview-all')
-    # 
+    #
     lout = find_layout(sample_info, ch_type='eeg')
     assert_true(lout.kind == 'EEG')
     lout = find_layout(sample_info5)
@@ -195,12 +204,15 @@ def test_find_layout():
     assert_true(lout.kind == 'EEG')
     # no common layout, 'meg' option not supported
 
-    fname_bti_raw = op.join(bti_dir, 'exported4D_linux.fif')
+    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
     lout = find_layout(Raw(fname_bti_raw).info)
     assert_true(lout.kind == 'magnesWH3600')
-    
+
     lout = find_layout(Raw(fname_ctf_raw).info)
     assert_true(lout.kind == 'CTF-275')
-    
+
+    lout = find_layout(read_raw_kit(fname_kit_157).info)
+    assert_true(lout.kind == 'KIT-157')
+
     sample_info5['dig'] = []
     assert_raises(RuntimeError, find_layout, sample_info5)
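
The warning assertions above now use the Python-3-compatible capture idiom:
catch_warnings(record=True) collects the warnings, and simplefilter('always')
keeps earlier filters from swallowing repeats. A standalone version of the
pattern:

    import warnings

    def deprecated():
        warnings.warn('use info instead', DeprecationWarning)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')  # never suppress, even on repeats
        deprecated()
    assert w[0].category is DeprecationWarning
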
diff --git a/mne/minimum_norm/__init__.py b/mne/minimum_norm/__init__.py
index c1316a4..07174dc 100644
--- a/mne/minimum_norm/__init__.py
+++ b/mne/minimum_norm/__init__.py
@@ -1,8 +1,9 @@
 """Linear inverse solvers based on L2 Minimum Norm Estimates (MNE)"""
 
-from .inverse import (read_inverse_operator, apply_inverse,
+from .inverse import (InverseOperator, read_inverse_operator, apply_inverse,
                       apply_inverse_raw, make_inverse_operator,
                       apply_inverse_epochs, write_inverse_operator,
                       compute_rank_inverse)
+from .psf_ctf import point_spread_function, cross_talk_function
 from .time_frequency import (source_band_induced_power, source_induced_power,
                              compute_source_psd, compute_source_psd_epochs)
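
These exports make the new PSF/CTF helpers part of the public
mne.minimum_norm API. A hedged usage sketch for point_spread_function, which
is added in psf_ctf.py further below (the file names and label are
hypothetical):

    import mne
    from mne.minimum_norm import read_inverse_operator, point_spread_function

    fwd = mne.read_forward_solution('sample-fwd.fif', surf_ori=True)
    inv = read_inverse_operator('sample-inv.fif')
    labels = [mne.read_label('lh.BA1.label')]  # hypothetical label file
    stc_psf, evoked_fwd = point_spread_function(inv, fwd, labels,
                                                method='dSPM', mode='svd',
                                                n_svd_comp=2)
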
diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py
index 08d838e..cd572c9 100644
--- a/mne/minimum_norm/inverse.py
+++ b/mne/minimum_norm/inverse.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
@@ -9,29 +9,64 @@ from math import sqrt
 import numpy as np
 from scipy import linalg
 
-from ..fiff.constants import FIFF
-from ..fiff.open import fiff_open
-from ..fiff.tag import find_tag
-from ..fiff.matrix import (_read_named_matrix, _transpose_named_matrix,
-                           write_named_matrix)
-from ..fiff.proj import read_proj, make_projector, write_proj
-from ..fiff.tree import dir_tree_find
-from ..fiff.write import (write_int, write_float_matrix, start_file,
-                          start_block, end_block, end_file, write_float,
-                          write_coord_trans, write_string)
-
-from ..fiff.cov import read_cov, write_cov
-from ..fiff.pick import channel_type, pick_info
-from ..cov import prepare_noise_cov
+from ..io.constants import FIFF
+from ..io.open import fiff_open
+from ..io.tag import find_tag
+from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
+                         write_named_matrix)
+from ..io.proj import _read_proj, make_projector, _write_proj
+from ..io.tree import dir_tree_find
+from ..io.write import (write_int, write_float_matrix, start_file,
+                        start_block, end_block, end_file, write_float,
+                        write_coord_trans, write_string)
+
+from ..io.pick import channel_type, pick_info, pick_types
+from ..cov import prepare_noise_cov, _read_cov, _write_cov
 from ..forward import (compute_depth_prior, read_forward_meas_info,
                        write_forward_meas_info, is_fixed_orient,
                        compute_orient_prior, _to_fixed_ori)
 from ..source_space import (read_source_spaces_from_tree,
                             find_source_space_hemi, _get_vertno,
                             _write_source_spaces_to_fid, label_src_vertno_sel)
-from ..transforms import invert_transform, transform_source_space_to
+from ..transforms import invert_transform, transform_surface_to
 from ..source_estimate import _make_stc
-from ..utils import logger, verbose
+from ..utils import check_fname, logger, verbose
+from functools import reduce
+
+
+class InverseOperator(dict):
+    """InverseOperator class to represent info from inverse operator
+    """
+
+    def __repr__(self):
+        """Summarize inverse info instead of printing all"""
+
+        entr = '<InverseOperator'
+
+        nchan = len(pick_types(self['info'], meg=True, eeg=False))
+        entr += ' | ' + 'MEG channels: %d' % nchan
+        nchan = len(pick_types(self['info'], meg=False, eeg=True))
+        entr += ' | ' + 'EEG channels: %d' % nchan
+
+        # XXX TODO: This and the __repr__ in SourceSpaces should call a
+        # function _get_name_str() in source_space.py
+        if self['src'][0]['type'] == 'surf':
+            entr += (' | Source space: Surface with %d vertices'
+                     % self['nsource'])
+        elif self['src'][0]['type'] == 'vol':
+            entr += (' | Source space: Volume with %d grid points'
+                     % self['nsource'])
+        elif self['src'][0]['type'] == 'discrete':
+            entr += (' | Source space: Discrete with %d dipoles'
+                     % self['nsource'])
+
+        source_ori = {FIFF.FIFFV_MNE_UNKNOWN_ORI: 'Unknown',
+                      FIFF.FIFFV_MNE_FIXED_ORI: 'Fixed',
+                      FIFF.FIFFV_MNE_FREE_ORI: 'Free'}
+        entr += ' | Source orientation: %s' % source_ori[self['source_ori']]
+        entr += '>'
+
+        return entr
 
 
 def _pick_channels_inverse_operator(ch_names, inv):
@@ -58,15 +93,17 @@ def read_inverse_operator(fname, verbose=None):
     Parameters
     ----------
     fname : string
-        The name of the FIF file.
+        The name of the FIF file, which ends with -inv.fif or -inv.fif.gz.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
-    inv : dict
+    inv : instance of InverseOperator
         The inverse operator.
     """
+    check_fname(fname, 'inverse operator', ('-inv.fif', '-inv.fif.gz'))
+
     #
     #   Open the file, create directory
     #
@@ -184,23 +221,23 @@ def read_inverse_operator(fname, verbose=None):
     #
     #   Read the covariance matrices
     #
-    inv['noise_cov'] = read_cov(fid, invs, FIFF.FIFFV_MNE_NOISE_COV)
+    inv['noise_cov'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_NOISE_COV)
     logger.info('    Noise covariance matrix read.')
 
-    inv['source_cov'] = read_cov(fid, invs, FIFF.FIFFV_MNE_SOURCE_COV)
+    inv['source_cov'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_SOURCE_COV)
     logger.info('    Source covariance matrix read.')
     #
     #   Read the various priors
     #
-    inv['orient_prior'] = read_cov(fid, invs, FIFF.FIFFV_MNE_ORIENT_PRIOR_COV)
+    inv['orient_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_ORIENT_PRIOR_COV)
     if inv['orient_prior'] is not None:
         logger.info('    Orientation priors read.')
 
-    inv['depth_prior'] = read_cov(fid, invs, FIFF.FIFFV_MNE_DEPTH_PRIOR_COV)
+    inv['depth_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_DEPTH_PRIOR_COV)
     if inv['depth_prior'] is not None:
         logger.info('    Depth priors read.')
 
-    inv['fmri_prior'] = read_cov(fid, invs, FIFF.FIFFV_MNE_FMRI_PRIOR_COV)
+    inv['fmri_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_FMRI_PRIOR_COV)
     if inv['fmri_prior'] is not None:
         logger.info('    fMRI priors read.')
 
@@ -254,7 +291,7 @@ def read_inverse_operator(fname, verbose=None):
     #
     #  We also need the SSP operator
     #
-    inv['projs'] = read_proj(fid, tree)
+    inv['projs'] = _read_proj(fid, tree)
 
     #
     #  Some empty fields to be filled in later
@@ -268,9 +305,9 @@ def read_inverse_operator(fname, verbose=None):
     nuse = 0
     for k in range(len(inv['src'])):
         try:
-            inv['src'][k] = transform_source_space_to(inv['src'][k],
-                                                      inv['coord_frame'],
-                                                      mri_head_t)
+            inv['src'][k] = transform_surface_to(inv['src'][k],
+                                                 inv['coord_frame'],
+                                                 mri_head_t)
         except Exception as inst:
             fid.close()
             raise Exception('Could not transform source space (%s)' % inst)
@@ -284,7 +321,7 @@ def read_inverse_operator(fname, verbose=None):
     #
     fid.close()
 
-    return inv
+    return InverseOperator(inv)
 
 
 @verbose
@@ -294,12 +331,14 @@ def write_inverse_operator(fname, inv, verbose=None):
     Parameters
     ----------
     fname : string
-        The name of the FIF file.
+        The name of the FIF file, which ends with -inv.fif or -inv.fif.gz.
     inv : dict
         The inverse operator.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
+    check_fname(fname, 'inverse operator', ('-inv.fif', '-inv.fif.gz'))
+
     #
     #   Open the file, create directory
     #
@@ -325,7 +364,7 @@ def write_inverse_operator(fname, inv, verbose=None):
     #
     #   Write SSP operator
     #
-    write_proj(fid, inv['projs'])
+    _write_proj(fid, inv['projs'])
 
     #
     #   Write the source spaces
@@ -365,21 +404,21 @@ def write_inverse_operator(fname, inv, verbose=None):
     #   write the covariance matrices
     #
     logger.info('    Writing noise covariance matrix.')
-    write_cov(fid, inv['noise_cov'])
+    _write_cov(fid, inv['noise_cov'])
 
     logger.info('    Writing source covariance matrix.')
-    write_cov(fid, inv['source_cov'])
+    _write_cov(fid, inv['source_cov'])
 
     #
     #   write the various priors
     #
     logger.info('    Writing orientation priors.')
     if inv['depth_prior'] is not None:
-        write_cov(fid, inv['depth_prior'])
+        _write_cov(fid, inv['depth_prior'])
     if inv['orient_prior'] is not None:
-        write_cov(fid, inv['orient_prior'])
+        _write_cov(fid, inv['orient_prior'])
     if inv['fmri_prior'] is not None:
-        write_cov(fid, inv['fmri_prior'])
+        _write_cov(fid, inv['fmri_prior'])
 
     write_named_matrix(fid, FIFF.FIFF_MNE_INVERSE_FIELDS, inv['eigen_fields'])
 
@@ -477,7 +516,7 @@ def prepare_inverse_operator(orig, nave, lambda2, method, verbose=None):
 
     Returns
     -------
-    inv : dict
+    inv : instance of InverseOperator
         Prepared inverse operator.
     """
     if nave <= 0:
@@ -596,7 +635,7 @@ def prepare_inverse_operator(orig, nave, lambda2, method, verbose=None):
     else:
         inv['noisenorm'] = []
 
-    return inv
+    return InverseOperator(inv)
 
 
 @verbose
@@ -646,8 +685,8 @@ def _assemble_kernel(inv, label, method, pick_ori, verbose=None):
 
     trans = inv['reginv'][:, None] * reduce(np.dot,
                                             [inv['eigen_fields']['data'],
-                                            inv['whitener'],
-                                            inv['proj']])
+                                             inv['whitener'],
+                                             inv['proj']])
     #
     #   Transformation into current distributions by weighting the eigenleads
     #   with the weights computed above
@@ -754,7 +793,7 @@ def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
     sol = np.dot(K, evoked.data[sel])  # apply imaging kernel
 
     is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-                   and pick_ori == None)
+                   and pick_ori is None)
 
     if is_free_ori:
         logger.info('combining the current components...')
@@ -765,7 +804,7 @@ def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
         sol *= noise_norm
 
     tstep = 1.0 / evoked.info['sfreq']
-    tmin = float(evoked.first) / evoked.info['sfreq']
+    tmin = float(evoked.times[0])
     vertno = _get_vertno(inv['src'])
     subject = _subject_from_inverse(inverse_operator)
 
@@ -854,7 +893,7 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
     K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
 
     is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-                   and pick_ori == None)
+                   and pick_ori is None)
 
     if buffer_size is not None and is_free_ori:
         # Process the data in segments to conserve memory
@@ -864,10 +903,10 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
 
         # Allocate space for inverse solution
         n_times = data.shape[1]
-        sol = np.empty((K.shape[0] / 3, n_times),
+        sol = np.empty((K.shape[0] // 3, n_times),
                        dtype=(K[0, 0] * data[0, 0]).dtype)
 
-        for pos in xrange(0, n_times, buffer_size):
+        for pos in range(0, n_times, buffer_size):
             sol[:, pos:pos + buffer_size] = \
                 combine_xyz(np.dot(K, data[:, pos:pos + buffer_size]))
 
@@ -892,7 +931,7 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
     return stc
 
 
-def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method="dSPM",
+def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method='dSPM',
                               label=None, nave=1, pick_ori=None,
                               verbose=None, pick_normal=None):
     """ see apply_inverse_epochs """
@@ -917,7 +956,7 @@ def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method="dSPM",
     tmin = epochs.times[0]
 
     is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-                   and pick_ori == None)
+                   and pick_ori is None)
 
     if not is_free_ori and noise_norm is not None:
         # premultiply kernel with noise normalization
@@ -1029,7 +1068,7 @@ def _xyz2lf(Lf_xyz, normals):
         tangential orientations (tangent space of cortical surface).
     """
     n_sensors, n_dipoles = Lf_xyz.shape
-    n_positions = n_dipoles / 3
+    n_positions = n_dipoles // 3
     Lf_xyz = Lf_xyz.reshape(n_sensors, n_positions, 3)
     n_sensors, n_positions, _ = Lf_xyz.shape
     Lf_cortex = np.zeros_like(Lf_xyz)
@@ -1133,7 +1172,7 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
 
     Returns
     -------
-    inv : dict
+    inv : instance of InverseOperator
         Inverse operator.
 
     Notes
@@ -1331,7 +1370,7 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
     has_meg = False
     has_eeg = False
     ch_idx = [k for k, c in enumerate(info['chs'])
-                                    if c['ch_name'] in gain_info['ch_names']]
+              if c['ch_name'] in gain_info['ch_names']]
     for idx in ch_idx:
         ch_type = channel_type(info, idx)
         if ch_type == 'eeg':
@@ -1363,7 +1402,7 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
     inv_op['units'] = 'Am'
     inv_op['info'] = inv_info
 
-    return inv_op
+    return InverseOperator(inv_op)
 
 
 def compute_rank_inverse(inv):
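
With the changes above, read_inverse_operator, prepare_inverse_operator and
make_inverse_operator all return the new InverseOperator dict subclass, so
printing one yields the summary built in __repr__, and check_fname rejects
file names that do not end in -inv.fif or -inv.fif.gz. A small sketch (the
file name is hypothetical):

    from mne.minimum_norm import read_inverse_operator

    inv = read_inverse_operator('sample-inv.fif')
    print(inv)              # e.g. <InverseOperator | MEG channels: 305 | ...>
    n_src = inv['nsource']  # still behaves like a plain dict
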
diff --git a/mne/minimum_norm/psf_ctf.py b/mne/minimum_norm/psf_ctf.py
new file mode 100644
index 0000000..d317f46
--- /dev/null
+++ b/mne/minimum_norm/psf_ctf.py
@@ -0,0 +1,431 @@
+# Authors: Olaf Hauk <olaf.hauk at mrc-cbu.cam.ac.uk>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+
+import numpy as np
+from scipy import linalg
+
+from ..utils import logger, verbose
+from ..io.constants import FIFF
+from ..evoked import EvokedArray
+from ..source_estimate import SourceEstimate
+from .inverse import _subject_from_inverse
+from . import apply_inverse
+
+
+ at verbose
+def point_spread_function(inverse_operator, forward, labels, method='dSPM',
+                          lambda2=1 / 9., pick_ori=None, mode='mean',
+                          n_svd_comp=1, verbose=None):
+    """Compute point-spread functions (PSFs) for linear estimators
+
+    Compute point-spread functions (PSF) in labels for a combination of inverse
+    operator and forward solution. PSFs are computed for test sources that are
+    perpendicular to the cortical surface.
+
+    Parameters
+    ----------
+    inverse_operator : instance of InverseOperator
+        Inverse operator read with mne.read_inverse_operator.
+    forward : dict
+        Forward solution, created with "surf_ori=True" and "force_fixed=False".
+        Note: (Bad) channels not included in forward solution will not be used
+        in PSF computation.
+    labels : list of Label
+        Labels for which PSFs shall be computed.
+    method : 'MNE' | 'dSPM' | 'sLORETA'
+        Inverse method for which PSFs shall be computed (for apply_inverse).
+    lambda2 : float
+        The regularization parameter (for apply_inverse).
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations (for apply_inverse).
+    mode : 'mean' | 'sum' | 'svd'
+        PSFs can be computed for different summary measures within labels:
+        'sum' or 'mean': sum or mean of sub-leadfields for labels.
+        This corresponds to situations where labels can be assumed to be
+        homogeneously activated.
+        'svd': SVD components of sub-leadfields for labels.
+        This is better suited for situations where activation patterns are
+        assumed to be more variable.
+        "Sub-leadfields" are the parts of the forward solutions that belong to
+        vertices within individual labels.
+    n_svd_comp : integer
+        Number of SVD components for which PSFs will be computed and output
+        (irrelevant for 'sum' and 'mean'). Explained variances within
+        sub-leadfields are shown in screen output.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc_psf : SourceEstimate
+        The PSFs for the specified labels.
+        If mode='svd', n_svd_comp components per label are created
+        (i.e. n_svd_comp successive time points in mne_analyze).
+        The last sample is the summed PSF across all labels.
+        Scaling of PSFs is arbitrary, and may differ greatly among methods
+        (especially for MNE compared to noise-normalized estimates).
+    evoked_fwd : Evoked
+        Forward solutions corresponding to the PSFs in stc_psf.
+        If mode='svd', n_svd_comp components per label are created
+        (i.e. n_svd_comp successive time points in mne_analyze).
+        The last sample is the summed forward solution across all labels
+        (sum is taken across summary measures).
+    """
+    mode = mode.lower()
+    if mode not in ['mean', 'sum', 'svd']:
+        raise ValueError("mode must be 'svd', 'mean' or 'sum'. Got %s."
+                         % mode)
+
+    logger.info("About to process %d labels" % len(labels))
+
+    if not forward['surf_ori']:
+        raise RuntimeError('Forward has to be surface oriented '
+                           '(surf_ori=True).')
+
+    # get whole leadfield matrix with normal dipole components
+    if not (forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI):
+        # if forward solution already created with force_fixed=True
+        leadfield = forward['sol']['data']
+    else:  # pick normal components of forward solution
+        leadfield = forward['sol']['data'][:, 2::3]
+
+    # to convert the sub-leadfield matrix to the evoked data type (pretending
+    # it's an epoch, see the loop below), use 'info' from the forward
+    # solution; 'sfreq' and 'projs' still need to be added
+    info = deepcopy(forward['info'])
+    info['sfreq'] = 1000.  # add sfreq or it won't work
+    info['projs'] = []  # add empty projs
+
+    # will contain means of subleadfields for all labels
+    label_psf_summary = []
+    # if mode='svd', this will collect all SVD singular values for labels
+    label_singvals = []
+
+    # loop over labels
+    for ll in labels:
+        logger.info(ll)
+        if ll.hemi == 'rh':
+            # for RH labels, add number of LH vertices
+            offset = forward['src'][0]['vertno'].shape[0]
+            # remember whether we are in the LH or RH
+            this_hemi = 1
+        elif ll.hemi == 'lh':
+            offset = 0
+            this_hemi = 0
+        else:
+            raise RuntimeError("Cannot determine hemisphere of label.")
+
+        # get vertices on cortical surface inside label
+        idx = np.intersect1d(ll.vertices, forward['src'][this_hemi]['vertno'])
+
+        # get vertices in source space inside label
+        fwd_idx = np.searchsorted(forward['src'][this_hemi]['vertno'], idx)
+
+        # get sub-leadfield matrix for label vertices
+        sub_leadfield = leadfield[:, fwd_idx + offset]
+
+        # compute summary data for labels
+        if mode == 'sum':  # sum across forward solutions in label
+            logger.info("Computing sums within labels")
+            this_label_psf_summary = sub_leadfield.sum(axis=1)[np.newaxis, :]
+        elif mode == 'mean':
+            logger.info("Computing means within labels")
+            this_label_psf_summary = sub_leadfield.mean(axis=1)[np.newaxis, :]
+        elif mode == 'svd':  # takes svd of forward solutions in label
+            logger.info("Computing SVD within labels, using %d component(s)"
+                        % n_svd_comp)
+
+            # compute SVD of sub-leadfield
+            u_svd, s_svd, _ = linalg.svd(sub_leadfield,
+                                         full_matrices=False,
+                                         compute_uv=True)
+
+            # keep singular values (might be useful to some people)
+            label_singvals.append(s_svd)
+
+            # get first n_svd_comp components, weighted with their
+            # corresponding singular values
+            logger.info("First 5 singular values: %s" % s_svd[0:5])
+            logger.info("(This tells you something about variability of "
+                        "forward solutions in sub-leadfield for label)")
+            # explained variance by chosen components within sub-leadfield
+            my_comps = s_svd[:n_svd_comp]
+            comp_var = (100. * np.sum(my_comps * my_comps) /
+                        np.sum(s_svd * s_svd))
+            logger.info("Your %d component(s) explain(s) %.1f%% "
+                        "variance in label." % (n_svd_comp, comp_var))
+            this_label_psf_summary = (u_svd[:, :n_svd_comp]
+                                      * s_svd[:n_svd_comp][np.newaxis, :])
+            # transpose required for conversion to "evoked"
+            this_label_psf_summary = this_label_psf_summary.T
+
+        # initialise or append to existing collection
+        label_psf_summary.append(this_label_psf_summary)
+
+    label_psf_summary = np.concatenate(label_psf_summary, axis=0)
+    # compute sum across forward solutions for labels, append to end
+    label_psf_summary = np.r_[label_psf_summary,
+                              label_psf_summary.sum(axis=0)[np.newaxis, :]].T
+
+    # convert sub-leadfield matrix to evoked data type (a bit of a hack)
+    evoked_fwd = EvokedArray(label_psf_summary, info=info, tmin=0.)
+
+    # compute PSFs by applying inverse operator to sub-leadfields
+    logger.info("About to apply inverse operator for method='%s' and "
+                "lambda2=%s" % (method, lambda2))
+
+    stc_psf = apply_inverse(evoked_fwd, inverse_operator, lambda2,
+                            method=method, pick_ori=pick_ori)
+
+    return stc_psf, evoked_fwd
+
+
+def _get_matrix_from_inverse_operator(inverse_operator, forward, labels=None,
+                                      method='dSPM', lambda2=1. / 9., mode='mean',
+                                      n_svd_comp=1):
+    """Get inverse matrix from an inverse operator
+
+    Currently works only for fixed/loose orientation constraints.
+    For the loose orientation constraint, the CTFs are computed for the
+    radial component (pick_ori='normal').
+
+    Parameters
+    ----------
+    inverse_operator : instance of InverseOperator
+        Inverse operator read with mne.read_inverse_operator.
+    forward : dict
+        The forward operator.
+    labels : list of Label | None
+        Labels for which CTFs shall be computed. If None, the inverse matrix
+        for all vertices will be returned.
+    method : 'MNE' | 'dSPM' | 'sLORETA'
+        Inverse method (for apply_inverse).
+    lambda2 : float
+        The regularization parameter (for apply_inverse).
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations (for apply_inverse).
+        Determines whether the whole inverse matrix G will have one or three
+        rows per vertex. This will also affect summary measures for labels.
+    mode : 'mean' | 'sum' | 'svd'
+        CTFs can be computed for different summary measures with labels:
+        'sum' or 'mean': sum or means of sub-inverse for labels
+        This corresponds to situations where labels can be assumed to be
+        homogeneously activated.
+        'svd': SVD components of sub-inverse for labels
+        This is better suited for situations where activation patterns are
+        assumed to be more variable.
+        "sub-inverse" is the part of the inverse matrix that belongs to
+        vertices within invidual labels.
+    n_svd_comp : int
+        Number of SVD components for which CTFs will be computed and output
+        (irrelevant for 'sum' and 'mean'). Explained variances within
+        sub-inverses are shown in screen output.
+
+    Returns
+    -------
+    invmat : ndarray
+        Inverse matrix associated with inverse operator and specified
+        parameters.
+    label_singvals : list of ndarray
+        Singular values of svd for sub-inverses.
+        Provides information about how well labels are represented by chosen
+        components. Explained variances within sub-inverses are shown in
+        screen output.
+    """
+    mode = mode.lower()
+
+    if not (forward['surf_ori'] and
+            forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI):
+        raise RuntimeError('Forward has to be surface oriented with '
+                           'fixed orientation (force_fixed=True).')
+
+    if labels:
+        logger.info("About to process %d labels" % len(labels))
+    else:
+        logger.info("Computing whole inverse operator.")
+
+    # in order to convert sub-leadfield matrix to evoked data type (pretending
+    # it's an epoch, see in loop below), uses 'info' from forward solution,
+    # need to add 'sfreq' and 'proj'
+    info = deepcopy(forward['info'])
+    info['sfreq'] = 1000.  # add sfreq or it won't work
+    info['projs'] = []  # add projs
+
+    # create identity matrix as input for inverse operator
+    id_mat = np.eye(forward['nchan'])
+
+    # convert identity matrix to evoked data type (pretending it's an epoch)
+    ev_id = EvokedArray(id_mat, info=info, tmin=0.)
+
+    # apply inverse operator to identity matrix in order to get inverse matrix
+    # free orientation constraint not possible because apply_inverse would
+    # combine the orientation components
+    invmat_mat_op = apply_inverse(ev_id, inverse_operator, lambda2=lambda2,
+                                  method=method, pick_ori='normal')
+
+    logger.info("Dimension of inverse matrix: %s" % str(invmat_mat_op.shape))
+
+    # turn source estimate into numpy array
+    invmat_mat = invmat_mat_op.data
+    invmat_summary = []
+    # if mode='svd', label_singvals will collect all SVD singular values for
+    # labels
+    label_singvals = []
+
+    if labels:
+        for ll in labels:
+            if ll.hemi == 'rh':
+                # for RH labels, add number of LH vertices
+                offset = forward['src'][0]['vertno'].shape[0]
+                # remember whether we are in the LH or RH
+                this_hemi = 1
+            elif ll.hemi == 'lh':
+                offset = 0
+                this_hemi = 0
+            else:
+                raise RuntimeError("Cannot determine hemisphere of label.")
+
+            # get vertices on cortical surface inside label
+            idx = np.intersect1d(ll.vertices,
+                                 forward['src'][this_hemi]['vertno'])
+
+            # get vertices in source space inside label
+            fwd_idx = np.searchsorted(forward['src'][this_hemi]['vertno'], idx)
+
+            # get sub-inverse for label vertices, one row per vertex
+            invmat_lbl = invmat_mat[fwd_idx + offset, :]
+
+            # compute summary data for labels
+            if mode == 'sum':  # takes sum across estimators in label
+                logger.info("Computing sums within labels")
+                this_invmat_summary = invmat_lbl.sum(axis=0)[np.newaxis, :]
+            elif mode == 'mean':
+                logger.info("Computing means within labels")
+                this_invmat_summary = invmat_lbl.mean(axis=0)[np.newaxis, :]
+            elif mode == 'svd':  # takes svd of sub-inverse in label
+                logger.info("Computing SVD within labels, using %d "
+                            "component(s)" % n_svd_comp)
+
+                # compute SVD of sub-inverse
+                u_svd, s_svd, _ = linalg.svd(invmat_lbl.T,
+                                             full_matrices=False,
+                                             compute_uv=True)
+
+                # keep singular values (might be useful to some people)
+                label_singvals.append(s_svd)
+
+                # get first n_svd_comp components, weighted with their
+                # corresponding singular values
+                logger.info("First 5 singular values: %s" % s_svd[:5])
+                logger.info("(This tells you something about variability of "
+                            "estimators in sub-inverse for label)")
+                # explained variance by chosen components within sub-inverse
+                my_comps = s_svd[:n_svd_comp]
+                comp_var = ((100 * np.sum(my_comps * my_comps)) /
+                            np.sum(s_svd * s_svd))
+                logger.info("Your %d component(s) explain(s) %.1f%% "
+                            "variance in label." % (n_svd_comp, comp_var))
+                this_invmat_summary = (u_svd[:, :n_svd_comp].T
+                                       * s_svd[:n_svd_comp][:, np.newaxis])
+
+            invmat_summary.append(this_invmat_summary)
+
+        invmat = np.concatenate(invmat_summary, axis=0)
+    else:   # no labels provided: return whole matrix
+        invmat = invmat_mat
+
+    return invmat, label_singvals
+
+
+@verbose
+def cross_talk_function(inverse_operator, forward, labels,
+                        method='dSPM', lambda2=1 / 9., signed=False,
+                        mode='mean', n_svd_comp=1, verbose=None):
+    """Compute cross-talk functions (CTFs) for linear estimators
+
+    Compute cross-talk functions (CTF) in labels for a combination of inverse
+    operator and forward solution. CTFs are computed for test sources that are
+    perpendicular to cortical surface.
+
+    Parameters
+    ----------
+    inverse_operator : instance of InverseOperator
+        Inverse operator read with mne.read_inverse_operator.
+    forward : dict
+        Forward solution, created with "force_fixed=True".
+        Note: (bad) channels not included in the forward solution will not
+        be used in CTF computation.
+    labels : list of Label
+        Labels for which CTFs shall be computed.
+    method : 'MNE' | 'dSPM' | 'sLORETA'
+        Inverse method for which CTFs shall be computed.
+    lambda2 : float
+        The regularization parameter.
+    signed : bool
+        If True, CTFs will be written as signed source estimates. If False,
+        absolute (unsigned) values will be written.
+    mode : 'mean' | 'sum' | 'svd'
+        CTFs can be computed for different summary measures with labels:
+        'sum' or 'mean': sum or means of sub-inverses for labels
+        This corresponds to situations where labels can be assumed to be
+        homogeneously activated.
+        'svd': SVD components of sub-inverses for labels
+        This is better suited for situations where activation patterns are
+        assumed to be more variable. "Sub-inverse" is the part of the inverse
+        matrix that belongs to vertices within individual labels.
+    n_svd_comp : int
+        Number of SVD components for which CTFs will be computed and output
+        (irrelevant for 'sum' and 'mean'). Explained variances within
+        sub-inverses are shown in screen output.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc_ctf : SourceEstimate
+        The CTFs for the specified labels.
+        If mode='svd', n_svd_comp components per label are created
+        (i.e. n_svd_comp successive time points in mne_analyze).
+        The last sample is the summed CTF across all labels.
+    """
+    # get the inverse matrix corresponding to inverse operator
+    out = _get_matrix_from_inverse_operator(inverse_operator, forward,
+                                            labels=labels, method=method,
+                                            lambda2=lambda2, mode=mode,
+                                            n_svd_comp=n_svd_comp)
+    invmat, label_singvals = out
+
+    # get the leadfield matrix from forward solution
+    leadfield = forward['sol']['data']
+
+    # compute cross-talk functions (CTFs)
+    ctfs = np.dot(invmat, leadfield)
+
+    # compute sum across forward solutions for labels, append to end
+    ctfs = np.vstack((ctfs, ctfs.sum(axis=0)))
+
+    # if unsigned output requested, take absolute values
+    if not signed:
+        ctfs = np.abs(ctfs, out=ctfs)
+
+    # create source estimate object
+    vertno = [ss['vertno'] for ss in inverse_operator['src']]
+    stc_ctf = SourceEstimate(ctfs.T, vertno, tmin=0., tstep=1.)
+
+    stc_ctf.subject = _subject_from_inverse(inverse_operator)
+
+    return stc_ctf
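
A minimal usage sketch for the two new functions (illustrative, not from the
patch; it assumes the MNE sample dataset and mirrors the new test below):

    import mne
    from mne.datasets import sample
    from mne.minimum_norm import (read_inverse_operator,
                                  point_spread_function,
                                  cross_talk_function)

    data_path = sample.data_path() + '/MEG/sample'
    fname_fwd = data_path + '/sample_audvis-meg-oct-6-fwd.fif'
    fname_inv = data_path + '/sample_audvis-meg-oct-6-meg-inv.fif'

    inverse_operator = read_inverse_operator(fname_inv)
    labels = [mne.read_label(data_path + '/labels/Aud-lh.label')]

    # PSFs need a surface-oriented, free-orientation forward solution
    forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
    stc_psf, evoked_fwd = point_spread_function(
        inverse_operator, forward, labels=labels, method='dSPM',
        lambda2=1. / 9., pick_ori='normal', mode='svd', n_svd_comp=2)

    # CTFs need a fixed-orientation forward solution
    forward = mne.read_forward_solution(fname_fwd, force_fixed=True,
                                        surf_ori=True)
    stc_ctf = cross_talk_function(inverse_operator, forward, labels,
                                  method='dSPM', lambda2=1. / 9.,
                                  mode='svd', n_svd_comp=2)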
diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py
index bbd6dbc..31cccd2 100644
--- a/mne/minimum_norm/tests/test_inverse.py
+++ b/mne/minimum_norm/tests/test_inverse.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import os.path as op
 import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_equal
@@ -11,19 +12,22 @@ from mne.label import read_label, label_sign_flip
 from mne.event import read_events
 from mne.epochs import Epochs
 from mne.source_estimate import read_source_estimate, VolSourceEstimate
-from mne import fiff, read_cov, read_forward_solution
+from mne import read_cov, read_forward_solution, read_evokeds, pick_types
+from mne.io import Raw
 from mne.minimum_norm.inverse import (apply_inverse, read_inverse_operator,
                                       apply_inverse_raw, apply_inverse_epochs,
                                       make_inverse_operator,
                                       write_inverse_operator,
                                       compute_rank_inverse)
 from mne.utils import _TempDir
+from ...externals import six
 
 s_path = op.join(sample.data_path(download=False), 'MEG', 'sample')
 fname_inv = op.join(s_path, 'sample_audvis-meg-oct-6-meg-inv.fif')
 fname_inv_fixed = op.join(s_path, 'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
 fname_inv_nodepth = op.join(s_path,
-                           'sample_audvis-meg-oct-6-meg-nodepth-fixed-inv.fif')
+                            'sample_audvis-meg-oct-6-meg-nodepth'
+                            '-fixed-inv.fif')
 fname_inv_diag = op.join(s_path,
                          'sample_audvis-meg-oct-6-meg-diagnoise-inv.fif')
 fname_vol_inv = op.join(s_path, 'sample_audvis-meg-vol-7-meg-inv.fif')
@@ -43,7 +47,7 @@ last_keys = [None] * 10
 
 
 def _get_evoked():
-    evoked = fiff.Evoked(fname_data, setno=0, baseline=(None, 0))
+    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
     evoked.crop(0, 0.2)
     return evoked
 
@@ -55,7 +59,7 @@ def _compare(a, b):
     try:
         if isinstance(a, dict):
             assert_true(isinstance(b, dict))
-            for k, v in a.iteritems():
+            for k, v in six.iteritems(a):
                 if not k in b and k not in skip_types:
                     raise ValueError('First one had one second one didn\'t:\n'
                                      '%s not in %s' % (k, b.keys()))
@@ -63,7 +67,7 @@ def _compare(a, b):
                     last_keys.pop()
                     last_keys = [k] + last_keys
                     _compare(v, b[k])
-            for k, v in b.iteritems():
+            for k, v in six.iteritems(b):
                 if not k in a and k not in skip_types:
                     raise ValueError('Second one had one first one didn\'t:\n'
                                      '%s not in %s' % (k, a.keys()))
@@ -80,7 +84,7 @@ def _compare(a, b):
         else:
             assert_true(a == b)
     except Exception as exptn:
-        print last_keys
+        print(last_keys)
         raise exptn
 
 
@@ -141,7 +145,7 @@ def test_warn_inverse_operator():
     bad_info['projs'] = list()
     fwd_op = read_forward_solution(fname_fwd_meeg, surf_ori=True)
     noise_cov = read_cov(fname_cov)
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
         make_inverse_operator(bad_info, fwd_op, noise_cov)
     assert_equal(len(w), 1)
 
@@ -307,9 +311,18 @@ def test_io_inverse_operator():
     """Test IO of inverse_operator with GZip
     """
     inverse_operator = read_inverse_operator(fname_inv)
+    print(inverse_operator)
     # just do one example for .gz, as it should generalize
     _compare_io(inverse_operator, '.gz')
 
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_inverse_operator(inv_badname, inverse_operator)
+        read_inverse_operator(inv_badname)
+    assert_true(len(w) == 2)
+
 
 @sample.requires_sample_data
 def test_apply_mne_inverse_raw():
@@ -317,7 +330,7 @@ def test_apply_mne_inverse_raw():
     """
     start = 3
     stop = 10
-    raw = fiff.Raw(fname_raw)
+    raw = Raw(fname_raw)
     label_lh = read_label(fname_label % 'Aud-lh')
     _, times = raw[0, start:stop]
     inverse_operator = read_inverse_operator(fname_inv)
@@ -346,7 +359,7 @@ def test_apply_mne_inverse_raw():
 def test_apply_mne_inverse_fixed_raw():
     """Test MNE with fixed-orientation inverse operator on Raw
     """
-    raw = fiff.Raw(fname_raw)
+    raw = Raw(fname_raw)
     start = 3
     stop = 10
     _, times = raw[0, start:stop]
@@ -381,11 +394,10 @@ def test_apply_mne_inverse_epochs():
     label_lh = read_label(fname_label % 'Aud-lh')
     label_rh = read_label(fname_label % 'Aud-rh')
     event_id, tmin, tmax = 1, -0.2, 0.5
-    raw = fiff.Raw(fname_raw)
+    raw = Raw(fname_raw)
 
-    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
-                            ecg=True, eog=True, include=['STI 014'],
-                            exclude='bads')
+    picks = pick_types(raw.info, meg=True, eeg=False, stim=True, ecg=True,
+                       eog=True, include=['STI 014'], exclude='bads')
     reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
     flat = dict(grad=1e-15, mag=1e-15)
 
diff --git a/mne/minimum_norm/tests/test_psf_ctf.py b/mne/minimum_norm/tests/test_psf_ctf.py
new file mode 100644
index 0000000..faaf6cd
--- /dev/null
+++ b/mne/minimum_norm/tests/test_psf_ctf.py
@@ -0,0 +1,79 @@
+
+import os.path as op
+import mne
+from mne.datasets import sample
+from mne import read_forward_solution
+from mne.minimum_norm import (read_inverse_operator,
+                              point_spread_function, cross_talk_function)
+
+from nose.tools import assert_true
+
+data_path = op.join(sample.data_path(download=False), 'MEG', 'sample')
+fname_inv = op.join(data_path, 'sample_audvis-meg-oct-6-meg-inv.fif')
+fname_fwd = op.join(data_path, 'sample_audvis-meg-oct-6-fwd.fif')
+
+fname_label = [op.join(data_path, 'labels', 'Aud-rh.label'),
+               op.join(data_path, 'labels', 'Aud-lh.label')]
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+
+
+@sample.requires_sample_data
+def test_psf_ctf():
+    """Test computation of PSFs and CTFs for linear estimators
+    """
+
+    inverse_operator = read_inverse_operator(fname_inv)
+    forward = read_forward_solution(fname_fwd, force_fixed=False,
+                                    surf_ori=True)
+    labels = [mne.read_label(ss) for ss in fname_label]
+
+    method = 'MNE'
+    n_svd_comp = 2
+
+    # Test PSFs (then CTFs)
+    for mode in ('sum', 'svd'):
+        stc_psf, psf_ev = point_spread_function(inverse_operator,
+                                                forward,
+                                                method=method,
+                                                labels=labels,
+                                                lambda2=lambda2,
+                                                pick_ori='normal',
+                                                mode=mode,
+                                                n_svd_comp=n_svd_comp)
+
+        n_vert, n_samples = stc_psf.shape
+        should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
+                         inverse_operator['src'][0]['vertno'].shape[0])
+        if mode == 'svd':
+            should_n_samples = len(labels) * n_svd_comp + 1
+        else:
+            should_n_samples = len(labels) + 1
+
+        assert_true(n_vert == should_n_vert)
+        assert_true(n_samples == should_n_samples)
+
+        n_chan, n_samples = psf_ev.data.shape
+        assert_true(n_chan == forward['nchan'])
+
+    forward = read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True)
+
+    # Test CTFs
+    for mode in ('sum', 'svd'):
+        stc_ctf = cross_talk_function(inverse_operator, forward,
+                                      labels, method=method,
+                                      lambda2=lambda2,
+                                      signed=False, mode=mode,
+                                      n_svd_comp=n_svd_comp)
+
+        n_vert, n_samples = stc_ctf.shape
+        should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
+                         inverse_operator['src'][0]['vertno'].shape[0])
+        if mode == 'svd':
+            should_n_samples = len(labels) * n_svd_comp + 1
+        else:
+            should_n_samples = len(labels) + 1
+
+        assert_true(n_vert == should_n_vert)
+        assert_true(n_samples == should_n_samples)
diff --git a/mne/minimum_norm/tests/test_time_frequency.py b/mne/minimum_norm/tests/test_time_frequency.py
index cf40ab9..e8dd78d 100644
--- a/mne/minimum_norm/tests/test_time_frequency.py
+++ b/mne/minimum_norm/tests/test_time_frequency.py
@@ -5,7 +5,7 @@ from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true
 
 from mne.datasets import sample
-from mne import fiff, find_events, Epochs
+from mne import io, find_events, Epochs, pick_types
 from mne.label import read_label
 from mne.minimum_norm.inverse import (read_inverse_operator,
                                       apply_inverse_epochs)
@@ -32,14 +32,14 @@ def test_tfr_with_inverse_operator():
     tmin, tmax, event_id = -0.2, 0.5, 1
 
     # Setup for reading the raw data
-    raw = fiff.Raw(fname_data)
+    raw = io.Raw(fname_data)
     events = find_events(raw, stim_channel='STI 014')
     inverse_operator = read_inverse_operator(fname_inv)
 
     raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
     # picks MEG gradiometers
-    picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+    picks = pick_types(raw.info, meg=True, eeg=False, eog=True,
                             stim=False, exclude='bads')
 
     # Load condition 1
@@ -58,7 +58,7 @@ def test_tfr_with_inverse_operator():
                                      label=label)
 
     stc = stcs['alpha']
-    assert_true(len(stcs) == len(bands.keys()))
+    assert_true(len(stcs) == len(list(bands.keys())))
     assert_true(np.all(stc.data > 0))
     assert_array_almost_equal(stc.times, epochs.times)
 
@@ -85,16 +85,16 @@ def test_tfr_with_inverse_operator():
 @sample.requires_sample_data
 def test_source_psd():
     """Test source PSD computation in label"""
-    raw = fiff.Raw(fname_data)
+    raw = io.Raw(fname_data)
     inverse_operator = read_inverse_operator(fname_inv)
     label = read_label(fname_label)
     tmin, tmax = 0, 20  # seconds
     fmin, fmax = 55, 65  # Hz
-    NFFT = 2048
+    n_fft = 2048
     stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9.,
                              method="dSPM", tmin=tmin, tmax=tmax,
                              fmin=fmin, fmax=fmax, pick_ori="normal",
-                             NFFT=NFFT, label=label, overlap=0.1)
+                             n_fft=n_fft, label=label, overlap=0.1)
     assert_true(stc.times[0] >= fmin * 1e-3)
     assert_true(stc.times[-1] <= fmax * 1e-3)
     # Time max at line frequency (60 Hz in US)
@@ -106,7 +106,7 @@ def test_source_psd():
 def test_source_psd_epochs():
     """Test multi-taper source PSD computation in label from epochs"""
 
-    raw = fiff.Raw(fname_data)
+    raw = io.Raw(fname_data)
     inverse_operator = read_inverse_operator(fname_inv)
     label = read_label(fname_label)
 
@@ -115,7 +115,7 @@ def test_source_psd_epochs():
     bandwidth = 8.
     fmin, fmax = 0, 100
 
-    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
+    picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
                             ecg=True, eog=True, include=['STI 014'],
                             exclude='bads')
     reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py
index f7e0e9c..b7575cd 100644
--- a/mne/minimum_norm/time_frequency.py
+++ b/mne/minimum_norm/time_frequency.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
@@ -8,7 +8,7 @@ from warnings import warn
 import numpy as np
 from scipy import linalg, signal, fftpack
 
-from ..fiff.constants import FIFF
+from ..io.constants import FIFF
 from ..source_estimate import _make_stc
 from ..time_frequency.tfr import cwt, morlet
 from ..time_frequency.multitaper import (dpss_windows, _psd_from_mt,
@@ -19,6 +19,7 @@ from .inverse import (combine_xyz, prepare_inverse_operator, _assemble_kernel,
                       _check_ori, _subject_from_inverse)
 from ..parallel import parallel_func
 from ..utils import logger, verbose
+from ..externals import six
 
 
 @verbose
@@ -83,7 +84,7 @@ def source_band_induced_power(epochs, inverse_operator, bands, label=None,
     method = _check_method(method)
 
     frequencies = np.concatenate([np.arange(band[0], band[1] + df / 2.0, df)
-                                 for _, band in bands.iteritems()])
+                                 for _, band in six.iteritems(bands)])
 
     powers, _, vertno = _source_induced_power(epochs,
                                       inverse_operator, frequencies,
@@ -97,7 +98,7 @@ def source_band_induced_power(epochs, inverse_operator, bands, label=None,
     stcs = dict()
 
     subject = _subject_from_inverse(inverse_operator)
-    for name, band in bands.iteritems():
+    for name, band in six.iteritems(bands):
         idx = [k for k, f in enumerate(frequencies) if band[0] <= f <= band[1]]
 
         # average power in band + mean over epochs
@@ -128,7 +129,7 @@ def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh, with_plv,
     is_free_ori = False
     if (source_ori == FIFF.FIFFV_MNE_FREE_ORI and pick_ori == None):
         is_free_ori = True
-        n_sources /= 3
+        n_sources //= 3
 
     shape = (n_sources, n_freqs, n_times)
     power = np.zeros(shape, dtype=np.float)  # power
@@ -267,7 +268,7 @@ def source_induced_power(epochs, inverse_operator, frequencies, label=None,
     ----------
     epochs : instance of Epochs
         The epochs.
-    inverse_operator : instance of inverse operator
+    inverse_operator : instance of InverseOperator
         The inverse operator.
     label : Label
         Restricts the source estimates to a given label.
@@ -334,15 +335,16 @@ def source_induced_power(epochs, inverse_operator, frequencies, label=None,
 @verbose
 def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
                        tmin=None, tmax=None, fmin=0., fmax=200.,
-                       NFFT=2048, overlap=0.5, pick_ori=None, label=None,
-                       nave=1, pca=True, verbose=None, pick_normal=None):
+                       n_fft=2048, overlap=0.5, pick_ori=None, label=None,
+                       nave=1, pca=True, verbose=None, pick_normal=None,
+                       NFFT=None):
     """Compute source power spectrum density (PSD)
 
     Parameters
     ----------
     raw : instance of Raw
         The raw data
-    inverse_operator : dict
+    inverse_operator : instance of InverseOperator
         The inverse operator
     lambda2: float
         The regularization parameter
@@ -358,7 +360,7 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
         The lower frequency of interest
     fmax : float
         The upper frequency of interest
-    NFFT: int
+    n_fft: int
         Window size for the FFT. Should be a power of 2.
     overlap: float
         The overlap fraction between windows. Should be between 0 and 1.
@@ -383,6 +385,11 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
     stc : SourceEstimate | VolSourceEstimate
         The PSD (in dB) of each of the sources.
     """
+    if NFFT is not None:
+        n_fft = NFFT
+        warn("`NFFT` is deprecated and will be removed in v0.9. "
+             "Use `n_fft` instead.")
+
     pick_ori = _check_ori(pick_ori, pick_normal)
 
     logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
@@ -419,19 +426,19 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
         start = raw.time_as_index(tmin)[0]
     if tmax is not None:
         stop = raw.time_as_index(tmax)[0] + 1
-    NFFT = int(NFFT)
+    n_fft = int(n_fft)
     Fs = raw.info['sfreq']
-    window = signal.hanning(NFFT)
-    freqs = fftpack.fftfreq(NFFT, 1. / Fs)
+    window = signal.hanning(n_fft)
+    freqs = fftpack.fftfreq(n_fft, 1. / Fs)
     freqs_mask = (freqs >= 0) & (freqs >= fmin) & (freqs <= fmax)
     freqs = freqs[freqs_mask]
     fstep = np.mean(np.diff(freqs))
     psd = np.zeros((K.shape[0], np.sum(freqs_mask)))
     n_windows = 0
 
-    for this_start in np.arange(start, stop, int(NFFT * (1. - overlap))):
-        data, _ = raw[sel, this_start:this_start + NFFT]
-        if data.shape[1] < NFFT:
+    for this_start in np.arange(start, stop, int(n_fft * (1. - overlap))):
+        data, _ = raw[sel, this_start:this_start + n_fft]
+        if data.shape[1] < n_fft:
             logger.info("Skipping last buffer")
             break
 
@@ -568,7 +575,8 @@ def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
             # compute the psd
             if adaptive:
                 out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
-                       for x in np.array_split(x_mt_src, n_jobs))
+                       for x in np.array_split(x_mt_src,
+                                               min(n_jobs, len(x_mt_src))))
                 this_psd = np.concatenate(out)
             else:
                 x_mt_src = x_mt_src[:, :, freq_mask]
@@ -606,7 +614,7 @@ def compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
     ----------
     epochs : instance of Epochs
         The raw data.
-    inverse_operator : dict
+    inverse_operator : instance of InverseOperator
         The inverse operator.
     lambda2 : float
         The regularization parameter.
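
A sketch of the renamed keyword (illustrative, not from the patch; the
deprecated NFFT spelling still works for one release but now warns):

    import os.path as op
    from mne.io import Raw
    from mne.datasets import sample
    from mne.minimum_norm import read_inverse_operator, compute_source_psd

    s_path = op.join(sample.data_path(), 'MEG', 'sample')
    raw = Raw(op.join(s_path, 'sample_audvis_raw.fif'))
    inverse_operator = read_inverse_operator(
        op.join(s_path, 'sample_audvis-meg-oct-6-meg-inv.fif'))

    # the FFT window size is now passed as n_fft instead of NFFT
    stc_psd = compute_source_psd(raw, inverse_operator, lambda2=1. / 9.,
                                 method='dSPM', tmin=0, tmax=20,
                                 fmin=55, fmax=65, pick_ori='normal',
                                 n_fft=2048, overlap=0.1)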
diff --git a/mne/misc.py b/mne/misc.py
index 36dffd7..6367e36 100644
--- a/mne/misc.py
+++ b/mne/misc.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Scott Burns <sburns at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
diff --git a/mne/parallel.py b/mne/parallel.py
index 37415e4..1a9885f 100644
--- a/mne/parallel.py
+++ b/mne/parallel.py
@@ -1,10 +1,11 @@
 """Parallel util function
 """
 
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: Simplified BSD
 
+from .externals.six import string_types
 import inspect
 import logging
 import os
@@ -61,7 +62,7 @@ def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
         try:
             from sklearn.externals.joblib import Parallel, delayed
         except ImportError:
-            logger.warn('joblib not installed. Cannot run in parallel.')
+            logger.warning('joblib not installed. Cannot run in parallel.')
             n_jobs = 1
             my_func = func
             parallel = list
@@ -72,13 +73,13 @@ def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
     joblib_mmap = ('temp_folder' in aspec.args and 'max_nbytes' in aspec.args)
 
     cache_dir = get_config('MNE_CACHE_DIR', None)
-    if isinstance(max_nbytes, basestring) and max_nbytes == 'auto':
+    if isinstance(max_nbytes, string_types) and max_nbytes == 'auto':
         max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
 
     if max_nbytes is not None:
         if not joblib_mmap and cache_dir is not None:
-            logger.warn('"MNE_CACHE_DIR" is set but a newer version of joblib '
-                        'is needed to use the memmapping pool.')
+            logger.warning('"MNE_CACHE_DIR" is set but a newer version of '
+                           'joblib is needed to use the memmapping pool.')
         if joblib_mmap and cache_dir is None:
             logger.info('joblib supports memapping pool but "MNE_CACHE_DIR" '
                         'is not set in MNE-Python config. To enable it, use, '
@@ -119,19 +120,20 @@ def check_n_jobs(n_jobs, allow_cuda=False):
     """
     if _force_serial:
         n_jobs = 1
-        logger.info('... MNE_FORCE_SERIAL set. Processing in forced serial mode.')
+        logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
+                    'serial mode.')
 
     elif not isinstance(n_jobs, int):
         if not allow_cuda:
             raise ValueError('n_jobs must be an integer')
-        elif not isinstance(n_jobs, basestring) or n_jobs != 'cuda':
+        elif not isinstance(n_jobs, string_types) or n_jobs != 'cuda':
             raise ValueError('n_jobs must be an integer, or "cuda"')
         #else, we have n_jobs='cuda' and this is okay, so do nothing
     elif n_jobs <= 0:
         try:
             import multiprocessing
             n_cores = multiprocessing.cpu_count()
-            n_jobs = n_cores + n_jobs
+            n_jobs = min(n_cores + n_jobs + 1, n_cores)
             if n_jobs <= 0:
                 raise ValueError('If n_jobs has a negative value it must not '
                                  'be less than the number of CPUs present. '
@@ -139,8 +141,8 @@ def check_n_jobs(n_jobs, allow_cuda=False):
         except ImportError:
             # only warn if they tried to use something other than 1 job
             if n_jobs != 1:
-                logger.warn('multiprocessing not installed. Cannot run in '
-                            'parallel.')
+                logger.warning('multiprocessing not installed. Cannot run in '
+                               'parallel.')
                 n_jobs = 1
 
     return n_jobs
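
The check_n_jobs change above brings negative values in line with the joblib
convention (n_jobs=-1 uses all cores, -2 all but one). A minimal
parallel_func sketch, assuming joblib is installed (the worker function is
made up for illustration):

    import numpy as np
    from mne.parallel import parallel_func

    def _square(x):
        """Toy per-job workload."""
        return x ** 2

    # n_jobs=-1 now maps to the full CPU count
    parallel, my_square, n_jobs = parallel_func(_square, n_jobs=-1)
    out = parallel(my_square(x) for x in np.arange(10))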
diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py
index 787a56c..920f922 100644
--- a/mne/preprocessing/__init__.py
+++ b/mne/preprocessing/__init__.py
@@ -1,15 +1,16 @@
 """Preprocessing with artifact detection, SSP, and ICA"""
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
 from .maxfilter import apply_maxfilter
 from .ssp import compute_proj_ecg, compute_proj_eog
-from .eog import find_eog_events
-from .ecg import find_ecg_events
+from .eog import find_eog_events, create_eog_epochs
+from .ecg import find_ecg_events, create_ecg_epochs
 from .ica import (ICA, ica_find_eog_events, ica_find_ecg_events, score_funcs,
                   read_ica, run_ica)
+from .bads import find_outliers
diff --git a/mne/preprocessing/bads.py b/mne/preprocessing/bads.py
new file mode 100644
index 0000000..9ea677c
--- /dev/null
+++ b/mne/preprocessing/bads.py
@@ -0,0 +1,36 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+# License: BSD (3-clause)
+
+
+import numpy as np
+from scipy import stats
+
+
+def find_outliers(X, threshold=3.0):
+    """Find outliers based on Gaussian mixture
+
+    Parameters
+    ----------
+    X : np.ndarray of float, shape (n_elements,)
+        The scores for which to find outliers.
+    threshold : float
+        The value above which a feature is classified as outlier.
+
+    Returns
+    -------
+    bad_idx : np.ndarray of int, shape (n_outliers,)
+        The outlier indices.
+    """
+    max_iter = 2
+    my_mask = np.zeros(len(X), dtype=np.bool)
+    X = np.abs(X)
+    for _ in range(max_iter):
+        X = np.ma.masked_array(X, my_mask)
+        this_z = stats.zscore(X)
+        local_bad = this_z > threshold
+        my_mask = np.max([my_mask, local_bad], 0)
+        if not np.any(local_bad):
+            break
+
+    bad_idx = np.where(my_mask)[0]
+    return bad_idx
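
A quick sketch of the new find_outliers helper on synthetic scores (the
planted indices are arbitrary, for illustration only):

    import numpy as np
    from mne.preprocessing import find_outliers

    rng = np.random.RandomState(42)
    scores = rng.randn(100)
    scores[[10, 60]] += 10.  # plant two obvious outliers
    print(find_outliers(scores, threshold=3.0))  # indices incl. 10 and 60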
diff --git a/mne/preprocessing/ctps_.py b/mne/preprocessing/ctps_.py
new file mode 100644
index 0000000..3699a72
--- /dev/null
+++ b/mne/preprocessing/ctps_.py
@@ -0,0 +1,169 @@
+# Authors: Juergen Dammers <j.dammers at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: Simplified BSD
+import math
+
+import numpy as np
+from scipy.signal import hilbert
+
+
+def _compute_normalized_phase(data):
+    """Compute normalized phase angles
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_epochs, n_sources, n_times)
+        The data to compute the phase angles for.
+
+    Returns
+    -------
+    phase_angles : ndarray, shape (n_epochs, n_sources, n_times)
+        The normalized phase angles.
+    """
+    return (np.angle(hilbert(data)) + np.pi) / (2 * np.pi)
+
+
+def ctps(data, is_raw=True):
+    """Compute cross-trial-phase-statistics [1]
+
+    Note. It is assumed that the sources are already
+    appropriately filtered
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_epochs, n_sources, n_times)
+        The data, organized as trials x sources x time samples.
+    is_raw : bool
+        If True, it is assumed that the data have not been transformed to
+        Hilbert space and that the phase angles have not been normalized.
+        Defaults to True.
+
+    Returns
+    -------
+    ks_dynamics : ndarray, shape (n_sources, n_times)
+        The Kuiper statistics.
+    pk_dynamics : ndarray, shape (n_sources, n_times)
+        The normalized Kuiper index for ICA sources and
+        time slices.
+    phase_angles : ndarray, shape (n_epochs, n_sources, n_times) | None
+        The phase values for epochs, sources and time slices. If ``is_raw``
+        is False, None is returned.
+
+    References
+    ----------
+    [1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
+        M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
+        and phase statistics for complete artifact removal in independent
+        components of neuromagnetic recordings. Biomedical
+        Engineering, IEEE Transactions on 55 (10), 2353-2362.
+    """
+    if data.ndim != 3:
+        raise ValueError('Data must have 3 dimensions, not %i.' % data.ndim)
+
+    if is_raw:
+        phase_angles = _compute_normalized_phase(data)
+    else:
+        phase_angles = data  # phase angles can be computed externally
+
+    # initialize array for results
+    ks_dynamics = np.zeros_like(phase_angles[0])
+    pk_dynamics = np.zeros_like(phase_angles[0])
+
+    # calculate Kuiper's statistic for each source
+    for ii, source in enumerate(np.transpose(phase_angles, [1, 0, 2])):
+        ks, pk = kuiper(source)
+        pk_dynamics[ii, :] = pk
+        ks_dynamics[ii, :] = ks
+
+    return ks_dynamics, pk_dynamics, phase_angles if is_raw else None
+
+
+def kuiper(data, dtype=np.float64):
+    """ Kuiper's test of uniform distribution
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_sources,) | (n_sources, n_times)
+           Empirical distribution.
+    dtype : str | obj
+        The data type to be used.
+
+    Returns
+    -------
+    ks : ndarray
+        Kuiper's statistic.
+    pk : ndarray
+        Normalized probability of Kuiper's statistic [0, 1].
+    """
+    # np.sort implicitly converts non-array input and returns a sorted copy;
+    # the data are sorted along the first axis (across trials)
+    data = np.sort(data, axis=0).astype(dtype)
+    shape = data.shape
+    n_dim = len(shape)
+    n_trials = shape[0]
+
+    # create uniform cdf
+    j1 = (np.arange(n_trials, dtype=dtype) + 1.) / float(n_trials)
+    j2 = np.arange(n_trials, dtype=dtype) / float(n_trials)
+    if n_dim > 1:  # 2D input: (n_trials, n_time_slices)
+        j1 = j1[:, np.newaxis]
+        j2 = j2[:, np.newaxis]
+    d1 = (j1 - data).max(axis=0)
+    d2 = (data - j2).max(axis=0)
+    n_eff = n_trials
+
+    d = d1 + d2  # Kuiper's statistic [n_time_slices]
+
+    return d, _prob_kuiper(d, n_eff, dtype=dtype)
+
+
+def _prob_kuiper(d, n_eff, dtype='f8'):
+    """ Test for statistical significance against uniform distribution.
+
+    Parameters
+    ----------
+    d : float
+        The Kuiper distance value.
+    n_eff : int
+        The effective number of elements.
+    dtype : str | obj
+        The data type to be used. Defaults to double precision floats.
+
+    Returns
+    -------
+    pk_norm : float
+        The normalized Kuiper value, clipped to the range [0, 1].
+
+    References
+    ----------
+    [1] Stephens MA 1970. Journal of the Royal Statistical Society, ser. B,
+    vol. 32, pp. 115-122.
+
+    [2] Kuiper NH 1962. Proceedings of the Koninklijke Nederlandse Akademie
+    van Wetenschappen, ser. A, vol. 63, pp. 38-47.
+    """
+    n_time_slices = np.size(d)  # single value or vector
+    n_points = 100
+
+    en = math.sqrt(n_eff)
+    k_lambda = (en + 0.155 + 0.24 / en) * d  # see [1]
+    l2 = k_lambda ** 2.0
+    j2 = (np.arange(n_points) + 1) ** 2
+    j2 = j2.repeat(n_time_slices).reshape(n_points, n_time_slices)
+    fact = 4. * j2 * l2 - 1.
+    expo = np.exp(-2. * j2 * l2)
+    term = 2. * fact * expo
+    pk = term.sum(axis=0, dtype=dtype)
+
+    # Normalized pK to range [0,1]
+    pk_norm = np.zeros(n_time_slices)  # init pk_norm
+    pk_norm[pk > 0] = -np.log(pk[pk > 0]) / (2. * n_eff)
+    pk_norm[pk <= 0] = 1
+
+    # check for no difference to uniform cdf
+    pk_norm = np.where(k_lambda < 0.4, 0.0, pk_norm)
+
+    # check for round off errors
+    pk_norm = np.where(pk_norm > 1.0, 1.0, pk_norm)
+
+    return pk_norm
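
A small sketch of the new CTPS machinery on synthetic phase-locked epochs
(frequency and noise level are arbitrary, for illustration only):

    import numpy as np
    from mne.preprocessing.ctps_ import ctps

    rng = np.random.RandomState(0)
    n_epochs, n_times, sfreq = 30, 600, 600.
    times = np.arange(n_times) / sfreq
    # one "source": a 10 Hz rhythm phase-locked across epochs, plus noise
    data = (np.sin(2 * np.pi * 10. * times)
            + 0.5 * rng.randn(n_epochs, 1, n_times))
    ks, pk, phases = ctps(data)
    print(pk.max())  # high values indicate strong cross-trial phase locking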
diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py
index 2f3dd63..2293e5d 100644
--- a/mne/preprocessing/ecg.py
+++ b/mne/preprocessing/ecg.py
@@ -1,8 +1,12 @@
+from ..externals.six import string_types
 import numpy as np
 
-from ..fiff import pick_types, pick_channels
+from .. import pick_types, pick_channels
 from ..utils import logger, verbose, sum_squared
 from ..filter import band_pass_filter
+from ..epochs import Epochs, _BaseEpochs
+from ..io.base import _BaseRaw
+from ..evoked import Evoked
 
 
 def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
@@ -60,7 +64,7 @@ def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
 
     if thresh_value == 'auto':
         thresh_runs = np.arange(0.3, 1.1, 0.05)
-    elif isinstance(thresh_value, basestring):
+    elif isinstance(thresh_value, string_types):
         raise ValueError('threshold value must be "auto" or a float')
     else:
         thresh_runs = [thresh_value]
@@ -78,7 +82,8 @@ def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
             if window[0] > thresh1:
                 max_time = np.argmax(window)
                 time.append(ii + max_time)
-                nx = np.sum(np.diff((window > thresh1).astype(np.int) == 1))
+                nx = np.sum(np.diff(((window > thresh1).astype(np.int)
+                                     == 1).astype(int)))
                 numcross.append(nx)
                 rms.append(np.sqrt(sum_squared(window) / window.size))
                 ii += win_size
@@ -150,37 +155,24 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
     -------
     ecg_events : array
         Events.
-    ch_ECG : string
+    ch_ecg : string
         Name of channel used.
     average_pulse : float
         Estimated average pulse.
     """
-    info = raw.info
+    try:
+        idx_ecg = _get_ecg_channel_index(ch_name, raw)
+        assert len(idx_ecg) == 1
+        logger.info('Using channel %s to identify heart beats'
+                    % raw.ch_names[idx_ecg[0]])
 
-    # Geting ECG Channel
-    if ch_name is None:
-        ch_ECG = pick_types(info, meg=False, eeg=False, stim=False,
-                            eog=False, ecg=True, emg=False, ref_meg=False,
-                            exclude='bads')
-    else:
-        ch_ECG = pick_channels(raw.ch_names, include=[ch_name])
-        if len(ch_ECG) == 0:
-            raise ValueError('%s not in channel list (%s)' %
-                             (ch_name, raw.ch_names))
-
-    if len(ch_ECG) == 0 and ch_name is None:
-        raise Exception('No ECG channel found. Please specify ch_name '
-                        'parameter e.g. MEG 1531')
-
-    assert len(ch_ECG) == 1
-
-    logger.info('Using channel %s to identify heart beats'
-                % raw.ch_names[ch_ECG[0]])
-
-    ecg, times = raw[ch_ECG, :]
+        ecg, times = raw[idx_ecg, :]
+    except RuntimeError:
+        ecg, times = _make_ecg(raw, None, None, verbose)
+        idx_ecg = None
 
     # detecting QRS and generating event file
-    ecg_events = qrs_detector(info['sfreq'], ecg.ravel(), tstart=tstart,
+    ecg_events = qrs_detector(raw.info['sfreq'], ecg.ravel(), tstart=tstart,
                               thresh_value=qrs_threshold, l_freq=l_freq,
                               h_freq=h_freq, filter_length=filter_length)
 
@@ -191,4 +183,115 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
 
     ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
                        event_id * np.ones(n_events)]
-    return ecg_events, ch_ECG, average_pulse
+    return ecg_events, idx_ecg, average_pulse
+
+
+def _get_ecg_channel_index(ch_name, inst):
+    # Getting ECG channel
+    if ch_name is None:
+        ecg_idx = pick_types(inst.info, meg=False, eeg=False, stim=False,
+                             eog=False, ecg=True, emg=False, ref_meg=False,
+                             exclude='bads')
+    else:
+        ecg_idx = pick_channels(inst.ch_names, include=[ch_name])
+        if len(ecg_idx) == 0:
+            raise ValueError('%s not in channel list (%s)' %
+                             (ch_name, inst.ch_names))
+
+    if len(ecg_idx) == 0 and ch_name is None:
+        raise RuntimeError('No ECG channel found. Please specify ch_name '
+                           'parameter e.g. MEG 1531')
+
+    return ecg_idx
+
+
+@verbose
+def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None,
+                      tmin=-0.5, tmax=0.5, l_freq=8, h_freq=16, reject=None,
+                      flat=None, verbose=None, baseline=None):
+    """Conveniently generate epochs around ECG artifact events
+
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data
+    ch_name : str
+        The name of the channel to use for ECG peak detection.
+        The argument is mandatory if the dataset contains no ECG
+        channels.
+    event_id : int
+        The index to assign to found events
+    picks : array-like of int | None (default)
+        Indices of channels to include. If None, MEG and EEG channels
+        are used.
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    l_freq : float
+        Low pass frequency.
+    h_freq : float
+        High pass frequency.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None, no rejection is done. These parameters
+        should be used to reject large measurement artifacts,
+        not the ECG itself.
+    flat : dict | None
+        Rejection parameters based on flatness of the signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If flat is None, no rejection is done.
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None, no correction is applied. If baseline is (a, b),
+        the interval is between "a (s)" and "b (s)".
+        If a is None, the beginning of the data is used,
+        and if b is None, b is set to the end of the interval.
+        If baseline is equal to (None, None), the whole time
+        interval is used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    ecg_epochs : instance of Epochs
+        Data epoched around ECG R-peaks.
+    """
+
+    events, _, _ = find_ecg_events(raw, ch_name=ch_name, event_id=event_id,
+                                   l_freq=l_freq, h_freq=h_freq,
+                                   verbose=verbose)
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=False)
+
+    # create epochs around ECG events and baseline (important)
+    ecg_epochs = Epochs(raw, events=events, event_id=event_id,
+                        tmin=tmin, tmax=tmax, proj=False,
+                        picks=picks, reject=reject, baseline=baseline,
+                        verbose=verbose, preload=True)
+    return ecg_epochs
+
+
+@verbose
+def _make_ecg(inst, start, stop, verbose=None):
+    """Create ECG signal from cross channel average
+    """
+    if not any([c in inst for c in ['mag', 'grad']]):
+        raise ValueError('Unable to generate artificial ECG channel')
+    for ch in ['mag', 'grad']:
+        if ch in inst:
+            break
+    logger.info('Reconstructing ECG signal from {0}'
+                .format({'mag': 'Magnetometers',
+                         'grad': 'Gradiometers'}[ch]))
+    picks = pick_types(inst.info, meg=ch, eeg=False, ref_meg=False)
+    if isinstance(inst, _BaseRaw):
+        ecg, times = inst[picks, start:stop]
+    elif isinstance(inst, _BaseEpochs):
+        ecg = np.hstack(inst.crop(start, stop, copy=True).get_data())
+        times = inst.times
+    elif isinstance(inst, Evoked):
+        ecg = inst.data
+        times = inst.times
+    return ecg.mean(0), times
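
A minimal sketch of the new convenience function (illustrative, not from the
patch; assumes the MNE sample dataset):

    import os.path as op
    from mne.io import Raw
    from mne.datasets import sample
    from mne.preprocessing import create_ecg_epochs

    raw_fname = op.join(sample.data_path(), 'MEG', 'sample',
                        'sample_audvis_raw.fif')
    raw = Raw(raw_fname, preload=True)

    # detect heartbeats and epoch the data around them
    ecg_epochs = create_ecg_epochs(raw, tmin=-0.5, tmax=0.5)
    ecg_evoked = ecg_epochs.average()  # the average cardiac artifact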
diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py
index 7da7158..b4b29e7 100644
--- a/mne/preprocessing/eog.py
+++ b/mne/preprocessing/eog.py
@@ -1,9 +1,10 @@
 import numpy as np
 
 from .peak_finder import peak_finder
-from ..fiff import pick_types, pick_channels
+from .. import pick_types, pick_channels
 from ..utils import logger, verbose
 from ..filter import band_pass_filter
+from ..epochs import Epochs
 
 
 @verbose
@@ -37,40 +38,11 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
         Events.
     """
 
-    info = raw.info
-
     # Getting EOG Channel
-    if ch_name is None:
-        ch_eog = pick_types(info, meg=False, eeg=False, stim=False,
-                            eog=True, ecg=False, emg=False, ref_meg=False,
-                            exclude='bads')
-        if len(ch_eog) == 0:
-            logger.info('No EOG channels found')
-            logger.info('Trying with EEG 061 and EEG 062')
-            ch_eog = pick_channels(raw.ch_names,
-                                   include=['EEG 061', 'EEG 062'])
-            if len(ch_eog) != 2:
-                raise ValueError('EEG 61 or EEG 62 channel not found !!')
-
-    else:
-
-        # Check if multiple EOG Channels
-        if ',' in ch_name:
-            ch_name = ch_name.split(',')
-        else:
-            ch_name = [ch_name]
-
-        ch_eog = pick_channels(raw.ch_names, include=ch_name)
-
-        if len(ch_eog) == 0:
-            raise ValueError('%s not in channel list' % ch_name)
-        else:
-            logger.info('Using channel %s as EOG channel%s' % (
-                        " and ".join(ch_name), '' if len(ch_eog) < 2 else 's'))
-
-    logger.info('EOG channel index for this subject is: %s' % ch_eog)
+    eog_inds = _get_eog_channel_index(ch_name, raw)
+    logger.info('EOG channel index for this subject is: %s' % eog_inds)
 
-    eog, _ = raw[ch_eog, :]
+    eog, _ = raw[eog_inds, :]
 
     eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq,
                                   h_freq=h_freq,
@@ -120,3 +92,102 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
                        event_id * np.ones(n_events)]
 
     return eog_events
+
+
+def _get_eog_channel_index(ch_name, inst):
+    if isinstance(ch_name, str):
+        # Check if multiple EOG Channels
+        if ',' in ch_name:
+            ch_name = ch_name.split(',')
+        else:
+            ch_name = [ch_name]
+
+        eog_inds = pick_channels(inst.ch_names, include=ch_name)
+
+        if len(eog_inds) == 0:
+            raise ValueError('%s not in channel list' % ch_name)
+        else:
+            logger.info('Using channel %s as EOG channel%s' % (
+                        " and ".join(ch_name),
+                        '' if len(eog_inds) < 2 else 's'))
+    elif ch_name is None:
+
+        eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False,
+                              eog=True, ecg=False, emg=False, ref_meg=False,
+                              exclude='bads')
+
+        if len(eog_inds) == 0:
+            logger.info('No EOG channels found')
+            logger.info('Trying with EEG 061 and EEG 062')
+            eog_inds = pick_channels(inst.ch_names,
+                                     include=['EEG 061', 'EEG 062'])
+            if len(eog_inds) != 2:
+                raise RuntimeError('EEG 061 or EEG 062 channel not found.')
+
+    else:
+        raise ValueError('Could not find EOG channel.')
+    return eog_inds
+
+
+@verbose
+def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
+                      tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10,
+                      reject=None, flat=None,
+                      baseline=None, verbose=None):
+    """Conveniently generate epochs around EOG artifact events
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data
+    ch_name : str
+        The name of the channel to use for EOG peak detection.
+        The argument is mandatory if the dataset contains no EOG channels.
+    event_id : int
+        The index to assign to found events
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels
+        are used).
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    l_freq : float
+        Low pass frequency.
+    h_freq : float
+        High pass frequency.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None, no rejection is done. These parameters
+        should be used to reject large measurement artifacts,
+        not the EOG itself.
+    flat : dict | None
+        Rejection parameters based on flatness of the signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If flat is None, no rejection is done.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal ot (None, None) all the time
+        interval is used. If None, no correction is applied.
+
+    Returns
+    -------
+    ecg_epochs : instance of Epochs
+        Data epoched around ECG r-peaks.
+    """
+    events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
+                             l_freq=l_freq, h_freq=h_freq)
+
+    # create epochs around EOG events
+    eog_epochs = Epochs(raw, events=events, event_id=event_id,
+                        tmin=tmin, tmax=tmax, proj=False, reject=reject,
+                        flat=flat, picks=picks, baseline=baseline,
+                        preload=True)
+    return eog_epochs
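+
+# A minimal usage sketch (assumes `raw` is a preloaded Raw instance that
+# contains an EOG channel; the parameter values are illustrative):
+#
+#     >>> from mne.preprocessing import create_eog_epochs
+#     >>> eog_epochs = create_eog_epochs(raw, tmin=-0.5, tmax=0.5)
+#     >>> eog_evoked = eog_epochs.average()  # average blink response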
diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py
index 35d94b6..180ac50 100644
--- a/mne/preprocessing/ica.py
+++ b/mne/preprocessing/ica.py
@@ -1,14 +1,14 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Juergen Dammers <j.dammers at fz-juelich.de>
 #
 # License: BSD (3-clause)
 
 import warnings
+
 from copy import deepcopy
 from inspect import getargspec, isfunction
 from collections import namedtuple
-from math import ceil
 
 import os
 import json
@@ -18,25 +18,34 @@ from scipy import stats
 from scipy.spatial import distance
 from scipy import linalg
 
-from .ecg import qrs_detector
-from .eog import _find_eog_events
+from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
+                  create_ecg_epochs)
+from .eog import _find_eog_events, _get_eog_channel_index
+from .infomax_ import infomax
 
 from ..cov import compute_whitener
-from .. import Covariance
-from ..fiff.pick import (pick_types, pick_channels, pick_info,
-                         channel_indices_by_type)
-from ..fiff.write import (write_double_matrix, write_string,
-                          write_name_list, write_int, start_block,
-                          end_block)
-from ..fiff.tree import dir_tree_find
-from ..fiff.open import fiff_open
-from ..fiff.tag import read_tag
-from ..fiff.meas_info import write_meas_info, read_meas_info
-from ..fiff.constants import Bunch, FIFF
-from ..viz import plot_ica_panel, plot_ica_topomap
-from ..fiff.write import start_file, end_file, write_id
-from ..epochs import _is_good
-from ..utils import check_sklearn_version, logger, verbose
+from .. import Covariance, Evoked
+from ..io.pick import (pick_types, pick_channels, pick_info)
+from ..io.write import (write_double_matrix, write_string,
+                        write_name_list, write_int, start_block,
+                        end_block)
+from ..io.tree import dir_tree_find
+from ..io.open import fiff_open
+from ..io.tag import read_tag
+from ..io.meas_info import write_meas_info, read_meas_info
+from ..io.constants import Bunch, FIFF
+from ..io.base import _BaseRaw
+from ..epochs import _BaseEpochs
+from ..viz import (plot_ica_components, plot_ica_scores,
+                   plot_ica_sources, plot_ica_overlay)
+from ..channels import _contains_ch_type, ContainsMixin
+from ..io.write import start_file, end_file, write_id
+from ..utils import (check_sklearn_version, logger, check_fname, verbose,
+                     deprecated, _reject_data_segments)
+from ..filter import band_pass_filter
+from .bads import find_outliers
+from .ctps_ import ctps
+from ..externals.six import string_types, text_type
 
 try:
     from sklearn.utils.extmath import fast_dot
@@ -75,7 +84,8 @@ __all__ = ['ICA', 'ica_find_ecg_events', 'ica_find_eog_events', 'score_funcs',
            'read_ica', 'run_ica']
 
 
-class ICA(object):
+class ICA(ContainsMixin):
+
     """M/EEG signal decomposition using Independent Component Analysis (ICA)
 
     This object can be used to estimate ICA components and then
@@ -101,7 +111,7 @@ class ICA(object):
     max_pca_components : int | None
         The number of components used for PCA decomposition. If None, no
         dimension reduction will be applied and max_pca_components will equal
-        the number of channels supplied on decomposing data.
+        the number of channels supplied on decomposing data. Defaults to None.
     n_pca_components : int | float
         The number of PCA components used after ICA recomposition. The ensuing
         attribute allows to balance noise reduction against potential loss of
@@ -120,19 +130,28 @@ class ICA(object):
         np.random.RandomState to initialize the FastICA estimation.
         As the estimation is non-deterministic it can be useful to
         fix the seed to have reproducible results.
+    method : {'fastica', 'infomax', 'extended-infomax'}
+        The ICA method to use. Defaults to 'fastica'.
     algorithm : {'parallel', 'deflation'}
-        Apply parallel or deflational algorithm for FastICA.
+        Apply parallel or deflational algorithm for FastICA. This parameter
+        belongs to FastICA and is deprecated. Please use `fit_params` instead.
     fun : string or function, optional. Default: 'logcosh'
         The functional form of the G function used in the
         approximation to neg-entropy. Could be either 'logcosh', 'exp',
         or 'cube'.
         You can also provide your own function. It should return a tuple
         containing the value of the function, and of its derivative, in the
-        point.
+        point. This parameter belongs to FastICA and is deprecated.
+        Please use `fit_params` instead.
     fun_args: dictionary, optional
         Arguments to send to the functional form.
         If empty and if fun='logcosh', fun_args will take value
-        {'alpha' : 1.0}
+        {'alpha' : 1.0}. This parameter belongs to FastICA and is deprecated.
+        Please use `fit_params` instead.
+    fit_params : dict | None.
+        Additional parameters passed to the ICA estimator chosen by `method`.
+    max_iter : int, optional
+        Maximum number of iterations during fit.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -164,23 +183,27 @@ class ICA(object):
         If fit, the matrix to unmix observed data, else None.
     exclude : list
         List of sources indices to exclude, i.e. artifact components identified
-        throughout the ICA session. Indices added to this list, will be
+        throughout the ICA solution. Indices added to this list will be
         dispatched to the .pick_sources methods. Source indices passed to
         the .pick_sources method via the 'exclude' argument are added to the
         .exclude attribute. When saving the ICA also the indices are restored.
         Hence, artifact components once identified don't have to be added
         again. To dump this 'artifact memory' say: ica.exclude = []
-    info : None | instance of mne.fiff.meas_info.Info
+    info : None | instance of mne.io.meas_info.Info
         The measurement info copied from the object fitted.
     `n_samples_` : int
         the number of samples used on fit.
     """
     @verbose
-    def __init__(self, n_components, max_pca_components=100,
-                 n_pca_components=64, noise_cov=None, random_state=None,
-                 algorithm='parallel', fun='logcosh', fun_args=None,
-                 verbose=None):
-
+    def __init__(self, n_components=None, max_pca_components=None,
+                 n_pca_components=None, noise_cov=None, random_state=None,
+                 method='fastica',
+                 algorithm=None, fun=None, fun_args=None,
+                 fit_params=None, max_iter=200, verbose=None):
+        methods = ('fastica', 'infomax', 'extended-infomax')
+        if method not in methods:
+            raise ValueError('`method` must be "%s". You passed: "%s"' %
+                             ('" or "'.join(methods), method))
         if not check_sklearn_version(min_version='0.12'):
             raise RuntimeError('the scikit-learn package (version >= 0.12) '
                                'is required for ICA')
@@ -195,7 +218,7 @@ class ICA(object):
         if isinstance(n_components, float) \
                 and not 0 < n_components <= 1:
             raise ValueError('Selecting ICA components by explained variance '
-                             'necessitates values between 0.0 and 1.0 ')
+                             'needs values between 0.0 and 1.0 ')
 
         self.current_fit = 'unfitted'
         self.verbose = verbose
@@ -203,12 +226,38 @@ class ICA(object):
         self.max_pca_components = max_pca_components
         self.n_pca_components = n_pca_components
         self.ch_names = None
-        self.random_state = random_state if random_state is not None else 0
+        self.random_state = random_state if random_state is not None else 42
+
+        for attr in ['algorithm', 'fun', 'fun_args']:
+            if eval(attr) is not None:
+                warnings.warn('The parameter `%s` is deprecated and will '
+                              'be removed in MNE 0.9. Please use '
+                              '`fit_params` instead' % attr,
+                              DeprecationWarning)
+
         self.algorithm = algorithm
         self.fun = fun
         self.fun_args = fun_args
+
+        if fit_params is None:
+            fit_params = {}
+        if method == 'fastica':
+            update = {'algorithm': 'parallel', 'fun': 'logcosh',
+                      'fun_args': None}
+            fit_params.update(dict((k, v) for k, v in update.items() if k
+                              not in fit_params))
+        elif method == 'infomax':
+            fit_params.update({'extended': False})
+        elif method == 'extended-infomax':
+            fit_params.update({'extended': True})
+        if 'max_iter' not in fit_params:
+            fit_params['max_iter'] = max_iter
+        self.max_iter = max_iter
+        self.fit_params = fit_params
+
         self.exclude = []
         self.info = None
+        self.method = method
 
     def __repr__(self):
         """ICA fit information"""
@@ -219,19 +268,22 @@ class ICA(object):
         else:
             s = 'epochs'
         s += ' decomposition, '
-        s += 'fit: %s samples, ' % str(getattr(self, 'n_samples_', ''))
+        s += 'fit (%s): %s samples, ' % (self.method,
+                                         str(getattr(self, 'n_samples_', '')))
         s += ('%s components' % str(self.n_components_) if
               hasattr(self, 'n_components_') else
               'no dimension reduction')
+        if self.info is not None:
+            ch_fit = ['"%s"' % c for c in ['mag', 'grad', 'eeg'] if c in self]
+            s += ', channels used: {0}'.format('; '.join(ch_fit))
         if self.exclude:
             s += ', %i sources marked for exclusion' % len(self.exclude)
 
         return '<ICA  |  %s>' % s
 
     @verbose
-    def decompose_raw(self, raw, picks=None, start=None, stop=None,
-                      decim=None, reject=None, flat=None, tstep=2.0,
-                      verbose=None):
+    def fit(self, inst, picks=None, start=None, stop=None, decim=None,
+            reject=None, flat=None, tstep=2.0, verbose=None):
         """Run the ICA decomposition on raw data
 
         Caveat! If supplying a noise covariance keep track of the projections
@@ -241,11 +293,11 @@ class ICA(object):
 
         Parameters
         ----------
-        raw : instance of mne.fiff.Raw
+        inst : instance of Raw or Epochs
             The measurements on which to estimate the ICA decomposition.
-        picks : array-like
+        picks : array-like of int
             Channels to be included. This selection remains throughout the
-            initialized ICA session. If None only good data channels are used.
+            initialized ICA solution. If None only good data channels are used.
         start : int | float | None
             First sample to include. If float, data will be interpreted as
             time in seconds. If None, data will be used from the first sample.
@@ -260,13 +312,15 @@ class ICA(object):
             Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
             If reject is None then no rejection is done. You should
             use such parameters to reject big measurement artifacts
-            and not EOG for example.
+            and not EOG for example. It only applies if `inst` is of type Raw.
         flat : dict | None
             Rejection parameters based on flatness of signal
             Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
             If flat is None then no rejection is done.
+            It only applies if `inst` is of type Raw.
         tstep : float
-            Length of data chunks for artefact rejection in seconds.
+            Length of data chunks for artifact rejection in seconds.
+            It only applies if `inst` is of type Raw.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to self.verbose.
@@ -276,17 +330,38 @@ class ICA(object):
         self : instance of ICA
             Returns the modified instance.
         """
-        if self.current_fit != 'unfitted':
-            raise RuntimeError('ICA decomposition has already been fitted. '
-                               'Please start a new ICA session.')
+        if isinstance(inst, _BaseRaw):
+            self._fit_raw(inst, picks, start, stop, decim, reject, flat,
+                          tstep, verbose)
+        elif isinstance(inst, _BaseEpochs):
+            self._fit_epochs(inst, picks, decim, verbose)
+        else:
+            raise ValueError('Data input must be of Raw or Epochs type')
+        return self
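+
+    # A minimal usage sketch (assumes `raw` is a preloaded Raw instance;
+    # the rejection thresholds are illustrative):
+    #
+    #     >>> ica = ICA(n_components=0.95, method='fastica')
+    #     >>> ica.fit(raw, decim=3, reject=dict(mag=4e-12, grad=4000e-13))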
 
-        logger.info('Computing signal decomposition on raw data. '
-                    'Please be patient, this may take some time')
+    def _reset(self):
+        """Aux method"""
+        del self._pre_whitener
+        del self.unmixing_matrix_
+        del self.mixing_matrix_
+        del self.n_components_
+        del self.n_samples_
+        if hasattr(self, 'drop_inds_'):
+            del self.drop_inds_
+
+    def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,
+                 verbose):
+        """Aux method
+        """
+        if self.current_fit != 'unfitted':
+            self._reset()
 
         if picks is None:  # just use good data channels
             picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
                                ecg=False, misc=False, stim=False,
-                               exclude='bads')
+                               ref_meg=False, exclude='bads')
+        logger.info('Fitting ICA to data using %i channels. \n'
+                    'Please be patient, this may take some time' % len(picks))
 
         if self.max_pca_components is None:
             self.max_pca_components = len(picks)
@@ -303,81 +378,31 @@ class ICA(object):
             data = data[:, ::decim].copy()
 
         if (reject is not None) or (flat is not None):
-            info = self.info
-            data_clean = np.empty_like(data)
-            idx_by_type = channel_indices_by_type(info)
-            step = int(ceil(tstep * info['sfreq']))
-            if decim is not None:
-                step = int(ceil(step / float(decim)))
-            this_start = 0
-            this_stop = 0
-            for first in xrange(0, data.shape[1], step):
-                last = first + step
-                data_buffer = data[:, first:last]
-                if data_buffer.shape[1] < (last - first):
-                    break  # end of the time segment
-                if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
-                            flat, ignore_chs=info['bads']):
-                    this_stop = this_start + data_buffer.shape[1]
-                    data_clean[:, this_start:this_stop] = data_buffer
-                    this_start += data_buffer.shape[1]
-                else:
-                    logger.info("Artifact detected in [%d, %d]" % (first,
-                                                                   last))
-            data = data_clean[:, :this_stop]
+            data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
+                                                          decim, self.info,
+                                                          tstep)
+
         self.n_samples_ = data.shape[1]
-        if not data.any():
-            raise RuntimeError('No clean segment found. Please '
-                               'consider updating your rejection '
-                               'thresholds.')
 
         data, self._pre_whitener = self._pre_whiten(data,
                                                     raw.info, picks)
 
-        self._decompose(data, self.max_pca_components, 'raw')
+        self._fit(data, self.max_pca_components, 'raw')
 
         return self
 
-    @verbose
-    def decompose_epochs(self, epochs, picks=None, decim=None, verbose=None):
-        """Run the ICA decomposition on epochs
-
-        Caveat! If supplying a noise covariance keep track of the projections
-        available in the cov, the raw or the epochs object. For example,
-        if you are interested in EOG or ECG artifacts, EOG and ECG projections
-        should be temporally removed before fitting the ICA.
-
-        Parameters
-        ----------
-        epochs : instance of Epochs
-            The epochs. The ICA is estimated on the concatenated epochs.
-        picks : array-like
-            Channels to be included relative to the channels already picked on
-            epochs-initialization. This selection remains throughout the
-            initialized ICA session.
-        decim : int | None
-            Increment for selecting each nth time slice. If None, all samples
-            within ``start`` and ``stop`` are used.
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
-            Defaults to self.verbose.
-
-        Returns
-        -------
-        self : instance of ICA
-            Returns the modified instance.
+    def _fit_epochs(self, epochs, picks, decim, verbose):
+        """Aux method
         """
         if self.current_fit != 'unfitted':
-            raise RuntimeError('ICA decomposition has already been fitted. '
-                               'Please start a new ICA session.')
-
-        logger.info('Computing signal decomposition on epochs. '
-                    'Please be patient, this may take some time')
+            self._reset()
 
         if picks is None:
             picks = pick_types(epochs.info, meg=True, eeg=True, eog=False,
                                ecg=False, misc=False, stim=False,
                                ref_meg=False, exclude='bads')
+        logger.info('Fitting ICA to data using %i channels. \n'
+                    'Please be patient, this may take some time' % len(picks))
 
         # filter out all the channels the raw wouldn't have initialized
         self.info = pick_info(epochs.info, picks)
@@ -392,16 +417,111 @@ class ICA(object):
         data = epochs.get_data()[:, picks]
         if decim is not None:
             data = data[:, :, ::decim].copy()
-        self.n_samples_ = np.prod(data.shape[1:])
+
+        self.n_samples_ = np.prod(data[:, 0, :].shape)
 
         data, self._pre_whitener = \
             self._pre_whiten(np.hstack(data), epochs.info, picks)
 
-        self._decompose(data, self.max_pca_components, 'epochs')
+        self._fit(data, self.max_pca_components, 'epochs')
 
         return self
 
-    def _get_sources(self, data):
+    def _pre_whiten(self, data, info, picks):
+        """Aux function"""
+        has_pre_whitener = hasattr(self, '_pre_whitener')
+        if not has_pre_whitener and self.noise_cov is None:
+            # use standardization as whitener
+            # Scale (z-score) the data by channel type
+            info = pick_info(deepcopy(info), picks)
+            pre_whitener = np.empty([len(data), 1])
+            for ch_type in ['mag', 'grad', 'eeg']:
+                if _contains_ch_type(info, ch_type):
+                    if ch_type == 'eeg':
+                        this_picks = pick_types(info, meg=False, eeg=True)
+                    else:
+                        this_picks = pick_types(info, meg=ch_type, eeg=False)
+                    pre_whitener[this_picks] = np.std(data[this_picks])
+            data /= pre_whitener
+        elif not has_pre_whitener and self.noise_cov is not None:
+            pre_whitener, _ = compute_whitener(self.noise_cov, info, picks)
+            assert data.shape[0] == pre_whitener.shape[1]
+            data = fast_dot(pre_whitener, data)
+        elif has_pre_whitener and self.noise_cov is None:
+            data /= self._pre_whitener
+            pre_whitener = self._pre_whitener
+        else:
+            data = fast_dot(self._pre_whitener, data)
+            pre_whitener = self._pre_whitener
+
+        return data, pre_whitener
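+
+    # A minimal numpy sketch of the standardization branch above, for a
+    # single channel type (assumed shape: (n_channels, n_times)):
+    #
+    #     >>> import numpy as np
+    #     >>> data = np.random.randn(5, 1000)
+    #     >>> pre_whitener = np.std(data) * np.ones((len(data), 1))
+    #     >>> data /= pre_whitener  # scaled by the channel-type std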
+
+    def _fit(self, data, max_pca_components, fit_type):
+        """Aux function """
+        from sklearn.decomposition import RandomizedPCA
+
+        # XXX fix copy==True later. Bug in sklearn, see PR #2273
+        pca = RandomizedPCA(n_components=max_pca_components, whiten=True,
+                            copy=True, random_state=self.random_state)
+
+        if isinstance(self.n_components, float):
+            # compute full feature variance before doing PCA
+            full_var = np.var(data, axis=1).sum()
+
+        data = pca.fit_transform(data.T)
+
+        if isinstance(self.n_components, float):
+            # compute explained variance manually, cf. sklearn bug
+            # fixed in #2664
+            explained_variance_ratio_ = pca.explained_variance_ / full_var
+            n_components_ = np.sum(explained_variance_ratio_.cumsum()
+                                   <= self.n_components)
+            if n_components_ < 1:
+                raise RuntimeError('One PCA component captures most of the '
+                                   'explained variance, your threshold '
+                                   'results in 0 components. You should '
+                                   'select a higher value.')
+            logger.info('Selection by explained variance: %i components' %
+                        n_components_)
+            sel = slice(n_components_)
+        else:
+            logger.info('Selection by number: %i components' %
+                        self.n_components)
+            if self.n_components is not None:  # normal n case
+                sel = slice(self.n_components)
+            else:  # None case
+                logger.info('Using all PCA components: %i'
+                            % len(pca.components_))
+                sel = slice(len(pca.components_))
+
+        # the things to store for PCA
+        self.pca_mean_ = pca.mean_
+        self.pca_components_ = pca.components_
+        # unwhiten pca components and put scaling in unmixing matrix later.
+        self.pca_explained_variance_ = exp_var = pca.explained_variance_
+        self.pca_components_ *= np.sqrt(exp_var[:, None])
+        del pca
+        # update number of components
+        self.n_components_ = sel.stop
+        if self.n_pca_components is not None:
+            if self.n_pca_components > len(self.pca_components_):
+                self.n_pca_components = len(self.pca_components_)
+
+        # Take care of ICA
+        if self.method == 'fastica':
+            from sklearn.decomposition import FastICA  # to avoid strong dep.
+            ica = FastICA(whiten=False,
+                          random_state=self.random_state, **self.fit_params)
+            ica.fit(data[:, sel])
+            # get unmixing and add scaling
+            self.unmixing_matrix_ = getattr(ica, 'components_',
+                                            getattr(ica, 'unmixing_matrix_',
+                                                    None))
+        elif self.method in ('infomax', 'extended-infomax'):
+            self.unmixing_matrix_ = infomax(data[:, sel], **self.fit_params)
+        self.unmixing_matrix_ /= np.sqrt(exp_var[sel])[None, :]
+        self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
+        self.current_fit = fit_type
+
+    def _transform(self, data):
         """Compute sources from data (operates inplace)"""
         if self.pca_mean_ is not None:
             data -= self.pca_mean_[:, None]
@@ -412,52 +532,27 @@ class ICA(object):
         sources = fast_dot(self.unmixing_matrix_, pca_data)
         return sources
 
-    def get_sources_raw(self, raw, start=None, stop=None):
-        """Estimate raw sources given the unmixing matrix
-
-        Parameters
-        ----------
-        raw : instance of Raw
-            Raw object to draw sources from.
-        start : int | float | None
-            First sample to include. If float, data will be interpreted as
-            time in seconds. If None, the entire data will be used.
-        stop : int | float | None
-            Last sample to not include. If float, data will be interpreted as
-            time in seconds. If None, the entire data will be used.
-
-        Returns
-        -------
-        sources : array, shape = (n_components, n_times)
-            The ICA sources time series.
-        """
+    def _transform_raw(self, raw, start, stop):
         if not hasattr(self, 'mixing_matrix_'):
-            raise RuntimeError('No fit available. Please first fit ICA '
-                               'decomposition.')
+            raise RuntimeError('No fit available. Please fit ICA.')
         start, stop = _check_start_stop(raw, start, stop)
 
         picks = [raw.ch_names.index(k) for k in self.ch_names]
-        data, _ = self._pre_whiten(raw[picks, start:stop][0], raw.info, picks)
-        return self._get_sources(data)
-
-    def get_sources_epochs(self, epochs, concatenate=False):
-        """Estimate epochs sources given the unmixing matrix
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Raw doesn\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide a Raw object compatible with '
+                               'ica.ch_names' % (len(self.ch_names),
+                                                 len(picks)))
 
-        Parameters
-        ----------
-        epochs : instance of Epochs
-            Epochs object to draw sources from.
-        concatenate : bool
-            If true, epochs and time slices will be concatenated.
+        data, _ = self._pre_whiten(raw[picks, start:stop][0], raw.info, picks)
+        return self._transform(data)
 
-        Returns
-        -------
-        epochs_sources : ndarray of shape (n_epochs, n_sources, n_times)
-            The sources for each epoch
+    def _transform_epochs(self, epochs, concatenate):
+        """Aux method
         """
         if not hasattr(self, 'mixing_matrix_'):
-            raise RuntimeError('No fit available. Please first fit ICA '
-                               'decomposition.')
+            raise RuntimeError('No fit available. Please fit ICA')
 
         picks = pick_types(epochs.info, include=self.ch_names, exclude=[],
                            ref_meg=False)
@@ -472,7 +567,7 @@ class ICA(object):
 
         data = np.hstack(epochs.get_data()[:, picks])
         data, _ = self._pre_whiten(data, epochs.info, picks)
-        sources = self._get_sources(data)
+        sources = self._transform(data)
 
         if not concatenate:
             # Put the data back in 3D
@@ -480,93 +575,148 @@ class ICA(object):
 
         return sources
 
-    @verbose
-    def save(self, fname):
-        """Store ICA session into a fiff file.
-
-        Parameters
-        ----------
-        fname : str
-            The absolute path of the file name to save the ICA session into.
+    def _transform_evoked(self, evoked):
+        """Aux method
         """
-        if self.current_fit == 'unfitted':
-            raise RuntimeError('No fit available. Please first fit ICA '
-                               'decomposition.')
+        if not hasattr(self, 'mixing_matrix_'):
+            raise RuntimeError('No fit available. Please first fit ICA')
 
-        logger.info('Wrting ica session to %s...' % fname)
-        fid = start_file(fname)
+        picks = pick_types(evoked.info, include=self.ch_names, exclude=[],
+                           ref_meg=False)
 
-        try:
-            _write_ica(fid, self)
-        except Exception as inst:
-            os.remove(fname)
-            raise inst
-        end_file(fid)
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Evoked doesn\'t match fitted data: %i '
+                               'channels fitted but %i channels supplied. '
+                               '\nPlease provide an Evoked object compatible '
+                               'with ica.ch_names' % (len(self.ch_names),
+                                                      len(picks)))
 
-        return self
+        data, _ = self._pre_whiten(evoked.data[picks], evoked.info, picks)
+        sources = self._transform(data)
 
-    def sources_as_raw(self, raw, picks=None, start=None, stop=None):
-        """Export sources as raw object
+        return sources
+
+    def get_sources(self, inst, add_channels=None, start=None, stop=None):
+        """Estimate sources given the unmixing matrix
+
+        This method will return the sources in the container format passed.
+        Typical use cases:
+
+        1. pass Raw object to use `raw.plot` for ICA sources
+        2. pass Epochs object to compute trial-based statistics in ICA space
+        3. pass Evoked object to investigate time-locking in ICA space
 
         Parameters
         ----------
-        raw : instance of Raw
-            Raw object to export sources from.
-        picks : array-like
-            Channels to be included in addition to the sources. If None,
-            artifact and stimulus channels will be included.
+        inst : instance of Raw, Epochs or Evoked
+            Object to compute sources from and to represent sources in.
+        add_channels : None | list of str
+            Additional channels to be added. Useful, e.g., to compare sources
+            with some reference. Defaults to None.
         start : int | float | None
             First sample to include. If float, data will be interpreted as
-            time in seconds. If None, data will be used from the first sample.
+            time in seconds. If None, the entire data will be used.
         stop : int | float | None
             Last sample to not include. If float, data will be interpreted as
-            time in seconds. If None, data will be used to the last sample.
+            time in seconds. If None, the entire data will be used.
 
         Returns
         -------
-        out : instance of mne.Raw
-            Container object for ICA sources
+        sources : instance of Raw, Epochs or Evoked
+            The ICA sources time series.
         """
-        # include 'reference' channels for comparison with ICA
-        if picks is None:
-            picks = pick_types(raw.info, meg=False, eeg=False, misc=True,
-                               ecg=True, eog=True, stim=True, exclude='bads')
+        if isinstance(inst, _BaseRaw):
+            sources = self._sources_as_raw(inst, add_channels, start, stop)
+        elif isinstance(inst, _BaseEpochs):
+            sources = self._sources_as_epochs(inst, add_channels, False)
+        elif isinstance(inst, Evoked):
+            sources = self._sources_as_evoked(inst, add_channels)
+        else:
+            raise ValueError('Data input must be of Raw, Epochs or Evoked '
+                             'type')
+
+        return sources
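+
+    # A minimal usage sketch (assumes a fitted `ica` and a matching `raw`;
+    # the reference channel name is illustrative):
+    #
+    #     >>> sources = ica.get_sources(raw, add_channels=['EOG 061'])
+    #     >>> sources.plot()  # browse ICA sources like raw channels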
 
+    def _sources_as_raw(self, raw, add_channels, start, stop):
+        """Aux method
+        """
         # merge copied instance and picked data with sources
-        start, stop = _check_start_stop(raw, start, stop)
-        sources = self.get_sources_raw(raw, start=start, stop=stop)
-        if raw._preloaded:  # get data and temporarily delete
+        sources = self._transform_raw(raw, start=start, stop=stop)
+        if raw.preload:  # get data and temporarily delete
             data, times = raw._data, raw._times
             del raw._data, raw._times
 
         out = raw.copy()  # copy and reappend
-        if raw._preloaded:
+        if raw.preload:
             raw._data, raw._times = data, times
 
         # populate copied raw.
-        out.fids = []
-        data_, times_ = raw[picks, start:stop]
-        out._data = np.r_[sources, data_]
+        start, stop = _check_start_stop(raw, start, stop)
+        if add_channels is not None:
+            raw_picked = raw.pick_channels(add_channels, copy=True)
+            data_, times_ = raw_picked[:, start:stop]
+            data_ = np.r_[sources, data_]
+        else:
+            data_ = sources
+            _, times_ = raw[0, start:stop]
+        out._data = data_
         out._times = times_
-        out._preloaded = True
+        out._filenames = list()
+        out.preload = True
 
         # update first and last samples
         out.first_samp = raw.first_samp + (start if start else 0)
         out.last_samp = out.first_samp + stop if stop else raw.last_samp
 
-        # XXX use self.info later, for now this is better
-        self._export_info(out.info, raw, picks)
         out._projector = None
+        self._export_info(out.info, raw, add_channels)
 
         return out
 
-    def _export_info(self, info, container, picks):
-        """Aux function
+    def _sources_as_epochs(self, epochs, add_channels, concatenate):
+        """Aux method"""
+        out = epochs.copy()
+        sources = self._transform_epochs(epochs, concatenate)
+        if add_channels is not None:
+            picks = [epochs.ch_names.index(k) for k in add_channels]
+        else:
+            picks = []
+        out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
+                                   axis=1) if len(picks) > 0 else sources
+
+        self._export_info(out.info, epochs, add_channels)
+        out.preload = True
+        out.raw = None
+        out._projector = None
+
+        return out
+
+    def _sources_as_evoked(self, evoked, add_channels):
+        """Aux method
+        """
+        if add_channels is not None:
+            picks = [evoked.ch_names.index(k) for k in add_channels]
+        else:
+            picks = []
+
+        sources = self._transform_evoked(evoked)
+        if len(picks) > 0:
+            data = np.r_[sources, evoked.data[picks]]
+        else:
+            data = sources
+        out = evoked.copy()
+        out.data = data
+        self._export_info(out.info, evoked, add_channels)
+
+        return out
+
+    def _export_info(self, info, container, add_channels):
+        """Aux method
         """
         # set channel names and info
         ch_names = info['ch_names'] = []
         ch_info = info['chs'] = []
-        for ii in xrange(self.n_components_):
+        for ii in range(self.n_components_):
             this_source = 'ICA %03d' % (ii + 1)
             ch_names.append(this_source)
             ch_info.append(dict(ch_name=this_source, cal=1,
@@ -578,78 +728,628 @@ class ICA(object):
                                 range=1.0, scanno=ii + 1, unit_mul=0,
                                 coil_trans=None))
 
-        # re-append additionally picked ch_names
-        ch_names += [container.ch_names[k] for k in picks]
-        # re-append additionally picked ch_info
-        ch_info += [container.info['chs'][k] for k in picks]
-
-        # update number of channels
-        info['nchan'] = len(picks) + self.n_components_
+        if add_channels is not None:
+            # re-append additionally picked ch_names
+            ch_names += add_channels
+            # re-append additionally picked ch_info
+            ch_info += [k for k in container.info['chs'] if k['ch_name'] in
+                        add_channels]
+        # update number of channels
+        info['nchan'] = self.n_components_
+        if add_channels is not None:
+            info['nchan'] += len(add_channels)
         info['bads'] = [ch_names[k] for k in self.exclude]
         info['projs'] = []  # make sure projections are removed.
-        info['filenames'] = []
 
-    def sources_as_epochs(self, epochs, picks=None):
-        """Create epochs in ICA space from epochs object
+    @verbose
+    def score_sources(self, inst, target=None, score_func='pearsonr',
+                      start=None, stop=None, l_freq=None, h_freq=None,
+                      verbose=None):
+        """Assign score to components based on statistic or metric
 
         Parameters
         ----------
-        epochs : instance of Epochs
-            Epochs object to draw sources from.
-        picks : array-like
-            Channels to be included in addition to the sources. If None,
-            artifact channels will be included.
+        inst : instance of Raw, Epochs or Evoked
+            The object to reconstruct the sources from.
+        target : array-like | ch_name | None
+            Signal to which the sources shall be compared. It has to be of
+            the same shape as the sources. If a string is supplied, a
+            routine will try to find a matching channel name. If None, a
+            score function expecting only one input-array argument must be
+            used, for instance, scipy.stats.skew.
+        score_func : callable | str label
+            Callable taking as arguments either two input arrays
+            (e.g. Pearson correlation) or one input array (e.g. skewness)
+            and returning a float. For convenience, the most common
+            score_funcs are available via string labels: currently, all
+            distance metrics from scipy.spatial and all functions from
+            scipy.stats taking compatible input arguments are supported.
+            These functions have been modified to support iteration over
+            the rows of a 2D array.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        l_freq : float
+            Low pass frequency.
+        h_freq : float
+            High pass frequency.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
 
         Returns
         -------
-        ica_epochs : instance of Epochs
-            The epochs in ICA space.
+        scores : ndarray
+            scores for each source as returned from score_func
         """
+        if isinstance(inst, _BaseRaw):
+            sources = self._transform_raw(inst, start, stop)
+        elif isinstance(inst, _BaseEpochs):
+            sources = self._transform_epochs(inst, concatenate=True)
+        elif isinstance(inst, Evoked):
+            sources = self._transform_evoked(inst)
+        else:
+            raise ValueError('Input must be of Raw, Epochs or Evoked type')
 
-        out = epochs.copy()
-        sources = self.get_sources_epochs(epochs)
-        if picks is None:
-            picks = pick_types(epochs.info, meg=False, eeg=False, misc=True,
-                               ecg=True, eog=True, stim=True, exclude='bads')
+        if target is not None:  # we can have univariate metrics without target
+            target = self._check_target(target, inst, start, stop)
+
+            if sources.shape[-1] != target.shape[-1]:
+                raise ValueError('Sources and target do not have the same '
+                                 'number of time slices.')
+            # auto target selection
+            if verbose is None:
+                verbose = self.verbose
+            if isinstance(inst, _BaseRaw):
+                sources, target = _band_pass_filter(self, sources, target,
+                                                    l_freq, h_freq, verbose)
+
+        scores = _find_sources(sources, target, score_func)
+
+        return scores
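+
+    # A minimal usage sketch (assumes a fitted `ica`, a matching `raw` and
+    # an EOG channel named 'EOG 061'):
+    #
+    #     >>> import numpy as np
+    #     >>> scores = ica.score_sources(raw, target='EOG 061',
+    #     ...                            score_func='pearsonr')
+    #     >>> worst = int(np.abs(scores).argmax())  # most EOG-like source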
+
+    def _check_target(self, target, inst, start, stop):
+        """Aux Method"""
+        if isinstance(inst, _BaseRaw):
+            start, stop = _check_start_stop(inst, start, stop)
+            if hasattr(target, 'ndim'):
+                if target.ndim < 2:
+                    target = target.reshape(1, target.shape[-1])
+            if isinstance(target, string_types):
+                pick = _get_target_ch(inst, target)
+                target, _ = inst[pick, start:stop]
+
+        elif isinstance(inst, _BaseEpochs):
+            if isinstance(target, string_types):
+                pick = _get_target_ch(inst, target)
+                target = inst.get_data()[:, pick]
+
+            if hasattr(target, 'ndim'):
+                if target.ndim == 3 and min(target.shape) == 1:
+                    target = target.ravel()
+
+        elif isinstance(inst, Evoked):
+            if isinstance(target, string_types):
+                pick = _get_target_ch(inst, target)
+                target = inst.data[pick]
+
+        return target
+
+    @verbose
+    def find_bads_ecg(self, inst, ch_name=None, threshold=None,
+                      start=None, stop=None, l_freq=8, h_freq=16,
+                      method='ctps', verbose=None):
+        """Detect ECG related components using correlation
+
+        Note. If no ECG channel is available, the routine attempts to create
+        an artificial ECG based on cross-channel averaging.
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            Object to compute sources from.
+        ch_name : str
+            The name of the channel to use for ECG peak detection.
+            The argument is mandatory if the dataset contains no ECG
+            channels.
+        threshold : float
+            The value above which a feature is classified as outlier. If
+            method is 'ctps', defaults to 0.25, else defaults to 3.0.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        l_freq : float
+            Low pass frequency.
+        h_freq : float
+            High pass frequency.
+        method : {'ctps', 'correlation'}
+            The method used for detection. If 'ctps', cross-trial phase
+            statistics [1] are used to detect ECG related components.
+            Thresholding is then based on the significance value of a Kuiper
+            statistic.
+            If 'correlation', detection is based on Pearson correlation
+            between the filtered data and the filtered ECG channel.
+            Thresholding is based on iterative z-scoring. The above
+            threshold components will be masked and the z-score will
+            be recomputed until no supra-threshold component remains.
+            Defaults to 'ctps'.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        ecg_idx : list of int
+            The indices of ECG related components, sorted by score.
+        scores : np.ndarray of float, shape (ica.n_components_)
+            The correlation scores.
+
+        References
+        ----------
+        [1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
+            M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
+            and phase statistics for complete artifact removal in independent
+            components of neuromagnetic recordings. Biomedical
+            Engineering, IEEE Transactions on 55 (10), 2353-2362.
+        """
+        if verbose is None:
+            verbose = self.verbose
+        try:
+            idx_ecg = _get_ecg_channel_index(ch_name, inst)
+        except RuntimeError:
+            idx_ecg = []
+        if not np.any(idx_ecg):
+            ecg, times = _make_ecg(inst, start, stop, verbose)
+            ch_name = 'ECG'
+        else:
+            ecg = inst.ch_names[idx_ecg]
+
+        # some magic we need inevitably ...
+        if inst.ch_names != self.ch_names:
+            inst = inst.pick_channels(self.ch_names, copy=True)
+
+        if method == 'ctps':
+            if threshold is None:
+                threshold = 0.25
+            if isinstance(inst, _BaseRaw):
+                sources = self.get_sources(create_ecg_epochs(inst)).get_data()
+            elif isinstance(inst, _BaseEpochs):
+                sources = self.get_sources(inst).get_data()
+            else:
+                raise ValueError('With `ctps` only Raw and Epochs input is '
+                                 'supported')
+            _, p_vals, _ = ctps(sources)
+            scores = p_vals.max(-1)
+            ecg_idx = np.where(scores >= threshold)[0]
+        elif method == 'correlation':
+            if threshold is None:
+                threshold = 3.0
+            scores = self.score_sources(inst, target=ecg,
+                                        score_func='pearsonr',
+                                        start=start, stop=stop,
+                                        l_freq=l_freq, h_freq=h_freq,
+                                        verbose=verbose)
+            ecg_idx = find_outliers(scores, threshold=threshold)
+        else:
+            raise ValueError('Method "%s" not supported.' % method)
+        # sort indices by scores
+        ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
+        return list(ecg_idx), scores
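+
+    # A minimal usage sketch (assumes a fitted `ica` and a matching `raw`):
+    #
+    #     >>> ecg_inds, scores = ica.find_bads_ecg(raw, method='ctps')
+    #     >>> ica.exclude += ecg_inds  # mark components for removal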
+
+    @verbose
+    def find_bads_eog(self, inst, ch_name=None, threshold=3.0,
+                      start=None, stop=None, l_freq=1, h_freq=10,
+                      verbose=None):
+        """Detect EOG related components using correlation
+
+        Detection is based on Pearson correlation between the
+        filtered data and the filtered EOG channel.
+        Thresholding is based on adaptive z-scoring. The above threshold
+        components will be masked and the z-score will be recomputed
+        until no supra-threshold component remains.
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            Object to compute sources from.
+        ch_name : str
+            The name of the channel to use for EOG peak detection.
+            The argument is mandatory if the dataset contains no EOG
+            channels.
+        threshold : int | float
+            The value above which a feature is classified as outlier.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        l_freq : float
+            Low pass frequency.
+        h_freq : float
+            High pass frequency.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        eog_idx : list of int
+            The indices of EOG related components, sorted by score.
+        scores : np.ndarray of float, shape (ica.n_components_) | list of array
+            The correlation scores.
+        """
+        if verbose is None:
+            verbose = self.verbose
+
+        eog_inds = _get_eog_channel_index(ch_name, inst)
+        if len(eog_inds) > 2:
+            eog_inds = eog_inds[:1]
+            logger.info('Using EOG channel %s' % inst.ch_names[eog_inds[0]])
+        scores, eog_idx = [], []
+        eog_chs = [inst.ch_names[k] for k in eog_inds]
+
+        # some magic we need inevitably ...
+        # get targets before equalizing
+        targets = [self._check_target(k, inst, start, stop) for k in eog_chs]
+
+        if inst.ch_names != self.ch_names:
+            inst = inst.pick_channels(self.ch_names, copy=True)
+
+        for eog_ch, target in zip(eog_chs, targets):
+            scores += [self.score_sources(inst, target=target,
+                                          score_func='pearsonr',
+                                          start=start, stop=stop,
+                                          l_freq=l_freq, h_freq=h_freq,
+                                          verbose=verbose)]
+            eog_idx += [find_outliers(scores[-1], threshold=threshold)]
+
+        # remove duplicates but keep order by score, even across multiple
+        # EOG channels
+        scores_ = np.concatenate([scores[ii][inds]
+                                  for ii, inds in enumerate(eog_idx)])
+        eog_idx_ = np.concatenate(eog_idx)[np.abs(scores_).argsort()[::-1]]
+
+        eog_idx_unique = list(np.unique(eog_idx_))
+        eog_idx = []
+        for i in eog_idx_:
+            if i in eog_idx_unique:
+                eog_idx.append(i)
+                eog_idx_unique.remove(i)
+        if len(scores) == 1:
+            scores = scores[0]
+
+        return eog_idx, scores
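+
+    # A minimal usage sketch (assumes a fitted `ica` and a matching `raw`
+    # with an EOG channel named 'EOG 061'):
+    #
+    #     >>> eog_inds, scores = ica.find_bads_eog(raw, ch_name='EOG 061')
+    #     >>> ica.exclude += eog_inds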
+
+    def apply(self, inst, include=None, exclude=None,
+              n_pca_components=None, start=None, stop=None,
+              copy=False):
+        """Remove selected components from the signal.
+
+        Given the unmixing matrix, transform data,
+        zero out components, and inverse transform the data.
+        This procedure will reconstruct M/EEG signals from which
+        the dynamics described by the excluded components are subtracted.
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            The data to be processed.
+        include : array_like of int
+            The indices referring to columns in the unmixing matrix. The
+            components to be kept.
+        exclude : array_like of int
+            The indices referring to columns in the unmixing matrix. The
+            components to be zeroed out.
+        n_pca_components : int | float | None
+            The number of PCA components to be kept, either absolute (int)
+            or percentage of the explained variance (float). If None (default),
+            all PCA components will be used.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        copy : bool
+            Whether to return a copy or whether to apply the solution in place.
+            Defaults to False.
+        """
+        if isinstance(inst, _BaseRaw):
+            out = self._apply_raw(raw=inst, include=include,
+                                  exclude=exclude,
+                                  n_pca_components=n_pca_components,
+                                  start=start, stop=stop, copy=copy)
+        elif isinstance(inst, _BaseEpochs):
+            out = self._apply_epochs(epochs=inst, include=include,
+                                     exclude=exclude,
+                                     n_pca_components=n_pca_components,
+                                     copy=copy)
+        elif isinstance(inst, Evoked):
+            out = self._apply_evoked(evoked=inst, include=include,
+                                     exclude=exclude,
+                                     n_pca_components=n_pca_components,
+                                     copy=copy)
+        else:
+            raise ValueError('Data input must be of Raw, Epochs or Evoked '
+                             'type')
+        return out
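+
+    # A minimal usage sketch (assumes a fitted `ica` with components marked
+    # in `ica.exclude` and a preloaded `raw`):
+    #
+    #     >>> raw_clean = ica.apply(raw, copy=True)  # original left intact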
+
+    def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop,
+                   copy=True):
+        """Aux method"""
+        if not raw.preload:
+            raise ValueError('Raw data must be preloaded to apply ICA')
+
+        if exclude is None:
+            exclude = list(set(self.exclude))
+        else:
+            exclude = list(set(self.exclude + exclude))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        start, stop = _check_start_stop(raw, start, stop)
+
+        picks = pick_types(raw.info, meg=False, include=self.ch_names,
+                           exclude='bads')
+
+        data = raw[picks, start:stop][0]
+        data, _ = self._pre_whiten(data, raw.info, picks)
+
+        data = self._pick_sources(data, include, exclude)
+
+        if copy is True:
+            raw = raw.copy()
+
+        raw[picks, start:stop] = data
+        return raw
+
+    def _apply_epochs(self, epochs, include, exclude,
+                      n_pca_components, copy):
+
+        if not epochs.preload:
+            raise ValueError('Epochs must be preloaded to apply ICA')
+
+        picks = pick_types(epochs.info, meg=False, ref_meg=False,
+                           include=self.ch_names,
+                           exclude='bads')
+
+        # special case where epochs come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide Epochs compatible with '
+                               'ica.ch_names' % (len(self.ch_names),
+                                                 len(picks)))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        data = np.hstack(epochs.get_data()[:, picks])
+        data, _ = self._pre_whiten(data, epochs.info, picks)
+        data = self._pick_sources(data, include=include, exclude=exclude)
+
+        if copy is True:
+            epochs = epochs.copy()
+
+        # restore epochs, channels, tsl order
+        epochs._data[:, picks] = np.array(np.split(data,
+                                          len(epochs.events), 1))
+        epochs.preload = True
+
+        return epochs
+
+    def _apply_evoked(self, evoked, include, exclude,
+                      n_pca_components, copy):
+
+        picks = pick_types(evoked.info, meg=False, ref_meg=False,
+                           include=self.ch_names,
+                           exclude='bads')
+
+        # special case where evoked come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Evoked does not match fitted data: %i channels'
+                               ' fitted but %i channels supplied. \nPlease '
+                               'provide an Evoked object that\'s compatible '
+                               'with ica.ch_names' % (len(self.ch_names),
+                                                      len(picks)))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        data = evoked.data[picks]
+        data, _ = self._pre_whiten(data, evoked.info, picks)
+        data = self._pick_sources(data, include=include,
+                                  exclude=exclude)
+
+        if copy is True:
+            evoked = evoked.copy()
+
+        # restore evoked
+        evoked.data[picks] = data
+
+        return evoked
+
+    def _pick_sources(self, data, include, exclude):
+        """Aux function"""
+        if exclude is None:
+            exclude = self.exclude
+        else:
+            exclude = list(set(self.exclude + list(exclude)))
+
+        _n_pca_comp = _check_n_pca_components(self, self.n_pca_components,
+                                              self.verbose)
+
+        if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):
+            raise ValueError('n_pca_components must be >= '
+                             'n_components and <= max_pca_components.')
+
+        n_components = self.n_components_
+        logger.info('Transforming to ICA space (%i components)' % n_components)
+
+        # Apply first PCA
+        if self.pca_mean_ is not None:
+            data -= self.pca_mean_[:, None]
+
+        pca_data = fast_dot(self.pca_components_, data)
+        # Apply unmixing to low dimension PCA
+        sources = fast_dot(self.unmixing_matrix_, pca_data[:n_components])
+
+        if include not in (None, []):
+            mask = np.ones(len(sources), dtype=np.bool)
+            mask[np.unique(include)] = False
+            sources[mask] = 0.
+            logger.info('Zeroing out %i ICA components' % mask.sum())
+        elif exclude not in (None, []):
+            exclude_ = np.unique(exclude)
+            sources[exclude_] = 0.
+            logger.info('Zeroing out %i ICA components' % len(exclude_))
+        logger.info('Inverse transforming to PCA space')
+        pca_data[:n_components] = fast_dot(self.mixing_matrix_, sources)
+        data = fast_dot(self.pca_components_[:n_components].T,
+                        pca_data[:n_components])
+        logger.info('Reconstructing sensor space signals from %i PCA '
+                    'components' % max(_n_pca_comp, n_components))
+        if _n_pca_comp > n_components:
+            data += fast_dot(self.pca_components_[n_components:_n_pca_comp].T,
+                             pca_data[n_components:_n_pca_comp])
+
+        if self.pca_mean_ is not None:
+            data += self.pca_mean_[:, None]
+
+        # restore scaling
+        if self.noise_cov is None:  # revert standardization
+            data *= self._pre_whitener
+        else:
+            data = fast_dot(linalg.pinv(self._pre_whitener), data)
+
+        return data
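
At its core `_pick_sources` is plain linear algebra: unmix, zero the
unwanted rows, remix. A self-contained sketch of that idea with random
stand-in matrices (no whitening or PCA tail):

    import numpy as np

    rng = np.random.RandomState(0)
    unmixing = rng.randn(3, 3)          # stand-in for unmixing_matrix_
    mixing = np.linalg.pinv(unmixing)   # stand-in for mixing_matrix_
    data = rng.randn(3, 100)            # channels x times
    sources = np.dot(unmixing, data)
    sources[[1]] = 0.                   # zero out component 1
    cleaned = np.dot(mixing, sources)   # back-project without it
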
+
+    @verbose
+    def save(self, fname):
+        """Store ICA solution into a fiff file.
+
+        Parameters
+        ----------
+        fname : str
+            The absolute path of the file name to save the ICA solution into.
+            The file name should end with -ica.fif or -ica.fif.gz.
+        """
+        if self.current_fit == 'unfitted':
+            raise RuntimeError('No fit available. Please first fit ICA')
+
+        check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz'))
+
+        logger.info('Writing ica solution to %s...' % fname)
+        fid = start_file(fname)
+
+        try:
+            _write_ica(fid, self)
+        except Exception as inst:
+            os.remove(fname)
+            raise inst
+        end_file(fid)
+
+        return self
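
Together with `read_ica` further down, `save` gives a simple round trip;
a sketch assuming an already fitted `ica`:

    from mne.preprocessing import read_ica

    ica.save('sample-ica.fif')  # name must end in -ica.fif or -ica.fif.gz
    ica_restored = read_ica('sample-ica.fif')
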
+
+    def plot_components(self, picks=None, ch_type='mag', res=64, layout=None,
+                        vmin=None, vmax=None, cmap='RdBu_r', sensors='k,',
+                        colorbar=False, title=None, show=True, outlines='head',
+                        contours=6, image_interp='bilinear'):
+        """Project unmixing matrix on interpolated sensor topogrpahy.
+
+        Parameters
+        ----------
+        picks : int | array-like | None
+            The indices of the sources to be plotted.
+            If None, all are plotted in batches of 20.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout is
+            inferred from the data.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.max(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses).
+        colorbar : bool
+            Plot a colorbar.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        title : str | None
+            The figure title. If None, a default is provided.
+        show : bool
+            Call pyplot.show() at the end.
+        outlines : 'head' | dict | None
+            The outlines to be drawn. If 'head', a head scheme will be drawn.
+            If dict, each key refers to a tuple of x and y positions. The
+            values in 'mask_pos' will serve as image mask. If None,
+            nothing will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw. If 0, no contours will
+            be drawn.
+        image_interp : str
+            The image interpolation to be used. All matplotlib options are
+            accepted.
 
-        out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
-                                   axis=1) if len(picks) > 0 else sources
+        Returns
+        -------
+        fig : instance of matplotlib.pyplot.Figure
+            The figure object.
+        """
+        return plot_ica_components(self, picks=picks,
+                                   ch_type=ch_type,
+                                   res=res, layout=layout, vmin=vmin,
+                                   vmax=vmax, cmap=cmap,
+                                   sensors=sensors, colorbar=colorbar,
+                                   title=title, show=show,
+                                   outlines=outlines, contours=contours,
+                                   image_interp=image_interp)
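
A typical call, assuming a fitted `ica` on MEG data (the number of
components shown is arbitrary):

    ica.plot_components(picks=list(range(10)), ch_type='mag', colorbar=True)
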
 
-        self._export_info(out.info, epochs, picks)
-        out.preload = True
-        out.raw = None
-        out._projector = None
+    def plot_sources(self, inst, picks=None, exclude=None, start=None,
+                     stop=None, show=True, title=None):
+        """Plot estimated latent sources given the unmixing matrix.
 
-        return out
+        Typical use cases:
+
+        1. plot evolution of latent sources over time (Raw input)
+        2. plot latent sources around event-related time windows (Epochs input)
+        3. plot time-locking in ICA space (Evoked input)
 
-    def plot_sources_raw(self, raw, order=None, start=None, stop=None,
-                         n_components=None, source_idx=None, ncol=3, nrow=None,
-                         title=None, show=True):
-        """Create panel plots of ICA sources. Wrapper around viz.plot_ica_panel
 
         Parameters
         ----------
-        raw : instance of mne.fiff.Raw
-            Raw object to plot the sources from.
-        order : ndarray | None.
-            Index of length `n_components_`. If None, plot will show the
+        inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
+            The object to plot the sources from.
+        picks : ndarray | None.
+            The components to be displayed. If None, plot will show the
             sources in the order as fitted.
-            Example::
-
-                arg_sort = np.argsort(np.var(sources)).
-
         start : int
             X-axis start index. If None, from the beginning.
         stop : int
             X-axis stop index. If None, to the end.
-        n_components : int
-            Number of components fitted.
-        source_idx : array-like
-            Indices for subsetting the sources.
-        ncol : int | None
-            Number of panel-columns. If None, the entire data will be plotted.
-        nrow : int | None
-            Number of panel-rows. If None, the entire data will be plotted.
+        exclude : array_like of int
+            The components marked for exclusion. If None (default), ICA.exclude
+            will be used.
         title : str | None
             The figure title. If None a default is provided.
         show : bool
@@ -658,79 +1358,146 @@ class ICA(object):
         Returns
         -------
         fig : instance of pyplot.Figure
+            The figure.
         """
-        start, stop = _check_start_stop(raw, start, stop)
-        sources = self.get_sources_raw(raw, start=start, stop=stop)
-
-        if order is not None:
-            if np.isscalar(order):
-                order = [order]
-            sources = sources[order]
-        fig = plot_ica_panel(sources, n_components=n_components,
-                             source_idx=source_idx, ncol=ncol, nrow=nrow,
-                             title=title)
-        if show:
-            import matplotlib.pyplot as plt
-            plt.show()
 
-        return fig
+        return plot_ica_sources(self, inst=inst, picks=picks, exclude=exclude,
+                                title=title, start=start, stop=stop, show=show)
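
For example, plotting a few source time courses from a hypothetical
preloaded `raw`:

    ica.plot_sources(raw, picks=[0, 1, 2])
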
 
-    def plot_sources_epochs(self, epochs, order=None, epoch_idx=None,
-                            start=None, stop=None, n_components=None,
-                            source_idx=None, ncol=3, nrow=None, title=None,
-                            show=True):
-        """Create panel plots of ICA sources. Wrapper around viz.plot_ica_panel
+    def plot_scores(self, scores, exclude=None, axhline=None,
+                    title='ICA component scores', figsize=(12, 6)):
+        """Plot scores related to detected components.
+
+        Use this function to assess how well a score describes outlier
+        sources and how well they were detected.
 
         Parameters
         ----------
-        epochs : instance of mne.Epochs
-            Epochs object to plot the sources from.
-        order : ndarray | None.
-            Index of length n_components. If None, plot will show the sources
-            in the order as fitted.
-            Example: arg_sort = np.argsort(np.var(sources)).
-        epoch_idx : int
-            Index to plot particular epoch.
-        start : int | float | None
-            First sample to include. If None, data will be shown from the first
-            sample.
-        stop : int | float | None
-            Last sample to not include. If None, data will be shown to the last
-            sample.
-        n_components : int
-            Number of components fitted.
-        source_idx : array-like
-            Indices for subsetting the sources.
-        ncol : int
-            Number of panel-columns.
-        nrow : int
-            Number of panel-rows.
-        title : str | None
-            The figure title. If None a default is provided.
-        show : bool
-            If True, plot will be shown, else just the figure is returned.
+        scores : array_like of float, shape (n_ica_components,) | list of array
+            Scores based on arbitrary metric to characterize ICA components.
+        exclude : array_like of int
+            The components marked for exclusion. If None (default), ICA.exclude
+            will be used.
+        axhline : float
+            Draw a horizontal line at this value, e.g. to visualize a
+            rejection threshold.
+        title : str
+            The figure title.
+        figsize : tuple of int
+            The figure size. Defaults to (12, 6).
+
+        Returns
+        -------
+        fig : instance of matplotlib.pyplot.Figure
+            The figure object.
+        """
+        return plot_ica_scores(ica=self, scores=scores, exclude=exclude,
+                               axhline=axhline, title=title, figsize=figsize)
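
A hedged end-to-end sketch: score components against an EOG channel with
`score_sources` (part of this class), then visualize; the channel name and
threshold are made up:

    scores = ica.score_sources(raw, target='EOG 061',
                               score_func='pearsonr')
    ica.plot_scores(scores, exclude=ica.exclude, axhline=0.25)
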
+
+    def plot_overlay(self, inst, exclude=None, start=None, stop=None,
+                     title=None):
+        """Overlay of raw and cleaned signals given the unmixing matrix.
+
+        This method helps visualizing signal quality and arficat rejection.
+
+        Parameters
+        ----------
+        inst : instance of mne.io.Raw or mne.Evoked
+            The signals to be compared given the ICA solution. If Raw input,
+            the raw data are displayed before and after cleaning. In a second
+            panel, the cross-channel average will be displayed. Since dipolar
+            sources will be canceled out, this display is sensitive to
+            artifacts. If Evoked input, butterfly plots for clean and raw
+            signals will be superimposed.
+        exclude : array_like of int
+            The components marked for exclusion. If None (default), ICA.exclude
+            will be used.
+        start : int
+            X-axis start index. If None, from the beginning.
+        stop : int
+            X-axis stop index. If None, to the end.
+        title : str
+            The figure title.
 
         Returns
         -------
         fig : instance of pyplot.Figure
+            The figure.
         """
-        sources = self.get_sources_epochs(epochs, concatenate=True)
-        if order is not None:
-            if np.isscalar(order):
-                order = [order]
-            sources = np.atleast_2d(sources[order])
-        if epoch_idx is not None:
-            warnings.warn('`epochs_idx` is deprecated and will be removed in '
-                          'MNE-Python 0.8. Instead plass indexed epochs.')
-
-        fig = plot_ica_panel(sources, start=start, stop=stop,
-                             n_components=n_components, source_idx=source_idx,
-                             ncol=ncol, nrow=nrow, title=title, show=show)
+        return plot_ica_overlay(self, inst=inst, exclude=exclude, start=start,
+                                stop=stop, title=title)
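
For instance, to eyeball the effect of removing a single (arbitrarily
chosen) component from a hypothetical `raw`:

    ica.plot_overlay(raw, exclude=[0])
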
 
-        return fig
+    @deprecated('`decompose_raw` is deprecated and will be removed in MNE 0.9.'
+                ' Use `fit` instead')
+    @verbose
+    def decompose_raw(self, raw, picks=None, start=None, stop=None,
+                      decim=None, reject=None, flat=None, tstep=2.0,
+                      verbose=None):
+        """This method is deprecated.
+        See ``ICA.fit``
+        """
+        return self.fit(raw, picks, start, stop, decim, reject, flat, tstep,
+                        verbose)
+
+    @deprecated('`decompose_epochs` is deprecated and will be removed in MNE'
+                ' 0.9. Use `fit` instead')
+    @verbose
+    def decompose_epochs(self, epochs, picks=None, decim=None, verbose=None):
+        """This method is deprecated.
+        See ``ICA.fit``
+        """
+        return self._fit_epochs(epochs, picks, decim, verbose)
+
+    @deprecated('`get_sources_raw` is deprecated and will be removed in '
+                'MNE 0.9. Use `get_sources` instead')
+    def get_sources_raw(self, raw, start=None, stop=None):
+        """This method is deprecated.
+        See ``ICA.fit``
+        """
+        return self._transform_raw(raw, start, stop)
+
+    @deprecated('`get_sources_epochs` is deprecated and will be removed in '
+                'MNE 0.9. Use `get_sources` instead')
+    def get_sources_epochs(self, epochs, concatenate=False):
+        """This method is deprecated.
+        See ``ICA.get_sources``
+        """
+        return self._transform_epochs(epochs, concatenate)
+
+    @deprecated('`sources_as_raw` is deprecated and will be removed in '
+                'MNE 0.9. Use `get_sources` instead')
+    def sources_as_raw(self, raw, picks=None, start=None, stop=None):
+        """This method is deprecated
+
+        See ``ICA.get_sources``.
+        """
+        if picks is None:
+            picks = pick_types(raw.info, meg=False, eeg=False, misc=True,
+                               ecg=True, eog=True, stim=True, exclude='bads')
 
+        add_channels = [raw.ch_names[k] for k in picks]
+        return self.get_sources(raw, add_channels, start, stop)
+
+    @deprecated('`sources_as_epochs` is deprecated and will be removed in '
+                'MNE 0.9. Use `get_sources` instead')
+    def sources_as_epochs(self, epochs, picks=None):
+        """This method is deprecated
+
+        See ``ICA.get_sources``.
+        """
+        if picks is None:
+            picks = pick_types(epochs.info, meg=False, eeg=False, misc=True,
+                               ecg=True, eog=True, stim=True, exclude='bads')
+
+        add_channels = [epochs.ch_names[k] for k in picks]
+        return self.get_sources(epochs, add_channels, False)
+
+    @deprecated('`find_sources_raw` is deprecated and will be removed in '
+                'MNE 0.9. Use `find_bads` instead')
     def find_sources_raw(self, raw, target=None, score_func='pearsonr',
-                         start=None, stop=None):
+                         start=None, stop=None, l_freq=None, h_freq=None):
         """Find sources based on own distribution or based on similarity to
         other sources or between source and target.
 
@@ -767,25 +1534,15 @@ class ICA(object):
         scores : ndarray
             scores for each source as returned from score_func
         """
-        start, stop = _check_start_stop(raw, start, stop)
-        sources = self.get_sources_raw(raw=raw, start=start, stop=stop)
-
-        # auto target selection
-        if target is not None:
-            if hasattr(target, 'ndim'):
-                if target.ndim < 2:
-                    target = target.reshape(1, target.shape[-1])
-            if isinstance(target, basestring):
-                pick = _get_target_ch(raw, target)
-                target, _ = raw[pick, start:stop]
-            if sources.shape[1] != target.shape[1]:
-                raise ValueError('Source and targets do not have the same'
-                                 'number of time slices.')
-            target = target.ravel()
-
-        return _find_sources(sources, target, score_func)
-
-    def find_sources_epochs(self, epochs, target=None, score_func='pearsonr'):
+        return self.score_sources(inst=raw, target=target,
+                                  score_func=score_func,
+                                  start=start, stop=stop, l_freq=l_freq,
+                                  h_freq=h_freq)
+
+    @deprecated('`find_sources_epochs` is deprecated and will be removed in '
+                'MNE 0.9. Use `find_bads` instead')
+    def find_sources_epochs(self, epochs, target=None, score_func='pearsonr',
+                            l_freq=None, h_freq=None):
         """Find sources based on relations between source and target
 
         Parameters
@@ -813,22 +1570,12 @@ class ICA(object):
         scores : ndarray
             scores for each source as returned from score_func
         """
-        sources = self.get_sources_epochs(epochs=epochs)
-        # auto target selection
-        if target is not None:
-            if hasattr(target, 'ndim'):
-                if target.ndim < 3:
-                    target = target.reshape(1, 1, target.shape[-1])
-            if isinstance(target, basestring):
-                pick = _get_target_ch(epochs, target)
-                target = epochs.get_data()[:, pick]
-            if sources.shape[2] != target.shape[2]:
-                raise ValueError('Source and targets do not have the same'
-                                 'number of time slices.')
-            target = target.ravel()
-
-        return _find_sources(np.hstack(sources), target, score_func)
+        return self.score_sources(inst=epochs, target=target,
+                                  score_func=score_func, l_freq=l_freq,
+                                  h_freq=h_freq)
 
+    @deprecated('`pick_sources_raw` is deprecated and will be removed in '
+                'MNE 0.9. Use `apply` instead')
     def pick_sources_raw(self, raw, include=None, exclude=None,
                          n_pca_components=None, start=None, stop=None,
                          copy=True):
@@ -865,41 +1612,12 @@ class ICA(object):
         raw : instance of Raw
             raw instance with selected ICA components removed
         """
-        if not raw._preloaded:
-            raise ValueError('raw data should be preloaded to have this '
-                             'working. Please read raw data with '
-                             'preload=True.')
-
-        if self.current_fit != 'raw':
-            raise ValueError('Currently no raw data fitted.'
-                             'Please fit raw data first.')
-
-        if exclude is None:
-            self.exclude = list(set(self.exclude))
-        else:
-            self.exclude = list(set(self.exclude + exclude))
-            logger.info('Adding sources %s to .exclude' % ', '.join(
-                        [str(i) for i in exclude if i not in self.exclude]))
-
-        if n_pca_components is not None:
-            self.n_pca_components = n_pca_components
-
-        start, stop = _check_start_stop(raw, start, stop)
-
-        picks = pick_types(raw.info, meg=False, include=self.ch_names,
-                           exclude='bads')
-
-        data = raw[picks, start:stop][0]
-        data, _ = self._pre_whiten(data, raw.info, picks)
-
-        data = self._pick_sources(data, include, self.exclude)
-
-        if copy is True:
-            raw = raw.copy()
-
-        raw[picks, start:stop] = data
-        return raw
+        return self.apply(inst=raw, include=include, exclude=exclude,
+                          n_pca_components=n_pca_components, start=start,
+                          stop=stop, copy=copy)
 
+    @deprecated('`pick_sources_epochs` is deprecated and will be removed in '
+                'MNE 0.9. Use `apply` instead')
     def pick_sources_epochs(self, epochs, include=None, exclude=None,
                             n_pca_components=None, copy=True):
         """Recompose epochs
@@ -930,75 +1648,52 @@ class ICA(object):
         epochs : instance of Epochs
             Epochs with selected ICA components removed.
         """
-        if not epochs.preload:
-            raise ValueError('epochs should be preloaded to have this '
-                             'working. Please read raw data with '
-                             'preload=True.')
-
-        picks = pick_types(epochs.info, meg=False, ref_meg=False,
-                           include=self.ch_names,
-                           exclude='bads')
-
-        # special case where epochs come picked but fit was 'unpicked'.
-        if len(picks) != len(self.ch_names):
-            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
-                               'fitted but %i channels supplied. \nPlease '
-                               'provide Epochs compatible with '
-                               'ica.ch_names' % (len(self.ch_names),
-                                                 len(picks)))
-
-        if n_pca_components is not None:
-            self.n_pca_components = n_pca_components
+        return self.apply(inst=epochs, include=include,
+                          exclude=exclude, n_pca_components=n_pca_components,
+                          copy=copy)
 
-        data = np.hstack(epochs.get_data()[:, picks])
-        data, _ = self._pre_whiten(data, epochs.info, picks)
-        data = self._pick_sources(data, include=include,
-                                  exclude=exclude)
+    @deprecated('`plot_topomap` is deprecated and will be removed in '
+                'MNE 0.9. Use `plot_components` instead')
+    def plot_topomap(self, source_idx, ch_type='mag', res=64, layout=None,
+                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
+                     show=True):
+        """This method is deprecatd
 
-        if copy is True:
-            epochs = epochs.copy()
+        See ``ica.plot_components``.
+        """
+        return self.plot_components(picks=source_idx,
+                                    ch_type=ch_type,
+                                    res=res, layout=layout, vmax=vmax,
+                                    cmap=cmap,
+                                    sensors=sensors, colorbar=colorbar,
+                                    show=show)
+
+    @deprecated('`plot_sources_raw` is deprecated and will be removed in '
+                'MNE 0.9. Use `plot_sources` instead')
+    def plot_sources_raw(self, raw, order=None, start=None, stop=None,
+                         n_components=None, source_idx=None, ncol=3, nrow=None,
+                         title=None, show=True):
+        """This method is deprecated.
 
-        # restore epochs, channels, tsl order
-        epochs._data[:, picks] = np.array(np.split(data,
-                                          len(epochs.events), 1))
-        epochs.preload = True
+        See ``ica.plot_sources``
+        """
+        # `plot_sources` takes no `ncol` argument, so it is not forwarded
+        fig = self.plot_sources(inst=raw, picks=source_idx,
+                                title=title, show=show)
 
-        return epochs
+        return fig
 
-    def plot_topomap(self, source_idx, ch_type='mag', res=500, layout=None,
-                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
-                     show=True):
-        """Plot topographic map of ICA source
+    @deprecated('`plot_sources_epochs` is deprecated and will be removed in '
+                'MNE 0.9. Use `plot_sources` instead')
+    def plot_sources_epochs(self, epochs, order=None, epoch_idx=None,
+                            start=None, stop=None, n_components=None,
+                            source_idx=None, ncol=3, nrow=None, title=None,
+                            show=True):
+        """This method is deprecated.
 
-        Parameters
-        ----------
-        source_idx : int | array-like
-            The indices of the sources to be plotted.
-        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
-            The channel type to plot. For 'grad', the gradiometers are
-            collected in pairs and the RMS for each pair is plotted.
-        layout : None | Layout
-            Layout instance specifying sensor positions (does not need to
-            be specified for Neuromag data). If possible, the correct layout is
-            inferred from the data.
-        vmax : scalar
-            The value specfying the range of the color scale (-vmax to +vmax).
-            If None, the largest absolute value in the data is used.
-        cmap : matplotlib colormap
-            Colormap.
-        sensors : bool | str
-            Add markers for sensor locations to the plot. Accepts matplotlib
-            plot format string (e.g., 'r+' for red plusses).
-        colorbar : bool
-            Plot a colorbar.
-        res : int
-            The resolution of the topomap image (n pixels along each side).
-        show : bool
-            Call pyplot.show() at the end.
+        See ``ica.plot_sources``
         """
-        return plot_ica_topomap(self, source_idx=source_idx, ch_type=ch_type,
-                                res=res, layout=layout, vmax=vmax, cmap=cmap,
-                                sensors=sensors, colorbar=colorbar, show=show)
+        inst = epochs if epoch_idx is None else epochs[epoch_idx]
+        return plot_ica_sources(self, inst=inst, picks=order,
+                                start=start, stop=stop)
 
     def detect_artifacts(self, raw, start_find=None, stop_find=None,
                          ecg_ch=None, ecg_score_func='pearsonr',
@@ -1009,6 +1704,10 @@ class ICA(object):
                          add_nodes=None):
         """Run ICA artifacts detection workflow.
 
+        Note. This is still experimental and will most likely change over
+        the next releases. For maximum control, use the workflow exposed in
+        the examples.
+
         Hints and caveats:
         - It is highly recommended to bandpass filter ECG and EOG
         data and pass them instead of the channel names as ecg_ch and eog_ch
@@ -1106,122 +1805,6 @@ class ICA(object):
 
         return self
 
-    def _pre_whiten(self, data, info, picks):
-        """Aux function"""
-        if self.noise_cov is None:  # use standardization as whitener
-            pre_whitener = np.atleast_1d(np.std(data)) ** -1
-            data *= pre_whitener
-        elif not hasattr(self, '_pre_whitener'):  # pick cov
-            ncov = deepcopy(self.noise_cov)
-            if data.shape[0] != ncov['data'].shape[0]:
-                ncov['data'] = ncov['data'][picks][:, picks]
-                assert data.shape[0] == ncov['data'].shape[0]
-
-            pre_whitener, _ = compute_whitener(ncov, info, picks)
-            data = fast_dot(pre_whitener, data)
-        else:
-            data = fast_dot(self._pre_whitener, data)
-            pre_whitener = self._pre_whitener
-
-        return data, pre_whitener
-
-    def _decompose(self, data, max_pca_components, fit_type):
-        """Aux function """
-        from sklearn.decomposition import RandomizedPCA
-
-        # XXX fix copy==True later. Bug in sklearn, see PR #2273
-        pca = RandomizedPCA(n_components=max_pca_components, whiten=True,
-                            copy=True)
-        data = pca.fit_transform(data.T)
-
-        if isinstance(self.n_components, float):
-            logger.info('Selecting PCA components by explained variance.')
-            n_components_ = np.sum(pca.explained_variance_ratio_.cumsum()
-                                   <= self.n_components)
-            sel = slice(n_components_)
-        else:
-            logger.info('Selecting PCA components by number.')
-            if self.n_components is not None:  # normal n case
-                sel = slice(self.n_components)
-            else:  # None case
-                logger.info('Using all PCA components.')
-                sel = slice(len(pca.components_))
-
-        # the things to store for PCA
-        self.pca_mean_ = pca.mean_
-        self.pca_components_ = pca.components_
-        # unwhiten pca components and put scaling in unmixintg matrix later.
-        self.pca_explained_variance_ = exp_var = pca.explained_variance_
-        self.pca_components_ *= np.sqrt(exp_var[:, None])
-        del pca
-        # update number of components
-        self.n_components_ = sel.stop
-        if self.n_pca_components > len(self.pca_components_):
-            self.n_pca_components = len(self.pca_components_)
-
-        # Take care of ICA
-        from sklearn.decomposition import FastICA  # to avoid strong dep.
-        ica = FastICA(algorithm=self.algorithm, fun=self.fun,
-                      fun_args=self.fun_args, whiten=False,
-                      random_state=self.random_state)
-        ica.fit(data[:, sel])
-
-        # get unmixing and add scaling
-        self.unmixing_matrix_ = getattr(ica, 'components_', 'unmixing_matrix_')
-        self.unmixing_matrix_ /= np.sqrt(exp_var[sel])[None, :]
-        self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
-        self.current_fit = fit_type
-
-    def _pick_sources(self, data, include, exclude):
-        """Aux function"""
-        if exclude is None:
-            exclude = self.exclude
-        else:
-            exclude = self.exclude = list(set(self.exclude + list(exclude)))
-
-        _n_pca_comp = _check_n_pca_components(self, self.n_pca_components,
-                                              self.verbose)
-
-        if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):
-            raise ValueError('n_pca_components must be between '
-                             'n_components and max_pca_components.')
-
-        n_components = self.n_components_
-        n_pca_components = self.n_pca_components
-
-        # Apply first PCA
-        if self.pca_mean_ is not None:
-            data -= self.pca_mean_[:, None]
-
-        pca_data = fast_dot(self.pca_components_, data)
-        # Apply unmixing to low dimension PCA
-        sources = fast_dot(self.unmixing_matrix_, pca_data[:n_components])
-
-        if include not in (None, []):
-            mask = np.ones(len(sources), dtype=np.bool)
-            mask[np.unique(include)] = False
-            sources[mask] = 0.
-        elif exclude not in (None, []):
-            sources[np.unique(exclude)] = 0.
-
-        pca_data[:n_components] = fast_dot(self.mixing_matrix_, sources)
-        data = fast_dot(self.pca_components_[:n_components].T,
-                        pca_data[:n_components])
-        if n_pca_components > n_components:
-            data += fast_dot(self.pca_components_[n_components:_n_pca_comp].T,
-                             pca_data[n_components:_n_pca_comp])
-
-        if self.pca_mean_ is not None:
-            data += self.pca_mean_[:, None]
-
-        # restore scaling
-        if self.noise_cov is None:  # revert standardization
-            data /= self._pre_whitener[:, None]
-        else:
-            data = fast_dot(linalg.pinv(self._pre_whitener), data)
-
-        return data
-
 
 @verbose
 def _check_n_pca_components(ica, _n_pca_comp, verbose=None):
@@ -1233,14 +1816,17 @@ def _check_n_pca_components(ica, _n_pca_comp, verbose=None):
         logger.info('Selected %i PCA components by explained '
                     'variance' % _n_pca_comp)
     elif _n_pca_comp is None:
+        _n_pca_comp = ica.max_pca_components
+    elif _n_pca_comp < ica.n_components_:
         _n_pca_comp = ica.n_components_
+
     return _n_pca_comp
 
 
 def _check_start_stop(raw, start, stop):
     """Aux function"""
     return [c if (isinstance(c, int) or c is None) else
-            raw.time_as_index(c)[0] for c in start, stop]
+            raw.time_as_index(c)[0] for c in (start, stop)]
 
 
 @verbose
@@ -1344,7 +1930,7 @@ def _get_target_ch(container, target):
 
 def _find_sources(sources, target, score_func):
     """Aux function"""
-    if isinstance(score_func, basestring):
+    if isinstance(score_func, string_types):
         score_func = score_funcs.get(score_func, score_func)
 
     if not callable(score_func):
@@ -1379,7 +1965,7 @@ def _deserialize(str_, outer_sep=';', inner_sep=':'):
     for mapping in str_.split(outer_sep):
         k, v = mapping.split(inner_sep)
         vv = json.loads(v)
-        out[k] = vv if not isinstance(vv, unicode) else str(vv)
+        out[k] = vv if not isinstance(vv, text_type) else str(vv)
 
     return out
 
@@ -1460,18 +2046,20 @@ def _write_ica(fid, ica):
 
 @verbose
 def read_ica(fname):
-    """Restore ICA sessions from fif file.
+    """Restore ICA solution from fif file.
 
     Parameters
     ----------
     fname : str
         Absolute path to fif file containing ICA matrices.
+        The file name should end with -ica.fif or -ica.fif.gz.
 
     Returns
     -------
     ica : instance of ICA
         The ICA estimator.
     """
+    check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz'))
 
     logger.info('Reading %s ...' % fname)
     fid, tree, _ = fiff_open(fname)
@@ -1524,12 +2112,12 @@ def read_ica(fname):
 
     fid.close()
 
-    ica_init, ica_misc = [_deserialize(k) for k in ica_init, ica_misc]
+    ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]
     current_fit = ica_init.pop('current_fit')
     if ica_init['noise_cov'] == Covariance.__name__:
         logger.info('Reading whitener drawn from noise covariance ...')
 
-    logger.info('Now restoring ICA session ...')
+    logger.info('Now restoring ICA solution ...')
     # make sure dtypes are np.float64 to satisfy fast_dot
     f = lambda x: x.astype(np.float64)
     ica = ICA(**ica_init)
@@ -1556,8 +2144,9 @@ _ica_node = namedtuple('Node', 'name target score_func criterion')
 
 
 def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
-                    ecg_criterion, eog_ch, eog_score_func, eog_criterion,
-                    skew_criterion, kurt_criterion, var_criterion, add_nodes):
+                      ecg_criterion, eog_ch, eog_score_func, eog_criterion,
+                      skew_criterion, kurt_criterion, var_criterion,
+                      add_nodes):
     """Aux Function"""
 
     nodes = []
@@ -1584,9 +2173,9 @@ def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
         nodes.extend(add_nodes)
 
     for node in nodes:
-        scores = ica.find_sources_raw(raw, start=start_find, stop=stop_find,
-                                      target=node.target,
-                                      score_func=node.score_func)
+        scores = ica.score_sources(raw, start=start_find, stop=stop_find,
+                                   target=node.target,
+                                   score_func=node.score_func)
         if isinstance(node.criterion, float):
             found = list(np.where(np.abs(scores) > node.criterion)[0])
         else:
@@ -1675,9 +2264,9 @@ def run_ica(raw, n_components, max_pca_components=100,
         {'alpha' : 1.0}
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
-    picks : array-like
+    picks : array-like of int
         Channels to be included. This selection remains throughout the
-        initialized ICA session. If None only good data channels are used.
+        initialized ICA solution. If None only good data channels are used.
     start : int | float | None
         First sample to include for decomposition. If float, data will be
         interpreted as time in seconds. If None, data will be used from the
@@ -1772,3 +2361,21 @@ def run_ica(raw, n_components, max_pca_components=100,
                       var_criterion=var_criterion,
                       add_nodes=add_nodes)
     return ica
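
A minimal sketch of this convenience function, assuming a preloaded
`raw`; passing a float selects PCA components by explained variance:

    from mne.preprocessing import run_ica

    ica = run_ica(raw, n_components=0.95)
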
+
+
+@verbose
+def _band_pass_filter(ica, sources, target, l_freq, h_freq, verbose=None):
+    if l_freq is not None and h_freq is not None:
+        logger.info('... filtering ICA sources')
+        # use FFT-based filtering here; a steeper filter is better
+        sources = band_pass_filter(sources, ica.info['sfreq'],
+                                   l_freq, h_freq,  method='fft',
+                                   verbose=verbose)
+        logger.info('... filtering target')
+        target = band_pass_filter(target, ica.info['sfreq'],
+                                  l_freq, h_freq,  method='fft',
+                                  verbose=verbose)
+    elif l_freq is not None or h_freq is not None:
+        raise ValueError('Must specify both l_freq and h_freq')
+
+    return sources, target
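
This helper is what backs the new `l_freq`/`h_freq` arguments: sources and
target are band-pass filtered before scoring. A sketch with a hypothetical
ECG channel and a typical cardiac band:

    scores = ica.score_sources(raw, target='ECG 063', score_func='pearsonr',
                               l_freq=8., h_freq=16.)
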
diff --git a/mne/preprocessing/infomax_.py b/mne/preprocessing/infomax_.py
new file mode 100644
index 0000000..ee06e9b
--- /dev/null
+++ b/mne/preprocessing/infomax_.py
@@ -0,0 +1,276 @@
+# Authors: Lukas Breuer <l.breuer at fz-juelich.de>
+#          Juergen Dammers <j.dammers at fz-juelich.de>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import math
+
+import numpy as np
+from scipy.stats import kurtosis
+
+from ..utils import logger, verbose, check_random_state
+
+
+@verbose
+def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
+            anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
+            kurt_size=6000, ext_blocks=1, max_iter=200,
+            random_state=None, verbose=None):
+    """Run the (extended) Infomax ICA decomposition on raw data
+
+    based on the publications of Bell & Sejnowski (1995, Infomax)
+    and Lee, Girolami & Sejnowski (1999, extended Infomax).
+
+    Parameters
+    ----------
+    data : np.ndarray, shape (n_samples, n_features)
+        The data to unmix.
+    weights : np.ndarray, shape (n_features, n_features)
+        The initialized unmixing matrix. Defaults to None. If None, the
+        identity matrix is used.
+    l_rate : float
+        This quantity indicates the relative size of the change in weights.
+        Note. Smaller learning rates will slow down the procedure.
+        Defaults to 0.01 / log(n_features ** 2.0).
+    block : int
+        The block size of randomly chosen data segments.
+        Defaults to floor(sqrt(n_samples / 3.0)).
+    w_change : float
+        The change at which to stop iteration. Defaults to 1e-12.
+    anneal_deg : float
+        The angle (in degrees) at which the learning rate will be reduced.
+        Defaults to 60.0.
+    anneal_step : float
+        The factor by which the learning rate will be reduced once
+        ``anneal_deg`` is exceeded: ``l_rate *= anneal_step``.
+        Defaults to 0.9.
+    extended : bool
+        Whether to use the extended Infomax algorithm or not. Defaults to
+        False.
+    n_subgauss : int
+        The number of subgaussian components. Only considered for extended
+        Infomax.
+    kurt_size : int
+        The window size for kurtosis estimation. Only considered for extended
+        Infomax.
+    ext_blocks : int
+        The number of blocks after which to recompute Kurtosis.
+        Only considered for extended Infomax.
+    max_iter : int
+        The maximum number of iterations. Defaults to 200.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
+        The linear unmixing operator.
+    """
+    rng = check_random_state(random_state)
+
+    # define some default parameters
+    max_weight = 1e8
+    restart_fac = 0.9
+    min_l_rate = 1e-10
+    blowup = 1e4
+    blowup_fac = 0.5
+    n_small_angle = 20
+    degconst = 180.0 / np.pi
+
+    # for extended Infomax
+    extmomentum = 0.5
+    signsbias = 0.02
+    signcount_threshold = 25
+    signcount_step = 2
+    if ext_blocks > 0:  # allow not to recompute kurtosis
+        n_subgauss = 1  # but initialize n_subgauss to 1 if you recompute
+
+    # check data shape
+    n_samples, n_features = data.shape
+    n_features_square = n_features ** 2
+
+    # check input parameters
+    # heuristic default - may need adjustment for
+    # large or tiny data sets
+    if l_rate is None:
+        l_rate = 0.01 / math.log(n_features ** 2.0)
+
+    if block is None:
+        block = int(math.floor(math.sqrt(n_samples / 3.0)))
+
+    logger.info('computing%sInfomax ICA' % (' Extended '
+                                            if extended is True else ' '))
+
+    # collect parameters
+    nblock = n_samples // block
+    lastt = (nblock - 1) * block + 1
+
+    # initialize training
+    if weights is None:
+        # initialize weights as identity matrix
+        weights = np.identity(n_features, dtype=np.float64)
+
+    BI = block * np.identity(n_features, dtype=np.float64)
+    bias = np.zeros((n_features, 1), dtype=np.float64)
+    onesrow = np.ones((1, block), dtype=np.float64)
+    startweights = weights.copy()
+    oldweights = startweights.copy()
+    step = 0
+    count_small_angle = 0
+    wts_blowup = False
+    blockno = 0
+    signcount = 0
+
+    # for extended Infomax
+    if extended is True:
+        signs = np.identity(n_features)
+        # flip the sign of the assumed subgaussian components to -1
+        signs.flat[slice(0, n_features * n_subgauss, n_features)] = -1
+        kurt_size = min(kurt_size, n_samples)
+        old_kurt = np.zeros(n_features, dtype=np.float64)
+        oldsigns = np.zeros((n_features, n_features))
+
+    # training loop
+    olddelta, oldchange = 1., 0.
+    while step < max_iter:
+
+        # shuffle data at each step
+        rng.seed(step)  # --> permutation is fixed but differs at each step
+        permute = list(range(n_samples))
+        rng.shuffle(permute)
+
+        # ICA training block
+        # loop across block samples
+        for t in range(0, lastt, block):
+            u = np.dot(data[permute[t:t + block], :], weights)
+            u += np.dot(bias, onesrow).T
+
+            if extended is True:
+                # extended ICA update
+                y = np.tanh(u)
+                weights += l_rate * np.dot(weights,
+                                           BI - np.dot(np.dot(u.T, y), signs) -
+                                           np.dot(u.T, u))
+                bias += l_rate * np.reshape(np.sum(y, axis=0,
+                                            dtype=np.float64) * -2.0,
+                                            (n_features, 1))
+
+            else:
+                # logistic ICA weights update
+                y = 1.0 / (1.0 + np.exp(-u))
+                weights += l_rate * np.dot(weights,
+                                           BI + np.dot(u.T, (1.0 - 2.0 * y)))
+                bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
+                                            dtype=np.float64), (n_features, 1))
+
+            # check change limit
+            max_weight_val = np.max(np.abs(weights))
+            if max_weight_val > max_weight:
+                wts_blowup = True
+
+            blockno += 1
+            if wts_blowup:
+                break
+
+            # ICA kurtosis estimation
+            if extended is True:
+
+                n = np.fix(blockno / ext_blocks)
+
+                if np.abs(n) * ext_blocks == blockno:
+                    if kurt_size < n_samples:
+                        rp = np.floor(rng.uniform(0, 1, kurt_size) *
+                                      (n_samples - 1))
+                        tpartact = np.dot(data[rp.astype(int), :], weights).T
+                    else:
+                        tpartact = np.dot(data, weights).T
+
+                    # estimate kurtosis
+                    kurt = kurtosis(tpartact, axis=1, fisher=True)
+
+                    if extmomentum != 0:
+                        kurt = (extmomentum * old_kurt +
+                                (1.0 - extmomentum) * kurt)
+                        old_kurt = kurt
+
+                    # estimate weighted signs
+                    signs.flat[::n_features + 1] = ((kurt + signsbias) /
+                                                    np.abs(kurt + signsbias))
+
+                    ndiff = ((signs.flat[::n_features + 1] -
+                              oldsigns.flat[::n_features + 1]) != 0).sum()
+                    if ndiff == 0:
+                        signcount += 1
+                    else:
+                        signcount = 0
+                    oldsigns = signs
+
+                    if signcount >= signcount_threshold:
+                        ext_blocks = np.fix(ext_blocks * signcount_step)
+                        signcount = 0
+
+        # here we continue after the for
+        # loop over the ICA training blocks
+        # if weights in bounds:
+        if not wts_blowup:
+            oldwtchange = weights - oldweights
+            step += 1
+            angledelta = 0.0
+            delta = oldwtchange.reshape(1, n_features_square)
+            change = np.sum(delta * delta, dtype=np.float64)
+            if step > 1:
+                angledelta = math.acos(np.sum(delta * olddelta) /
+                                       math.sqrt(change * oldchange))
+                angledelta *= degconst
+
+            # anneal learning rate
+            oldweights = weights.copy()
+            if angledelta > anneal_deg:
+                l_rate *= anneal_step    # anneal learning rate
+                # store current delta and change for the next angle check
+                olddelta = delta
+                oldchange = change
+                count_small_angle = 0  # reset count when angle delta is large
+            else:
+                if step == 1:  # on first step only
+                    olddelta = delta  # initialize
+                    oldchange = change
+                count_small_angle += 1
+                if count_small_angle > n_small_angle:
+                    max_iter = step
+
+            # apply stopping rule
+            if step > 2 and change < w_change:
+                step = max_iter
+            elif change > blowup:
+                l_rate *= blowup_fac
+
+        # restart if weights blow up
+        # (for lowering l_rate)
+        else:
+            step = 0  # start again
+            wts_blowup = False  # re-initialize variables
+            blockno = 1
+            l_rate *= restart_fac  # with lower learning rate
+            weights = startweights.copy()
+            oldweights = startweights.copy()
+            olddelta = np.zeros((1, n_features_square), dtype=np.float64)
+            bias = np.zeros((n_features, 1), dtype=np.float64)
+
+            # for extended Infomax
+            if extended:
+                signs = np.identity(n_features)
+                signs.flat[slice(0, n_features * n_subgauss, n_features)] = -1
+                oldsigns = np.zeros((n_features, n_features))
+
+            if l_rate > min_l_rate:
+                if verbose:
+                    logger.info('... lowering learning rate to %g'
+                                '\n... re-starting...' % l_rate)
+            else:
+                raise ValueError('Error in Infomax ICA: unmixing matrix '
+                                 'might not be invertible!')
+
+    # prepare return values
+    return weights.T
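
A toy demixing run for the new function, importing from the module path
added above; per the `u = data . weights` convention inside the training
loop, estimated sources are `data` times the transposed unmixing matrix:

    import numpy as np
    from mne.preprocessing.infomax_ import infomax

    rng = np.random.RandomState(42)
    src = rng.laplace(size=(1000, 2))       # super-gaussian sources
    mix = np.array([[1., 0.5], [0.5, 1.]])  # arbitrary mixing matrix
    data = np.dot(src, mix.T)               # (n_samples, n_features)
    unmixing = infomax(data, random_state=42)
    recovered = np.dot(data, unmixing.T)    # estimated sources
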
diff --git a/mne/preprocessing/maxfilter.py b/mne/preprocessing/maxfilter.py
index 5545678..8b6ce1e 100644
--- a/mne/preprocessing/maxfilter.py
+++ b/mne/preprocessing/maxfilter.py
@@ -1,9 +1,10 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
+from ..externals.six import string_types
 import os
 from warnings import warn
 import logging
@@ -11,9 +12,11 @@ import logging
 import numpy as np
 from scipy import optimize, linalg
 
-from ..fiff import Raw
-from ..fiff.constants import FIFF
+from ..io import Raw
+from ..io.constants import FIFF
 from ..utils import logger, verbose
+from ..externals.six.moves import map
+from ..externals.six.moves import zip
 
 
 @verbose
@@ -217,7 +220,7 @@ def apply_maxfilter(in_fname, out_fname, origin=None, frame='device',
         else:
             RuntimeError('invalid frame for origin')
 
-    if not isinstance(origin, basestring):
+    if not isinstance(origin, string_types):
         origin = '%0.1f %0.1f %0.1f' % (origin[0], origin[1], origin[2])
 
     # format command
diff --git a/mne/preprocessing/ssp.py b/mne/preprocessing/ssp.py
index d6727d4..f8abc73 100644
--- a/mne/preprocessing/ssp.py
+++ b/mne/preprocessing/ssp.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
@@ -10,7 +10,8 @@ import numpy as np
 
 from .. import Epochs, compute_proj_evoked, compute_proj_epochs
 from ..utils import logger, verbose
-from ..fiff import pick_types, make_eeg_average_ref_proj
+from .. import pick_types
+from ..io import make_eeg_average_ref_proj
 from .ecg import find_ecg_events
 from .eog import find_eog_events
 
@@ -41,9 +42,9 @@ def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
     ----------
     mode : string ('ECG', or 'EOG')
         What type of events to detect.
-    raw : mne.fiff.Raw
+    raw : mne.io.Raw
         Raw input file.
-    raw_event : mne.fiff.Raw or None
+    raw_event : mne.io.Raw or None
         Raw file to use for event detection (if None, raw is used).
     tmin : float
         Time before event in seconds.
@@ -105,7 +106,7 @@ def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
     events : ndarray
         Detected events.
     """
-    if not raw._preloaded:
+    if not raw.preload:
         raise ValueError('raw needs to be preloaded, '
                          'use preload=True in constructor')
 
@@ -225,9 +226,9 @@ def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
 
     Parameters
     ----------
-    raw : mne.fiff.Raw
+    raw : mne.io.Raw
         Raw input file.
-    raw_event : mne.fiff.Raw or None
+    raw_event : mne.io.Raw or None
         Raw file to use for event detection (if None, raw is used).
     tmin : float
         Time before event in seconds.
@@ -321,9 +322,9 @@ def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
 
     Parameters
     ----------
-    raw : mne.fiff.Raw
+    raw : mne.io.Raw
         Raw input file.
-    raw_event : mne.fiff.Raw or None
+    raw_event : mne.io.Raw or None
         Raw file to use for event detection (if None, raw is used).
     tmin : float
         Time before event in seconds.
diff --git a/mne/preprocessing/stim.py b/mne/preprocessing/stim.py
index 4704289..820830c 100644
--- a/mne/preprocessing/stim.py
+++ b/mne/preprocessing/stim.py
@@ -5,7 +5,7 @@
 import numpy as np
 from scipy import signal, interpolate
 
-from ..fiff import pick_types
+from .. import pick_types
 
 
 def eliminate_stim_artifact(raw, events, event_id, tmin=-0.005,
@@ -36,7 +36,7 @@ def eliminate_stim_artifact(raw, events, event_id, tmin=-0.005,
     raw: Raw object
         raw data object.
     """
-    if not raw._preloaded:
+    if not raw.preload:
         raise RuntimeError('Modifying data of Raw is only supported '
                            'when preloading is used. Use preload=True '
                            '(or string) in the constructor.')
diff --git a/mne/preprocessing/tests/test_ctps.py b/mne/preprocessing/tests/test_ctps.py
new file mode 100644
index 0000000..c562775
--- /dev/null
+++ b/mne/preprocessing/tests/test_ctps.py
@@ -0,0 +1,84 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD 3 clause
+
+import numpy as np
+from mne.time_frequency import morlet
+from nose.tools import assert_true, assert_raises
+from numpy.testing import assert_array_equal
+from mne.preprocessing.ctps_ import (ctps, _prob_kuiper,
+                                     _compute_normalized_phase)
+
+###############################################################################
+# Generate testing signal
+
+tmin = -0.3
+sfreq = 1000.  # Hz
+tstep = 1. / sfreq
+n_samples = 600
+times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
+
+# Generate times series from Morlet wavelet
+single_trial = np.zeros((1, len(times)))
+Ws = morlet(sfreq, [3], n_cycles=[1])
+
+single_trial[0][:len(Ws[0])] = np.real(Ws[0])
+roll_to = 300 - 265  # shift data to center of time window
+single_trial = np.roll(single_trial, roll_to)
+rng = np.random.RandomState(42)
+
+
+def get_data(n_trials, j_extent):
+    """Generate ground truth and testing data"""
+    ground_truth = np.tile(single_trial,  n_trials)
+    my_shape = n_trials, 1, 600
+    random_data = rng.random_sample(my_shape)
+    rand_ints = rng.random_integers(-j_extent, j_extent, n_trials)
+    jittered_data = np.array([np.roll(single_trial, i) for i in rand_ints])
+    data = np.concatenate([ground_truth.reshape(my_shape),
+                           jittered_data.reshape(my_shape),
+                           random_data.reshape(my_shape)], 1)
+
+    assert_true(data.shape == (n_trials, 3, 600))
+    return data
+
+# vary extent of jittering --> creates phaselocks at the borders if
+# 2 * extent != n_samples
+iter_test_ctps = enumerate(zip([400, 400], [150, 300], [0.6, 0.2]))
+
+
+def test_ctps():
+    """ Test basic ctps functionality
+    """
+    for ii, (n_trials, j_extent, pk_max) in iter_test_ctps:
+        data = get_data(n_trials, j_extent)
+        ks_dyn, pk_dyn, phase_trial = ctps(data)
+        data2 = _compute_normalized_phase(data)
+        ks_dyn2, pk_dyn2, phase_trial2 = ctps(data2, is_raw=False)
+        for a, b in zip([ks_dyn, pk_dyn, phase_trial],
+                        [ks_dyn2, pk_dyn2, data2]):
+            assert_array_equal(a, b)
+            assert_true(a.min() >= 0)
+            assert_true(a.max() <= 1)
+            assert_true(b.min() >= 0)
+            assert_true(b.max() <= 1)
+
+        # test for normalization
+        assert_true((pk_dyn.min() > 0.0) or (pk_dyn.max() < 1.0))
+        # test shapes
+        assert_true(phase_trial.shape == data.shape)
+        assert_true(pk_dyn.shape == data.shape[1:])
+        # test ground_truth + random + jittered case
+        assert_true(pk_dyn[0].max() == 1.0)
+        assert_true(len(np.unique(pk_dyn[0])) == 1.0)
+        assert_true(pk_dyn[1].max() < pk_max)
+        assert_true(pk_dyn[2].max() > 0.3)
+        if ii < 1:
+            assert_raises(ValueError, ctps,
+                          data[:, :, :, None])
+
+    assert_true(_prob_kuiper(1.0, 400) == 1.0)
+    # test vectorization: the vectorized call should match the scalar result
+    assert_array_equal(_prob_kuiper(np.array([1.0, 1.0]), 400),
+                       _prob_kuiper(1.0, 400) * np.ones(2))
+    assert_true(_prob_kuiper(0.1, 400) < 0.1)
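
Going by the assertions in this new test, ctps takes an (n_trials, n_channels, n_times) array of raw signals and returns normalized phase-locking measures. A rough sketch on synthetic input (the random data is an assumption, used only to show the shapes):

    import numpy as np
    from mne.preprocessing.ctps_ import ctps

    rng = np.random.RandomState(0)
    data = rng.randn(20, 3, 600)  # n_trials x n_channels x n_times
    ks_dyn, pk_dyn, phase_trial = ctps(data)
    # per the assertions above: pk_dyn has shape data.shape[1:],
    # phase_trial has data.shape, and all values lie in [0, 1]
    print(pk_dyn.shape)  # (3, 600)
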
diff --git a/mne/preprocessing/tests/test_ecg.py b/mne/preprocessing/tests/test_ecg.py
index 7aeb87b..e034227 100644
--- a/mne/preprocessing/tests/test_ecg.py
+++ b/mne/preprocessing/tests/test_ecg.py
@@ -1,14 +1,14 @@
 import os.path as op
 
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_equal
 
-from mne.fiff import Raw
-from mne.preprocessing.ecg import find_ecg_events
+from mne.io import Raw
+from mne.preprocessing.ecg import find_ecg_events, create_ecg_epochs
 
-data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_path, 'test_raw.fif')
 event_fname = op.join(data_path, 'test-eve.fif')
-proj_fname = op.join(data_path, 'test_proj.fif')
+proj_fname = op.join(data_path, 'test-proj.fif')
 
 
 def test_find_ecg():
@@ -19,3 +19,6 @@ def test_find_ecg():
     n_events = len(events)
     _, times = raw[0, :]
     assert_true(55 < average_pulse < 60)
+
+    ecg_epochs = create_ecg_epochs(raw, ch_name='MEG 1531')
+    assert_equal(len(ecg_epochs.events), n_events)
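
The new create_ecg_epochs helper exercised above can be sketched roughly as follows (the path and channel name mirror the test fixtures):

    from mne.io import Raw
    from mne.preprocessing.ecg import create_ecg_epochs

    raw = Raw('test_raw.fif', preload=True)
    ecg_epochs = create_ecg_epochs(raw, ch_name='MEG 1531')
    print(len(ecg_epochs.events))  # one epoch per detected heartbeat
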
diff --git a/mne/preprocessing/tests/test_eog.py b/mne/preprocessing/tests/test_eog.py
index d594045..97220dd 100644
--- a/mne/preprocessing/tests/test_eog.py
+++ b/mne/preprocessing/tests/test_eog.py
@@ -1,13 +1,13 @@
 import os.path as op
 from nose.tools import assert_true
 
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.preprocessing.eog import find_eog_events
 
-data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_path, 'test_raw.fif')
 event_fname = op.join(data_path, 'test-eve.fif')
-proj_fname = op.join(data_path, 'test_proj.fif')
+proj_fname = op.join(data_path, 'test-proj.fif')
 
 
 def test_find_eog():
diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py
index 48e353e..c969b01 100644
--- a/mne/preprocessing/tests/test_ica.py
+++ b/mne/preprocessing/tests/test_ica.py
@@ -1,5 +1,7 @@
-# Author: Denis Engemann <d.engemann at fz-juelich.de>
-#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+from __future__ import print_function
+
+# Author: Denis Engemann <denis.engemann at gmail.com>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -16,19 +18,19 @@ from numpy.testing import (assert_array_almost_equal, assert_array_equal,
 from scipy import stats
 from itertools import product
 
-from mne import fiff, Epochs, read_events
+from mne import io, Epochs, read_events, pick_types
 from mne.cov import read_cov
 from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
                                read_ica, run_ica)
 from mne.preprocessing.ica import score_funcs, _check_n_pca_components
-from mne.fiff.meas_info import Info
+from mne.io.meas_info import Info
 from mne.utils import set_log_file, check_sklearn_version, _TempDir
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 tempdir = _TempDir()
 
-data_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_dir, 'test_raw.fif')
 event_name = op.join(data_dir, 'test-eve.fif')
 evoked_nf_name = op.join(data_dir, 'test-nf-ave.fif')
@@ -63,55 +65,98 @@ def requires_sklearn(function):
 def test_ica_full_data_recovery():
     """Test recovery of full data when no source is rejected"""
     # Most basic recovery
-    raw = fiff.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(0.5)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')[:10]
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
+    evoked = epochs.average()
     n_channels = 5
     data = raw._data[:n_channels].copy()
     data_epochs = epochs.get_data()
-    for n_components, n_pca_components, ok in [(2, n_channels, True),
-                                               (2, n_channels // 2, False)]:
-        ica = ICA(n_components=n_components,
-                  max_pca_components=n_pca_components,
-                  n_pca_components=n_pca_components)
-        ica.decompose_raw(raw, picks=range(n_channels))
-        raw2 = ica.pick_sources_raw(raw, exclude=[])
-        if ok:
-            assert_allclose(data[:n_channels], raw2._data[:n_channels],
-                            rtol=1e-10, atol=1e-15)
-        else:
-            diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
-            assert_true(np.max(diff) > 1e-14)
-
-        ica = ICA(n_components=n_components,
-                  max_pca_components=n_pca_components,
-                  n_pca_components=n_pca_components)
-        ica.decompose_epochs(epochs, picks=range(n_channels))
-        epochs2 = ica.pick_sources_epochs(epochs, exclude=[])
-        data2 = epochs2.get_data()[:, :n_channels]
-        if ok:
-            assert_allclose(data_epochs[:, :n_channels], data2,
-                            rtol=1e-10, atol=1e-15)
-        else:
-            diff = np.abs(data_epochs[:, :n_channels] - data2)
-            assert_true(np.max(diff) > 1e-14)
+    data_evoked = evoked.data
+    for method in ['fastica']:
+        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
+        for n_components, n_pca_components, ok in stuff:
+            ica = ICA(n_components=n_components,
+                      max_pca_components=n_pca_components,
+                      n_pca_components=n_pca_components,
+                      method=method, max_iter=1)
+            with warnings.catch_warnings(record=True):
+                ica.fit(raw, picks=list(range(n_channels)))
+            raw2 = ica.apply(raw, exclude=[], copy=True)
+            if ok:
+                assert_allclose(data[:n_channels], raw2._data[:n_channels],
+                                rtol=1e-10, atol=1e-15)
+            else:
+                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
+                assert_true(np.max(diff) > 1e-14)
+
+            ica = ICA(n_components=n_components,
+                      max_pca_components=n_pca_components,
+                      n_pca_components=n_pca_components)
+            with warnings.catch_warnings(record=True):
+                ica.fit(epochs, picks=list(range(n_channels)))
+            epochs2 = ica.apply(epochs, exclude=[], copy=True)
+            data2 = epochs2.get_data()[:, :n_channels]
+            if ok:
+                assert_allclose(data_epochs[:, :n_channels], data2,
+                                rtol=1e-10, atol=1e-15)
+            else:
+                diff = np.abs(data_epochs[:, :n_channels] - data2)
+                assert_true(np.max(diff) > 1e-14)
+
+            evoked2 = ica.apply(evoked, exclude=[], copy=True)
+            data2 = evoked2.data[:n_channels]
+            if ok:
+                assert_allclose(data_evoked[:n_channels], data2,
+                                rtol=1e-10, atol=1e-15)
+            else:
+                diff = np.abs(evoked.data[:n_channels] - data2)
+                assert_true(np.max(diff) > 1e-14)
+    assert_raises(ValueError, ICA, method='pizza-decomposition')
+
+
+@requires_sklearn
+def test_ica_rank_reduction():
+    """Test recovery of full data when no source is rejected"""
+    # Most basic recovery
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(0.5)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')[:10]
+    n_components = 5
+    max_pca_components = len(picks)
+    for n_pca_components in [6, 10]:
+        with warnings.catch_warnings(record=True):  # non-convergence
+            warnings.simplefilter('always')
+            ica = ICA(n_components=n_components,
+                      max_pca_components=max_pca_components,
+                      n_pca_components=n_pca_components,
+                      method='fastica', max_iter=1).fit(raw, picks=picks)
+
+        rank_before = raw.estimate_rank(picks=picks)
+        assert_equal(rank_before, len(picks))
+        raw_clean = ica.apply(raw, copy=True)
+        rank_after = raw_clean.estimate_rank(picks=picks)
+        # the interaction between ICA rejection and PCA components is
+        # difficult to predict; rank_after often seems to be 1 higher than
+        # n_pca_components
+        assert_true(n_components < n_pca_components <= rank_after <=
+                    rank_before)
 
 
 @requires_sklearn
 def test_ica_core():
-    """Test ICA on raw and epochs
-    """
-    raw = fiff.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+    """Test ICA on raw and epochs"""
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
     # XXX. The None cases helped reveal bugs but are time consuming.
     test_cov = read_cov(test_cov_name)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
     noise_cov = [None, test_cov]
@@ -119,85 +164,103 @@ def test_ica_core():
     n_components = [2, 1.0]  # for future dbg add cases
     max_pca_components = [3]
     picks_ = [picks]
+    methods = ['fastica']
     iter_ica_params = product(noise_cov, n_components, max_pca_components,
-                              picks_)
+                              picks_, methods)
 
     # # test init catchers
     assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
     assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
 
     # test essential core functionality
-    for n_cov, n_comp, max_n, pcks in iter_ica_params:
+    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
       # Test ICA raw
         ica = ICA(noise_cov=n_cov, n_components=n_comp,
                   max_pca_components=max_n, n_pca_components=max_n,
-                  random_state=0)
+                  random_state=0, method=method, max_iter=1)
 
-        print ica  # to test repr
+        print(ica)  # to test repr
 
         # test fit checker
-        assert_raises(RuntimeError, ica.get_sources_raw, raw)
-        assert_raises(RuntimeError, ica.get_sources_epochs, epochs)
+        assert_raises(RuntimeError, ica.get_sources, raw)
+        assert_raises(RuntimeError, ica.get_sources, epochs)
 
         # test decomposition
-        ica.decompose_raw(raw, picks=pcks, start=start, stop=stop)
-        print ica  # to test repr
-        # test re-init exception
-        assert_raises(RuntimeError, ica.decompose_raw, raw, picks=picks)
+        with warnings.catch_warnings(record=True):
+            ica.fit(raw, picks=pcks, start=start, stop=stop)
+            repr(ica)  # to test repr
+
+        # test re-fit
+        unmixing1 = ica.unmixing_matrix_
+        with warnings.catch_warnings(record=True):
+            ica.fit(raw, picks=pcks, start=start, stop=stop)
+        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
 
-        sources = ica.get_sources_raw(raw)
+        sources = ica.get_sources(raw)[:, :][0]
         assert_true(sources.shape[0] == ica.n_components_)
 
         # test preload filter
         raw3 = raw.copy()
-        raw3._preloaded = False
-        assert_raises(ValueError, ica.pick_sources_raw, raw3,
+        raw3.preload = False
+        assert_raises(ValueError, ica.apply, raw3,
                       include=[1, 2])
 
         #######################################################################
         # test epochs decomposition
-
-        # test re-init exception
-        assert_raises(RuntimeError, ica.decompose_epochs, epochs, picks=picks)
         ica = ICA(noise_cov=n_cov, n_components=n_comp,
                   max_pca_components=max_n, n_pca_components=max_n,
                   random_state=0)
-
-        ica.decompose_epochs(epochs, picks=picks)
-        print ica  # to test repr
-        # test pick block after epochs fit
-        assert_raises(ValueError, ica.pick_sources_raw, raw)
-
-        sources = ica.get_sources_epochs(epochs)
+        with warnings.catch_warnings(record=True):
+            ica.fit(epochs, picks=picks)
+        data = epochs.get_data()[:, 0, :]
+        n_samples = np.prod(data.shape)
+        assert_equal(ica.n_samples_, n_samples)
+        print(ica)  # to test repr
+
+        sources = ica.get_sources(epochs).get_data()
         assert_true(sources.shape[1] == ica.n_components_)
 
-        assert_raises(ValueError, ica.find_sources_epochs, epochs,
+        assert_raises(ValueError, ica.score_sources, epochs,
                       target=np.arange(1))
 
         # test preload filter
         epochs3 = epochs.copy()
         epochs3.preload = False
-        assert_raises(ValueError, ica.pick_sources_epochs, epochs3,
+        assert_raises(ValueError, ica.apply, epochs3,
                       include=[1, 2])
 
+    # test for bug with whitener updating
+    _pre_whitener = ica._pre_whitener.copy()
+    epochs._data[:, 0, 10:15] *= 1e12
+    ica.apply(epochs, copy=True)
+    assert_array_equal(_pre_whitener, ica._pre_whitener)
+
+    # test explained-variance threshold leading to an empty selection
+    ica.n_components = 0.1
+    assert_raises(RuntimeError, ica.fit, epochs)
+
+    offender = 1, 2, 3,
+    assert_raises(ValueError, ica.get_sources, offender)
+    assert_raises(ValueError, ica.fit, offender)
+    assert_raises(ValueError, ica.apply, offender)
+
 
 @requires_sklearn
 def test_ica_additional():
-    """Test additional ICA functionality
-    """
+    """Test additional ICA functionality"""
     stop2 = 500
-    raw = fiff.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
     test_cov = read_cov(test_cov_name)
     events = read_events(event_name)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
     # for testing eog functionality
-    picks2 = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
-                             eog=True, exclude='bads')
+    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                        eog=True, exclude='bads')
     epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                         baseline=(None, 0), preload=True)
 
@@ -205,14 +268,24 @@ def test_ica_additional():
     ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
               n_pca_components=4)
     assert_true(ica.info is None)
-    ica.decompose_raw(raw, picks[:5])
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks[:5])
     assert_true(isinstance(ica.info, Info))
     assert_true(ica.n_components_ < 5)
 
     ica = ICA(n_components=3, max_pca_components=4,
               n_pca_components=4)
     assert_raises(RuntimeError, ica.save, '')
-    ica.decompose_raw(raw, picks=None, start=start, stop=stop2)
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks=None, start=start, stop=stop2)
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
+        ica.save(ica_badname)
+        read_ica(ica_badname)
+    assert_true(len(w) == 2)
 
     # test decim
     ica = ICA(n_components=3, max_pca_components=4,
@@ -221,25 +294,27 @@ def test_ica_additional():
     for _ in range(3):
         raw_.append(raw_)
     n_samples = raw_._data.shape[1]
-    ica.decompose_raw(raw, picks=None, decim=3)
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks=None, decim=3)
     assert_true(raw_._data.shape[1], n_samples)
 
     # test expl var
     ica = ICA(n_components=1.0, max_pca_components=4,
               n_pca_components=4)
-    ica.decompose_raw(raw, picks=None, decim=3)
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks=None, decim=3)
     assert_true(ica.n_components_ == 4)
 
     # epochs extraction from raw fit
-    assert_raises(RuntimeError, ica.get_sources_epochs, epochs)
+    assert_raises(RuntimeError, ica.get_sources, epochs)
     # test reading and writing
-    test_ica_fname = op.join(op.dirname(tempdir), 'ica_test.fif')
+    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
     for cov in (None, test_cov):
         ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                   n_pca_components=4)
-        with warnings.catch_warnings(True):  # ICA does not converge
-            ica.decompose_raw(raw, picks=picks, start=start, stop=stop2)
-        sources = ica.get_sources_epochs(epochs)
+        with warnings.catch_warnings(record=True):  # ICA does not converge
+            ica.fit(raw, picks=picks, start=start, stop=stop2)
+        sources = ica.get_sources(epochs).get_data()
         assert_true(ica.mixing_matrix_.shape == (2, 2))
         assert_true(ica.unmixing_matrix_.shape == (2, 2))
         assert_true(ica.pca_components_.shape == (4, len(picks)))
@@ -250,34 +325,26 @@ def test_ica_additional():
             ica.save(test_ica_fname)
             ica_read = read_ica(test_ica_fname)
             assert_true(ica.exclude == ica_read.exclude)
-            # test pick merge -- add components
-            ica.pick_sources_raw(raw, exclude=[1])
-            assert_true(ica.exclude == [0, 1])
-            #                 -- only as arg
+
             ica.exclude = []
-            ica.pick_sources_raw(raw, exclude=[0, 1])
-            assert_true(ica.exclude == [0, 1])
-            #                 -- remove duplicates
-            ica.exclude += [1]
-            ica.pick_sources_raw(raw, exclude=[0, 1])
+            ica.apply(raw, exclude=[1])
+            assert_true(ica.exclude == [])
+
+            ica.exclude = [0, 1]
+            ica.apply(raw, exclude=[1])
             assert_true(ica.exclude == [0, 1])
-            
-            # test basic include
-            ica.exclude = []
-            ica.pick_sources_raw(raw, include=[1])
-            
 
-            ica_raw = ica.sources_as_raw(raw)
+            ica_raw = ica.get_sources(raw)
             assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
                                         ica_raw.info['bads']])
 
         # test filtering
         d1 = ica_raw._data[0].copy()
-        with warnings.catch_warnings(True):  # dB warning
+        with warnings.catch_warnings(record=True):  # dB warning
             ica_raw.filter(4, 20)
         assert_true((d1 != ica_raw._data[0]).any())
         d1 = ica_raw._data[0].copy()
-        with warnings.catch_warnings(True):  # dB warning
+        with warnings.catch_warnings(record=True):  # dB warning
             ica_raw.notch_filter([10])
         assert_true((d1 != ica_raw._data[0]).any())
 
@@ -308,13 +375,12 @@ def test_ica_additional():
         assert_true(ica.ch_names == ica_read.ch_names)
         assert_true(isinstance(ica_read.info, Info))
 
-        assert_raises(RuntimeError, ica_read.decompose_raw, raw)
-        sources = ica.get_sources_raw(raw)
-        sources2 = ica_read.get_sources_raw(raw)
+        sources = ica.get_sources(raw)[:, :][0]
+        sources2 = ica_read.get_sources(raw)[:, :][0]
         assert_array_almost_equal(sources, sources2)
 
-        _raw1 = ica.pick_sources_raw(raw, exclude=[1])
-        _raw2 = ica_read.pick_sources_raw(raw, exclude=[1])
+        _raw1 = ica.apply(raw, exclude=[1])
+        _raw2 = ica_read.apply(raw, exclude=[1])
         assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
 
     os.remove(test_ica_fname)
@@ -322,14 +388,14 @@ def test_ica_additional():
     for name, func in score_funcs.items():
         if name in score_funcs_unsuited:
             continue
-        scores = ica.find_sources_raw(raw, target='EOG 061', score_func=func,
-                                      start=0, stop=10)
+        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
+                                   start=0, stop=10)
         assert_true(ica.n_components_ == len(scores))
 
     # check univariate stats
-    scores = ica.find_sources_raw(raw, score_func=stats.skew)
+    scores = ica.score_sources(raw, score_func=stats.skew)
     # check exception handling
-    assert_raises(ValueError, ica.find_sources_raw, raw,
+    assert_raises(ValueError, ica.score_sources, raw,
                   target=np.arange(1))
 
     params = []
@@ -339,62 +405,76 @@ def test_ica_additional():
         ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                              eog_ch=ch_name, skew_criterion=idx,
                              var_criterion=idx, kurt_criterion=idx)
-    ## score funcs epochs ##
+    with warnings.catch_warnings(record=True):
+        idx, scores = ica.find_bads_ecg(raw, method='ctps')
+        assert_equal(len(scores), ica.n_components_)
+        idx, scores = ica.find_bads_ecg(raw, method='correlation')
+        assert_equal(len(scores), ica.n_components_)
+        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
+        assert_equal(len(scores), ica.n_components_)
+        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
+                      method='ctps')
+        assert_raises(ValueError, ica.find_bads_ecg, raw,
+                      method='crazy-coupling')
+
+        idx, scores = ica.find_bads_eog(raw)
+        assert_equal(len(scores), ica.n_components_)
+        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
+        idx, scores = ica.find_bads_eog(raw)
+        assert_true(isinstance(scores, list))
+        assert_equal(len(scores[0]), ica.n_components_)
 
     # check score funcs
     for name, func in score_funcs.items():
         if name in score_funcs_unsuited:
             continue
-        scores = ica.find_sources_epochs(epochs_eog, target='EOG 061',
-                                         score_func=func)
+        scores = ica.score_sources(epochs_eog, target='EOG 061',
+                                   score_func=func)
         assert_true(ica.n_components_ == len(scores))
 
     # check univariate stats
-    scores = ica.find_sources_epochs(epochs, score_func=stats.skew)
+    scores = ica.score_sources(epochs, score_func=stats.skew)
 
     # check exception handling
-    assert_raises(ValueError, ica.find_sources_epochs, epochs,
+    assert_raises(ValueError, ica.score_sources, epochs,
                   target=np.arange(1))
 
     # ecg functionality
-    ecg_scores = ica.find_sources_raw(raw, target='MEG 1531',
-                                      score_func='pearsonr')
+    ecg_scores = ica.score_sources(raw, target='MEG 1531',
+                                   score_func='pearsonr')
 
-    with warnings.catch_warnings(True):  # filter attenuation warning
+    with warnings.catch_warnings(record=True):  # filter attenuation warning
         ecg_events = ica_find_ecg_events(raw,
                                          sources[np.abs(ecg_scores).argmax()])
 
     assert_true(ecg_events.ndim == 2)
 
     # eog functionality
-    eog_scores = ica.find_sources_raw(raw, target='EOG 061',
-                                      score_func='pearsonr')
-    with warnings.catch_warnings(True):  # filter attenuation warning
+    eog_scores = ica.score_sources(raw, target='EOG 061',
+                                   score_func='pearsonr')
+    with warnings.catch_warnings(record=True):  # filter attenuation warning
         eog_events = ica_find_eog_events(raw,
                                          sources[np.abs(eog_scores).argmax()])
 
     assert_true(eog_events.ndim == 2)
 
     # Test ica fiff export
-    ica_raw = ica.sources_as_raw(raw, start=0, stop=100)
+    ica_raw = ica.get_sources(raw, start=0, stop=100)
     assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
-    assert_true(isinstance(ica_raw.info.get('filenames', None),
-                           (list, type(None))))  # API consistency
+    assert_true(len(ica_raw._filenames) == 0)  # API consistency
     ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
     assert_true(ica.n_components_ == len(ica_chans))
-    test_ica_fname = op.join(op.abspath(op.curdir), 'test_ica.fif')
+    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
     ica.n_components = np.int32(ica.n_components)
     ica_raw.save(test_ica_fname, overwrite=True)
-    ica_raw2 = fiff.Raw(test_ica_fname, preload=True)
+    ica_raw2 = io.Raw(test_ica_fname, preload=True)
     assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
     ica_raw2.close()
     os.remove(test_ica_fname)
 
     # Test ica epochs export
-    ica_epochs = ica.sources_as_epochs(epochs)
+    ica_epochs = ica.get_sources(epochs)
     assert_true(ica_epochs.events.shape == epochs.events.shape)
-    sources_epochs = ica.get_sources_epochs(epochs)
-    assert_array_equal(ica_epochs.get_data(), sources_epochs)
     ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
     assert_true(ica.n_components_ == len(ica_chans))
     assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
@@ -403,6 +483,7 @@ def test_ica_additional():
 
     # test float n pca components
     ica.pca_explained_variance_ = np.array([0.2] * 5)
+    ica.n_components_ = 0
     for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
         ncomps_ = _check_n_pca_components(ica, ncomps)
         assert_true(ncomps_ == expected)
@@ -411,28 +492,54 @@ def test_ica_additional():
 @requires_sklearn
 def test_run_ica():
     """Test run_ica function"""
-    raw = fiff.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
     params = []
     params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx
     params += [(None, 'MEG 1531')]  # ECG / EOG channel params
     for idx, ch_name in product(*params):
-        run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
-                stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
-                skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
+        warnings.simplefilter('always')
+        with warnings.catch_warnings(record=True):
+            run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
+                    stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
+                    skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
 
 
 @requires_sklearn
 def test_ica_reject_buffer():
     """Test ICA data raw buffer rejection"""
-    raw = fiff.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
-    picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
     ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
     raw._data[2, 1000:1005] = 5e-12
     drop_log = op.join(op.dirname(tempdir), 'ica_drop.log')
     set_log_file(drop_log, overwrite=True)
-    ica.decompose_raw(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
-                      tstep=0.01, verbose=True)
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
+                tstep=0.01, verbose=True)
     assert_true(raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
-    log = [l for l in open(drop_log) if 'detected' in l]
+    with open(drop_log) as fid:
+        log = [l for l in fid if 'detected' in l]
     assert_equal(len(log), 1)
+
+
+@requires_sklearn
+def test_ica_twice():
+    """Test running ICA twice"""
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    picks = pick_types(raw.info, meg='grad', exclude='bads')
+    n_components = 0.9
+    max_pca_components = None
+    n_pca_components = 1.1
+    with warnings.catch_warnings(record=True):
+        ica1 = ICA(n_components=n_components,
+                   max_pca_components=max_pca_components,
+                   n_pca_components=n_pca_components, random_state=0)
+
+        ica1.fit(raw, picks=picks, decim=3)
+        raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
+        ica2 = ICA(n_components=n_components,
+                   max_pca_components=max_pca_components,
+                   n_pca_components=1.0, random_state=0)
+        ica2.fit(raw_new, picks=picks, decim=3)
+        assert_equal(ica1.n_components_, ica2.n_components_)
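
The test changes above encode the 0.8 ICA API renames: decompose_raw/decompose_epochs become fit, pick_sources_* becomes apply, get_sources_*/sources_as_* become get_sources, and find_sources_* becomes score_sources. A hedged summary of the new calling sequence (paths, channel names, and parameter values are illustrative):

    import mne
    from mne.preprocessing import ICA

    raw = mne.io.Raw('test_raw.fif', preload=True)
    picks = mne.pick_types(raw.info, meg=True, exclude='bads')

    ica = ICA(n_components=0.95, method='fastica')
    ica.fit(raw, picks=picks, decim=3)                  # was decompose_raw
    sources = ica.get_sources(raw)                      # was sources_as_raw
    scores = ica.score_sources(raw, target='EOG 061')   # was find_sources_raw
    ica.exclude = [0]                                   # components to reject
    raw_clean = ica.apply(raw, copy=True)               # was pick_sources_raw
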
diff --git a/mne/preprocessing/tests/test_infomax.py b/mne/preprocessing/tests/test_infomax.py
new file mode 100644
index 0000000..cdfdc9f
--- /dev/null
+++ b/mne/preprocessing/tests/test_infomax.py
@@ -0,0 +1,136 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+"""
+Test the infomax algorithm.
+Parts of this code are taken from scikit-learn.
+"""
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+
+from scipy import stats
+from scipy import linalg
+
+from mne.preprocessing.infomax_ import infomax
+from mne.utils import requires_sklearn
+
+
+def center_and_norm(x, axis=-1):
+    """ Centers and norms x **in place**
+
+    Parameters
+    ----------
+    x: ndarray
+        Array with an axis of observations (statistical units) measured on
+        random variables.
+    axis: int, optional
+        Axis along which the mean and variance are calculated.
+    """
+    x = np.rollaxis(x, axis)
+    x -= x.mean(axis=0)
+    x /= x.std(axis=0)
+
+
+@requires_sklearn
+def test_infomax_simple(add_noise=False):
+    """ Test the infomax algorithm on very simple data.
+    """
+    from sklearn.decomposition import RandomizedPCA
+    rng = np.random.RandomState(0)
+    # scipy.stats uses the global RNG:
+    np.random.seed(0)
+    n_samples = 1000
+    # Generate two sources:
+    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
+    s2 = stats.t.rvs(1, size=n_samples)
+    s = np.c_[s1, s2].T
+    center_and_norm(s)
+    s1, s2 = s
+
+    # Mixing angle
+    phi = 0.6
+    mixing = np.array([[np.cos(phi),  np.sin(phi)],
+                       [np.sin(phi), -np.cos(phi)]])
+    m = np.dot(mixing, s)
+
+    if add_noise:
+        m += 0.1 * rng.randn(2, 1000)
+
+    center_and_norm(m)
+
+    algos = [True, False]
+    for algo in algos:
+        X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
+        k_ = infomax(X, extended=algo)
+        s_ = np.dot(k_, X.T)
+
+        center_and_norm(s_)
+        s1_, s2_ = s_
+        # Check to see if the sources have been estimated
+        # in the wrong order
+        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+            s2_, s1_ = s_
+        s1_ *= np.sign(np.dot(s1_, s1))
+        s2_ *= np.sign(np.dot(s2_, s2))
+
+        # Check that we have estimated the original sources
+        if not add_noise:
+            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
+        else:
+            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
+            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
+
+
+@requires_sklearn
+def test_non_square_infomax(add_noise=False):
+    """ Test the infomax algorithm on very simple data.
+    """
+    from sklearn.decomposition import RandomizedPCA
+
+    rng = np.random.RandomState(0)
+
+    n_samples = 1000
+    # Generate two sources:
+    t = np.linspace(0, 100, n_samples)
+    s1 = np.sin(t)
+    s2 = np.ceil(np.sin(np.pi * t))
+    s = np.c_[s1, s2].T
+    center_and_norm(s)
+    s1, s2 = s
+
+    # Mixing matrix
+    n_observed = 6
+    mixing = rng.randn(n_observed, 2)
+    m = np.dot(mixing, s)
+
+    if add_noise:
+        m += 0.1 * rng.randn(n_observed, n_samples)
+
+    center_and_norm(m)
+    pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
+    m = m.T
+    m = pca.fit_transform(m)
+    # we need extended since input signals are sub-gaussian
+    unmixing_ = infomax(m, random_state=rng, extended=True)
+    s_ = np.dot(unmixing_, m.T)
+    # Check that the mixing model described in the docstring holds:
+    mixing_ = linalg.pinv(unmixing_.T)
+
+    assert_almost_equal(m, s_.T.dot(mixing_))
+
+    center_and_norm(s_)
+    s1_, s2_ = s_
+    # Check to see if the sources have been estimated
+    # in the wrong order
+    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+        s2_, s1_ = s_
+    s1_ *= np.sign(np.dot(s1_, s1))
+    s2_ *= np.sign(np.dot(s2_, s2))
+
+    # Check that we have estimated the original sources
+    if not add_noise:
+        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
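
As used in this test, infomax takes an (n_samples, n_features) array and returns an unmixing matrix. A toy sketch with synthetic sources (the Laplacian sources are an assumption, chosen because they are super-Gaussian):

    import numpy as np
    from mne.preprocessing.infomax_ import infomax

    rng = np.random.RandomState(0)
    s = rng.laplace(size=(2, 1000))    # two independent sources
    mixing = rng.randn(2, 2)
    x = np.dot(mixing, s).T            # observations, (n_samples, n_features)
    unmixing = infomax(x, extended=True, random_state=rng)
    sources = np.dot(unmixing, x.T)    # recovered up to sign and permutation
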
diff --git a/mne/preprocessing/tests/test_ssp.py b/mne/preprocessing/tests/test_ssp.py
index 88d22b0..ca6fae2 100644
--- a/mne/preprocessing/tests/test_ssp.py
+++ b/mne/preprocessing/tests/test_ssp.py
@@ -5,13 +5,13 @@ from nose.tools import assert_true, assert_equal
 from numpy.testing import assert_array_almost_equal
 import numpy as np
 
-from ...fiff import Raw
-from ...fiff.proj import make_projector, activate_proj
+from ...io import Raw
+from ...io.proj import make_projector, activate_proj
 from ..ssp import compute_proj_ecg, compute_proj_eog
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_path, 'test_raw.fif')
 dur_use = 5.0
 eog_times = np.array([0.5, 2.3, 3.6, 14.5])
@@ -36,6 +36,7 @@ def test_compute_proj_ecg():
 
         # without setting a bad channel, this should throw a warning
         with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
             projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
                                              ch_name='MEG 1531', bads=[],
                                              average=average, avg_ref=True,
@@ -62,6 +63,7 @@ def test_compute_proj_eog():
 
         # This will throw a warning b/c simplefilter('always')
         with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
             projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
                                              average=average, bads=[],
                                              avg_ref=True, no_proj=False,
diff --git a/mne/preprocessing/tests/test_stim.py b/mne/preprocessing/tests/test_stim.py
index 2b1a659..13af01c 100644
--- a/mne/preprocessing/tests/test_stim.py
+++ b/mne/preprocessing/tests/test_stim.py
@@ -8,11 +8,11 @@ import numpy as np
 from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true
 
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.event import read_events
 from mne.preprocessing.stim import eliminate_stim_artifact
 
-data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_path, 'test_raw.fif')
 event_fname = op.join(data_path, 'test-eve.fif')
 
diff --git a/mne/proj.py b/mne/proj.py
index 76fc5c1..fda15cb 100644
--- a/mne/proj.py
+++ b/mne/proj.py
@@ -1,21 +1,21 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
 import numpy as np
 from scipy import linalg
 
-from . import fiff, Epochs
-from .utils import logger, verbose
-from .fiff.pick import pick_types, pick_types_forward
-from .fiff.proj import Projection, _has_eeg_average_ref_proj
+from . import io, Epochs
+from .utils import check_fname, logger, verbose
+from .io.pick import pick_types, pick_types_forward
+from .io.proj import Projection, _has_eeg_average_ref_proj
 from .event import make_fixed_length_events
 from .parallel import parallel_func
 from .cov import _check_n_samples
 from .forward import (is_fixed_orient, _subject_from_forward,
                       convert_forward_solution)
 from .source_estimate import SourceEstimate
-from .fiff.proj import make_projector, make_eeg_average_ref_proj
+from .io.proj import make_projector, make_eeg_average_ref_proj
 
 
 def read_proj(fname):
@@ -24,15 +24,19 @@ def read_proj(fname):
     Parameters
     ----------
     fname : string
-        The name of file containing the projections vectors.
+        The name of file containing the projections vectors. It should end with
+        -proj.fif or -proj.fif.gz.
 
     Returns
     -------
     projs : list
         The list of projection vectors.
     """
-    fid, tree, _ = fiff.fiff_open(fname)
-    projs = fiff.proj.read_proj(fid, tree)
+    check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz'))
+
+    ff, tree, _ = io.fiff_open(fname)
+    with ff as fid:
+        projs = io.proj._read_proj(fid, tree)
     return projs
 
 
@@ -42,14 +46,17 @@ def write_proj(fname, projs):
     Parameters
     ----------
     fname : string
-        The name of file containing the projections vectors.
+        The name of file containing the projections vectors. It should end with
+        -proj.fif or -proj.fif.gz.
 
     projs : list
         The list of projection vectors.
     """
-    fid = fiff.write.start_file(fname)
-    fiff.proj.write_proj(fid, projs)
-    fiff.write.end_file(fid)
+    check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz'))
+
+    fid = io.write.start_file(fname)
+    io.proj._write_proj(fid, projs)
+    io.write.end_file(fid)
 
 
 @verbose
@@ -123,10 +130,10 @@ def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
     # compute data covariance
     data = _compute_cov_epochs(epochs, n_jobs)
     event_id = epochs.event_id
-    if event_id is None or len(event_id.keys()) == 0:
+    if event_id is None or len(list(event_id.keys())) == 0:
         event_id = '0'
     elif len(event_id.keys()) == 1:
-        event_id = str(event_id.values()[0])
+        event_id = str(list(event_id.values())[0])
     else:
         event_id = 'Multiple-events'
     desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
@@ -141,7 +148,7 @@ def _compute_cov_epochs(epochs, n_jobs):
     if n_epochs == 0:
         raise RuntimeError('No good epochs found')
 
-    n_chan, n_samples = epochs.__iter__().next().shape
+    n_chan, n_samples = epochs.info['nchan'], len(epochs.times)
     _check_n_samples(n_samples * n_epochs, n_chan)
     data = sum(data)
     return data
@@ -321,7 +328,7 @@ def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
     n_locations = n_dipoles // 3
     sensitivity_map = np.empty(n_locations)
 
-    for k in xrange(n_locations):
+    for k in range(n_locations):
         gg = gain[:, 3 * k:3 * (k + 1)]
         if mode != 'fixed':
             s = linalg.svd(gg, full_matrices=False, compute_uv=False)
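
With the check_fname calls added above, projection file names are expected to follow the -proj.fif convention; roughly (the file names here are hypothetical):

    from mne import read_proj, write_proj

    # names ending in -proj.fif or -proj.fif.gz pass check_fname;
    # other names trigger the filename warning
    projs = read_proj('test-proj.fif')
    write_proj('copy-proj.fif', projs)
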
diff --git a/mne/realtime/__init__.py b/mne/realtime/__init__.py
index 4c794e9..cee63e9 100644
--- a/mne/realtime/__init__.py
+++ b/mne/realtime/__init__.py
@@ -10,4 +10,5 @@
 from .client import RtClient
 from .epochs import RtEpochs
 from .mockclient import MockRtClient
+from .fieldtrip_client import FieldTripClient
 from .stim_server_client import StimServer, StimClient
diff --git a/mne/realtime/client.py b/mne/realtime/client.py
index 3e89e75..d82e414 100644
--- a/mne/realtime/client.py
+++ b/mne/realtime/client.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # Authors: Christoph Dinh <chdinh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
@@ -7,16 +8,16 @@
 import socket
 import time
 import struct
-import StringIO
+from ..externals.six.moves import StringIO
 import threading
 
 import numpy as np
 
 from ..utils import logger, verbose
-from ..fiff.constants import FIFF
-from ..fiff.meas_info import read_meas_info
-from ..fiff.tag import Tag, read_tag
-from ..fiff.tree import make_dir_tree
+from ..io.constants import FIFF
+from ..io.meas_info import read_meas_info
+from ..io.tag import Tag, read_tag
+from ..io.tree import make_dir_tree
 
 # Constants for fiff realtime fiff messages
 MNE_RT_GET_CLIENT_ID = 1
@@ -68,7 +69,7 @@ def _buffer_recv_worker(rt_client, nchan):
     except RuntimeError as err:
         # something is wrong, the server stopped (or something)
         rt_client._recv_thread = None
-        print 'Buffer receive thread stopped: %s' % err
+        print('Buffer receive thread stopped: %s' % err)
 
 
 class RtClient(object):
@@ -145,7 +146,7 @@ class RtClient(object):
 
         logger.debug('Sending command: %s' % command)
         command += '\n'
-        self._cmd_sock.sendall(command)
+        self._cmd_sock.sendall(command.encode('utf-8'))
 
         buf, chunk, begin = [], '', time.time()
         while True:
@@ -223,7 +224,7 @@ class RtClient(object):
 
         buff = ''.join(buff)
 
-        fid = StringIO.StringIO(buff)
+        fid = StringIO(buff)
         tree, _ = make_dir_tree(fid, directory)
         info, meas = read_meas_info(fid, tree)
 
@@ -343,7 +344,7 @@ class RtClient(object):
         while tag.kind != FIFF.FIFF_DATA_BUFFER:
             tag, this_buff = _recv_tag_raw(self._data_sock)
 
-        buff = StringIO.StringIO(this_buff)
+        buff = StringIO(this_buff)
         tag = read_tag(buff)
         raw_buffer = tag.data.reshape(-1, nchan).T
 
diff --git a/mne/realtime/epochs.py b/mne/realtime/epochs.py
index b356c06..a257a79 100644
--- a/mne/realtime/epochs.py
+++ b/mne/realtime/epochs.py
@@ -1,8 +1,8 @@
 # Authors: Christoph Dinh <chdinh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 import time
@@ -10,13 +10,13 @@ import copy
 
 import numpy as np
 
-from ..fiff import pick_channels, pick_types
+from .. import pick_channels, pick_types
 from ..utils import logger, verbose
 from ..baseline import rescale
 from ..epochs import _BaseEpochs
 from ..event import _find_events
 from ..filter import detrend
-from ..fiff.proj import setup_proj
+from ..io.proj import setup_proj
 
 
 class RtEpochs(_BaseEpochs):
@@ -347,7 +347,7 @@ class RtEpochs(_BaseEpochs):
             The raw epoch (only calibration has been applied) over all
             channels.
         event_samp : int
-            The time in samples when the epoch occured.
+            The time in samples when the epoch occurred.
         event_id : int
             The event ID of the epoch.
         """
@@ -362,19 +362,8 @@ class RtEpochs(_BaseEpochs):
         if self.proj and self._projector is not None:
             epoch = np.dot(self._projector, epoch)
 
-        # Detrend
-        if self.detrend is not None:
-            picks = pick_types(self.info, meg=True, eeg=True, stim=False,
-                               eog=False, ecg=False, emg=False, ref_meg=False)
-            epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
-
-        # Baseline correct
-        epoch = rescale(epoch, self._raw_times, self.baseline, 'mean',
-                        copy=False, verbose='ERROR')
-
-        # Decimate
-        if self.decim > 1:
-            epoch = epoch[:, self._decim_idx]
+        # Detrend, baseline correct, decimate
+        epoch = self._preprocess(epoch, verbose='ERROR')
 
         # Decide if this is a good epoch
         is_good, _ = self._is_good_epoch(epoch, verbose='ERROR')
diff --git a/mne/realtime/fieldtrip_client.py b/mne/realtime/fieldtrip_client.py
new file mode 100644
index 0000000..bee2746
--- /dev/null
+++ b/mne/realtime/fieldtrip_client.py
@@ -0,0 +1,296 @@
+# Author: Mainak Jas
+#
+# License: BSD (3-clause)
+
+import re
+import copy
+import time
+import threading
+import warnings
+import numpy as np
+
+from ..io.constants import FIFF
+from ..io.meas_info import Info
+from ..utils import logger
+from ..externals.FieldTrip import Client as FtClient
+
+
+def _buffer_recv_worker(ft_client):
+    """Worker thread that constantly receives buffers."""
+
+    try:
+        for raw_buffer in ft_client.iter_raw_buffers():
+            ft_client._push_raw_buffer(raw_buffer)
+    except RuntimeError as err:
+        # something is wrong, the server stopped (or something)
+        ft_client._recv_thread = None
+        print('Buffer receive thread stopped: %s' % err)
+
+
+class FieldTripClient(object):
+    """ Realtime FieldTrip client
+
+    Parameters
+    ----------
+    info : dict | None
+        The measurement info read in from a file. If None, it is guessed from
+        the Fieldtrip Header object.
+    host : str
+        Hostname (or IP address) of the host where Fieldtrip buffer is running.
+    port : int
+        Port to use for the connection.
+    wait_max : float
+        Maximum time (in seconds) to wait for the Fieldtrip buffer to start.
+    tmin : float | None
+        Time instant to start receiving buffers. If None, start from the latest
+        samples available.
+    tmax : float
+        Time instant to stop receiving buffers.
+    buffer_size : int
+        Size of each buffer in terms of number of samples.
+    verbose : bool, str, int, or None
+        Log verbosity; see mne.verbose.
+    """
+    def __init__(self, info=None, host='localhost', port=1972, wait_max=30,
+                 tmin=None, tmax=np.inf, buffer_size=1000, verbose=None):
+        self.verbose = verbose
+
+        self.info = info
+        self.wait_max = wait_max
+        self.tmin = tmin
+        self.tmax = tmax
+        self.buffer_size = buffer_size
+
+        self.host = host
+        self.port = port
+
+        self._recv_thread = None
+        self._recv_callbacks = list()
+
+    def __enter__(self):
+        # instantiate Fieldtrip client and connect
+        self.ft_client = FtClient()
+
+        # connect to FieldTrip buffer
+        logger.info("FieldTripClient: Waiting for server to start")
+        start_time, current_time = time.time(), time.time()
+        success = False
+        while current_time < (start_time + self.wait_max):
+            try:
+                self.ft_client.connect(self.host, self.port)
+                logger.info("FieldTripClient: Connected")
+                success = True
+                break
+            except Exception:
+                current_time = time.time()
+                time.sleep(0.1)
+
+        if not success:
+            raise RuntimeError('Could not connect to FieldTrip Buffer')
+
+        # retrieve header
+        logger.info("FieldTripClient: Retrieving header")
+        start_time, current_time = time.time(), time.time()
+        while current_time < (start_time + self.wait_max):
+            self.ft_header = self.ft_client.getHeader()
+            if self.ft_header is None:
+                current_time = time.time()
+                time.sleep(0.1)
+            else:
+                break
+
+        if self.ft_header is None:
+            raise RuntimeError('Failed to retrieve Fieldtrip header!')
+        else:
+            logger.info("FieldTripClient: Header retrieved")
+
+        self.info = self._guess_measurement_info()
+        self.ch_names = self.ft_header.labels
+
+        # find start and end samples
+
+        sfreq = self.info['sfreq']
+
+        if self.tmin is None:
+            self.tmin_samp = max(0, self.ft_header.nSamples - 1)
+        else:
+            self.tmin_samp = int(round(sfreq * self.tmin))
+
+        if self.tmax != np.inf:
+            self.tmax_samp = int(round(sfreq * self.tmax))
+        else:
+            self.tmax_samp = np.iinfo(np.uint32).max
+
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.ft_client.disconnect()
+
+    def _guess_measurement_info(self):
+        """
+        Create a minimal Info dictionary required for epoching, averaging,
+        etc.
+        """
+
+        if self.info is None:
+
+            warnings.warn('Info dictionary not provided. Trying to guess it '
+                          'from FieldTrip Header object')
+
+            info = Info()  # create info dictionary
+
+            # modify info attributes according to the FieldTrip Header object
+            info['nchan'] = self.ft_header.nChannels
+            info['sfreq'] = self.ft_header.fSample
+            info['ch_names'] = self.ft_header.labels
+
+            info['comps'] = list()
+            info['projs'] = list()
+            info['bads'] = list()
+
+            # channel dictionary list
+            info['chs'] = []
+
+            for idx, ch in enumerate(info['ch_names']):
+                this_info = dict()
+
+                this_info['scanno'] = idx
+
+                # extract numerical part of channel name
+                this_info['logno'] = int(re.findall('[^\W\d_]+|\d+', ch)[-1])
+
+                if ch.startswith('EEG'):
+                    this_info['kind'] = FIFF.FIFFV_EEG_CH
+                elif ch.startswith('MEG'):
+                    this_info['kind'] = FIFF.FIFFV_MEG_CH
+                elif ch.startswith('MCG'):
+                    this_info['kind'] = FIFF.FIFFV_MCG_CH
+                elif ch.startswith('EOG'):
+                    this_info['kind'] = FIFF.FIFFV_EOG_CH
+                elif ch.startswith('STI'):
+                    this_info['kind'] = FIFF.FIFFV_STIM_CH
+                elif ch.startswith('ECG'):
+                    this_info['kind'] = FIFF.FIFFV_ECG_CH
+                elif ch.startswith('MISC'):
+                    this_info['kind'] = FIFF.FIFFV_MISC_CH
+
+                # Fieldtrip already does calibration
+                this_info['range'] = 1.0
+                this_info['cal'] = 1.0
+
+                this_info['ch_name'] = ch
+                this_info['coil_trans'] = None
+                this_info['loc'] = None
+                this_info['eeg_loc'] = None
+
+                if ch.startswith('EEG'):
+                    this_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+                elif ch.startswith('MEG'):
+                    this_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                else:
+                    this_info['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
+
+                if ch.startswith('MEG') and ch.endswith('1'):
+                    this_info['unit'] = FIFF.FIFF_UNIT_T
+                elif ch.startswith('MEG') and (ch.endswith('2')
+                                               or ch.endswith('3')):
+                    this_info['unit'] = FIFF.FIFF_UNIT_T_M
+                else:
+                    this_info['unit'] = FIFF.FIFF_UNIT_V
+
+                this_info['unit_mul'] = 0
+
+                info['chs'].append(this_info)
+
+        else:
+
+            info = copy.deepcopy(self.info)
+
+        return info
+
+    def get_measurement_info(self):
+        """Returns the measurement info.
+
+        Returns
+        -------
+        self.info : dict
+            The measurement info.
+        """
+        return self.info
+
+    def register_receive_callback(self, callback):
+        """Register a raw buffer receive callback.
+
+        Parameters
+        ----------
+        callback : callable
+            The callback. The raw buffer is passed as the first parameter
+            to callback.
+        """
+        if callback not in self._recv_callbacks:
+            self._recv_callbacks.append(callback)
+
+    def unregister_receive_callback(self, callback):
+        """Unregister a raw buffer receive callback."""
+        if callback in self._recv_callbacks:
+            self._recv_callbacks.remove(callback)
+
+    def _push_raw_buffer(self, raw_buffer):
+        """Push raw buffer to clients using callbacks."""
+        for callback in self._recv_callbacks:
+            callback(raw_buffer)
+
+    def start_receive_thread(self, nchan):
+        """Start the receive thread.
+
+        If the measurement has not been started, it will also be started.
+
+        Parameters
+        ----------
+        nchan : int
+            The number of channels in the data.
+        """
+
+        if self._recv_thread is None:
+
+            self._recv_thread = threading.Thread(target=_buffer_recv_worker,
+                                                 args=(self, ))
+            self._recv_thread.daemon = True
+            self._recv_thread.start()
+
+    def stop_receive_thread(self, stop_measurement=False):
+        """Stop the receive thread
+
+        Parameters
+        ----------
+        stop_measurement : bool
+            Also stop the measurement.
+        """
+        if self._recv_thread is not None:
+            # threading.Thread has no stop(); drop the reference and let
+            # the daemon thread exit with the process
+            self._recv_thread = None
+
+    def iter_raw_buffers(self):
+        """Return an iterator over raw buffers
+
+        Returns
+        -------
+        raw_buffer : generator
+            Generator for iteration over raw buffers.
+        """
+
+        iter_times = zip(range(self.tmin_samp, self.tmax_samp,
+                               self.buffer_size),
+                         range(self.tmin_samp + self.buffer_size,
+                               self.tmax_samp, self.buffer_size))
+
+        for ii, (start, stop) in enumerate(iter_times):
+
+            # wait for correct number of samples to be available
+            self.ft_client.wait(stop, np.iinfo(np.uint32).max,
+                                np.iinfo(np.uint32).max)
+
+            # get the samples
+            raw_buffer = self.ft_client.getData([start, stop]).transpose()
+
+            yield raw_buffer
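
A minimal sketch of how the new client is meant to be driven, assuming a FieldTrip buffer is already serving data on the default port:

    from mne.realtime import FieldTripClient

    with FieldTripClient(host='localhost', port=1972, tmax=30.) as ft_client:
        info = ft_client.get_measurement_info()
        for raw_buffer in ft_client.iter_raw_buffers():
            print(raw_buffer.shape)  # (n_channels, buffer_size)
            break  # peek at the first buffer only
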
diff --git a/mne/realtime/mockclient.py b/mne/realtime/mockclient.py
index a1039be..fd6e757 100644
--- a/mne/realtime/mockclient.py
+++ b/mne/realtime/mockclient.py
@@ -1,6 +1,6 @@
 # Authors: Mainak Jas <mainak at neuro.hut.fi>
-#          Denis Engemann <d.engemann at fz-juelich.de>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -46,7 +46,7 @@ class MockRtClient(object):
         ----------
         epochs : instance of RtEpochs
             The epochs object.
-        picks : array of int
+        picks : array-like of int
             Indices of channels.
         tmin : float
             Time instant to start receiving buffers.
@@ -63,8 +63,8 @@ class MockRtClient(object):
         tmin_samp = int(round(sfreq * tmin))
         tmax_samp = int(round(sfreq * tmax))
 
-        iter_times = zip(range(tmin_samp, tmax_samp, buffer_size),
-                         range(buffer_size, tmax_samp, buffer_size))
+        iter_times = zip(list(range(tmin_samp, tmax_samp, buffer_size)),
+                         list(range(buffer_size, tmax_samp, buffer_size)))
 
         for ii, (start, stop) in enumerate(iter_times):
             # channels are picked in _append_epoch_to_queue. No need to pick
diff --git a/mne/realtime/stim_server_client.py b/mne/realtime/stim_server_client.py
index a572176..57606f0 100644
--- a/mne/realtime/stim_server_client.py
+++ b/mne/realtime/stim_server_client.py
@@ -1,10 +1,10 @@
 # Author: Mainak Jas <mainak at neuro.hut.fi>
 # License: BSD (3-clause)
 
-import Queue
+from ..externals.six.moves import queue
 import time
 import socket
-import SocketServer
+from ..externals.six.moves import socketserver
 import threading
 
 import numpy as np
@@ -12,7 +12,7 @@ import numpy as np
 from ..utils import logger, verbose
 
 
-class _ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
+class _ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
     """Creates a threaded TCP server
 
     Parameters
@@ -32,14 +32,14 @@ class _ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
     # except that it has an additional attribute stim_server
 
         # Create the server and bind it to the desired server address
-        SocketServer.TCPServer.__init__(self, server_address,
+        socketserver.TCPServer.__init__(self, server_address,
                                         request_handler_class,
                                         False)
 
         self.stim_server = stim_server
 
 
-class _TriggerHandler(SocketServer.BaseRequestHandler):
+class _TriggerHandler(socketserver.BaseRequestHandler):
     """Request handler on the server side."""
 
     def handle(self):
@@ -48,8 +48,8 @@ class _TriggerHandler(SocketServer.BaseRequestHandler):
         self.request.settimeout(None)
 
         while self.server.stim_server._running:
-
             data = self.request.recv(1024)  # clip input at 1Kb
+            data = data.decode()  # need to turn it into a string (Py3k)
 
             if data == 'add client':
                 # Add stim_server._client
@@ -60,25 +60,25 @@ class _TriggerHandler(SocketServer.BaseRequestHandler):
                 # Instantiate queue for communication between threads
                 # Note: new queue for each handler
                 if not hasattr(self, '_tx_queue'):
-                    self._tx_queue = Queue.Queue()
+                    self._tx_queue = queue.Queue()
 
-                self.request.sendall("Client added")
+                self.request.sendall("Client added".encode('utf-8'))
 
                 # Mark the client as running
                 for client in self.server.stim_server._clients:
                     if client['id'] == client_id:
                         client['running'] = True
 
-            if data == 'get trigger':
+            elif data == 'get trigger':
 
                 # Pop triggers and send them
                 if (self._tx_queue.qsize() > 0 and
                         hasattr(self.server.stim_server, '_clients')):
 
                     trigger = self._tx_queue.get()
-                    self.request.sendall(str(trigger))
+                    self.request.sendall(str(trigger).encode('utf-8'))
                 else:
-                    self.request.sendall("Empty")
+                    self.request.sendall("Empty".encode('utf-8'))
 
 
 class StimServer(object):
@@ -244,17 +244,25 @@ class StimClient(object):
             self._sock.connect((host, port))
 
             logger.info("Establishing connection with server")
-            self._sock.send("add client")
-            resp = self._sock.recv(1024)
+            data = "add client".encode('utf-8')
+            n_sent = self._sock.send(data)
+            if n_sent != len(data):
+                raise RuntimeError('Could not communicate with server')
+            resp = self._sock.recv(1024).decode()  # turn bytes into str (Py3k)
 
             if resp == 'Client added':
                 logger.info("Connection established")
+            else:
+                raise RuntimeError('Client not added')
 
         except Exception:
             raise RuntimeError('Setting up acquisition <-> stimulation '
                                'computer connection (host: %s '
                                'port: %d) failed. Make sure StimServer '
                                'is running.' % (host, port))
+
+    def close(self):
+        """Close the socket object."""
+        self._sock.close()
 
     @verbose
     def get_trigger(self, timeout=5.0, verbose=None):
@@ -278,7 +286,7 @@ class StimClient(object):
                         logger.info("received nothing")
                         return None
 
-                self._sock.send("get trigger")
+                self._sock.send("get trigger".encode('utf-8'))
                 trigger = self._sock.recv(1024)
 
                 if trigger != 'Empty':
diff --git a/mne/realtime/tests/test_fieldtrip_client.py b/mne/realtime/tests/test_fieldtrip_client.py
new file mode 100644
index 0000000..3ce945d
--- /dev/null
+++ b/mne/realtime/tests/test_fieldtrip_client.py
@@ -0,0 +1,68 @@
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+import time
+import os
+import threading
+import subprocess
+import warnings
+import os.path as op
+
+from nose.tools import assert_true
+
+from mne.utils import requires_neuromag2ft
+from mne.realtime import FieldTripClient
+from mne.externals.six.moves import queue
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.realpath(op.join(base_dir, 'test_raw.fif'))
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def _run_buffer(kill_signal, neuromag2ft_fname):
+    cmd = (neuromag2ft_fname, '--file', raw_fname, '--speed', '4.0')
+
+    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+    # Let measurement continue for the entire duration
+    kill_signal.get(timeout=10.0)
+    print('Terminating subprocess')
+    process.terminate()
+
+
+@requires_neuromag2ft
+def test_fieldtrip_client():
+    """Test fieldtrip_client"""
+
+    neuromag2ft_fname = op.realpath(op.join(os.environ['NEUROMAG2FT_ROOT'],
+                                            'neuromag2ft'))
+
+    kill_signal = queue.Queue()
+    thread = threading.Thread(target=_run_buffer, args=(kill_signal,
+                                                        neuromag2ft_fname))
+    thread.daemon = True
+    thread.start()
+
+    # Start the FieldTrip buffer
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        with FieldTripClient(host='localhost', port=1972,
+                             tmax=5, wait_max=1) as rt_client:
+            tmin_samp1 = rt_client.tmin_samp
+
+    time.sleep(1)  # Pause measurement
+    assert_true(len(w) == 1)
+
+    # Start the FieldTrip buffer again
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        with FieldTripClient(host='localhost', port=1972,
+                             tmax=5, wait_max=1) as rt_client:
+            print(rt_client.tmin_samp)
+            tmin_samp2 = rt_client.tmin_samp
+
+    kill_signal.put(False)  # stop the buffer
+    assert_true(tmin_samp2 > tmin_samp1)
+    assert_true(len(w) == 1)
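+
+
+# To exercise the buffer outside the test (illustrative invocation,
+# mirroring the cmd tuple in _run_buffer above):
+#
+#     $NEUROMAG2FT_ROOT/neuromag2ft --file test_raw.fif --speed 4.0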
diff --git a/mne/realtime/tests/test_mockclient.py b/mne/realtime/tests/test_mockclient.py
index 5def1ad..d5698a5 100644
--- a/mne/realtime/tests/test_mockclient.py
+++ b/mne/realtime/tests/test_mockclient.py
@@ -1,22 +1,22 @@
 import os.path as op
 
+from nose.tools import assert_true
+from numpy.testing import assert_array_equal
+
 import mne
 from mne import Epochs, read_events
 from mne.realtime import MockRtClient, RtEpochs
 
-from nose.tools import assert_true
-from numpy.testing import assert_array_equal
-
-base_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 event_name = op.join(base_dir, 'test-eve.fif')
 
-raw = mne.fiff.Raw(raw_fname, preload=True, verbose=False)
+raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
 
 events = read_events(event_name)
 
-picks = mne.fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                            stim=True, exclude=raw.info['bads'])
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=True, exclude=raw.info['bads'])
 
 
 def test_mockclient():
diff --git a/mne/realtime/tests/test_stim_client_server.py b/mne/realtime/tests/test_stim_client_server.py
index 51612cb..852af42 100644
--- a/mne/realtime/tests/test_stim_client_server.py
+++ b/mne/realtime/tests/test_stim_client_server.py
@@ -1,11 +1,13 @@
 import threading
 import time
-import Queue
+from nose.tools import assert_equal, assert_raises
 
 from mne.realtime import StimServer, StimClient
-from nose.tools import assert_equal, assert_raises
+from mne.externals.six.moves import queue
+from mne.utils import requires_good_network
 
 
+@requires_good_network
 def test_connection():
     """Test TCP/IP connection for StimServer <-> StimClient.
     """
@@ -15,29 +17,29 @@ def test_connection():
     # be a blocking method
 
     # use separate queues because timing matters
-    trig_queue1 = Queue.Queue()
-    trig_queue2 = Queue.Queue()
+    trig_queue1 = queue.Queue()
+    trig_queue2 = queue.Queue()
 
     # start a thread to emulate 1st client
     thread1 = threading.Thread(target=connect_client, args=(trig_queue1,))
     thread1.daemon = True
-    thread1.start()
 
     # start another thread to emulate 2nd client
     thread2 = threading.Thread(target=connect_client, args=(trig_queue2,))
     thread2.daemon = True
-    thread2.start()
 
     with StimServer('localhost', port=4218, n_clients=2) as stim_server:
-        stim_server.start()
+        thread1.start()
+        thread2.start()
+        stim_server.start(timeout=4.0)  # don't allow test to hang
 
         # Add the trigger to the queue for both clients
         stim_server.add_trigger(20)
 
         # the assert_equal must be in the test_connection() method
         # Hence communication between threads is necessary
-        trig1 = trig_queue1.get()
-        trig2 = trig_queue2.get()
+        trig1 = trig_queue1.get(timeout=4.0)
+        trig2 = trig_queue2.get(timeout=4.0)
         assert_equal(trig1, 20)
 
         # test if both clients receive the same trigger
@@ -45,19 +47,21 @@ def test_connection():
 
     # test timeout for stim_server
     with StimServer('localhost', port=4218) as stim_server:
-        assert_raises(StopIteration, stim_server.start, 1.0)
+        assert_raises(StopIteration, stim_server.start, 0.1)
 
 
+@requires_good_network
 def connect_client(trig_queue):
     """Helper method that instantiates the StimClient.
     """
     # just wait till the main thread reaches stim_server.start()
-    time.sleep(1.)
+    time.sleep(2.0)
 
     # instantiate StimClient
     stim_client = StimClient('localhost', port=4218)
 
     # wait a bit more for script to reach stim_server.add_trigger()
-    time.sleep(1.)
+    time.sleep(2.0)
 
     trig_queue.put(stim_client.get_trigger())
+    stim_client.close()
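+
+
+# Minimal single-client round trip, mirroring test_connection() above.
+# The client runs in a thread because stim_server.start() blocks until
+# clients connect; the port and timings are illustrative.
+def _example_roundtrip():
+    result = queue.Queue()
+
+    def _client():
+        time.sleep(2.0)  # let the main thread reach stim_server.start()
+        stim_client = StimClient('localhost', port=4218)
+        result.put(stim_client.get_trigger(timeout=4.0))
+        stim_client.close()
+
+    thread = threading.Thread(target=_client)
+    thread.daemon = True
+    with StimServer('localhost', port=4218, n_clients=1) as stim_server:
+        thread.start()
+        stim_server.start(timeout=4.0)
+        stim_server.add_trigger(20)
+        print(result.get(timeout=4.0))  # -> 20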
diff --git a/mne/report.py b/mne/report.py
new file mode 100644
index 0000000..980d08c
--- /dev/null
+++ b/mne/report.py
@@ -0,0 +1,1287 @@
+"""Generate html report from MNE database
+"""
+
+# Authors: Alex Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import fnmatch
+import re
+import numpy as np
+import time
+from glob import glob
+import warnings
+import base64
+
+from . import read_evokeds, read_events, Covariance
+from .io import Raw, read_info
+from .utils import _TempDir, logger, verbose, get_subjects_dir
+from .viz import plot_events, plot_trans, plot_cov
+from .viz._3d import _plot_mri_contours
+from .forward import read_forward_solution
+from .epochs import read_epochs
+from .minimum_norm import read_inverse_operator
+from .parallel import parallel_func, check_n_jobs
+
+from .externals.decorator import decorator
+from .externals.tempita import HTMLTemplate, Template
+from .externals.six import BytesIO
+from .externals.six import moves
+
+tempdir = _TempDir()
+temp_fname = op.join(tempdir, 'test')
+
+VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
+                    '-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
+                    '-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
+                    '-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
+                    '-ave.fif', '-ave.fif.gz', 'T1.mgz']
+SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
+                 'mri', 'forward', 'inverse']
+
+###############################################################################
+# PLOTTING FUNCTIONS
+
+
+@decorator
+def _check_report_mode(function, *args, **kwargs):
+    """Check whether to actually render or not.
+
+    Parameters
+    ----------
+    function : function
+        Function whose rendering is conditionally skipped.
+
+    Returns
+    -------
+    out : object
+        The result of calling ``function``, or '' when the
+        MNE_REPORT_TESTING environment variable is set.
+    """
+
+    if 'MNE_REPORT_TESTING' not in os.environ:
+        return function(*args, **kwargs)
+    else:
+        return ''
+
+
+@_check_report_mode
+def _fig_to_img(function=None, fig=None, close_fig=True, **kwargs):
+    """Wrapper function to plot figure and
+       for fig <-> binary image.
+    """
+    import matplotlib.pyplot as plt
+
+    if function is not None:
+        plt.close('all')
+        fig = function(**kwargs)
+
+    output = BytesIO()
+    fig.savefig(output, format='png', bbox_inches='tight')
+    if close_fig:
+        plt.close(fig)
+
+    return base64.b64encode(output.getvalue()).decode('ascii')
+
+
+@_check_report_mode
+def _fig_to_mrislice(function, orig_size, sl, **kwargs):
+    import matplotlib.pyplot as plt
+    from PIL import Image
+
+    plt.close('all')
+    fig = _plot_mri_contours(**kwargs)
+    temp_sl_fname = temp_fname + str(sl)
+
+    fig_size = fig.get_size_inches()
+    w, h = orig_size[0], orig_size[1]
+    w2, h2 = fig_size[0], fig_size[1]
+    fig.set_size_inches([(w2 / w) * w, (w2 / w) * h])
+    a = fig.gca()
+    a.set_xticks([])
+    a.set_yticks([])
+    plt.xlim(0, h)
+    plt.ylim(w, 0)
+    fig.savefig(temp_sl_fname, bbox_inches='tight',
+                pad_inches=0, format='png')
+    Image.open(temp_sl_fname).resize((w, h)).save(temp_sl_fname,
+                                                  format='png')
+    output = BytesIO()
+    Image.open(temp_sl_fname).save(output, format='png')
+    # str.encode('base64') is Python 2 only; use base64 for 2/3 compat
+    return base64.b64encode(output.getvalue()).decode('ascii')
+
+
+@_check_report_mode
+def _iterate_trans_views(function, **kwargs):
+    """Auxiliary function to iterate over views in trans fig.
+    """
+    from PIL import Image
+    import matplotlib.pyplot as plt
+    import mayavi
+    from mayavi import mlab  # make the mlab API (and mayavi.core) available
+
+    fig = function(**kwargs)
+
+    if isinstance(fig, mayavi.core.scene.Scene):
+
+        views = [(90, 90), (0, 90), (0, -90)]
+        fig2, axes = plt.subplots(1, len(views))
+        for view, ax in zip(views, axes):
+            mlab.view(view[0], view[1])
+            # XXX: save_bmp / save_png / ...
+            fig.scene.save_bmp(temp_fname)
+            im = Image.open(temp_fname)
+            ax.imshow(im)
+            ax.axis('off')
+
+        img = _fig_to_img(fig=fig2)
+        mlab.close(all=True)
+
+        return img
+    else:
+        return None
+
+###############################################################################
+# TOC FUNCTIONS
+
+
+def _is_bad_fname(fname):
+    """Auxiliary function for identifying bad file naming patterns
+       and highlighting them in red in the TOC.
+    """
+    if not fname.endswith(tuple(VALID_EXTENSIONS + ['bem', 'custom'])):
+        return 'red'
+    else:
+        return ''
+
+
+def _get_toc_property(fname):
+    """Auxiliary function to assign class names to TOC
+       list elements to allow toggling with buttons.
+    """
+    if fname.endswith(('-eve.fif', '-eve.fif.gz')):
+        div_klass = 'events'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
+        div_klass = 'evoked'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
+        div_klass = 'covariance'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('raw.fif', 'raw.fif.gz',
+                         'sss.fif', 'sss.fif.gz')):
+        div_klass = 'raw'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
+        div_klass = 'trans'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
+        div_klass = 'forward'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
+        div_klass = 'inverse'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
+        div_klass = 'epochs'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
+        div_klass = 'mri'
+        tooltip = 'MRI'
+        text = 'MRI'
+    elif fname.endswith('bem'):
+        div_klass = 'mri'
+        tooltip = 'MRI'
+        text = 'MRI'
+    else:
+        div_klass = fname.split('-#-')[1]
+        tooltip = fname.split('-#-')[0]
+        text = fname.split('-#-')[0]
+
+    return div_klass, tooltip, text
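+
+# Example mapping (illustrative names): 'sub01-eve.fif' yields
+# ('events', 'sub01-eve.fif', 'sub01-eve.fif'); a custom entry such as
+# 'My plot-#-mysection-#-custom' (see Report.add_section) hits the
+# final branch and yields ('mysection', 'My plot', 'My plot').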
+
+
+def _iterate_files(report, fnames, info, sfreq):
+    """Auxiliary function to parallel process in batch mode.
+    """
+    htmls, report_fnames, report_sectionlabels = [], [], []
+    for fname in fnames:
+        logger.info("Rendering : %s"
+                    % op.join('...' + report.data_path[-20:],
+                              fname))
+        try:
+            if fname.endswith(('raw.fif', 'raw.fif.gz',
+                               'sss.fif', 'sss.fif.gz')):
+                html = report._render_raw(fname)
+                report_fname = fname
+                report_sectionlabel = 'raw'
+            elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
+                html = report._render_forward(fname)
+                report_fname = fname
+                report_sectionlabel = 'forward'
+            elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
+                html = report._render_inverse(fname)
+                report_fname = fname
+                report_sectionlabel = 'inverse'
+            elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
+                html = report._render_evoked(fname)
+                report_fname = fname
+                report_sectionlabel = 'evoked'
+            elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
+                html = report._render_eve(fname, sfreq)
+                report_fname = fname
+                report_sectionlabel = 'events'
+            elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
+                html = report._render_epochs(fname)
+                report_fname = fname
+                report_sectionlabel = 'epochs'
+            elif (fname.endswith(('-cov.fif', '-cov.fif.gz'))
+                  and report.info_fname is not None):
+                html = report._render_cov(fname, info)
+                report_fname = fname
+                report_sectionlabel = 'covariance'
+            elif (fname.endswith(('-trans.fif', '-trans.fif.gz'))
+                  and report.info_fname is not None and report.subjects_dir
+                  is not None and report.subject is not None):
+                html = report._render_trans(fname, report.data_path, info,
+                                            report.subject,
+                                            report.subjects_dir)
+                report_fname = fname
+                report_sectionlabel = 'trans'
+            else:
+                html = None
+                report_fname = None
+                report_sectionlabel = None
+        except Exception as e:
+            logger.info(e)
+            html = None
+            report_fname = None
+            report_sectionlabel = None
+        htmls.append(html)
+        report_fnames.append(report_fname)
+        report_sectionlabels.append(report_sectionlabel)
+
+    return htmls, report_fnames, report_sectionlabels
+
+###############################################################################
+# IMAGE FUNCTIONS
+
+
+def _build_image(data, cmap='gray'):
+    """Build an image encoded in base64.
+    """
+
+    import matplotlib.pyplot as plt
+    from matplotlib.figure import Figure
+    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+
+    figsize = data.shape[::-1]
+    if figsize[0] == 1:
+        figsize = tuple(figsize[1:])
+        data = data[:, :, 0]
+    fig = Figure(figsize=figsize, dpi=1.0, frameon=False)
+    FigureCanvas(fig)
+    cmap = getattr(plt.cm, cmap, plt.cm.gray)
+    fig.figimage(data, cmap=cmap)
+    output = BytesIO()
+    fig.savefig(output, dpi=1.0, format='png')
+    # str.encode('base64') is Python 2 only; use base64 for 2/3 compat
+    return base64.b64encode(output.getvalue()).decode('ascii')
+
+
+def _iterate_sagittal_slices(array, limits=None):
+    """Iterate sagittal slice.
+    """
+    shape = array.shape[0]
+    for ind in range(shape):  # range, not xrange, for Python 3
+        if limits and ind not in limits:
+            continue
+        yield ind, array[ind, :, :]
+
+
+def _iterate_axial_slices(array, limits=None):
+    """Iterate axial slice.
+    """
+    shape = array.shape[1]
+    for ind in range(shape):
+        if limits and ind not in limits:
+            continue
+        yield ind, array[:, ind, :]
+
+
+def _iterate_coronal_slices(array, limits=None):
+    """Iterate coronal slice.
+    """
+    shape = array.shape[2]
+    for ind in range(shape):
+        if limits and ind not in limits:
+            continue
+        yield ind, np.flipud(np.rot90(array[:, :, ind]))
+
+
+def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap):
+    """Auxiliary function for parallel processing of mri slices.
+    """
+    img_klass = 'slideimg-%s' % name
+
+    caption = u'Slice %s %s' % (name, ind)
+    slice_id = '%s-%s-%s' % (name, global_id, ind)
+    div_klass = 'span12 %s' % slides_klass
+    img = _build_image(data, cmap=cmap)
+    first = (ind == 0)
+    html = _build_html_image(img, slice_id, div_klass,
+                             img_klass, caption,
+                             first)
+    return ind, html
+
+
+def _iterate_bem_slices(name, global_id, slides_klass, orig_size,
+                        mri_fname, surf_fnames, orientation, sl):
+    """Auxiliary function for parallel processing of bem slices.
+    """
+
+    img_klass = 'slideimg-%s' % name
+    logger.info('Rendering BEM contours : orientation = %s, '
+                'slice = %d' % (orientation, sl))
+    caption = u'Slice %s %s' % (name, sl)
+    slice_id = '%s-%s-%s' % (name, global_id, sl)
+    div_klass = 'span12 %s' % slides_klass
+
+    kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames,
+                  orientation=orientation, slices=[sl],
+                  show=False)
+    img = _fig_to_mrislice(function=_plot_mri_contours,
+                           orig_size=orig_size, sl=sl, **kwargs)
+    first = (sl == 0)
+    return _build_html_image(img, slice_id, div_klass,
+                             img_klass, caption,
+                             first)
+
+
+###############################################################################
+# HTML functions
+
+def _build_html_image(img, id, div_klass, img_klass, caption=None, show=True):
+    """Build a html image from a slice array.
+    """
+    html = []
+    add_style = u'' if show else u'style="display: none"'
+    html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
+    html.append(u'<div class="thumbnail">')
+    html.append(u'<img class="%s" alt="" style="width:90%%;" '
+                'src="data:image/png;base64,%s">'
+                % (img_klass, img))
+    html.append(u'</div>')
+    if caption:
+        html.append(u'<h4>%s</h4>' % caption)
+    html.append(u'</li>')
+    return u'\n'.join(html)
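+
+# For example (names illustrative), _build_html_image(img64, 'axial-1-0',
+# 'span12 axial-1', 'slideimg-axial', 'Slice axial 0', show=False)
+# emits a hidden <li> thumbnail that the slider script below toggles
+# by its id.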
+
+slider_template = HTMLTemplate(u"""
+<script>$("#{{slider_id}}").slider({
+                       range: "min",
+                       /*orientation: "vertical",*/
+                       min: {{minvalue}},
+                       max: {{maxvalue}},
+                       step: 2,
+                       value: {{startvalue}},
+                       create: function(event, ui) {
+                       $(".{{klass}}").hide();
+                       $("#{{klass}}-{{startvalue}}").show();},
+                       stop: function(event, ui) {
+                       var list_value = $("#{{slider_id}}").slider("value");
+                       $(".{{klass}}").hide();
+                       $("#{{klass}}-"+list_value).show();}
+                       })</script>
+""")
+
+
+def _build_html_slider(slices_range, slides_klass, slider_id):
+    """Build an html slider for a given slices range and a slices klass.
+    """
+    startvalue = (slices_range[0] + slices_range[-1]) // 2 + 1  # int on Py3
+    return slider_template.substitute(slider_id=slider_id,
+                                      klass=slides_klass,
+                                      minvalue=slices_range[0],
+                                      maxvalue=slices_range[-1],
+                                      startvalue=startvalue)
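+
+# For example, _build_html_slider(range(0, 50, 2), 'axial-1',
+# 'select-axial-1') binds a jQuery UI slider to '#select-axial-1' that
+# starts at slice 25 ((0 + 48) // 2 + 1) and shows the matching
+# '.axial-1' thumbnail.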
+
+
+###############################################################################
+# HTML scan renderer
+
+header_template = Template(u"""
+<!DOCTYPE html>
+<html lang="fr">
+<head>
+{{include}}
+<script type="text/javascript">
+
+        function togglebutton(class_name){
+            $(class_name).toggle();
+
+            if ($(class_name + '-btn').hasClass('active'))
+                $(class_name + '-btn').removeClass('active');
+            else
+                $(class_name + '-btn').addClass('active');
+        }
+
+        /* Scroll down on click to #id so that caption is not hidden
+        by navbar */
+        var shiftWindow = function() { scrollBy(0, -60) };
+        if (location.hash) shiftWindow();
+        window.addEventListener("hashchange", shiftWindow);
+
+        </script>
+<style type="text/css">
+
+body {
+    line-height: 1.5em;
+    font-family: arial, sans-serif;
+}
+
+h1 {
+    font-size: 30px;
+    text-align: center;
+}
+
+h4 {
+    text-align: center;
+}
+
+@link-color:       @brand-primary;
+@link-hover-color: darken(@link-color, 15%);
+
+a{
+    color: @link-color;
+    &:hover {
+        color: @link-hover-color;
+        text-decoration: underline;
+  }
+}
+
+li{
+    list-style-type:none;
+}
+
+#wrapper {
+    text-align: left;
+    margin: 5em auto;
+    width: 700px;
+}
+
+#container{
+    position: relative;
+}
+
+#content{
+    margin-left: 22%;
+    margin-top: 60px;
+    width: 75%;
+}
+
+#toc {
+  margin-top: navbar-height;
+  position: fixed;
+  width: 20%;
+  height: 90%;
+  overflow: auto;
+}
+
+#toc li {
+    overflow: hidden;
+    padding-bottom: 2px;
+    margin-left: 20px;
+}
+
+#toc span {
+    float: left;
+    padding: 0 2px 3px 0;
+}
+
+div.footer {
+    background-color: #C0C0C0;
+    color: #000000;
+    padding: 3px 8px 3px 0;
+    clear: both;
+    font-size: 0.8em;
+    text-align: right;
+}
+
+</style>
+</head>
+<body>
+
+<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
+    <div class="container-fluid">
+        <div class="navbar-header navbar-left">
+            <ul class="nav nav-pills"><li class="active">
+                <a class="navbar-btn" data-toggle="collapse"
+                data-target="#viewnavbar" href="javascript:void(0)">
+                ></a></li></ul>
+    </div>
+        <h3 class="navbar-text" style="color:white">{{title}}</h3>
+        <ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
+        id="viewnavbar">
+
+        {{for section in sections}}
+
+        <li class="active {{sectionvars[section]}}-btn">
+           <a href="javascript:void(0)"
+           onclick="togglebutton('.{{sectionvars[section]}}')">
+    {{section if section != 'mri' else 'MRI'}}
+           </a>
+        </li>
+
+        {{endfor}}
+
+        </ul>
+    </div>
+</nav>
+""")
+
+footer_template = HTMLTemplate(u"""
+</div>
+<div class="footer">
+        © Copyright 2012-2013, MNE Developers.
+      Created on {{date}}.
+      Powered by <a href="http://martinos.org/mne">MNE</a>.
+</div>
+</body>
+</html>
+""")
+
+image_template = Template(u"""
+
+{{default interactive = False}}
+{{default width = 50}}
+{{default id = False}}
+
+<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
+{{if not show}}style="display: none"{{endif}}>
+
+{{if caption}}
+<h4>{{caption}}</h4>
+{{endif}}
+<div class="thumbnail">
+{{if not interactive}}
+    <img alt="" style="width:{{width}}%;"
+    src="data:image/png;base64,{{img}}">
+{{else}}
+    <center>{{interactive}}</center>
+{{endif}}
+</div>
+</li>
+""")
+
+repr_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+<h4>{{caption}}</h4><hr>
+{{repr}}
+<hr></li>
+""")
+
+toc_list = Template(u"""
+<li class="{{div_klass}}">
+    {{if id}}
+        <a href="javascript:void(0)" onclick="window.location.hash={{id}};">
+    {{endif}}
+<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
+{{if id}}</a>{{endif}}
+</li>
+""")
+
+
+class Report(object):
+    """Object for rendering HTML
+
+    Parameters
+    ----------
+    info_fname : str
+        Name of the file containing the info dictionary.
+    subjects_dir : str | None
+        Path to the SUBJECTS_DIR. If None, the path is obtained by using
+        the environment variable SUBJECTS_DIR.
+    subject : str | None
+        Subject name.
+    title : str
+        Title of the report.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+
+    def __init__(self, info_fname=None, subjects_dir=None, subject=None,
+                 title=None, verbose=None):
+
+        self.info_fname = info_fname
+        self.subjects_dir = subjects_dir
+        self.subject = subject
+        self.title = title
+        self.verbose = verbose
+
+        self.initial_id = 0
+        self.html = []
+        self.fnames = []  # List of file names rendered
+        self.sections = []  # List of sections
+        self._sectionlabels = []  # Section labels
+        self._sectionvars = {}  # Section variable names in js
+
+        self._init_render(verbose=self.verbose)  # Initialize the renderer
+
+    def _get_id(self):
+        """Get id of plot.
+        """
+        self.initial_id += 1
+        return self.initial_id
+
+    def add_section(self, figs, captions, section='custom'):
+        """Append custom user-defined figures.
+
+        Parameters
+        ----------
+        figs : list of matplotlib.pyplot.Figure
+            A list of figures to be included in the report.
+        captions : list of str
+            A list of captions to the figures.
+        section : str
+            Name of the section. If section already exists, the figures
+            will be appended to the end of the section
+        """
+
+        if not isinstance(figs, (list, tuple)):
+            figs = [figs]
+        if not isinstance(captions, (list, tuple)):
+            captions = [captions]
+        if not len(figs) == len(captions):
+            raise ValueError('Captions and figures must have the same length.')
+        if section not in self.sections:
+            self.sections.append(section)
+            self._sectionvars[section] = _clean_varnames(section)
+
+        for fig, caption in zip(figs, captions):
+            sectionvar = self._sectionvars[section]
+            global_id = self._get_id()
+            div_klass = self._sectionvars[section]
+            img_klass = self._sectionvars[section]
+            img = _fig_to_img(fig=fig)
+            html = image_template.substitute(img=img, id=global_id,
+                                             div_klass=div_klass,
+                                             img_klass=img_klass,
+                                             caption=caption,
+                                             show=True)
+            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
+            self._sectionlabels.append(sectionvar)
+            self.html.append(html)
+
+    ###########################################################################
+    # HTML rendering
+    def _render_one_axe(self, slices_iter, name, global_id=None, cmap='gray',
+                        n_jobs=1):
+        """Render one axe of the array.
+        """
+        global_id = global_id or name
+        html = []
+        slices, slices_range = [], []
+        html.append(u'<div class="col-xs-6 col-md-4">')
+        slides_klass = '%s-%s' % (name, global_id)
+
+        parallel, p_fun, _ = parallel_func(_iterate_mri_slices, n_jobs)
+        r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
+                     for ind, data in slices_iter)
+        slices_range, slices = zip(*r)
+
+        # Render the slider
+        slider_id = 'select-%s-%s' % (name, global_id)
+        html.append(u'<div id="%s"></div>' % slider_id)
+        html.append(u'<ul class="thumbnails">')
+        # Render the slices
+        html.append(u'\n'.join(slices))
+        html.append(u'</ul>')
+        html.append(_build_html_slider(slices_range, slides_klass, slider_id))
+        html.append(u'</div>')
+        return '\n'.join(html)
+
+    ###########################################################################
+    # global rendering functions
+    @verbose
+    def _init_render(self, verbose=None):
+        """Initialize the renderer.
+        """
+
+        inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
+                      'bootstrap.min.js', 'jquery-ui.min.css',
+                      'bootstrap.min.css']
+
+        include = list()
+        for inc_fname in inc_fnames:
+            logger.info('Embedding : %s' % inc_fname)
+            f = open(op.join(op.dirname(__file__), 'html', inc_fname),
+                     'r')
+            if inc_fname.endswith('.js'):
+                include.append(u'<script type="text/javascript">'
+                               + f.read() + u'</script>')
+            elif inc_fname.endswith('.css'):
+                include.append(u'<style type="text/css">'
+                               + f.read() + u'</style>')
+            f.close()
+
+        self.include = ''.join(include)
+
+    @verbose
+    def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, verbose=None):
+        """Renders all the files in the folder.
+
+        Parameters
+        ----------
+        data_path : str
+            Path to the folder containing data whose HTML report will be
+            created.
+        pattern : str
+            Filename pattern to include in the report, e.g. '*-ave.fif'
+            will include all evoked files.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        """
+        n_jobs = check_n_jobs(n_jobs)
+        self.data_path = data_path
+
+        if self.title is None:
+            self.title = 'MNE Report for ...%s' % self.data_path[-20:]
+
+        fnames = _recursive_search(self.data_path, pattern)
+
+        if self.info_fname is not None:
+            info = read_info(self.info_fname)
+            sfreq = info['sfreq']
+        else:
+            warnings.warn('`info_fname` not provided. Cannot render '
+                          '-cov.fif(.gz) and -trans.fif(.gz) files.')
+            info, sfreq = None, None
+
+        # render plots in parallel
+        parallel, p_fun, _ = parallel_func(_iterate_files, n_jobs)
+        r = parallel(p_fun(self, chunk, info, sfreq) for chunk in
+                     np.array_split(fnames, n_jobs))
+        htmls, report_fnames, report_sectionlabels = zip(*r)
+
+        # combine results from n_jobs discarding plots not rendered
+        self.html = [html for html in sum(htmls, []) if html is not None]
+        self.fnames = [fname for fname in sum(report_fnames, []) if
+                       fname is not None]
+        self._sectionlabels = [slabel for slabel in
+                               sum(report_sectionlabels, [])
+                               if slabel is not None]
+
+        # find unique section labels
+        self.sections = sorted(set(self._sectionlabels))
+        self._sectionvars = dict(zip(self.sections, self.sections))
+
+        # render mri
+        if self.subjects_dir is not None and self.subject is not None:
+            self.html.append(self._render_bem(subject=self.subject,
+                                              subjects_dir=self.subjects_dir,
+                                              n_jobs=n_jobs))
+            self.fnames.append('bem')
+            self._sectionlabels.append('mri')
+        else:
+            warnings.warn('`subjects_dir` and `subject` not provided.'
+                          ' Cannot render MRI and -trans.fif(.gz) files.')
+
+    def save(self, fname=None, open_browser=True, overwrite=False):
+        """Save html report and open it in browser.
+
+        Parameters
+        ----------
+        fname : str
+            File name of the report.
+        open_browser : bool
+            Open html browser after saving if True.
+        overwrite : bool
+            If True, overwrite report if it already exists.
+        """
+
+        if fname is None:
+            if not hasattr(self, 'data_path'):
+                self.data_path = op.dirname(__file__)
+                warnings.warn('`data_path` not provided. Using %s instead'
+                              % self.data_path)
+            fname = op.realpath(op.join(self.data_path, 'report.html'))
+        else:
+            fname = op.realpath(fname)
+
+        self._render_toc(verbose=self.verbose)
+
+        html = footer_template.substitute(date=time.strftime("%B %d, %Y"))
+        self.html.append(html)
+
+        if not overwrite and op.isfile(fname):
+            msg = ('Report already exists at location %s. '
+                   'Overwrite it (y/[n])? '
+                   % fname)
+            answer = moves.input(msg)
+            if answer.lower() == 'y':
+                overwrite = True
+
+        if overwrite or not op.isfile(fname):
+            logger.info('Saving report to location %s' % fname)
+            fobj = open(fname, 'w')
+            fobj.write(_fix_global_ids(''.join(self.html)))
+            fobj.close()
+
+            # remove header, TOC and footer to allow more saves
+            self.html.pop(0)
+            self.html.pop(0)
+            self.html.pop()
+
+        if open_browser:
+            import webbrowser
+            webbrowser.open_new_tab('file://' + fname)
+
+        return fname
+
+    @verbose
+    def _render_toc(self, verbose=None):
+        """Render the Table of Contents.
+        """
+
+        logger.info('Rendering : Table of Contents')
+
+        html_toc = u'<div id="container">'
+        html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'
+
+        global_id = 1
+
+        # Reorder self.sections to reflect natural ordering
+        sections = list(set(self.sections) & set(SECTION_ORDER))
+        custom = [section for section in self.sections if section
+                  not in SECTION_ORDER]
+        order = [sections.index(section) for section in SECTION_ORDER if
+                 section in sections]
+        self.sections = np.array(sections)[order].tolist() + custom
+
+        # Sort by section
+        html, fnames, sectionlabels = [], [], []
+        for section in self.sections:
+            logger.info('%s' % section)
+            for sectionlabel, this_html, fname in (zip(self._sectionlabels,
+                                                   self.html, self.fnames)):
+                if self._sectionvars[section] == sectionlabel:
+                    html.append(this_html)
+                    fnames.append(fname)
+                    sectionlabels.append(sectionlabel)
+                    logger.info('\t... %s' % fname[-20:])
+                    color = _is_bad_fname(fname)
+                    div_klass, tooltip, text = _get_toc_property(fname)
+
+                    # loop through conditions for evoked
+                    if fname.endswith(('-ave.fif', '-ave.fif.gz')):
+                        # XXX: remove redundant read_evokeds
+                        evokeds = read_evokeds(fname, verbose=False)
+
+                        html_toc += toc_list.substitute(
+                            div_klass=div_klass, id=None, tooltip=fname,
+                            color='#428bca', text=os.path.basename(fname))
+
+                        html_toc += u'<li class="evoked"><ul>'
+                        for ev in evokeds:
+                            html_toc += toc_list.substitute(
+                                div_klass=div_klass, id=global_id,
+                                tooltip=fname, color=color, text=ev.comment)
+                            global_id += 1
+                        html_toc += u'</ul></li>'
+
+                    elif fname.endswith(tuple(VALID_EXTENSIONS +
+                                        ['bem', 'custom'])):
+                        html_toc += toc_list.substitute(div_klass=div_klass,
+                                                        id=global_id,
+                                                        tooltip=tooltip,
+                                                        color=color,
+                                                        text=text)
+                        global_id += 1
+
+        html_toc += u'\n</ul></div>'
+        html_toc += u'<div id="content">'
+
+        # The sorted html (according to section)
+        self.html = html
+        self.fnames = fnames
+        self._sectionlabels = sectionlabels
+
+        html_header = header_template.substitute(title=self.title,
+                                                 include=self.include,
+                                                 sections=self.sections,
+                                                 sectionvars=self._sectionvars)
+        self.html.insert(0, html_header)  # Insert header at position 0
+        self.html.insert(1, html_toc)  # insert TOC
+
+    def _render_array(self, array, global_id=None, cmap='gray',
+                      limits=None, n_jobs=1):
+        """Render mri without bem contours.
+        """
+        html = []
+        html.append(u'<div class="row">')
+        # Axial
+        limits = limits or {}
+        axial_limit = limits.get('axial')
+        axial_slices_gen = _iterate_axial_slices(array, axial_limit)
+        html.append(
+            self._render_one_axe(axial_slices_gen, 'axial', global_id, cmap,
+                                 n_jobs=n_jobs))
+        # Sagittal
+        sagittal_limit = limits.get('sagittal')
+        sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
+        html.append(self._render_one_axe(sagittal_slices_gen, 'sagittal',
+                    global_id, cmap, n_jobs=n_jobs))
+        html.append(u'</div>')
+        html.append(u'<div class="row">')
+        # Coronal
+        coronal_limit = limits.get('coronal')
+        coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
+        html.append(
+            self._render_one_axe(coronal_slices_gen, 'coronal',
+                                 global_id, cmap, n_jobs=n_jobs))
+        # Close section
+        html.append(u'</div>')
+        return '\n'.join(html)
+
+    def _render_one_bem_axe(self, mri_fname, surf_fnames, global_id,
+                            shape, orientation='coronal', n_jobs=1):
+        """Render one axe of bem contours.
+        """
+
+        orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
+        orientation_axis = orientation_name2axis[orientation]
+        n_slices = shape[orientation_axis]
+        orig_size = np.roll(shape, orientation_axis)[[1, 2]]
+
+        name = orientation
+        html, img = [], []
+        slices, slices_range = [], []
+        html.append(u'<div class="col-xs-6 col-md-4">')
+        slides_klass = '%s-%s' % (name, global_id)
+
+        slices_range = range(0, n_slices, 2)
+
+        parallel, p_fun, _ = parallel_func(_iterate_bem_slices, n_jobs)
+        slices = parallel(p_fun(name, global_id, slides_klass, orig_size,
+                          mri_fname, surf_fnames, orientation, sl)
+                          for sl in slices_range)
+
+        # Render the slider
+        slider_id = 'select-%s-%s' % (name, global_id)
+        html.append(u'<div id="%s"></div>' % slider_id)
+        html.append(u'<ul class="thumbnails">')
+        # Render the slices
+        html.append(u'\n'.join(slices))
+        html.append(u'</ul>')
+        html.append(_build_html_slider(slices_range, slides_klass, slider_id))
+        html.append(u'</div>')
+
+        return '\n'.join(html)
+
+    def _render_image(self, image, cmap='gray', n_jobs=1):
+        """Render one slice of mri without bem.
+        """
+        import nibabel as nib
+
+        global_id = self._get_id()
+
+        if 'mri' not in self.sections:
+            self.sections.append('mri')
+            self._sectionvars['mri'] = 'mri'
+
+        nim = nib.load(image)
+        data = nim.get_data()
+        shape = data.shape
+        limits = {'sagittal': range(0, shape[0], 2),
+                  'axial': range(0, shape[1], 2),
+                  'coronal': range(0, shape[2], 2)}
+        name = op.basename(image)
+        html = u'<li class="mri" id="%d">\n' % global_id
+        html += u'<h2>%s</h2>\n' % name
+        html += self._render_array(data, global_id=global_id,
+                                   cmap=cmap, limits=limits,
+                                   n_jobs=n_jobs)
+        html += u'</li>\n'
+        return html
+
+    def _render_raw(self, raw_fname):
+        """Render raw.
+        """
+        global_id = self._get_id()
+        div_klass = 'raw'
+        caption = u'Raw : %s' % raw_fname
+
+        raw = Raw(raw_fname)
+
+        repr_raw = re.sub('>', '', re.sub('<', '', repr(raw)))
+        repr_info = re.sub('\\n', '\\n<br/>',
+                           re.sub('>', '',
+                                  re.sub('<', '',
+                                         repr(raw.info))))
+
+        repr_html = '%s<br/>%s' % (repr_raw, repr_info)
+
+        html = repr_template.substitute(div_klass=div_klass,
+                                        id=global_id,
+                                        caption=caption,
+                                        repr=repr_html)
+        return html
+
+    def _render_forward(self, fwd_fname):
+        """Render forward.
+        """
+        div_klass = 'forward'
+        caption = u'Forward: %s' % fwd_fname
+        fwd = read_forward_solution(fwd_fname)
+        repr_fwd = re.sub('>', '', re.sub('<', '', repr(fwd)))
+        global_id = self._get_id()
+        html = repr_template.substitute(div_klass=div_klass,
+                                        id=global_id,
+                                        caption=caption,
+                                        repr=repr_fwd)
+        return html
+
+    def _render_inverse(self, inv_fname):
+        """Render inverse.
+        """
+        div_klass = 'inverse'
+        caption = u'Inverse: %s' % inv_fname
+        inv = read_inverse_operator(inv_fname)
+        repr_inv = re.sub('>', '', re.sub('<', '', repr(inv)))
+        global_id = self._get_id()
+        html = repr_template.substitute(div_klass=div_klass,
+                                        id=global_id,
+                                        caption=caption,
+                                        repr=repr_inv)
+        return html
+
+    def _render_evoked(self, evoked_fname, figsize=None):
+        """Render evoked.
+        """
+        evokeds = read_evokeds(evoked_fname, verbose=False)
+
+        html = []
+        for ev in evokeds:
+            global_id = self._get_id()
+
+            kwargs = dict(show=False)
+            img = _fig_to_img(function=ev.plot, **kwargs)
+
+            caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
+            div_klass = 'evoked'
+            img_klass = 'evoked'
+            show = True
+            html.append(image_template.substitute(img=img, id=global_id,
+                                                  div_klass=div_klass,
+                                                  img_klass=img_klass,
+                                                  caption=caption,
+                                                  show=show))
+
+            for ch_type in ['eeg', 'grad', 'mag']:
+                kwargs = dict(ch_type=ch_type, show=False)
+                img = _fig_to_img(function=ev.plot_topomap, **kwargs)
+                caption = u'Topomap (ch_type = %s)' % ch_type
+                html.append(image_template.substitute(img=img,
+                                                      div_klass=div_klass,
+                                                      img_klass=img_klass,
+                                                      caption=caption,
+                                                      show=show))
+
+        return '\n'.join(html)
+
+    def _render_eve(self, eve_fname, sfreq=None):
+        """Render events.
+        """
+        global_id = self._get_id()
+        events = read_events(eve_fname)
+
+        kwargs = dict(events=events, sfreq=sfreq, show=False)
+        img = _fig_to_img(function=plot_events, **kwargs)
+
+        caption = 'Events : ' + eve_fname
+        div_klass = 'events'
+        img_klass = 'events'
+        show = True
+
+        html = image_template.substitute(img=img, id=global_id,
+                                         div_klass=div_klass,
+                                         img_klass=img_klass,
+                                         caption=caption,
+                                         show=show)
+        return html
+
+    def _render_epochs(self, epo_fname):
+        """Render epochs.
+        """
+        global_id = self._get_id()
+
+        epochs = read_epochs(epo_fname)
+        kwargs = dict(subject=self.subject, show=False, return_fig=True)
+        img = _fig_to_img(function=epochs.plot_drop_log, **kwargs)
+        caption = 'Epochs : ' + epo_fname
+        div_klass = 'epochs'
+        img_klass = 'epochs'
+        show = True
+        html = image_template.substitute(img=img, id=global_id,
+                                         div_klass=div_klass,
+                                         img_klass=img_klass,
+                                         caption=caption,
+                                         show=show)
+        return html
+
+    def _render_cov(self, cov_fname, info):
+        """Render cov.
+        """
+        global_id = self._get_id()
+        cov = Covariance(cov_fname)
+        fig, _ = plot_cov(cov, info, show=False)
+
+        img = _fig_to_img(fig=fig)
+        caption = 'Covariance : ' + cov_fname
+        div_klass = 'covariance'
+        img_klass = 'covariance'
+        show = True
+        html = image_template.substitute(img=img, id=global_id,
+                                         div_klass=div_klass,
+                                         img_klass=img_klass,
+                                         caption=caption,
+                                         show=show)
+        return html
+
+    def _render_trans(self, trans_fname, path, info, subject,
+                      subjects_dir):
+        """Render trans.
+        """
+        kwargs = dict(info=info, trans_fname=trans_fname, subject=subject,
+                      subjects_dir=subjects_dir)
+        img = _iterate_trans_views(function=plot_trans, **kwargs)
+
+        if img is not None:
+
+            global_id = self._get_id()
+
+            caption = 'Trans : ' + trans_fname
+            div_klass = 'trans'
+            img_klass = 'trans'
+            show = True
+            html = image_template.substitute(img=img, id=global_id,
+                                             div_klass=div_klass,
+                                             img_klass=img_klass,
+                                             caption=caption,
+                                             width=75,
+                                             show=show)
+            return html
+
+    def _render_bem(self, subject, subjects_dir, n_jobs=1):
+        """Render mri+bem.
+        """
+        import nibabel as nib
+
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+        # Get the MRI filename
+        mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+        if not op.isfile(mri_fname):
+            warnings.warn('MRI file "%s" does not exist' % mri_fname)
+
+        # Get the BEM surface filenames
+        bem_path = op.join(subjects_dir, subject, 'bem')
+
+        if not op.isdir(bem_path):
+            warnings.warn('Subject bem directory "%s" does not exist' %
+                          bem_path)
+            return self._render_image(mri_fname, cmap='gray', n_jobs=n_jobs)
+
+        surf_fnames = []
+        for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
+            surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
+            if len(surf_fname) > 0:
+                surf_fname = surf_fname[0]
+            else:
+                warnings.warn('No surface found for %s.' % surf_name)
+                return self._render_image(mri_fname, cmap='gray')
+            surf_fnames.append(surf_fname)
+
+        # XXX : find a better way to get max range of slices
+        nim = nib.load(mri_fname)
+        data = nim.get_data()
+        shape = data.shape
+        del data  # free up memory
+
+        html = []
+
+        global_id = self._get_id()
+
+        if 'mri' not in self.sections:
+            self.sections.append('mri')
+            self._sectionvars['mri'] = 'mri'
+
+        name, caption = 'BEM', 'BEM contours'
+
+        html += u'<li class="mri" id="%d">\n' % global_id
+        html += u'<h2>%s</h2>\n' % name
+        html += u'<div class="row">'
+        html += self._render_one_bem_axe(mri_fname, surf_fnames, global_id,
+                                         shape, orientation='axial',
+                                         n_jobs=n_jobs)
+        html += self._render_one_bem_axe(mri_fname, surf_fnames, global_id,
+                                         shape, orientation='sagittal',
+                                         n_jobs=n_jobs)
+        html += u'</div><div class="row">'
+        html += self._render_one_bem_axe(mri_fname, surf_fnames, global_id,
+                                         shape, orientation='coronal',
+                                         n_jobs=n_jobs)
+        html += u'</div>'
+        html += u'</li>\n'
+        return ''.join(html)
+
+
+def _clean_varnames(s):
+
+    # Remove invalid characters
+    s = re.sub('[^0-9a-zA-Z_]', '', s)
+
+    # Remove leading characters until we find a letter or underscore
+    s = re.sub('^[^a-zA-Z_]+', '', s)
+
+    return s
+
+
+def _recursive_search(path, pattern):
+    """Auxiliary function for recursive_search of the directory.
+    """
+    filtered_files = list()
+    for dirpath, dirnames, files in os.walk(path):
+        for f in fnmatch.filter(files, pattern):
+            # only the following file types are supported
+            # (anything else is silently skipped)
+            if f.endswith(tuple(VALID_EXTENSIONS)):
+                filtered_files.append(op.realpath(op.join(dirpath, f)))
+
+    return filtered_files
+
+
+def _fix_global_ids(html):
+    """Auxiliary function for fixing the global_ids after reordering in
+       _render_toc().
+    """
+    html = re.sub(r'id="\d+"', 'id="###"', html)
+    global_id = 1
+    while len(re.findall('id="###"', html)) > 0:
+        html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
+        global_id += 1
+    return html
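
For reference, the two-pass renumbering that _fix_global_ids performs can be
exercised in isolation; this minimal sketch (standard library only) shows the
intended behavior:

    import re

    def fix_global_ids(html):
        # pass 1: mask every numeric id with a placeholder
        html = re.sub(r'id="\d+"', 'id="###"', html)
        # pass 2: renumber placeholders one at a time, left to right,
        # so the ids end up sequential in document order
        global_id = 1
        while 'id="###"' in html:
            html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
            global_id += 1
        return html

    print(fix_global_ids('<li id="7">a</li><li id="3">b</li>'))
    # -> <li id="1">a</li><li id="2">b</li>
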
diff --git a/mne/selection.py b/mne/selection.py
index 5f2d9fe..2b9ab12 100644
--- a/mne/selection.py
+++ b/mne/selection.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
@@ -7,6 +7,7 @@
 from os import path
 
 from .utils import logger, verbose
+from .externals import six
 
 
 @verbose
@@ -90,7 +91,7 @@ def read_selection(name, fname=None, verbose=None):
     fid.close()
 
     # make sure we found at least one match for each name
-    for n, found in name_found.iteritems():
+    for n, found in six.iteritems(name_found):
         if not found:
             raise ValueError('No match for selection name "%s" found' % n)
 
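The iteritems() change above is the standard six compatibility idiom:
dict.iteritems() only exists on Python 2, while six.iteritems() dispatches to
iteritems() or items() as appropriate. In isolation:

    from mne.externals import six  # the copy bundled with this tree

    name_found = {'Left-temporal': True, 'Right-temporal': False}
    for n, found in six.iteritems(name_found):
        if not found:
            raise ValueError('No match for selection name "%s" found' % n)
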
diff --git a/mne/simulation/evoked.py b/mne/simulation/evoked.py
index f7b9895..79a2f3f 100644
--- a/mne/simulation/evoked.py
+++ b/mne/simulation/evoked.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
@@ -8,7 +8,7 @@ import copy
 import numpy as np
 from scipy import signal
 
-from ..fiff.pick import pick_channels_cov
+from ..io.pick import pick_channels_cov
 from ..utils import check_random_state
 from ..forward import apply_forward
 
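The pick import change above is part of the 0.8-wide rename of the mne.fiff
package to mne.io; any downstream code needs the same one-line adjustments,
for example:

    # mne <= 0.7
    from mne.fiff import Raw
    from mne.fiff.pick import pick_channels_cov

    # mne 0.8
    from mne.io import Raw
    from mne.io.pick import pick_channels_cov
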
diff --git a/mne/simulation/source.py b/mne/simulation/source.py
index 1cc4792..9e3cf8d 100644
--- a/mne/simulation/source.py
+++ b/mne/simulation/source.py
@@ -1,12 +1,14 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
 #
 # License: BSD (3-clause)
 
 import numpy as np
+
 from ..source_estimate import SourceEstimate
 from ..utils import check_random_state
+from ..externals.six.moves import zip
 
 
 def select_source_in_label(src, label, random_state=None):
@@ -89,7 +91,7 @@ def generate_sparse_stc(src, labels, stc_data, tmin, tstep, random_state=None):
         else:
             raise ValueError('No vertno found.')
 
-    vertno = map(np.array, vertno)
+    vertno = [np.array(v) for v in vertno]
 
     # the data is in the order left, right
     data = list()
@@ -176,7 +178,7 @@ def generate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
             vertno[idx] = np.concatenate(vertno[idx])
         elif len(vertno[idx]) == 1:
             vertno[idx] = vertno[idx][0]
-    vertno = map(np.array, vertno)
+    vertno = [np.array(v) for v in vertno]
 
     # the data is in the order left, right
     data = list()
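
The map() to list-comprehension changes above matter on Python 3, where map()
returns a lazy iterator instead of a list; the surrounding code indexes and
takes len() of vertno, so the result has to be materialized. A quick
illustration:

    import numpy as np

    vertno = [[0, 3, 5], [2, 4]]
    out = map(np.array, vertno)          # list on Py2, iterator on Py3
    out = [np.array(v) for v in vertno]  # a real list on both
    assert len(out) == 2 and out[0].shape == (3,)
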
diff --git a/mne/simulation/tests/test_evoked.py b/mne/simulation/tests/test_evoked.py
index 3950757..2f2a348 100644
--- a/mne/simulation/tests/test_evoked.py
+++ b/mne/simulation/tests/test_evoked.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -7,23 +7,26 @@ import os.path as op
 import numpy as np
 from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true, assert_raises
+import warnings
 
 from mne.datasets import sample
 from mne import read_label, read_forward_solution
 from mne.time_frequency import morlet
 from mne.simulation import generate_sparse_stc, generate_evoked
-import mne
-from mne.fiff.pick import pick_types_evoked, pick_types_forward
+from mne import read_cov
+from mne.io import Raw
+from mne import pick_types_evoked, pick_types_forward, read_evokeds
 
+warnings.simplefilter('always')
 
 data_path = sample.data_path(download=False)
 fwd_fname = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis-meg-eeg-oct-6-fwd.fif')
-raw_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                     'data', 'test_raw.fif')
-ave_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                     'data', 'test-ave.fif')
-cov_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                     'data', 'test-cov.fif')
 
 
@@ -31,15 +34,15 @@ cov_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
 def test_simulate_evoked():
     """ Test simulation of evoked data """
 
-    raw = mne.fiff.Raw(raw_fname)
+    raw = Raw(raw_fname)
     fwd = read_forward_solution(fwd_fname, force_fixed=True)
     fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
-    cov = mne.read_cov(cov_fname)
+    cov = read_cov(cov_fname)
     label_names = ['Aud-lh', 'Aud-rh']
     labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
                          '%s.label' % label)) for label in label_names]
 
-    evoked_template = mne.fiff.read_evoked(ave_fname, setno=0, baseline=None)
+    evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
     evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
                                         exclude=raw.info['bads'])
 
@@ -64,8 +67,10 @@ def test_simulate_evoked():
 
     # Generate noisy evoked data
     iir_filter = [1, -0.9]
-    evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
-                             tmin=0.0, tmax=0.2, iir_filter=iir_filter)
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')  # positive semidefinite warning
+        evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
+                                 tmin=0.0, tmax=0.2, iir_filter=iir_filter)
     assert_array_almost_equal(evoked.times, stc.times)
     assert_true(len(evoked.data) == len(fwd['sol']['data']))
 
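Wrapping the generate_evoked() call in warnings.catch_warnings(record=True),
as the test above now does, is the usual way to exercise a code path that is
expected to warn (here, about a covariance that is not positive semidefinite)
without the warning leaking into the test output. The idiom in isolation:

    import warnings

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')  # make sure nothing is suppressed
        warnings.warn('stand-in for the covariance warning')
    assert len(w) == 1  # recorded, not printed
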
diff --git a/mne/source_estimate.py b/mne/source_estimate.py
index 2500c93..76936cd 100644
--- a/mne/source_estimate.py
+++ b/mne/source_estimate.py
@@ -1,9 +1,11 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Mads Jensen <mje.mads at gmail.com>
 #
 # License: BSD (3-clause)
 
+from .externals.six import string_types
 import os
 import copy
 from math import ceil
@@ -12,7 +14,9 @@ from scipy import linalg, sparse
 from scipy.sparse import csr_matrix, coo_matrix
 import warnings
 
+from ._hdf5 import read_hdf5, write_hdf5
 from .filter import resample
+from .evoked import _get_peak
 from .parallel import parallel_func
 from .surface import (read_surface, _get_ico_surface, read_morph_map,
                       _compute_nearest)
@@ -21,6 +25,7 @@ from .utils import (get_subjects_dir, _check_subject,
                     logger, verbose)
 from .viz import plot_source_estimates
 from .fixes import in1d
+from .externals.six.moves import zip
 
 
 def _read_stc(filename):
@@ -131,28 +136,25 @@ def _read_w(filename):
            data           The data matrix (nvert long)
     """
 
-    fid = open(filename, 'rb')
-
-    # skip first 2 bytes
-    fid.read(2)
+    with open(filename, 'rb', buffering=0) as fid:  # buffering=0 for np bug
+        # skip first 2 bytes
+        fid.read(2)
 
-    # read number of vertices/sources (3 byte integer)
-    vertices_n = int(_read_3(fid))
+        # read number of vertices/sources (3 byte integer)
+        vertices_n = int(_read_3(fid))
 
-    vertices = np.zeros((vertices_n), dtype=np.int32)
-    data = np.zeros((vertices_n), dtype=np.float32)
+        vertices = np.zeros((vertices_n), dtype=np.int32)
+        data = np.zeros((vertices_n), dtype=np.float32)
 
-    # read the vertices and data
-    for i in range(vertices_n):
-        vertices[i] = _read_3(fid)
-        data[i] = np.fromfile(fid, dtype='>f4', count=1)
+        # read the vertices and data
+        for i in range(vertices_n):
+            vertices[i] = _read_3(fid)
+            data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
 
-    w = dict()
-    w['vertices'] = vertices
-    w['data'] = data
+        w = dict()
+        w['vertices'] = vertices
+        w['data'] = data
 
-    # close the file
-    fid.close()
     return w
 
 
@@ -264,14 +266,24 @@ def read_source_estimate(fname, subject=None):
                        "hemisphere tag ('...-lh.w' or '...-rh.w')"
                        % fname)
                 raise IOError(err)
+        elif fname.endswith('-stc.h5'):
+            ftype = 'h5'
+            fname = fname[:-7]
+        else:
+            raise RuntimeError('Unknown extension for file %s' % fname_arg)
 
     if ftype is not 'volume':
-        stc_exist = map(os.path.exists, (fname + '-rh.stc', fname + '-lh.stc'))
-        w_exist = map(os.path.exists, (fname + '-rh.w', fname + '-lh.w'))
+        stc_exist = [os.path.exists(f)
+                     for f in [fname + '-rh.stc', fname + '-lh.stc']]
+        w_exist = [os.path.exists(f)
+                   for f in [fname + '-rh.w', fname + '-lh.w']]
+        h5_exist = os.path.exists(fname + '-stc.h5')
         if all(stc_exist) and (ftype is not 'w'):
             ftype = 'surface'
         elif all(w_exist):
             ftype = 'w'
+        elif h5_exist:
+            ftype = 'h5'
         elif any(stc_exist) or any(w_exist):
             raise IOError("Hemisphere missing for %r" % fname_arg)
         else:
@@ -306,6 +318,8 @@ def read_source_estimate(fname, subject=None):
         # w files only have a single time point
         kwargs['tmin'] = 0.0
         kwargs['tstep'] = 1.0
+    elif ftype == 'h5':
+        kwargs = read_hdf5(fname + '-stc.h5')
 
     if ftype != 'volume':
         # Make sure the vertices are ordered
@@ -317,7 +331,12 @@ def read_source_estimate(fname, subject=None):
             kwargs['vertices'] = vertices
             kwargs['data'] = data
 
-    kwargs['subject'] = subject
+    if 'subject' not in kwargs:
+        kwargs['subject'] = subject
+    if subject is not None and subject != kwargs['subject']:
+        raise RuntimeError('provided subject name "%s" does not match '
+                           'subject name from the file "%s"'
+                           % (subject, kwargs['subject']))
 
     if ftype == 'volume':
         stc = VolSourceEstimate(**kwargs)
@@ -356,7 +375,7 @@ def _verify_source_estimate_compat(a, b):
                          'same vertices. Consider using stc.expand().')
     if a.subject != b.subject:
         raise ValueError('source estimates do not have the same subject '
-                         'names, "%s" and "%s"' % (a.name, b.name))
+                         'names, %r and %r' % (a.subject, b.subject))
 
 
 class _BaseSourceEstimate(object):
@@ -578,11 +597,17 @@ class _BaseSourceEstimate(object):
             self._data -= a
         return self
 
+    def __truediv__(self, a):
+        return self.__div__(a)
+
     def __div__(self, a):
         stc = copy.deepcopy(self)
         stc /= a
         return stc
 
+    def __itruediv__(self, a):
+        return self.__idiv__(a)
+
     def __idiv__(self, a):
         self._remove_kernel_sens_data_()
         if isinstance(a, _BaseSourceEstimate):
@@ -681,7 +706,7 @@ class _BaseSourceEstimate(object):
         nv, _ = self.shape
         nt = len(times) - 1
         data = np.empty((nv, nt), dtype=self.data.dtype)
-        for i in xrange(nt):
+        for i in range(nt):
             idx = (self.times >= times[i]) & (self.times < times[i + 1])
             data[:, i] = func(self.data[:, idx], axis=1)
 
@@ -856,7 +881,7 @@ class _BaseSourceEstimate(object):
             if copy:
                 stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
                                        self.tstep, self.subject)
-                                       for a in range(data_t.shape[-1])]
+                        for a in range(data_t.shape[-1])]
             else:
                 raise ValueError('copy must be True if transformed data has '
                                  'more than 2 dimensions')
@@ -928,7 +953,7 @@ class _BaseSourceEstimate(object):
         if index is not None:
             if 'time' in index:
                 df['time'] = df['time'].astype(np.int64)
-            with warnings.catch_warnings(True):
+            with warnings.catch_warnings(record=True):
                 df.set_index(index, inplace=True)
 
         return df
@@ -993,14 +1018,15 @@ class SourceEstimate(_BaseSourceEstimate):
             and "-rh.w") to the stem provided, for the left and the right
             hemisphere, respectively.
         ftype : string
-            File format to use. Allowed values are "stc" (default) and "w".
-            The "w" format only supports a single time point.
+            File format to use. Allowed values are "stc" (default), "w",
+            and "h5". The "w" format only supports a single time point.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to self.verbose.
         """
-        if ftype not in ['stc', 'w']:
-            raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype)
+        if ftype not in ('stc', 'w', 'h5'):
+            raise ValueError('ftype must be "stc", "w", or "h5", not "%s"'
+                             % ftype)
 
         lh_data = self.data[:len(self.lh_vertno)]
         rh_data = self.data[-len(self.rh_vertno):]
@@ -1020,7 +1046,11 @@ class SourceEstimate(_BaseSourceEstimate):
                      data=lh_data[:, 0])
             _write_w(fname + '-rh.w', vertices=self.rh_vertno,
                      data=rh_data[:, 0])
-
+        elif ftype == 'h5':
+            write_hdf5(fname + '-stc.h5',
+                       dict(vertices=self.vertno, data=self.data,
+                            tmin=self.tmin, tstep=self.tstep,
+                            subject=self.subject))
         logger.info('[done]')
 
     def __repr__(self):
@@ -1061,7 +1091,7 @@ class SourceEstimate(_BaseSourceEstimate):
             stc_vertices = self.vertno[1]
 
         # find index of the Label's vertices
-        idx = np.nonzero(map(label.vertices.__contains__, stc_vertices))[0]
+        idx = np.nonzero(in1d(stc_vertices, label.vertices))[0]
 
         # find output vertices
         vertices = stc_vertices[idx]
@@ -1108,7 +1138,7 @@ class SourceEstimate(_BaseSourceEstimate):
         else:
             raise TypeError("Expected  Label or BiHemiLabel; got %r" % label)
 
-        if sum(map(len, vertices)) == 0:
+        if sum([len(v) for v in vertices]) == 0:
             raise ValueError('No vertices match the label in the stc file')
 
         label_stc = SourceEstimate(values, vertices=vertices,
@@ -1274,7 +1304,7 @@ class SourceEstimate(_BaseSourceEstimate):
         surf = os.path.join(subjects_dir, subject, 'surf',
                             hemis[hemi] + '.sphere')
 
-        if isinstance(surf, basestring):  # read in surface
+        if isinstance(surf, string_types):  # read in surface
             surf = read_surface(surf)
 
         if restrict_vertices is False:
@@ -1375,8 +1405,8 @@ class SourceEstimate(_BaseSourceEstimate):
         return brain
 
     @verbose
-    def morph(self, subject_to, grade=5, smooth=None,
-              subjects_dir=None, buffer_size=64, n_jobs=1, subject_from=None,
+    def morph(self, subject_to, grade=5, smooth=None, subjects_dir=None,
+              buffer_size=64, n_jobs=1, subject_from=None, sparse=False,
               verbose=None):
         """Morph a source estimate from one subject to another
 
@@ -1396,6 +1426,7 @@ class SourceEstimate(_BaseSourceEstimate):
             computing vertex locations. Note that if subject='fsaverage'
             and 'grade=5', this set of vertices will automatically be used
             (instead of computed) for speed, since this is a common morph.
+            NOTE: If sparse=True, grade has to be set to None.
         smooth : int or None
             Number of iterations for the smoothing of the surface data.
             If None, smooth is automatically defined to fill the surface
@@ -1410,6 +1441,10 @@ class SourceEstimate(_BaseSourceEstimate):
         subject_from : string
             Name of the original subject as named in the SUBJECTS_DIR.
             If None, self.subject will be used.
+        sparse : bool
+            Morph as a sparse source estimate. If True the only
+            parameters used are subject_to and subject_from,
+            and grade has to be None.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
 
@@ -1419,8 +1454,13 @@ class SourceEstimate(_BaseSourceEstimate):
             Source estimate for the destination subject.
         """
         subject_from = _check_subject(self.subject, subject_from)
-        return morph_data(subject_from, subject_to, self, grade, smooth,
-                          subjects_dir, buffer_size, n_jobs, verbose)
+        if sparse:
+            if grade is not None:
+                raise RuntimeError('grade must be set to None if sparse=True.')
+            return _morph_sparse(self, subject_from, subject_to, subjects_dir)
+        else:
+            return morph_data(subject_from, subject_to, self, grade, smooth,
+                              subjects_dir, buffer_size, n_jobs, verbose)
 
     def morph_precomputed(self, subject_to, vertices_to, morph_mat,
                           subject_from=None):
@@ -1447,6 +1487,46 @@ class SourceEstimate(_BaseSourceEstimate):
         return morph_data_precomputed(subject_from, subject_to, self,
                                       vertices_to, morph_mat)
 
+    def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
+                 vert_as_index=False, time_as_index=False):
+        """Get location and latency of peak amplitude
+
+        Parameters
+        ----------
+        hemi : {'lh', 'rh', None}
+            The hemi to be considered. If None, the entire source space is
+            considered.
+        tmin : float | None
+            The minimum point in time to be considered for peak getting.
+        tmax : float | None
+            The maximum point in time to be considered for peak getting.
+        mode : {'pos', 'neg', 'abs'}
+            How to deal with the sign of the data. If 'pos' only positive
+            values will be considered. If 'neg' only negative values will
+            be considered. If 'abs' absolute values will be considered.
+            Defaults to 'abs'.
+        vert_as_index : bool
+            Whether to return the vertex index instead of its ID.
+            Defaults to False.
+        time_as_index : bool
+            Whether to return the time index instead of the latency.
+            Defaults to False.
+
+        Returns
+        -------
+        pos : int
+            The vertex exhibiting the maximum response, either ID or index.
+        latency : float | int
+            The time point of the maximum response, either latency in seconds
+            or index.
+        """
+        data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]
+        vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,
+                  None: np.concatenate(self.vertno)}[hemi]
+
+        vert_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode)
+
+        return (vert_idx if vert_as_index else vertno[vert_idx],
+                time_idx if time_as_index else self.times[time_idx])
+
 
 class VolSourceEstimate(_BaseSourceEstimate):
     """Container for volume source estimates
@@ -1595,6 +1675,40 @@ class VolSourceEstimate(_BaseSourceEstimate):
         s += ", data size : %s x %s" % self.shape
         return "<VolSourceEstimate  |  %s>" % s
 
+    def get_peak(self, tmin=None, tmax=None, mode='abs',
+                 vert_as_index=False, time_as_index=False):
+        """Get location and latency of peak amplitude
+
+        Parameters
+        ----------
+        tmin : float | None
+            The minimum point in time to be considered for peak getting.
+        tmax : float | None
+            The maximum point in time to be considered for peak getting.
+        mode : {'pos', 'neg', 'abs'}
+            How to deal with the sign of the data. If 'pos' only positive
+            values will be considered. If 'neg' only negative values will
+            be considered. If 'abs' absolute values will be considered.
+            Defaults to 'abs'.
+        vert_as_index : bool
+            Whether to return the vertex index instead of its ID.
+            Defaults to False.
+        time_as_index : bool
+            Whether to return the time index instead of the latency.
+            Defaults to False.
+
+        Returns
+        -------
+        pos : int
+            The vertex exhibiting the maximum response, either ID or index.
+        latency : float | int
+            The latency in seconds, or the time index if time_as_index=True.
+        """
+
+        vert_idx, time_idx = _get_peak(self.data, self.times, tmin, tmax,
+                                       mode)
+
+        return (vert_idx if vert_as_index else self.vertno[vert_idx],
+                time_idx if time_as_index else self.times[time_idx])
+
 
 ###############################################################################
 # Morphing
@@ -1780,6 +1894,57 @@ def _get_subject_sphere_tris(subject, subjects_dir):
     return tris
 
 
+def _sparse_argmax_nnz_row(csr_mat):
+    """Return index of the maximum non-zero index in each row
+    """
+    n_rows = csr_mat.shape[0]
+    idx = np.empty(n_rows, dtype=np.int)
+    for k in range(n_rows):
+        row = csr_mat[k].tocoo()
+        idx[k] = row.col[np.argmax(row.data)]
+    return idx
+
+
+def _morph_sparse(stc, subject_from, subject_to, subjects_dir=None):
+    """Morph sparse source estimates to an other subject
+
+    Parameters
+    ----------
+    stc : SourceEstimate
+        The sparse STC.
+    subject_from : str
+        The subject on which stc is defined.
+    subject_to : str
+        The target subject.
+    subjects_dir : str
+        Path to SUBJECTS_DIR if it is not set in the environment.
+
+    Returns
+    -------
+    stc_morph : SourceEstimate
+        The morphed source estimates.
+    """
+    maps = read_morph_map(subject_to, subject_from, subjects_dir)
+    stc_morph = stc.copy()
+    stc_morph.subject = subject_to
+
+    cnt = 0
+    for k, hemi in enumerate(['lh', 'rh']):
+        if stc.vertno[k].size > 0:
+            map_hemi = maps[k]
+            vertno_k = _sparse_argmax_nnz_row(map_hemi[stc.vertno[k]])
+            order = np.argsort(vertno_k)
+            n_active_hemi = len(vertno_k)
+            data_hemi = stc_morph._data[cnt:cnt + n_active_hemi]
+            stc_morph._data[cnt:cnt + n_active_hemi] = data_hemi[order]
+            stc_morph.vertno[k] = vertno_k[order]
+            cnt += n_active_hemi
+        else:
+            stc_morph.vertno[k] = np.array([], dtype=np.int64)
+
+    return stc_morph
+
+
 @verbose
 def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
                subjects_dir=None, buffer_size=64, n_jobs=1, verbose=None):
@@ -2396,9 +2561,10 @@ def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
     header = nib.nifti1.Nifti1Header()
     header.set_xyzt_units('mm', 'msec')
     header['pixdim'][4] = 1e3 * stc.tstep
-    img = nib.Nifti1Image(vol, affine, header=header)
-    if fname is not None:
-        nib.save(img, fname)
+    with warnings.catch_warnings(record=True):  # nibabel<->numpy warning
+        img = nib.Nifti1Image(vol, affine, header=header)
+        if fname is not None:
+            nib.save(img, fname)
     return img
 
 
@@ -2459,7 +2625,8 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
             if not allow_empty:
                 raise ValueError(msg)
             else:
-                logger.warn(msg + '. Assigning all-zero time series to label.')
+                logger.warning(msg + '. Assigning all-zero time series to '
+                               'label.')
             this_vertidx = None  # to later check if label is empty
 
         label_vertidx.append(this_vertidx)
@@ -2473,6 +2640,8 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
     elif mode == 'pca_flip':
        # get the sign-flip vector for every label
         label_flip = _get_label_flip(labels, label_vertidx, src)
+    elif mode == 'max':
+        pass  # we calculate the maximum value later
     else:
         raise ValueError('%s is an invalid mode' % mode)
 
@@ -2513,6 +2682,10 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
                     scale = linalg.norm(s) / np.sqrt(len(vertidx))
 
                     label_tc[i] = sign * scale * V[0]
+        elif mode == 'max':
+            for i, vertidx in enumerate(label_vertidx):
+                if vertidx is not None:
+                    label_tc[i] = np.max(np.abs(stc.data[vertidx, :]), axis=0)
         else:
             raise ValueError('%s is an invalid mode' % mode)
 
@@ -2531,18 +2704,24 @@ def extract_label_time_course(stcs, labels, src, mode='mean_flip',
     parameter.
 
     Valid values for mode are:
-    'mean': Average within each label.
-    'mean_flip': Average within each label with sign flip depending on source
-    orientation.
-    'pca_flip': Apply an SVD to the time courses within each label and use the
-    scaled and sign-flipped first right-singular vector as the label time
-    course. The scaling is performed such that the power of the label time
-    course is the same as the average per-vertex time course power within
-    the label. The sign of the resulting time course is adjusted by multiplying
-    it with "sign(dot(u, flip))" where u is the first left-singular vector,
-    and flip is a sing-flip vector based on the vertex normals. This procedure
-    assures that the phase does not randomly change by 180 degrees from one
-    stc to the next.
+    --------------------------
+
+    mean : Average within each label.
+
+    mean_flip : Average within each label with sign flip depending on source
+        orientation.
+
+    pca_flip : Apply an SVD to the time courses within each label and use the
+        scaled and sign-flipped first right-singular vector as the label time
+        course. The scaling is performed such that the power of the label time
+        course is the same as the average per-vertex time course power within
+        the label. The sign of the resulting time course is adjusted by
+        multiplying it with "sign(dot(u, flip))" where u is the first
+        left-singular vector, and flip is a sign-flip vector based on the
+        vertex normals. This procedure assures that the phase does not randomly
+        change by 180 degrees from one stc to the next.
+
+    max : Maximum absolute value within each label.
 
     Parameters
     ----------
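
Taken together, the source_estimate.py changes above add an HDF5 on-disk
format, Python 3 true-division support, sparse morphing, get_peak(), and a
'max' label-extraction mode. A hedged sketch of the new save/read round trip
and peak lookup (the HDF5 path additionally needs h5py; file names here are
illustrative):

    import numpy as np
    from mne import SourceEstimate, read_source_estimate

    vertices = [np.array([0, 1, 2]), np.array([0, 1])]
    data = np.random.RandomState(0).randn(5, 4)
    stc = SourceEstimate(data, vertices, tmin=0., tstep=1e-3,
                         subject='sample')
    stc.save('demo', ftype='h5')                # writes demo-stc.h5
    stc2 = read_source_estimate('demo-stc.h5')  # subject comes from the file

    # new in 0.8: location and latency of the (absolute) peak
    vertex, latency = stc.get_peak(hemi='lh', mode='abs')
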
diff --git a/mne/source_space.py b/mne/source_space.py
index 9b076e6..8191e19 100644
--- a/mne/source_space.py
+++ b/mne/source_space.py
@@ -1,22 +1,23 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
+from .externals.six import string_types
 import numpy as np
 import os
 import os.path as op
 from scipy import sparse, linalg
 from copy import deepcopy
 
-from .fiff.constants import FIFF
-from .fiff.tree import dir_tree_find
-from .fiff.tag import find_tag, read_tag
-from .fiff.open import fiff_open
-from .fiff.write import (start_block, end_block, write_int,
-                         write_float_sparse_rcs, write_string,
-                         write_float_matrix, write_int_matrix,
-                         write_coord_trans, start_file, end_file, write_id)
+from .io.constants import FIFF
+from .io.tree import dir_tree_find
+from .io.tag import find_tag, read_tag
+from .io.open import fiff_open
+from .io.write import (start_block, end_block, write_int,
+                       write_float_sparse_rcs, write_string,
+                       write_float_matrix, write_int_matrix,
+                       write_coord_trans, start_file, end_file, write_id)
 from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
                       _tessellate_sphere_surf, read_bem_surfaces,
                       _read_surface_geom, _normalize_vectors,
@@ -24,13 +25,12 @@ from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
                       fast_cross_3d)
 from .source_estimate import mesh_dist
 from .utils import (get_subjects_dir, run_subprocess, has_freesurfer,
-                    has_nibabel, logger, verbose, check_scipy_version)
-from .fixes import in1d, partial
+                    has_nibabel, check_fname, logger, verbose,
+                    check_scipy_version)
+from .fixes import in1d, partial, gzip_open
 from .parallel import parallel_func, check_n_jobs
 from .transforms import (invert_transform, apply_trans, _print_coord_trans,
                          combine_transforms)
-if has_nibabel():
-    import nibabel as nib
 
 
 class SourceSpaces(list):
@@ -185,7 +185,8 @@ def read_source_spaces(fname, add_geom=False, verbose=None):
     Parameters
     ----------
     fname : str
-        The name of the file.
+        The name of the file, which should end with -src.fif or
+        -src.fif.gz.
     add_geom : bool, optional (default False)
         Add geometry information to the surfaces.
     verbose : bool, str, int, or None
@@ -196,23 +197,27 @@ def read_source_spaces(fname, add_geom=False, verbose=None):
     src : SourceSpaces
         The source spaces.
     """
-    fid, tree, _ = fiff_open(fname)
-    src = read_source_spaces_from_tree(fid, tree, add_geom=add_geom,
-                                       verbose=verbose)
-    src.info['fname'] = fname
-
-    node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
-    if node:
-        node = node[0]
-        for p in range(node['nent']):
-            kind = node['directory'][p].kind
-            pos = node['directory'][p].pos
-            tag = read_tag(fid, pos)
-            if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
-                src.info['working_dir'] = tag.data
-            elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
-                src.info['command_line'] = tag.data
-
+    # be more permissive on read than write (fwd/inv can contain src)
+    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
+                                        '-fwd.fif', '-fwd.fif.gz',
+                                        '-inv.fif', '-inv.fif.gz'))
+
+    ff, tree, _ = fiff_open(fname)
+    with ff as fid:
+        src = read_source_spaces_from_tree(fid, tree, add_geom=add_geom,
+                                           verbose=verbose)
+        src.info['fname'] = fname
+        node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
+        if node:
+            node = node[0]
+            for p in range(node['nent']):
+                kind = node['directory'][p].kind
+                pos = node['directory'][p].pos
+                tag = read_tag(fid, pos)
+                if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
+                    src.info['working_dir'] = tag.data
+                elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
+                    src.info['command_line'] = tag.data
     return src
 
 
@@ -551,12 +556,15 @@ def write_source_spaces(fname, src, verbose=None):
     Parameters
     ----------
     fname : str
-        File to write.
+        The name of the file, which should end with -src.fif or
+        -src.fif.gz.
     src : SourceSpaces
         The source spaces (as returned by read_source_spaces).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
+    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz'))
+
     fid = start_file(fname)
     start_block(fid, FIFF.FIFFB_MNE)
 
@@ -782,6 +790,7 @@ def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
     if use_nibabel is True and mode == 'freesurfer':
         use_nibabel = False
     if use_nibabel:
+        import nibabel as nib
         img = nib.load(path)
         hdr = img.get_header()
         n_orig = hdr.get_vox2ras()
@@ -809,7 +818,8 @@ def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
 
 @verbose
 def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
-                       overwrite=False, subjects_dir=None, verbose=None):
+                       overwrite=False, subjects_dir=None, add_dist=None,
+                       verbose=None):
     """Setup a source space with subsampling
 
     Parameters
@@ -829,6 +839,10 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
         If True, overwrite output file (if it exists).
     subjects_dir : string, or None
         Path to SUBJECTS_DIR if it is not set in the environment.
+    add_dist : bool
+        Add distance and patch information to the source space. This takes
+        some time, so precomputing it is recommended. The default is
+        currently False but will change to True in release 0.9.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -837,15 +851,22 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
     src : list
         The source space for each hemisphere.
     """
+    if add_dist is None:
+        msg = ("The add_dist parameter to mne.setup_source_space currently "
+               "defaults to False, but the default will change to True in "
+               "release 0.9. Specify the parameter explicitly to avoid this "
+               "warning.")
+        logger.warning(msg)
+
     cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
-           'overwrite=%s, subjects_dir=%s, verbose=%s)'
+           'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
            % (subject, fname, spacing, surface, overwrite,
-              subjects_dir, verbose))
+              subjects_dir, add_dist, verbose))
     # check to make sure our parameters are good, parse 'spacing'
     space_err = ('"spacing" must be a string with values '
                  '"ico#", "oct#", or "all", and "ico" and "oct"'
                  'numbers must be integers')
-    if not isinstance(spacing, basestring) or len(spacing) < 3:
+    if not isinstance(spacing, string_types) or len(spacing) < 3:
         raise ValueError(space_err)
     if spacing == 'all':
         stype = 'all'
@@ -875,7 +896,7 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
             raise IOError('Could not find the %s surface %s'
                           % (hemi, surf))
 
-    if not (fname is True or fname is None or isinstance(fname, basestring)):
+    if not (fname is True or fname is None or isinstance(fname, string_types)):
         raise ValueError('"fname" must be a string, True, or None')
     if fname is True:
         extra = '%s-%s' % (stype, sval) if sval != '' else stype
@@ -944,6 +965,9 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
     # upconvert to object format from lists
     src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
 
+    if add_dist:
+        add_source_space_distances(src, verbose=verbose)
+
     # write out if requested, then return the data
     if fname is not None:
         write_source_spaces(fname, src)
@@ -972,7 +996,8 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
         with the spacing given by `pos` in mm, generating a volume source
         space. If dict, pos['rr'] and pos['nn'] will be used as the source
         space locations (in meters) and normals, respectively, creating a
-        discrete source space.
+        discrete source space. NOTE: For a discrete source space (`pos` is
+        a dict), `mri` must be None.
     mri : str | None
         The filename of an MRI volume (mgh or mgz) to create the
         interpolation matrix over. Source estimates obtained in the
@@ -1021,10 +1046,10 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
     if mri is not None:
         if not op.isfile(mri):
             raise IOError('mri file "%s" not found' % mri)
-        if not has_nibabel(vox2ras_tkr=True):
-            raise RuntimeError('nibabel with "vox2ras_tkr" property is '
-                               'required to process mri data, consider '
-                               'installing and/or updating nibabel')
+        if isinstance(pos, dict):
+            raise ValueError('Cannot create interpolation matrix for '
+                             'discrete source space, mri must be None if '
+                             'pos is a dict')
     elif not isinstance(pos, dict):
         # "pos" will create a discrete src, so we don't need "mri"
         # if "pos" is None, we must have "mri" b/c it will be vol src
@@ -1047,7 +1072,7 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
             # let's make sure we have geom info
             surface = _read_surface_geom(surface, verbose=False)
             surf_extra = 'dict()'
-        elif isinstance(surface, basestring):
+        elif isinstance(surface, string_types):
             if not op.isfile(surface):
                 raise IOError('surface file "%s" not found' % surface)
             surf_extra = surface
@@ -1097,12 +1122,14 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
             logger.info('Loaded inner skull from %s (%d nodes)'
                         % (bem, surf['np']))
         elif surface is not None:
-            if isinstance(surf, basestring):
+            if isinstance(surface, string_types):
                 surf = _read_surface_geom(surface)
             else:
                 surf = surface
             logger.info('Loaded bounding surface from %s (%d nodes)'
                         % (surface, surf['np']))
+            surf = deepcopy(surf)
+            surf['rr'] *= 1e-3  # must be converted to meters
         else:  # Load an icosahedron and use that as the surface
             logger.info('Setting up the sphere...')
             surf = _get_ico_surface(3)
@@ -1267,7 +1294,7 @@ def _make_volume_source_space(surf, grid, exclude, mindist):
     idx3 = np.logical_and(idx2, y > minn[1])
     neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
 
-    idx2 = np.logical_and(idx1,  y > minn[1])
+    idx2 = np.logical_and(idx1, y > minn[1])
     neigh[12, idx2] = k[idx2] - nrow - nplane
     idx3 = np.logical_and(idx2, x < maxn[0])
     neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
@@ -1347,7 +1374,6 @@ def _make_volume_source_space(surf, grid, exclude, mindist):
     ras = np.eye(3)
     sp['src_mri_t'] = _make_voxel_ras_trans(r0, ras, voxel_size)
     sp['vol_dims'] = maxn - minn + 1
-    sp['voxel_dims'] = voxel_size
     return sp
 
 
@@ -1355,20 +1381,57 @@ def _vol_vertex(width, height, jj, kk, pp):
     return jj + width * kk + pp * (width * height)
 
 
+def _get_mgz_header(fname):
+    """Adapted from nibabel to quickly extract header info"""
+    if not fname.endswith('.mgz'):
+        raise IOError('Filename must end with .mgz')
+    header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
+                  ('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
+                  ('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
+                  ('Pxyz_c', '>f4', (3,))]
+    header_dtype = np.dtype(header_dtd)
+    with gzip_open(fname, 'rb') as fid:
+        hdr_str = fid.read(header_dtype.itemsize)
+    header = np.ndarray(shape=(), dtype=header_dtype,
+                        buffer=hdr_str)
+    # dims
+    dims = header['dims'].astype(int)
+    dims = dims[:3] if len(dims) == 4 else dims
+    # vox2ras_tkr
+    delta = header['delta']
+    ds = np.array(delta, float)
+    ns = np.array(dims * ds) / 2.0
+    v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
+                       [0, 0, ds[2], -ns[2]],
+                       [0, -ds[1], 0, ns[1]],
+                       [0, 0, 0, 1]], dtype=np.float32)
+    # ras2vox
+    d = np.diag(delta)
+    pcrs_c = dims / 2.0
+    Mdc = header['Mdc'].T
+    pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
+    M = np.eye(4, 4)
+    M[0:3, 0:3] = np.dot(Mdc, d)
+    M[0:3, 3] = pxyz_0.T
+    M = linalg.inv(M)
+    header = dict(dims=dims, vox2ras_tkr=v2rtkr, ras2vox=M)
+    return header
+
+
 def _add_interpolator(s, mri_name):
     """Compute a sparse matrix to interpolate the data into an MRI volume"""
     # extract transformation information from mri
     logger.info('Reading %s...' % mri_name)
-    mri_hdr = nib.load(mri_name).get_header()
-    mri_width, mri_height, mri_depth = mri_hdr.get_data_shape()
+    header = _get_mgz_header(mri_name)
+    mri_width, mri_height, mri_depth = header['dims']
+
     s.update(dict(mri_width=mri_width, mri_height=mri_height,
                   mri_depth=mri_depth))
-    trans = mri_hdr.get_vox2ras_tkr()
+    trans = header['vox2ras_tkr'].copy()
     trans[:3, :] /= 1000.0
     s['vox_mri_t'] = {'trans': trans, 'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
                       'to': FIFF.FIFFV_COORD_MRI}  # ras_tkr
-    trans = linalg.inv(np.dot(mri_hdr.get_vox2ras_tkr(),
-                              mri_hdr.get_ras2vox()))
+    trans = linalg.inv(np.dot(header['vox2ras_tkr'], header['ras2vox']))
     trans[:3, 3] /= 1000.0
     s['mri_ras_t'] = {'trans': trans, 'from': FIFF.FIFFV_COORD_MRI,
                       'to': FIFF.FIFFV_MNE_COORD_RAS}  # ras
@@ -1376,7 +1439,11 @@ def _add_interpolator(s, mri_name):
     _print_coord_trans(s['src_mri_t'], 'Source space : ')
     _print_coord_trans(s['vox_mri_t'], 'MRI volume : ')
     _print_coord_trans(s['mri_ras_t'], 'MRI volume : ')
-    # Convert from destination to source volume coords
+
+    #
+    # Convert MRI voxels from destination (MRI volume) to source (volume
+    # source space subset) coordinates
+    #
     combo_trans = combine_transforms(s['vox_mri_t'],
                                      invert_transform(s['src_mri_t']),
                                      FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
@@ -1384,6 +1451,8 @@ def _add_interpolator(s, mri_name):
     combo_trans['trans'] = combo_trans['trans'].astype(np.float32)
 
     logger.info('Setting up interpolation...')
+
+    # Take *all* MRI vertices...
     js = np.arange(mri_width, dtype=np.float32)
     js = np.tile(js[np.newaxis, np.newaxis, :],
                  (mri_depth, mri_height, 1)).ravel()
@@ -1393,14 +1462,23 @@ def _add_interpolator(s, mri_name):
     ps = np.arange(mri_depth, dtype=np.float32)
     ps = np.tile(ps[:, np.newaxis, np.newaxis],
                  (1, mri_height, mri_width)).ravel()
-
-    r0 = apply_trans(combo_trans['trans'], np.c_[js, ks, ps])
-    del js, ks, ps
+    r0 = np.c_[js, ks, ps]
+    # note we have the correct number of vertices
+    assert len(r0) == mri_width * mri_height * mri_depth
+
+    # ...and transform them from their MRI space into our source space's frame
+    # (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's really a subset
+    # of the entire volume!)
+    r0 = apply_trans(combo_trans['trans'], r0)
     rn = np.floor(r0).astype(int)
     maxs = (s['vol_dims'] - 1)[np.newaxis, :]
     good = np.logical_and(np.all(rn >= 0, axis=1), np.all(rn < maxs, axis=1))
     rn = rn[good]
     r0 = r0[good]
+    # now we take each MRI voxel *in this space* and figure out how to make
+    # its value a weighted sum of voxels in the volume source space. This
+    # is a standard trilinear interpolation scheme: each voxel is weighted
+    # among its enclosing grid neighbors by its fractional offsets.
     jj = rn[:, 0]
     kk = rn[:, 1]
     pp = rn[:, 2]
@@ -1543,6 +1621,11 @@ def _get_solids(tri_rrs, fros):
 def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
     """Compute inter-source distances along the cortical surface
 
+    This function will also try to add patch info for the source space.
+    This only happens if the ``dist_limit`` is sufficiently high that all
+    points on the surface are within ``dist_limit`` of a point in the
+    source space.
+
     Parameters
     ----------
     src : instance of SourceSpaces
@@ -1601,13 +1684,26 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
                                '> 0.13 is installed')
 
     parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
+    min_dists = list()
+    min_idxs = list()
+    logger.info('Calculating source space distances (limit=%s mm)...'
+                % (1000 * dist_limit))
     for s in src:
         connectivity = mesh_dist(s['tris'], s['rr'])
         d = parallel(p_fun(connectivity, s['vertno'], r, dist_limit)
                      for r in np.array_split(np.arange(len(s['vertno'])),
                                              n_jobs))
-        d = np.concatenate(d, axis=0)
-        # convert to sparse representation
+        # deal with indexing so we can add patch info
+        min_idx = np.array([dd[1] for dd in d])
+        min_dist = np.array([dd[2] for dd in d])
+        midx = np.argmin(min_dist, axis=0)
+        range_idx = np.arange(len(s['rr']))
+        min_dist = min_dist[midx, range_idx]
+        min_idx = min_idx[midx, range_idx]
+        min_dists.append(min_dist)
+        min_idxs.append(min_idx)
+        # now actually deal with distances, convert to sparse representation
+        d = np.concatenate([dd[0] for dd in d], axis=0)
         i, j = np.meshgrid(s['vertno'], s['vertno'])
         d = d.ravel()
         i = i.ravel()
@@ -1617,6 +1713,16 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
                               shape=(s['np'], s['np']), dtype=np.float32)
         s['dist'] = d
         s['dist_limit'] = np.array([dist_limit], np.float32)
+
+    # Let's see if the distance limit was sufficient to allow patch info
+    if not any([np.any(np.isinf(md)) for md in min_dists]):
+        # Patch info can be added!
+        for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
+            s['nearest'] = min_idx
+            s['nearest_dist'] = min_dist
+            _add_patch_info(s)
+    else:
+        logger.info('Not adding patch information, dist_limit too small')
     return src
 
 
@@ -1628,9 +1734,20 @@ def _do_src_distances(con, vertno, run_inds, limit):
         func = sparse.csgraph.dijkstra
     chunk_size = 100  # save memory by chunking (only a little slower)
     lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
+    n_chunks = len(lims) - 1
     d = np.empty((len(run_inds), len(vertno)))
-    for l1, l2 in zip(lims[:-1], lims[1:]):
+    min_dist = np.empty((n_chunks, con.shape[0]))
+    min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
+    range_idx = np.arange(con.shape[0])
+    for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
         idx = vertno[run_inds[l1:l2]]
-        d[l1:l2] = func(con, indices=idx)[:, vertno]
+        out = func(con, indices=idx)
+        midx = np.argmin(out, axis=0)
+        min_idx[li] = idx[midx]
+        min_dist[li] = out[midx, range_idx]
+        d[l1:l2] = out[:, vertno]
+    midx = np.argmin(min_dist, axis=0)
+    min_dist = min_dist[midx, range_idx]
+    min_idx = min_idx[midx, range_idx]
     d[d == np.inf] = 0  # scipy will give us np.inf for uncalc. distances
-    return d
+    return d, min_idx, min_dist
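
With the per-vertex minima that _do_src_distances now returns, the distance
pass can also attach nearest-vertex/patch information whenever dist_limit is
effectively unlimited. A hedged usage sketch (assumes the FreeSurfer 'sample'
subject from the sample dataset is available):

    import numpy as np
    import mne

    subjects_dir = mne.datasets.sample.data_path() + '/subjects'
    src = mne.setup_source_space('sample', fname=None, spacing='oct6',
                                 add_dist=False, subjects_dir=subjects_dir)
    # dist_limit=np.inf -> no surface point is unreachable, so the
    # nearest/patch info is added alongside the 'dist' matrices
    src = mne.add_source_space_distances(src, dist_limit=np.inf, n_jobs=2)
    mne.write_source_spaces('sample-oct6-src.fif', src)
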
diff --git a/mne/stats/__init__.py b/mne/stats/__init__.py
index 17fb8a9..31381f3 100644
--- a/mne/stats/__init__.py
+++ b/mne/stats/__init__.py
@@ -10,3 +10,4 @@ from .cluster_level import (permutation_cluster_test,
                             ttest_1samp_no_p,
                             summarize_clusters_stc)
 from .multi_comp import fdr_correction, bonferroni_correction
+from .regression import linear_regression
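
The newly exported linear_regression fits an ordinary least-squares model
across epochs, one fit per sensor/time point. A hedged sketch of how it is
meant to be called; the design-matrix layout and the per-regressor result
fields follow our reading of the 0.8 API and should be treated as
assumptions:

    import numpy as np
    from mne.stats import linear_regression

    # epochs: an existing mne.Epochs instance (placeholder here)
    covariate = np.arange(len(epochs), dtype=float)  # e.g. trial index
    design = np.c_[np.ones(len(epochs)), covariate]  # intercept + covariate
    lm = linear_regression(epochs, design, names=['intercept', 'covariate'])
    lm['covariate'].beta.plot_topomap()  # assumed Evoked-like result fields
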
diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py
index 9cb5c95..8fbc3a9 100755
--- a/mne/stats/cluster_level.py
+++ b/mne/stats/cluster_level.py
@@ -2,10 +2,10 @@
 # -*- coding: utf-8 -*-
 
 # Authors: Thorsten Kranz <thorstenkranz at gmail.com>
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: Simplified BSD
 
@@ -171,7 +171,7 @@ def _get_clusters_st_multistep(keepers, neighbors, max_step=1):
 def _get_clusters_st(x_in, neighbors, max_step=1):
     """Helper function to choose the most efficient version"""
     n_src = len(neighbors)
-    n_times = x_in.size / n_src
+    n_times = x_in.size // n_src
     cl_goods = np.where(x_in)[0]
     if len(cl_goods) > 0:
         keepers = [np.array([], dtype=int)] * n_times
@@ -326,7 +326,7 @@ def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
                 txt = ('threshold["start"] (%s) is more extreme than '
                        'data statistics with most extreme value %s'
                        % (threshold['start'], stop))
-                logger.warn(txt)
+                logger.warning(txt)
                 warnings.warn(txt)
             else:
                 logger.info('Using %d thresholds from %0.2f to %0.2f for TFCE '
@@ -436,11 +436,11 @@ def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power):
             else:
                 if t_power == 1:
                     sums = ndimage.measurements.sum(x, labels,
-                                                  index=range(1, n_labels + 1))
+                                                  index=list(range(1, n_labels + 1)))
                 else:
                     sums = ndimage.measurements.sum(np.sign(x) *
                                                   np.abs(x) ** t_power, labels,
-                                                  index=range(1, n_labels + 1))
+                                                  index=list(range(1, n_labels + 1)))
         else:
             # boolean masks (raveled)
             clusters = list()
@@ -556,7 +556,7 @@ def _do_permutations(X_full, slices, threshold, tail, connectivity, stat_fun,
             # only shuffle a small data buffer, so we need less memory
             T_obs_surr = np.empty(n_vars, dtype=X_full.dtype)
 
-            for pos in xrange(0, n_vars, buffer_size):
+            for pos in range(0, n_vars, buffer_size):
                 # number of variables for this loop
                 n_var_loop = min(pos + buffer_size, n_vars) - pos
 
@@ -628,7 +628,7 @@ def _do_1samp_permutations(X, slices, threshold, tail, connectivity, stat_fun,
             # only sign-flip a small data buffer, so we need less memory
             T_obs_surr = np.empty(n_vars, dtype=X.dtype)
 
-            for pos in xrange(0, n_vars, buffer_size):
+            for pos in range(0, n_vars, buffer_size):
                 # number of variables for this loop
                 n_var_loop = min(pos + buffer_size, n_vars) - pos
 
@@ -703,13 +703,13 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
     # test if stat_fun treats variables independently
     if buffer_size is not None:
         T_obs_buffer = np.zeros_like(T_obs)
-        for pos in xrange(0, n_tests, buffer_size):
+        for pos in range(0, n_tests, buffer_size):
             T_obs_buffer[pos: pos + buffer_size] =\
                 stat_fun(*[x[:, pos: pos + buffer_size] for x in X])
 
         if not np.alltrue(T_obs == T_obs_buffer):
-            logger.warn('Provided stat_fun does not treat variables '
-                        'independently. Setting buffer_size to None.')
+            logger.warning('Provided stat_fun does not treat variables '
+                           'independently. Setting buffer_size to None.')
             buffer_size = None
 
     # The stat should have the same shape as the samples for no conn.
@@ -786,11 +786,11 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
                 seeds = list(seed + np.arange(n_permutations))
 
         # Step 3: repeat permutations for step-down-in-jumps procedure
-        smallest_p = -1
-        clusters_kept = 0
+        n_removed = 1  # number of new clusters removed in an iteration
+        total_removed = 0
         step_down_include = None  # start out including all points
-        step_down_iteration = 0
-        while smallest_p < step_down_p:
+        n_step_downs = 0
+        while n_removed > 0:
             # actually do the clustering for each partition
             if include is not None:
                 if step_down_include is not None:
@@ -806,27 +806,22 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
             H0 = np.concatenate(H0)
             cluster_pv = _pval_from_histogram(cluster_stats, H0, tail)
 
-            # sort them by significance; for backward compat, don't sort the
-            # clusters themselves
-            inds = np.argsort(cluster_pv)
-            ord_pv = cluster_pv[inds]
-            smallest_p = ord_pv[clusters_kept]
+            # figure out how many new ones will be removed for step-down
+            to_remove = np.where(cluster_pv < step_down_p)[0]
+            n_removed = to_remove.size - total_removed
+            total_removed = to_remove.size
             step_down_include = np.ones(n_tests, dtype=bool)
-            under = np.where(cluster_pv < step_down_p)[0]
-            for ci in under:
-                step_down_include[clusters[ci]] = False
+            for ti in to_remove:
+                step_down_include[clusters[ti]] = False
             if connectivity is None:
                 step_down_include.shape = sample_shape
-            step_down_iteration += 1
+            n_step_downs += 1
             if step_down_p > 0:
-                extra_text = 'additional ' if step_down_iteration > 1 else ''
-                new_count = under.size - clusters_kept
-                plural = '' if new_count == 1 else 's'
-                logger.info('Step-down-in-jumps iteration'
-                            '%i found %i %scluster%s'
-                            % (step_down_iteration, new_count,
-                               extra_text, plural))
-            clusters_kept += under.size
+                a_text = 'additional ' if n_step_downs > 1 else ''
+                pl = '' if n_removed == 1 else 's'
+                logger.info('Step-down-in-jumps iteration #%i found %i %s'
+                            'cluster%s to exclude from subsequent iterations'
+                            % (n_step_downs, n_removed, a_text, pl))
 
         # The clusters should have the same shape as the samples
         clusters = _reshape_clusters(clusters, sample_shape)
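
The rewritten loop above replaces the old p-value bookkeeping (`smallest_p`,
`clusters_kept`) with a simpler stopping rule: cluster, exclude every point
belonging to a cluster with p < step_down_p, and stop once an iteration
excludes nothing new. A minimal standalone sketch of that control flow
(`cluster_once` is a hypothetical stand-in for the permutation/clustering
step, not a function from this module):

    import numpy as np

    def step_down_sketch(cluster_once, n_tests, step_down_p):
        # cluster_once(include) is assumed to return (clusters, cluster_pv)
        include = np.ones(n_tests, dtype=bool)
        total_removed = 0
        n_removed = 1
        while n_removed > 0:
            clusters, cluster_pv = cluster_once(include)
            to_remove = np.where(cluster_pv < step_down_p)[0]
            n_removed = to_remove.size - total_removed  # newly excluded only
            total_removed = to_remove.size
            include = np.ones(n_tests, dtype=bool)
            for ti in to_remove:
                include[clusters[ti]] = False  # mask significant clusters
        return clusters, cluster_pv
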
diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py
index 6bb9170..37e4eee 100644
--- a/mne/stats/multi_comp.py
+++ b/mne/stats/multi_comp.py
@@ -1,5 +1,5 @@
 # Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis
-#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # Code borrowed from statsmodels
 #
diff --git a/mne/stats/parametric.py b/mne/stats/parametric.py
index ff25374..5987370 100644
--- a/mne/stats/parametric.py
+++ b/mne/stats/parametric.py
@@ -1,10 +1,14 @@
 import numpy as np
 from scipy import stats
+# stats.fprob is deprecated; scipy.special.fdtrc is the same function and
+# keeps the (dfn, dfd, F) argument order (stats.f.sf expects F first)
+from scipy.special import fdtrc as fprob
 from scipy.signal import detrend
 from ..fixes import matrix_rank
+from functools import reduce
+from ..externals.six.moves import map  # analysis:ignore
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
 #          Eric Larson <larson.eric.d at gmail.com>
 #
 # License: Simplified BSD
@@ -18,7 +22,7 @@ defaults_twoway_rm = {
         'A*B': [0, 1, 2]
         },
     'iter_contrasts': np.array([(1, 0, 1), (0, 1, 1), (1, 1, 1)])
- }
+    }
 
 
 # The following function is a rewriting of scipy.stats.f_oneway
@@ -91,7 +95,7 @@ def _f_oneway(*args):
     msb = ssbn / float(dfbn)
     msw = sswn / float(dfwn)
     f = msb / msw
-    prob = stats.fprob(dfbn, dfwn, f)
+    prob = fprob(dfbn, dfwn, f)
     return f, prob
 
 
@@ -103,28 +107,28 @@ def f_oneway(*args):
 def _check_effects(effects):
     """ Aux Function """
     if effects.upper() not in defaults_twoway_rm['parse']:
-        raise ValueError('The value passed for `effects` is not supported.'
-            ' Please consider the documentation.')
+        raise ValueError('The value passed for `effects` is not supported. '
+                         'Please consult the documentation.')
 
     return defaults_twoway_rm['parse'][effects]
 
 
 def _iter_contrasts(n_subjects, factor_levels, effect_picks):
     """ Aux Function: Setup contrasts """
-    sc, sy, = [], []  
-    
+    sc, sy = [], []
+
     # prepare computation of Kronecker products
     for n_levels in factor_levels:
         # for each factor append
         # 1) column vector of length == number of levels,
-        # 2) square matrix with diagonal == number of levels 
+        # 2) square matrix with diagonal == number of levels
 
-        # main + interaction effects for contrasts 
+        # main + interaction effects for contrasts
         sc.append([np.ones([n_levels, 1]),
                    detrend(np.eye(n_levels), type='constant')])
         # main + interaction effects for component means
         sy.append([np.ones([n_levels, 1]) / n_levels, np.eye(n_levels)])
-        # XXX component means not returned at the moment 
+        # XXX component means not returned at the moment
 
     for (c1, c2, c3) in defaults_twoway_rm['iter_contrasts'][effect_picks]:
         # c1 selects the first factors' level in the column vector
@@ -138,7 +142,7 @@ def _iter_contrasts(n_subjects, factor_levels, effect_picks):
 
 
 def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
-                       pvalue=0.05):
+                          pvalue=0.05):
     """ Compute f-value thresholds for a two-way ANOVA
 
     Parameters
@@ -168,7 +172,7 @@ def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
 
     f_threshold = []
     for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
-                                        effect_picks):
+                                       effect_picks):
         f_threshold.append(stats.f(df1, df2).isf(pvalue))
 
     return f_threshold if len(f_threshold) > 1 else f_threshold[0]
@@ -177,7 +181,7 @@ def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
 # The following functions are based on MATLAB code by Rik Henson
 # and Python code from the pvttble toolbox by Roger Lew.
 def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
-                   correction=False, return_pvals=True):
+                correction=False, return_pvals=True):
     """ 2 way repeated measures ANOVA for fully balanced designs
 
     Parameters
     ----------
     data : ndarray
@@ -225,7 +229,7 @@ def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
         data = data[:, :, np.newaxis]
     elif data.ndim > 3:  # let's allow for some magic here.
         data = data.reshape(data.shape[0], data.shape[1],
-            np.prod(data.shape[2:]))
+                            np.prod(data.shape[2:]))
 
     effect_picks = _check_effects(effects)
     n_obs = data.shape[2]
@@ -235,7 +239,7 @@ def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
     data = np.rollaxis(data, 2)
     fvalues, pvalues = [], []
     for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
-            effect_picks):
+                                        effect_picks):
         y = np.dot(data, c_)
         b = np.mean(y, axis=1)[:, np.newaxis, :]
         ss = np.sum(np.sum(y * b, axis=2), axis=1)
@@ -244,16 +248,15 @@ def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
         fvalues.append(fvals)
         if correction:
             # sample covariances, leave off "/ (y.shape[1] - 1)" norm because
-            # it falls out. the below line is faster than the equivalent:
-            # v = np.array([np.dot(y_.T, y_) for y_ in y])
-            v = np.array(map(np.dot, y.swapaxes(2, 1), y))
-            v = (np.array(map(np.trace, v)) ** 2 /
-                  (df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
+            # it falls out.
+            v = np.array([np.dot(y_.T, y_) for y_ in y])
+            v = (np.array([np.trace(vv) for vv in v]) ** 2 /
+                 (df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
             eps = v
 
         df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
         if correction:
-            df1, df2 = [d[None, :] * eps for d in df1, df2]
+            df1, df2 = [d[None, :] * eps for d in (df1, df2)]
 
         if return_pvals:
             pvals = stats.f(df1, df2).sf(fvals)
@@ -262,4 +265,4 @@ def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
         pvalues.append(pvals)
 
     # handle single effect returns
-    return [np.squeeze(np.asarray(v)) for v in fvalues, pvalues]
+    return [np.squeeze(np.asarray(v)) for v in (fvalues, pvalues)]
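
For orientation, a minimal sketch of calling the two-way repeated-measures
ANOVA touched above (random data; assumes f_twoway_rm is exported from
mne.stats as in this release, with the (n_subjects, n_conditions,
n_observations) layout from its docstring):

    import numpy as np
    from mne.stats import f_twoway_rm

    rng = np.random.RandomState(0)
    # 20 subjects, 2 x 2 design (A1B1, A1B2, A2B1, A2B2), 10 observations
    data = rng.randn(20, 4, 10)
    fvals, pvals = f_twoway_rm(data, factor_levels=[2, 2], effects='A*B')
    print(fvals.shape)  # (3, 10): one row per effect (A, B, A:B)
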
diff --git a/mne/stats/permutations.py b/mne/stats/permutations.py
index 2608c5f..a20892a 100644
--- a/mne/stats/permutations.py
+++ b/mne/stats/permutations.py
@@ -1,7 +1,7 @@
 """T-test with permutations
 """
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Fernando Perez (bin_perm_rep function)
 #
 # License: Simplified BSD
@@ -37,7 +37,7 @@ def bin_perm_rep(ndim, a=0, b=1):
     nperms = 2 ** ndim
     perms = np.empty((nperms, ndim), type(a))
     perms.fill(a)
-    half_point = nperms / 2
+    half_point = nperms // 2
     perms[half_point:, 0] = b
     # Fill the rest of the table by sampling the previous column every 2 items
     for j in range(1, ndim):
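
The `//` above is not cosmetic: under Python 3, true division returns a
float, which cannot be used as an array index. A quick illustration:

    nperms = 2 ** 4
    nperms / 2   # Python 3: 8.0 (float), unusable as an index; Python 2: 8
    nperms // 2  # 8 (int) on both, safe for perms[half_point:, 0] = b
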
diff --git a/mne/stats/regression.py b/mne/stats/regression.py
new file mode 100644
index 0000000..a668f95
--- /dev/null
+++ b/mne/stats/regression.py
@@ -0,0 +1,135 @@
+# Authors: Tal Linzen <linzen at nyu.edu>
+#          Teon Brooks <teon at nyu.edu>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+from collections import namedtuple
+from inspect import isgenerator
+import warnings
+
+import numpy as np
+from scipy import linalg, stats
+
+from ..source_estimate import SourceEstimate
+from ..epochs import _BaseEpochs
+from ..evoked import Evoked, EvokedArray
+from ..utils import logger
+from ..io.pick import pick_types
+
+
+def linear_regression(inst, design_matrix, names=None):
+    """Fit Ordinary Least Squares regression (OLS)
+
+    Parameters
+    ----------
+    inst : instance of Epochs | iterable of SourceEstimate
+        The data to be regressed. Contains all the trials, sensors, and time
+        points for the regression. For Source Estimates, accepts either a list
+        or a generator object.
+    design_matrix : ndarray, shape (n_observations, n_regressors)
+        The regressors to be used. Must be a 2d array with as many rows as
+        the first dimension of `data`. The first column of this matrix will
+        typically consist of ones (intercept column).
+    names : list-like | None
+        Optional parameter to name the regressors. If provided, the length must
+        correspond to the number of columns present in regressors
+        (including the intercept, if present).
+        Otherwise the default names are x0, x1, x2...xn for n regressors.
+
+    Returns
+    -------
+    results : dict of namedtuple
+        For each regressor (key) a namedtuple is provided with the
+        following attributes:
+
+            beta : regression coefficients
+            stderr : standard error of regression coefficients
+            t_val : t statistics (beta / stderr)
+            p_val : two-sided p-value of t statistic under the t distribution
+            mlog10_p_val : -log10 transformed p-value.
+
+        The tuple members are numpy arrays. The shape of each numpy array is
+        the shape of the data minus the first dimension; e.g., if the shape of
+        the original data was (n_observations, n_channels, n_timepoints),
+        then the shape of each of the arrays will be
+        (n_channels, n_timepoints).
+    """
+    if names is None:
+        names = ['x%i' % i for i in range(design_matrix.shape[1])]
+
+    if isinstance(inst, _BaseEpochs):
+        picks = pick_types(inst.info, meg=True, eeg=True, ref_meg=True,
+                           stim=False, eog=False, ecg=False,
+                           emg=False, exclude=['bads'])
+        if [inst.ch_names[p] for p in picks] != inst.ch_names:
+            warnings.warn('Fitting linear model to non-data or bad '
+                          'channels. Check picking', UserWarning)
+        msg = 'Fitting linear model to epochs'
+        data = inst.get_data()
+        out = EvokedArray(np.zeros(data.shape[1:]), inst.info, inst.tmin)
+    elif isgenerator(inst):
+        msg = 'Fitting linear model to source estimates (generator input)'
+        out = next(inst)
+        data = np.array([out.data] + [i.data for i in inst])
+    elif isinstance(inst, list) and isinstance(inst[0], SourceEstimate):
+        msg = 'Fitting linear model to source estimates (list input)'
+        out = inst[0]
+        data = np.array([i.data for i in inst])
+    else:
+        raise ValueError('Input must be epochs or iterable of source '
+                         'estimates')
+    logger.info(msg + ' (%s targets, %s regressors)' %
+                (np.product(data.shape[1:]), len(names)))
+    lm_params = _fit_lm(data, design_matrix, names)
+    lm = namedtuple('lm', 'beta stderr t_val p_val mlog10_p_val')
+    lm_fits = {}
+    for name in names:
+        parameters = [p[name] for p in lm_params]
+        for ii, value in enumerate(parameters):
+            out_ = out.copy()
+            if isinstance(out_, SourceEstimate):
+                out_._data[:] = value
+            elif isinstance(out_, Evoked):
+                out_.data[:] = value
+            else:
+                raise RuntimeError('Invalid container.')
+            parameters[ii] = out_
+        lm_fits[name] = lm(*parameters)
+    logger.info('Done')
+    return lm_fits
+
+
+def _fit_lm(data, design_matrix, names):
+    """Aux function"""
+    n_samples = len(data)
+    n_features = np.product(data.shape[1:])
+    if design_matrix.ndim != 2:
+        raise ValueError('Design matrix must be a 2d array')
+    n_rows, n_predictors = design_matrix.shape
+
+    if n_samples != n_rows:
+        raise ValueError('Number of rows in design matrix must be equal '
+                         'to number of observations')
+    if n_predictors != len(names):
+        raise ValueError('Number of regressor names must be equal to '
+                         'number of columns in design matrix')
+
+    y = np.reshape(data, (n_samples, n_features))
+    betas, resid_sum_squares, _, _ = linalg.lstsq(a=design_matrix, b=y)
+
+    df = n_rows - n_predictors
+    sqrt_noise_var = np.sqrt(resid_sum_squares / df).reshape(data.shape[1:])
+    design_invcov = linalg.inv(np.dot(design_matrix.T, design_matrix))
+    unscaled_stderrs = np.sqrt(np.diag(design_invcov))
+
+    beta, stderr, t_val, p_val, mlog10_p_val = (dict() for _ in range(5))
+    for x, unscaled_stderr, predictor in zip(betas, unscaled_stderrs, names):
+        beta[predictor] = x.reshape(data.shape[1:])
+        stderr[predictor] = sqrt_noise_var * unscaled_stderr
+        t_val[predictor] = beta[predictor] / stderr[predictor]
+        cdf = stats.t.cdf(np.abs(t_val[predictor]), df)
+        p_val[predictor] = (1. - cdf) * 2.
+        mlog10_p_val[predictor] = -np.log10(p_val[predictor])
+
+    return beta, stderr, t_val, p_val, mlog10_p_val
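
Since regression.py is a new module, a minimal usage sketch may help. The
toy Epochs construction follows the imports used by this release's tests
(create_info, EpochsArray); treat the exact signatures as assumptions, and
the trial-order regressor is purely illustrative:

    import numpy as np
    from mne.io.meas_info import create_info
    from mne.epochs import EpochsArray
    from mne.stats.regression import linear_regression

    n_trials, n_channels, n_times = 10, 2, 5
    data = np.random.RandomState(0).randn(n_trials, n_channels, n_times)
    info = create_info(['MEG 001', 'MEG 002'], 1000., 'mag')
    events = np.c_[np.arange(n_trials), np.zeros(n_trials, int),
                   np.ones(n_trials, int)]
    epochs = EpochsArray(data, info, events, tmin=0.)

    design = np.ones((n_trials, 2))     # column 0: intercept
    design[:, 1] = np.arange(n_trials)  # hypothetical trial-order regressor
    lm = linear_regression(epochs, design, names=['intercept', 'order'])
    print(lm['order'].t_val.data.shape)  # (n_channels, n_times)
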
diff --git a/mne/stats/tests/test_cluster_level.py b/mne/stats/tests/test_cluster_level.py
index 90ee37a..d8067a0 100644
--- a/mne/stats/tests/test_cluster_level.py
+++ b/mne/stats/tests/test_cluster_level.py
@@ -37,6 +37,39 @@ def _get_conditions():
     return condition1_1d, condition2_1d, condition1_2d, condition2_2d
 
 
+def test_permutation_step_down_p():
+    """Test cluster level permutations with step_down_p
+    """
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph
+    except ImportError:
+        return
+    rng = np.random.RandomState(0)
+    # subjects, time points, spatial points
+    X = rng.randn(9, 2, 10)
+    # add some significant points
+    X[:, 0:2, 0:2] += 2  # span two time points and two spatial points
+    X[:, 1, 5:9] += 0.5  # span four time points with 4x smaller amplitude
+    thresh = 2
+    # make sure it works when we use ALL points in step-down
+    t, clusters, p, H0 = \
+            permutation_cluster_1samp_test(X, threshold=thresh,
+                                           step_down_p=1.0)
+    # make sure using step-down will actually yield improvements sometimes
+    t, clusters, p_old, H0 = \
+            permutation_cluster_1samp_test(X, threshold=thresh,
+                                           step_down_p=0.0)
+    assert_equal(np.sum(p_old < 0.05), 1)  # just spatial cluster
+    t, clusters, p_new, H0 = \
+            permutation_cluster_1samp_test(X, threshold=thresh,
+                                           step_down_p=0.05)
+    assert_equal(np.sum(p_new < 0.05), 2)  # time one rescued
+    assert_true(np.all(p_old >= p_new))
+
+
 def test_cluster_permutation_test():
     """Test cluster-level permutation tests
     """
@@ -225,7 +258,7 @@ def test_cluster_permutation_with_connectivity():
                       connectivity=connectivity, threshold=dict(me='hello'))
 
         # too extreme a start threshold
-        with warnings.catch_warnings(True) as w:
+        with warnings.catch_warnings(record=True) as w:
             spatio_temporal_func(X1d_3, connectivity=connectivity,
                                  threshold=dict(start=10, step=1))
         if not did_warn:
diff --git a/mne/stats/tests/test_regression.py b/mne/stats/tests/test_regression.py
new file mode 100644
index 0000000..6255950
--- /dev/null
+++ b/mne/stats/tests/test_regression.py
@@ -0,0 +1,67 @@
+# Authors: Teon Brooks <teon at nyu.edu>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_array_equal
+
+from nose.tools import assert_raises, assert_true, assert_equal
+
+import mne
+from mne import read_source_estimate
+from mne.datasets import sample
+from mne.stats.regression import linear_regression
+
+data_path = sample.data_path(download=False)
+subjects_dir = op.join(data_path, 'subjects')
+stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
+
+
+@sample.requires_sample_data
+def test_regression():
+    """Test Ordinary Least Squares Regression
+    """
+    data_path = sample.data_path()
+    raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+    event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+    tmin, tmax = -0.2, 0.5
+    event_id = dict(aud_l=1, aud_r=2)
+
+    # Setup for reading the raw data
+    raw = mne.io.Raw(raw_fname)
+    events = mne.read_events(event_fname)[:10]
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                        baseline=(None, 0))
+    picks = np.arange(len(epochs.ch_names))
+    evoked = epochs.average(picks=picks)
+    design_matrix = epochs.events[:, 1:].astype(np.float64)
+    # makes the intercept
+    design_matrix[:, 0] = 1
+    # creates contrast: aud_l=0, aud_r=1
+    design_matrix[:, 1] -= 1
+    with warnings.catch_warnings(record=True) as w:
+        lm = linear_regression(epochs, design_matrix, ['intercept', 'aud'])
+        assert_true(w[0].category == UserWarning)
+        assert_true('non-data' in '%s' % w[0].message)
+
+    for predictor, parameters in lm.items():
+        for value in parameters:
+            assert_equal(value.data.shape, evoked.data.shape)
+
+    assert_raises(ValueError, linear_regression, [epochs, epochs],
+                  design_matrix)
+
+    stc = read_source_estimate(stc_fname).crop(0, 0.02)
+    stc_list = [stc, stc, stc]
+    stc_gen = (s for s in stc_list)
+    with warnings.catch_warnings(record=True):  # divide by zero
+        lm1 = linear_regression(stc_list, design_matrix[:len(stc_list)])
+    lm2 = linear_regression(stc_gen, design_matrix[:len(stc_list)])
+
+    for k in lm1:
+        for v1, v2 in zip(lm1[k], lm2[k]):
+            assert_array_equal(v1.data, v2.data)
diff --git a/mne/surface.py b/mne/surface.py
index 395dc5a..e07cd90 100644
--- a/mne/surface.py
+++ b/mne/surface.py
@@ -1,9 +1,10 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Denis A. Engemann <d.engemann at fz-juelich.de>
+#          Denis A. Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
+from .externals.six import string_types
 import os
 from os import path as op
 import sys
@@ -11,15 +12,18 @@ from struct import pack
 import numpy as np
 from scipy.spatial.distance import cdist
 from scipy import sparse
-
-from .fiff.constants import FIFF
-from .fiff.open import fiff_open
-from .fiff.tree import dir_tree_find
-from .fiff.tag import find_tag
-from .fiff.write import (write_int, write_float, write_float_matrix,
-                         write_int_matrix, start_file, end_block,
-                         start_block, end_file, write_string,
-                         write_float_sparse_rcs)
+from fnmatch import fnmatch
+
+from .io.constants import FIFF
+from .io.open import fiff_open
+from .io.tree import dir_tree_find
+from .io.tag import find_tag
+from .io.write import (write_int, write_float, write_float_matrix,
+                       write_int_matrix, start_file, end_block,
+                       start_block, end_file, write_string,
+                       write_float_sparse_rcs)
+from .channels import _get_meg_system
+from .transforms import transform_surface_to
 from .utils import logger, verbose, get_subjects_dir
 
 
@@ -171,6 +175,8 @@ def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
 
     tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
     if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
+    if tag is None:
         res['nn'] = []
     else:
         res['nn'] = tag.data
@@ -286,6 +292,84 @@ def read_bem_solution(fname, verbose=None):
 
 
 ###############################################################################
+# AUTOMATED SURFACE FINDING
+
+def get_head_surf(subject, source='bem', subjects_dir=None):
+    """Load the subject head surface
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    source : str
+        Type to load. Common choices would be `'bem'` or `'head'`. We first
+        try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
+        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory.
+    subjects_dir : str, or None
+        Path to the SUBJECTS_DIR. If None, the path is obtained by using
+        the environment variable SUBJECTS_DIR.
+
+    Returns
+    -------
+    surf : dict
+        The head surface.
+    """
+    # Load the head surface from the BEM
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    # use realpath to allow for linked surfaces (cf. MNE manual pp. 196-197)
+    this_head = op.realpath(op.join(subjects_dir, subject, 'bem',
+                                    '%s-%s.fif' % (subject, source)))
+    if not op.isfile(this_head):
+        # let's do a more sophisticated search
+        this_head = None
+        path = op.join(subjects_dir, subject, 'bem')
+        if not op.isdir(path):
+            raise IOError('Subject bem directory "%s" does not exist'
+                          % path)
+        files = os.listdir(path)
+        for fname in files:
+            if fnmatch(fname, '%s*%s.fif' % (subject, source)):
+                this_head = op.join(path, fname)
+                break
+        if this_head is None:
+            raise IOError('No file matching "%s*%s" found'
+                          % (subject, source))
+    surf = read_bem_surfaces(this_head, True,
+                             FIFF.FIFFV_BEM_SURF_ID_HEAD)
+    return surf
+
+
+def get_meg_helmet_surf(info, trans=None):
+    """Load the MEG helmet associated with the MEG sensors
+
+    Parameters
+    ----------
+    info : instance of io.meas_info.Info
+        Measurement info.
+    trans : dict
+        The head<->MRI transformation, usually obtained using
+        read_trans(). Can be None, in which case the surface will
+        be in head coordinates instead of MRI coordinates.
+
+    Returns
+    -------
+    surf : dict
+        The MEG helmet as a surface.
+    """
+    system = _get_meg_system(info)
+    fname = op.join(op.split(__file__)[0], 'data', 'helmets',
+                    system + '.fif.gz')
+    surf = read_bem_surfaces(fname, False, FIFF.FIFFV_MNE_SURF_MEG_HELMET)
+
+    # Ignore what the file says: it's in device coords, and we transform to
+    # head coords (and on to MRI coords if trans is given)
+    surf['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+    transform_surface_to(surf, 'head', info['dev_head_t'])
+    if trans is not None:
+        transform_surface_to(surf, 'mri', trans)
+    return surf
+
+
+###############################################################################
 # EFFICIENCY UTILITIES
 
 def fast_cross_3d(x, y):
@@ -336,11 +420,9 @@ def _accumulate_normals(tris, tri_nn, npts):
     #
     nn = np.zeros((npts, 3))
     for verts in tris.T:  # note this only loops 3x (number of verts per tri)
-        counts = np.bincount(verts, minlength=npts)
-        reord = np.argsort(verts)
-        vals = np.r_[np.zeros((1, 3)), np.cumsum(tri_nn[reord, :], 0)]
-        idx = np.cumsum(np.r_[0, counts])
-        nn += vals[idx[1:], :] - vals[idx[:-1], :]
+        for idx in range(3):  # x, y, z
+            nn[:, idx] += np.bincount(verts, weights=tri_nn[:, idx],
+                                      minlength=npts)
     return nn
 
 
@@ -428,7 +510,7 @@ def _complete_surface_info(this, do_neighbor_vert=False):
     #   Determine the neighboring vertices and fix errors
     if do_neighbor_vert is True:
         this['neighbor_vert'] = [_get_surf_neighbors(this, k)
-                                 for k in xrange(this['np'])]
+                                 for k in range(this['np'])]
 
     return this
 
@@ -561,13 +643,21 @@ def read_surface(fname, verbose=None):
         Triangulation (each line contains indexes for three points which
         together form a face).
     """
-    with open(fname, "rb") as fobj:
+    TRIANGLE_MAGIC = 16777214
+    QUAD_MAGIC = 16777215
+    NEW_QUAD_MAGIC = 16777213
+    with open(fname, "rb", buffering=0) as fobj:  # buffering=0 for np bug
         magic = _fread3(fobj)
-        if (magic == 16777215) or (magic == 16777213):  # Quad file or new quad
+        if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):  # quad file (old or new)
+            create_stamp = ''
             nvert = _fread3(fobj)
             nquad = _fread3(fobj)
-            coords = np.fromfile(fobj, ">i2", nvert * 3).astype(np.float)
-            coords = coords.reshape(-1, 3) / 100.0
+            if magic == QUAD_MAGIC:
+                coords = np.fromfile(fobj, ">i2", nvert * 3)
+                coords = coords.astype(np.float) / 100.
+            else:
+                coords = np.fromfile(fobj, ">f4", nvert * 3).astype(np.float)
+
+            coords = coords.reshape(-1, 3)
             quads = _fread3_many(fobj, nquad * 4)
             quads = quads.reshape(nquad, 4)
             #
@@ -587,11 +677,12 @@ def read_surface(fname, verbose=None):
                     faces[nface] = quad[0], quad[2], quad[3]
                     nface += 1
 
-        elif magic == 16777214:  # Triangle file
+        elif magic == TRIANGLE_MAGIC:  # Triangle file
             create_stamp = fobj.readline()
-            _ = fobj.readline()
+            _ = fobj.readline()  # analysis:ignore
             vnum = np.fromfile(fobj, ">i4", 1)[0]
             fnum = np.fromfile(fobj, ">i4", 1)[0]
             coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
             faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
         else:
@@ -608,7 +699,7 @@ def read_surface(fname, verbose=None):
 def _read_surface_geom(fname, add_geom=True, norm_rr=False, verbose=None):
     """Load the surface as dict, optionally add the geometry information"""
     # based on mne_load_surface_geom() in mne_surface_io.c
-    if isinstance(fname, basestring):
+    if isinstance(fname, string_types):
         rr, tris = read_surface(fname)  # mne_read_triangle_file()
         nvert = len(rr)
         ntri = len(tris)
@@ -746,6 +837,9 @@ def _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
         surf_name = op.join(subjects_dir, subject, 'surf', hemi + '.sphere')
         logger.info('Loading geometry from %s...' % surf_name)
         from_surf = _read_surface_geom(surf_name, norm_rr=True, add_geom=False)
+        if len(from_surf['rr']) != surf['np']:
+            raise RuntimeError('Mismatch in number of surface vertices; '
+                               'possible parcellation error?')
         _normalize_vectors(ico_surf['rr'])
 
         # Make the maps
@@ -754,7 +848,7 @@ def _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
         mmap = _compute_nearest(from_surf['rr'], ico_surf['rr'])
         nmap = len(mmap)
         surf['inuse'] = np.zeros(surf['np'], int)
-        for k in xrange(nmap):
+        for k in range(nmap):
             if surf['inuse'][mmap[k]]:
                 # Try the nearest neighbors
                 neigh = _get_surf_neighbors(surf, mmap[k])
@@ -819,9 +913,11 @@ def write_surface(fname, coords, faces, create_stamp=''):
     if len(create_stamp.splitlines()) > 1:
         raise ValueError("create_stamp can only contain one line")
 
-    with open(fname, 'w') as fid:
+    with open(fname, 'wb') as fid:
         fid.write(pack('>3B', 255, 255, 254))
-        fid.writelines(('%s\n' % create_stamp, '\n'))
+        strs = ['%s\n' % create_stamp, '\n']
+        strs = [s.encode('utf-8') for s in strs]
+        fid.writelines(strs)
         vnum = len(coords)
         fnum = len(faces)
         fid.write(pack('>2i', vnum, fnum))
@@ -948,7 +1044,7 @@ def read_morph_map(subject_from, subject_to, subjects_dir=None,
     left_map, right_map : sparse matrix
         The morph maps for the 2 hemispheres.
     """
-    subjects_dir = get_subjects_dir(subjects_dir)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
 
     # First check for morph-map dir existence
     mmap_dir = op.join(subjects_dir, 'morph-maps')
@@ -956,8 +1052,8 @@ def read_morph_map(subject_from, subject_to, subjects_dir=None,
         try:
             os.mkdir(mmap_dir)
         except:
-            logger.warn('Could not find or make morph map directory "%s"'
-                        % mmap_dir)
+            logger.warning('Could not find or make morph map directory "%s"'
+                           % mmap_dir)
 
     # Does the file exist
     fname = op.join(mmap_dir, '%s-%s-morph.fif' % (subject_from, subject_to))
@@ -978,8 +1074,8 @@ def read_morph_map(subject_from, subject_to, subjects_dir=None,
                 _write_morph_map(fname, subject_from, subject_to,
                                  mmap_1, mmap_2)
             except Exception as exp:
-                logger.warn('Could not write morph-map file "%s" (error: %s)'
-                            % (fname, exp))
+                logger.warning('Could not write morph-map file "%s" '
+                               '(error: %s)' % (fname, exp))
             return mmap_1
 
     f, tree, _ = fiff_open(fname)
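
The _accumulate_normals rewrite above relies on np.bincount summing its
weights per index, which replaces the earlier argsort/cumsum bookkeeping
with one call per coordinate. A small self-contained check:

    import numpy as np

    verts = np.array([0, 2, 2, 1])        # a vertex index per triangle
    wx = np.array([1.0, 0.5, 0.25, 2.0])  # e.g. x component of each normal
    # vertex 2 accumulates 0.5 + 0.25 -> [1.0, 2.0, 0.75, 0.0]
    print(np.bincount(verts, weights=wx, minlength=4))
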
diff --git a/mne/tests/test_channels.py b/mne/tests/test_channels.py
new file mode 100644
index 0000000..391725d
--- /dev/null
+++ b/mne/tests/test_channels.py
@@ -0,0 +1,109 @@
+# Author: Daniel G Wakeman <dwakeman at nmr.mgh.harvard.edu>
+#         Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from copy import deepcopy
+
+import numpy as np
+from nose.tools import assert_raises, assert_true, assert_equal
+from scipy.io import savemat
+
+from mne.channels import (rename_channels, read_ch_connectivity,
+                          ch_neighbor_connectivity)
+from mne.io import read_info
+from mne.io.constants import FIFF
+from mne.fixes import partial
+from mne.utils import _TempDir
+
+tempdir = _TempDir()
+
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+
+def test_rename_channels():
+    """Test rename channels
+    """
+    info = read_info(raw_fname)
+    # Error Tests
+    # Test renaming a channel name that does not exist in ch_names
+    mapping = {'EEG 160': 'EEG060'}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test change to EEG channel
+    mapping = {'EOG 061': ('EEG 061', 'eeg')}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test change to illegal channel type
+    mapping = {'EOG 061': ('MEG 061', 'meg')}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test renaming that would change the channel type (e.g. from MEG)
+    mapping = {'MEG 2641': ('MEG2641', 'eeg')}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test improper mapping configuration
+    mapping = {'MEG 2641': 1.0}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test duplicate named channels
+    mapping = {'EEG 060': 'EOG 061'}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test successful changes
+    # Test ch_name and ch_names are changed
+    info2 = deepcopy(info)  # for consistency at the start of each test
+    info2['bads'] = ['EEG 060', 'EOG 061']
+    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
+    rename_channels(info2, mapping)
+    assert_true(info2['chs'][374]['ch_name'] == 'EEG060')
+    assert_true(info2['ch_names'][374] == 'EEG060')
+    assert_true('EEG060' in info2['bads'])
+    assert_true(info2['chs'][375]['ch_name'] == 'EOG061')
+    assert_true(info2['ch_names'][375] == 'EOG061')
+    assert_true('EOG061' in info2['bads'])
+    # Test type change
+    info2 = deepcopy(info)
+    info2['bads'] = ['EEG 060', 'EEG 059']
+    mapping = {'EEG 060': ('EOG 060', 'eog'), 'EEG 059': ('EOG 059', 'eog')}
+    rename_channels(info2, mapping)
+    assert_true(info2['chs'][374]['ch_name'] == 'EOG 060')
+    assert_true(info2['ch_names'][374] == 'EOG 060')
+    assert_true('EOG 060' in info2['bads'])
+    assert_true(info2['chs'][374]['kind'] is FIFF.FIFFV_EOG_CH)
+    assert_true(info2['chs'][373]['ch_name'] == 'EOG 059')
+    assert_true(info2['ch_names'][373] == 'EOG 059')
+    assert_true('EOG 059' in info2['bads'])
+    assert_true(info2['chs'][373]['kind'] is FIFF.FIFFV_EOG_CH)
+
+
+def test_read_ch_connectivity():
+    "Test reading channel connectivity templates"
+    a = partial(np.array, dtype='<U7')
+    # no pep8
+    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
+                     (['MEG0121'], [[a(['MEG0111'])],
+                                    [a(['MEG0131'])]]),
+                     (['MEG0131'], [[a(['MEG0111'])],
+                                    [a(['MEG0121'])]])]],
+                   dtype=[('label', 'O'), ('neighblabel', 'O')])
+    mat = dict(neighbours=nbh)
+    mat_fname = op.join(tempdir, 'test_mat.mat')
+    savemat(mat_fname, mat)
+
+    ch_connectivity = read_ch_connectivity(mat_fname)
+    x = ch_connectivity
+    assert_equal(x.shape, (3, 3))
+    assert_equal(x[0, 1], False)
+    assert_equal(x[0, 2], True)
+    assert_true(np.all(x.diagonal()))
+    assert_raises(ValueError, read_ch_connectivity, mat_fname, [0, 3])
+    ch_connectivity = read_ch_connectivity(mat_fname, picks=[0, 2])
+    assert_equal(ch_connectivity.shape[0], 2)
+
+    ch_names = ['EEG01', 'EEG02', 'EEG03']
+    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
+    assert_raises(ValueError, ch_neighbor_connectivity, ch_names, neighbors)
+    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
+    assert_raises(ValueError, ch_neighbor_connectivity, ch_names[:2],
+                  neighbors)
+    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
+    assert_raises(ValueError, ch_neighbor_connectivity, ch_names, neighbors)
diff --git a/mne/tests/test_coreg.py b/mne/tests/test_coreg.py
index 844f09f..e6b2ea2 100644
--- a/mne/tests/test_coreg.py
+++ b/mne/tests/test_coreg.py
@@ -1,20 +1,32 @@
 import os
 
-from nose.tools import assert_raises, assert_true
+from nose.tools import assert_raises, assert_true, assert_equal
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_array_less
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+                           assert_array_less)
 
 from mne.transforms import apply_trans, rotation, translation, scaling
 from mne.coreg import (fit_matched_points, fit_point_cloud,
                        _point_cloud_error, _decimate_points,
                        create_default_subject, scale_mri,
-                       _is_mri_subject, scale_labels, scale_source_space)
+                       _is_mri_subject, scale_labels, scale_source_space,
+                       read_elp)
+from mne.io.kit.tests import data_dir as kit_data_dir
 from mne.utils import requires_mne_fs_in_env, _TempDir, run_subprocess
+from functools import reduce
 
 
 tempdir = _TempDir()
 
 
+def test_read_elp():
+    """Test reading an ELP file"""
+    path = os.path.join(kit_data_dir, 'test_elp.txt')
+    points = read_elp(path)
+    assert_equal(points.shape, (8, 3))
+    assert_array_equal(points[0], [1.3930, 13.1613, -4.6967])
+
+
 @requires_mne_fs_in_env
 def test_scale_mri():
     """Test creating fsaverage and scaling it"""
diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py
index b7b231a..37cc9c0 100644
--- a/mne/tests/test_cov.py
+++ b/mne/tests/test_cov.py
@@ -1,4 +1,4 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
@@ -12,15 +12,16 @@ from scipy import linalg
 import warnings
 
 from mne.cov import regularize, whiten_evoked
-from mne import (read_cov, Epochs, merge_events,
+from mne import (read_cov, write_cov, Epochs, merge_events,
                  find_events, compute_raw_data_covariance,
-                 compute_covariance)
-from mne.fiff import Raw, pick_channels_cov, pick_channels, Evoked, pick_types
+                 compute_covariance, read_evokeds)
+from mne import pick_channels_cov, pick_channels, pick_types
+from mne.io import Raw
 from mne.utils import _TempDir
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
 cov_fname = op.join(base_dir, 'test-cov.fif')
 cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
 cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
@@ -35,28 +36,36 @@ def test_io_cov():
     """Test IO for noise covariance matrices
     """
     cov = read_cov(cov_fname)
-    cov.save(op.join(tempdir, 'cov.fif'))
-    cov2 = read_cov(op.join(tempdir, 'cov.fif'))
+    cov.save(op.join(tempdir, 'test-cov.fif'))
+    cov2 = read_cov(op.join(tempdir, 'test-cov.fif'))
     assert_array_almost_equal(cov.data, cov2.data)
 
     cov2 = read_cov(cov_gz_fname)
     assert_array_almost_equal(cov.data, cov2.data)
-    cov2.save(op.join(tempdir, 'cov.fif.gz'))
-    cov2 = read_cov(op.join(tempdir, 'cov.fif.gz'))
+    cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
+    cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
     assert_array_almost_equal(cov.data, cov2.data)
 
     cov['bads'] = ['EEG 039']
     cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
     assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])))
     assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
-    cov_sel.save(op.join(tempdir, 'cov.fif'))
+    cov_sel.save(op.join(tempdir, 'test-cov.fif'))
 
     cov2 = read_cov(cov_gz_fname)
     assert_array_almost_equal(cov.data, cov2.data)
-    cov2.save(op.join(tempdir, 'cov.fif.gz'))
-    cov2 = read_cov(op.join(tempdir, 'cov.fif.gz'))
+    cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
+    cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
     assert_array_almost_equal(cov.data, cov2.data)
 
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_cov(cov_badname, cov)
+        read_cov(cov_badname)
+    assert_true(len(w) == 2)
+
 
 def test_cov_estimation_on_raw_segment():
     """Test estimation from raw on continuous recordings (typically empty room)
@@ -84,8 +93,9 @@ def test_cov_estimation_on_raw_segment():
     # make sure we get a warning with too short a segment
     raw_2 = raw.crop(0, 1)
     with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         cov = compute_raw_data_covariance(raw_2)
-        assert_true(len(w) == 1)
+    assert_true(len(w) == 1)
 
 
 def test_cov_estimation_with_triggers():
@@ -140,14 +150,15 @@ def test_cov_estimation_with_triggers():
 
     # cov with list of epochs with different projectors
     epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
-              baseline=(-0.2, -0.1), proj=True, reject=reject),
+                     baseline=(-0.2, -0.1), proj=True, reject=reject),
               Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
-              baseline=(-0.2, -0.1), proj=False, reject=reject)]
+                     baseline=(-0.2, -0.1), proj=False, reject=reject)]
     # these should fail
     assert_raises(ValueError, compute_covariance, epochs)
     assert_raises(ValueError, compute_covariance, epochs, projs=None)
     # these should work, but won't be equal to above
-    with warnings.catch_warnings(True) as w:  # too few samples warning
+    with warnings.catch_warnings(record=True) as w:  # too few samples warning
+        warnings.simplefilter('always')
         cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
         cov = compute_covariance(epochs, projs=[])
     assert_true(len(w) == 2)
@@ -177,10 +188,12 @@ def test_regularize_cov():
     """Test cov regularization
     """
     raw = Raw(raw_fname, preload=False)
+    raw.info['bads'].append(raw.ch_names[0])  # test with bad channels
     noise_cov = read_cov(cov_fname)
     # Regularize noise cov
     reg_noise_cov = regularize(noise_cov, raw.info,
-                               mag=0.1, grad=0.1, eeg=0.1, proj=True)
+                               mag=0.1, grad=0.1, eeg=0.1, proj=True,
+                               exclude='bads')
     assert_true(noise_cov['dim'] == reg_noise_cov['dim'])
     assert_true(noise_cov['data'].shape == reg_noise_cov['data'].shape)
     assert_true(np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08)
@@ -188,7 +201,8 @@ def test_regularize_cov():
 
 def test_evoked_whiten():
     """Test whitening of evoked data"""
-    evoked = Evoked(ave_fname, setno=0, baseline=(None, 0), proj=True)
+    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
+                          proj=True)
     cov = read_cov(cov_fname)
 
     ###########################################################################
@@ -196,7 +210,8 @@ def test_evoked_whiten():
     picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
                        exclude='bads')
 
-    noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1)
+    noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
+                           exclude='bads')
 
     evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
     whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
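
Several tests in this commit add warnings.simplefilter('always') right
after entering catch_warnings(record=True). Without it, Python's per-module
warning registry can (depending on the Python version) swallow a warning
that has already fired once, making len(w) assertions flaky. A minimal
illustration:

    import warnings

    def noisy():
        warnings.warn('beware', UserWarning)

    noisy()  # first emission lands in the module's warning registry

    with warnings.catch_warnings(record=True) as w:
        noisy()  # may be suppressed by the registry on some versions
        print(len(w))

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        noisy()  # now recorded unconditionally
        assert len(w) == 1
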
diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py
index a294871..f28c2e7 100644
--- a/mne/tests/test_epochs.py
+++ b/mne/tests/test_epochs.py
@@ -1,12 +1,13 @@
-# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#         Denis Engemann <d.engemann at fz-juelich.de>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
 import os.path as op
 from copy import deepcopy
 
-from nose.tools import assert_true, assert_equal, assert_raises
+from nose.tools import (assert_true, assert_equal, assert_raises,
+                        assert_not_equal)
 
 from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                            assert_allclose)
@@ -14,33 +15,81 @@ import numpy as np
 import copy as cp
 import warnings
 
-from mne import fiff, Epochs, read_events, pick_events, read_epochs
-from mne.epochs import bootstrap, equalize_epoch_counts, combine_event_ids
-from mne.utils import _TempDir, requires_pandas, requires_nitime
-from mne.fiff import read_evoked
-from mne.fiff.proj import _has_eeg_average_ref_proj
+from mne import (io, Epochs, read_events, pick_events, read_epochs,
+                 equalize_channels, pick_types, pick_channels, read_evokeds,
+                 write_evokeds)
+from mne.epochs import (bootstrap, equalize_epoch_counts, combine_event_ids,
+                        add_channels_epochs, EpochsArray)
+from mne.utils import (_TempDir, requires_pandas, requires_nitime,
+                       clean_warning_registry)
+
+from mne.io.meas_info import create_info
+from mne.io.proj import _has_eeg_average_ref_proj
 from mne.event import merge_events
+from mne.io.constants import FIFF
+from mne.externals.six.moves import zip
+from mne.externals.six.moves import cPickle as pickle
+
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 event_name = op.join(base_dir, 'test-eve.fif')
 evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
 
 event_id, tmin, tmax = 1, -0.2, 0.5
 event_id_2 = 2
-raw = fiff.Raw(raw_fname, add_eeg_ref=False)
+raw = io.Raw(raw_fname, add_eeg_ref=False)
 events = read_events(event_name)
-picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=True,
-                        ecg=True, eog=True, include=['STI 014'],
-                        exclude='bads')
+picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                   ecg=True, eog=True, include=['STI 014'],
+                   exclude='bads')
 
 reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
 flat = dict(grad=1e-15, mag=1e-15)
 
 tempdir = _TempDir()
 
+clean_warning_registry()  # really clean warning stack
+
+
+def test_epochs_hash():
+    """Test epoch hashing
+    """
+    epochs = Epochs(raw, events, event_id, tmin, tmax)
+    assert_raises(RuntimeError, epochs.__hash__)
+    epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
+    assert_equal(hash(epochs), hash(epochs))
+    epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
+    assert_equal(hash(epochs), hash(epochs_2))
+    # do NOT use assert_equal here, failing output is terrible
+    assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
+
+    epochs_2._data[0, 0, 0] -= 1
+    assert_not_equal(hash(epochs), hash(epochs_2))
+
+
+def test_event_ordering():
+    """Test event order"""
+    events2 = events.copy()
+    np.random.shuffle(events2)
+    for ii, eve in enumerate([events, events2]):
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            Epochs(raw, eve, event_id, tmin, tmax,
+                   baseline=(None, 0), reject=reject, flat=flat)
+            assert_equal(len(w), ii)
+            if ii > 0:
+                assert_true('chronologically' in '%s' % w[-1].message)
+
+
+def test_epochs_bad_baseline():
+    """Test Epochs initialization with bad baseline parameters
+    """
+    assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
+    assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
+
 
 def test_epoch_combine_ids():
     """Test combining event ids in epochs compared to events
@@ -61,18 +110,23 @@ def test_read_epochs_bad_events():
     # Event at the beginning
     epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
                     event_id, tmin, tmax, picks=picks, baseline=(None, 0))
-    evoked = epochs.average()
+    with warnings.catch_warnings(record=True):
+        evoked = epochs.average()
 
     epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
                     event_id, tmin, tmax, picks=picks, baseline=(None, 0))
     epochs.drop_bad_epochs()
-    evoked = epochs.average()
+    with warnings.catch_warnings(record=True):
+        evoked = epochs.average()
 
     # Event at the end
     epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
                     event_id, tmin, tmax, picks=picks, baseline=(None, 0))
-    evoked = epochs.average()
-    assert evoked
+
+    with warnings.catch_warnings(record=True):
+        evoked = epochs.average()
+        assert evoked
+    warnings.resetwarnings()
 
 
 def test_read_write_epochs():
@@ -88,9 +142,10 @@ def test_read_write_epochs():
                           baseline=(None, 0))
     assert_array_equal(data, epochs_no_id.get_data())
 
-    eog_picks = fiff.pick_types(raw.info, meg=False, eeg=False, stim=False,
-                                eog=True, exclude='bads')
-    epochs.drop_picks(eog_picks)
+    eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
+                           eog=True, exclude='bads')
+    eog_ch_names = [raw.ch_names[k] for k in eog_picks]
+    epochs.drop_channels(eog_ch_names)
     assert_true(len(epochs.info['chs']) == len(epochs.ch_names)
                 == epochs.get_data().shape[1])
     data_no_eog = epochs.get_data()
@@ -98,6 +153,7 @@ def test_read_write_epochs():
 
     # test decim kwarg
     with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                             baseline=(None, 0), decim=4)
         assert_equal(len(w), 1)
@@ -134,7 +190,7 @@ def test_read_write_epochs():
     assert_equal(epochs_read.event_id, epochs.event_id)
 
     epochs.event_id.pop('1')
-    epochs.event_id.update({'a': 1})
+    epochs.event_id.update({'a:a': 1})  # test that ':' is allowed in keys
     epochs.save(op.join(tempdir, 'foo-epo.fif'))
     epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
     assert_equal(epochs_read2.event_id, epochs.event_id)
@@ -155,13 +211,30 @@ def test_read_write_epochs():
     # test equalizing loaded one (drop_log property)
     epochs_read4.equalize_event_counts(epochs.event_id)
 
+    epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
+    epochs.save(op.join(tempdir, 'test-epo.fif'))
+    epochs_read5 = read_epochs(op.join(tempdir, 'test-epo.fif'))
+    assert_array_equal(epochs_read5.selection, epochs.selection)
+    assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
+
+    # Test that one can drop channels on read file
+    epochs_read5.drop_channels(epochs_read5.ch_names[:1])
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        epochs.save(epochs_badname)
+        read_epochs(epochs_badname)
+    assert_true(len(w) == 2)
+
 
 def test_epochs_proj():
     """Test handling projection (apply proj in Raw or in Epochs)
     """
     exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
-    this_picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
-                                 eog=True, exclude=exclude)
+    this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
+                            eog=True, exclude=exclude)
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
                     baseline=(None, 0), proj=True)
     assert_true(all(p['active'] is True for p in epochs.info['projs']))
@@ -169,7 +242,7 @@ def test_epochs_proj():
     assert_true(all(p['active'] is True for p in evoked.info['projs']))
     data = epochs.get_data()
 
-    raw_proj = fiff.Raw(raw_fname, proj=True)
+    raw_proj = io.Raw(raw_fname, proj=True)
     epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
                             picks=this_picks, baseline=(None, 0), proj=False)
 
@@ -182,8 +255,8 @@ def test_epochs_proj():
     assert_array_almost_equal(data, data_no_proj, decimal=8)
 
     # make sure we can exclude avg ref
-    this_picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=True,
-                                 eog=True, exclude=exclude)
+    this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                            eog=True, exclude=exclude)
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
                     baseline=(None, 0), proj=True, add_eeg_ref=True)
     assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
@@ -216,29 +289,32 @@ def test_evoked_io_from_epochs():
     """Test IO of evoked data made from epochs
     """
     # offset our tmin so we don't get exactly a zero value when decimating
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
                         picks=picks, baseline=(None, 0), decim=5)
     assert_true(len(w) == 1)
     evoked = epochs.average()
-    evoked.save(op.join(tempdir, 'evoked.fif'))
-    evoked2 = read_evoked(op.join(tempdir, 'evoked.fif'))
+    evoked.save(op.join(tempdir, 'evoked-ave.fif'))
+    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
     assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
     assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
                     atol=1 / evoked.info['sfreq'])
 
     # now let's do one with negative time
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
                         picks=picks, baseline=(0.1, 0.2), decim=5)
     evoked = epochs.average()
-    evoked.save(op.join(tempdir, 'evoked.fif'))
-    evoked2 = read_evoked(op.join(tempdir, 'evoked.fif'))
+    evoked.save(op.join(tempdir, 'evoked-ave.fif'))
+    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
     assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
     assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
 
     # should be equivalent to a cropped original
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
                         picks=picks, baseline=(0.1, 0.2), decim=5)
     evoked = epochs.average()
@@ -253,17 +329,17 @@ def test_evoked_standard_error():
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0))
     evoked = [epochs.average(), epochs.standard_error()]
-    fiff.write_evoked(op.join(tempdir, 'evoked.fif'), evoked)
-    evoked2 = read_evoked(op.join(tempdir, 'evoked.fif'), [0, 1])
-    evoked3 = [read_evoked(op.join(tempdir, 'evoked.fif'), 'Unknown'),
-               read_evoked(op.join(tempdir, 'evoked.fif'), 'Unknown',
-                           kind='standard_error')]
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
+    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
+    evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
+               read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
+                            kind='standard_error')]
     for evoked_new in [evoked2, evoked3]:
         assert_true(evoked_new[0]._aspect_kind ==
-                    fiff.FIFF.FIFFV_ASPECT_AVERAGE)
+                    FIFF.FIFFV_ASPECT_AVERAGE)
         assert_true(evoked_new[0].kind == 'average')
         assert_true(evoked_new[1]._aspect_kind ==
-                    fiff.FIFF.FIFFV_ASPECT_STD_ERR)
+                    FIFF.FIFFV_ASPECT_STD_ERR)
         assert_true(evoked_new[1].kind == 'standard_error')
         for ave, ave2 in zip(evoked, evoked_new):
             assert_array_almost_equal(ave.data, ave2.data)
@@ -278,7 +354,9 @@ def test_evoked_standard_error():
 def test_reject_epochs():
     """Test of epochs rejection
     """
-    epochs = Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
+    events1 = events[events[:, 2] == event_id]
+    epochs = Epochs(raw, events1, event_id, tmin, tmax, baseline=(None, 0),
                     reject=reject, flat=flat)
     assert_raises(RuntimeError, len, epochs)
     n_events = len(epochs.events)
@@ -289,20 +367,21 @@ def test_reject_epochs():
     #   --saveavetag -ave --ave test.ave --filteroff
     assert_true(n_events > n_clean_epochs)
     assert_true(n_clean_epochs == 3)
-    assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'],
-                                    ['MEG 2443'], ['MEG 2443'], ['MEG 2443']])
+    assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
+                                    ['MEG 2443'], ['MEG 2443']])
 
     # Ensure epochs are not dropped based on a bad channel
     raw_2 = raw.copy()
     raw_2.info['bads'] = ['MEG 2443']
     reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
-    epochs = Epochs(raw_2, events, event_id, tmin, tmax, baseline=(None, 0),
+    epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
                     reject=reject_crazy, flat=flat)
     epochs.drop_bad_epochs()
+
     assert_true(all(['MEG 2442' in e for e in epochs.drop_log]))
     assert_true(all(['MEG 2443' not in e for e in epochs.drop_log]))
 
-    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+    epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=reject, flat=flat,
                     reject_tmin=0., reject_tmax=.1)
     data = epochs.get_data()
@@ -390,12 +469,12 @@ def test_indexing_slicing():
 def test_comparision_with_c():
     """Test of average obtained vs C code
     """
-    c_evoked = fiff.Evoked(evoked_nf_name, setno=0)
+    c_evoked = read_evokeds(evoked_nf_name, condition=0)
     epochs = Epochs(raw, events, event_id, tmin, tmax,
                     baseline=None, preload=True,
                     reject=None, flat=None)
     evoked = epochs.average()
-    sel = fiff.pick_channels(c_evoked.ch_names, evoked.ch_names)
+    sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
     evoked_data = evoked.data
     c_evoked_data = c_evoked.data[sel]
 
@@ -473,8 +552,8 @@ def test_detrend():
                       baseline=None, detrend=1)
     epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                       baseline=None, detrend=None)
-    data_picks = fiff.pick_types(epochs_1.info, meg=True, eeg=True,
-                                 exclude='bads')
+    data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
+                            exclude='bads')
     evoked_1 = epochs_1.average()
     evoked_2 = epochs_2.average()
     evoked_2.detrend(1)
@@ -627,13 +706,14 @@ def test_epoch_eq():
     epochs.drop_bad_epochs()  # make sure drops are logged
     assert_true(len([l for l in epochs.drop_log if not l]) ==
                 len(epochs.events))
-    drop_log1 = [l for l in epochs.drop_log]  # now copy the log
+    drop_log1 = deepcopy(epochs.drop_log)
     old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
     epochs.equalize_event_counts(['a', 'b'], copy=False)
     # undo the eq logging
     drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
                  epochs.drop_log]
     assert_true(drop_log1 == drop_log2)
+
     assert_true(len([l for l in epochs.drop_log if not l]) ==
                 len(epochs.events))
     new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
@@ -675,7 +755,7 @@ def test_epoch_eq():
 
 
 def test_access_by_name():
-    """Test accessing epochs by event name
+    """Test accessing epochs by event name and on_missing for rare events
     """
     assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
                   tmax, picks=picks)
@@ -685,6 +765,20 @@ def test_access_by_name():
                   tmin, tmax, picks=picks)
     assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
                   picks=picks)
+    # Test accessing non-existent events (assumes 12345678 does not exist)
+    event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
+    assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
+                  tmin, tmax)
+    # Test on_missing
+    assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
+                  on_missing='foo')
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        Epochs(raw, events, event_id_illegal, tmin, tmax,
+               on_missing='warning')
+        nw = len(w)
+        assert_true(1 <= nw <= 2)
+        Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
+        assert_equal(len(w), nw)
     epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
     assert_raises(KeyError, epochs.__getitem__, 'bar')
 
@@ -707,6 +801,8 @@ def test_access_by_name():
 
     epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
                      tmin, tmax, picks=picks, preload=True)
+    assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
+                 [1, 2])
     epochs4 = epochs['a']
     epochs5 = epochs3['a']
     assert_array_equal(epochs4.events, epochs5.events)
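
# Editor's sketch (not part of the patch): minimal use of the new
# ``on_missing`` option exercised above. ``raw`` and ``events`` stand for
# the test fixtures; id 12345678 is assumed absent, as in the test.
import warnings
from mne import Epochs

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter('always')
    Epochs(raw, events, dict(known=1, absent=12345678), -0.2, 0.5,
           on_missing='warning')  # the default, 'error', raises ValueError
    assert len(w) >= 1            # 'ignore' would add no warning
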
@@ -806,15 +902,295 @@ def test_epochs_proj_mixin():
     assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
 
 
-def test_event_ordering():
-    """Test event order"""
-    events2 = events.copy()
-    np.random.shuffle(events2)
-    for ii, eve in enumerate([events, events2]):
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter('always', RuntimeWarning)
-            Epochs(raw, eve, event_id, tmin, tmax,
-                   baseline=(None, 0), reject=reject, flat=flat)
-            assert_equal(len(w), ii)
-            if ii > 0:
-                assert_true('chronologically' in '%s' % w[-1].message)
+def test_drop_epochs():
+    """Test dropping of epochs.
+    """
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    events1 = events[events[:, 2] == event_id]
+
+    # Bound checks
+    assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
+    assert_raises(IndexError, epochs.drop_epochs, [-1])
+    assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
+
+    # Test selection attribute
+    assert_array_equal(epochs.selection,
+                       np.where(events[:, 2] == event_id)[0])
+    assert_equal(len(epochs.drop_log), len(events))
+    assert_true(all(epochs.drop_log[k] == ['IGNORED']
+                for k in set(range(len(events))) - set(epochs.selection)))
+
+    selection = epochs.selection.copy()
+    n_events = len(epochs.events)
+    epochs.drop_epochs([2, 4], reason='d')
+    assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
+    assert_equal(len(epochs.drop_log), len(events))
+    assert_equal([epochs.drop_log[k]
+                  for k in selection[[2, 4]]], [['d'], ['d']])
+    assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
+    assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
+    assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
+
+
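# Editor's sketch (not part of the patch): the drop bookkeeping tested
# above. Dropping by index records the supplied reason in drop_log and
# shrinks ``selection``; raw/events/event_id are the test fixtures.
from mne import Epochs

epochs = Epochs(raw, events, event_id, -0.2, 0.5, baseline=(None, 0))
n_orig = len(epochs.events)
epochs.drop_epochs([0, 1], reason='user')  # logged as ['user']
assert len(epochs.events) == n_orig - 2
print(epochs.drop_log_stats())             # percent of epochs dropped
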
+def test_drop_epochs_mult():
+    """Test that subselecting epochs or making less epochs is equivalent"""
+    for preload in [True, False]:
+        epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
+                         tmin, tmax, picks=picks, reject=reject,
+                         preload=preload)['a']
+        epochs2 = Epochs(raw, events, {'a': 1},
+                         tmin, tmax, picks=picks, reject=reject,
+                         preload=preload)
+
+        if preload:
+            # In the preload case you cannot know the bads if already ignored
+            assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
+            for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
+                if d1 == ['IGNORED']:
+                    assert_true(d2 == ['IGNORED'])
+                if d1 != ['IGNORED'] and d1 != []:
+                    assert_true((d2 == d1) or (d2 == ['IGNORED']))
+                if d1 == []:
+                    assert_true(d2 == [])
+            assert_array_equal(epochs1.events, epochs2.events)
+            assert_array_equal(epochs1.selection, epochs2.selection)
+        else:
+            # In the non-preload case it should be exactly the same
+            assert_equal(epochs1.drop_log, epochs2.drop_log)
+            assert_array_equal(epochs1.events, epochs2.events)
+            assert_array_equal(epochs1.selection, epochs2.selection)
+
+
+def test_contains():
+    """Test membership API"""
+
+    tests = [(('mag', False), ('grad', 'eeg')),
+             (('grad', False), ('mag', 'eeg')),
+             ((False, True), ('grad', 'mag'))]
+
+    for (meg, eeg), others in tests:
+        picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
+        epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
+                        picks=picks_contains, reject=None,
+                        preload=False)
+        test = 'eeg' if eeg is True else meg
+        assert_true(test in epochs)
+        assert_true(not any(o in epochs for o in others))
+
+    assert_raises(ValueError, epochs.__contains__, 'foo')
+    assert_raises(ValueError, epochs.__contains__, 1)
+
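# Editor's sketch (not part of the patch): the membership API tested
# above. ``'mag' in epochs`` asks whether any magnetometers are present;
# an unknown channel type raises ValueError. ``epochs`` stands for any
# Epochs instance, e.g. one built from the fixtures above.
if 'mag' in epochs and 'eeg' not in epochs:
    print('magnetometer-only epochs')
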
+
+def test_drop_channels_mixin():
+    """Test channels-dropping functionality
+    """
+    # here without picks to get additional coverage
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
+                    baseline=(None, 0))
+    drop_ch = epochs.ch_names[:3]
+    ch_names = epochs.ch_names[3:]
+
+    ch_names_orig = epochs.ch_names
+    dummy = epochs.drop_channels(drop_ch, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, epochs.ch_names)
+    assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
+
+    epochs.drop_channels(drop_ch)
+    assert_equal(ch_names, epochs.ch_names)
+    assert_equal(len(ch_names), epochs.get_data().shape[1])
+
+
+def test_pick_channels_mixin():
+    """Test channel-picking functionality
+    """
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    ch_names = epochs.ch_names[:3]
+
+    ch_names_orig = epochs.ch_names
+    dummy = epochs.pick_channels(ch_names, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, epochs.ch_names)
+    assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
+
+    epochs.pick_channels(ch_names)
+    assert_equal(ch_names, epochs.ch_names)
+    assert_equal(len(ch_names), epochs.get_data().shape[1])
+
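# Editor's sketch (not part of the patch): copy semantics of the channel
# mixins tested above. With copy=True the original is untouched and a
# reduced copy is returned; the default copy=False modifies in place.
subset = epochs.pick_channels(epochs.ch_names[:3], copy=True)
assert len(subset.ch_names) == 3 and subset is not epochs
epochs.drop_channels(epochs.ch_names[:3])  # in-place counterpart
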
+
+def test_equalize_channels():
+    """Test equalization of channels
+    """
+    epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0), proj=False)
+    epochs2 = epochs1.copy()
+    ch_names = epochs1.ch_names[2:]
+    epochs1.drop_channels(epochs1.ch_names[:1])
+    epochs2.drop_channels(epochs2.ch_names[1:2])
+    my_comparison = [epochs1, epochs2]
+    equalize_channels(my_comparison)
+    for e in my_comparison:
+        assert_equal(ch_names, e.ch_names)
+
+
+def test_illegal_event_id():
+    """Test handling of invalid events ids"""
+    event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
+
+    assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
+                  tmax, picks=picks, baseline=(None, 0), proj=False)
+
+
+def test_add_channels_epochs():
+    """Test adding channels"""
+
+    def make_epochs(picks):
+        return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
+                      reject=None, preload=True, proj=False, picks=picks)
+
+    picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
+    picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+
+    epochs = make_epochs(picks=picks)
+    epochs_meg = make_epochs(picks=picks_meg)
+    epochs_eeg = make_epochs(picks=picks_eeg)
+
+    epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
+
+    assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
+    assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
+
+    data1 = epochs.get_data()
+    data2 = epochs2.get_data()
+    data3 = np.concatenate([e.get_data() for e in
+                            [epochs_meg, epochs_eeg]], axis=1)
+    assert_array_equal(data1.shape, data2.shape)
+    assert_array_equal(data1, data3)  # XXX unrelated bug? this crashes
+                                      # when proj == True
+    assert_array_equal(data1, data2)
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['meas_date'] += 10
+    add_channels_epochs([epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs2.info['filename'] = epochs2.info['filename'].upper()
+    epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.events[3, 2] -= 1
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg, epochs_eeg[:2]])
+
+    epochs_meg.info['chs'].pop(0)
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['sfreq'] = None
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['sfreq'] += 10
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['dev_head_t']['to'] += 1
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['dev_head_t']['to'] += 1
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['experimenter'] = 'foo'
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.preload = False
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.tmin += 0.4
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.tmin += 0.5
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.baseline = None
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.event_id['b'] = 2
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
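# Editor's sketch (not part of the patch): the merge rules exercised
# above. Both inputs must be preloaded and agree on events, times,
# baseline and info; epochs_meg / epochs_eeg are the fixtures above.
from mne.epochs import add_channels_epochs

combined = add_channels_epochs([epochs_meg, epochs_eeg])
assert len(combined.ch_names) == (len(epochs_meg.ch_names) +
                                  len(epochs_eeg.ch_names))
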
+
+def test_array_epochs():
+    """Test creating epochs from array
+    """
+
+    # creating
+    rng = np.random.RandomState(42)
+    data = rng.random_sample((10, 20, 300))
+    sfreq = 1e3
+    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
+    types = ['eeg'] * 20
+    info = create_info(ch_names, sfreq, types)
+    events = np.c_[np.arange(1, 600, 60),
+                   np.zeros(10),
+                   [1, 2] * 5]
+    event_id = {'a': 1, 'b': 2}
+    epochs = EpochsArray(data, info, events=events, event_id=event_id,
+                         tmin=-.2)
+
+    # saving
+    temp_fname = op.join(tempdir, 'test-epo.fif')
+    epochs.save(temp_fname)
+    epochs2 = read_epochs(temp_fname)
+    data2 = epochs2.get_data()
+    assert_allclose(data, data2)
+    assert_allclose(epochs.times, epochs2.times)
+    assert_equal(epochs.event_id, epochs2.event_id)
+    assert_array_equal(epochs.events, epochs2.events)
+
+    # plotting
+    import matplotlib
+    matplotlib.use('Agg')  # for testing don't use X server
+    epochs[0].plot()
+
+    # indexing
+    assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
+    assert_equal(len(epochs[:2]), 2)
+    data[0, 5, 150] = 3000
+    data[1, :, :] = 0
+    data[2, 5, 210] = 3000
+    data[3, 5, 260] = 0
+    epochs = EpochsArray(data, info, events=events, event_id=event_id,
+                         tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
+                         reject_tmin=0.1, reject_tmax=0.2)
+    assert_equal(len(epochs), len(events) - 2)
+    assert_equal(epochs.drop_log[0], ['EEG 006'])
+    assert_equal(len(events), len(epochs.selection))
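
# Editor's sketch (not part of the patch): the EpochsArray path tested
# above, with all names local to this snippet.
import numpy as np
from mne.io.meas_info import create_info
from mne.epochs import EpochsArray

data = np.random.RandomState(0).randn(5, 4, 100)  # epochs x chans x times
info = create_info(['EEG %03d' % (i + 1) for i in range(4)], 250.,
                   ['eeg'] * 4)
events = np.c_[np.arange(5) * 120, np.zeros(5), np.ones(5)]
epochs = EpochsArray(data, info, events=events, event_id=dict(a=1),
                     tmin=-0.1)
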
diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py
index 73cf618..31d2ed1 100644
--- a/mne/tests/test_event.py
+++ b/mne/tests/test_event.py
@@ -3,17 +3,21 @@ import os
 
 from nose.tools import assert_true
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_raises)
+import warnings
 
 from mne import (read_events, write_events, make_fixed_length_events,
-                 find_events, find_stim_steps, fiff)
+                 find_events, find_stim_steps, io, pick_channels)
 from mne.utils import _TempDir
 from mne.event import define_target_events, merge_events
 
-base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+warnings.simplefilter('always')
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
 fname = op.join(base_dir, 'test-eve.fif')
 fname_gz = op.join(base_dir, 'test-eve.fif.gz')
-fname_1 = op.join(base_dir, 'test-eve-1.fif')
+fname_1 = op.join(base_dir, 'test-1-eve.fif')
 fname_txt = op.join(base_dir, 'test-eve.eve')
 fname_txt_1 = op.join(base_dir, 'test-eve-1.eve')
 
@@ -25,6 +29,29 @@ raw_fname = op.join(base_dir, 'test_raw.fif')
 tempdir = _TempDir()
 
 
+def test_add_events():
+    """Test adding events to a Raw file"""
+    # need preload
+    raw = io.Raw(raw_fname, preload=False)
+    events = np.array([[raw.first_samp, 0, 1]])
+    assert_raises(RuntimeError, raw.add_events, events, 'STI 014')
+    raw = io.Raw(raw_fname, preload=True)
+    orig_events = find_events(raw, 'STI 014')
+    # add some events
+    events = np.array([raw.first_samp, 0, 1])
+    assert_raises(ValueError, raw.add_events, events, 'STI 014')  # bad shape
+    events[0] = raw.first_samp + raw.n_times + 1
+    events = events[np.newaxis, :]
+    assert_raises(ValueError, raw.add_events, events, 'STI 014')  # bad time
+    events[0, 0] = raw.first_samp - 1
+    assert_raises(ValueError, raw.add_events, events, 'STI 014')  # bad time
+    events[0, 0] = raw.first_samp + 1  # can't actually be first_samp
+    assert_raises(ValueError, raw.add_events, events, 'STI FOO')
+    raw.add_events(events, 'STI 014')
+    new_events = find_events(raw, 'STI 014')
+    assert_array_equal(new_events, np.concatenate((events, orig_events)))
+
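# Editor's sketch (not part of the patch): the new Raw.add_events tested
# above. The raw data must be preloaded, events must be an (n, 3) int
# array with sample numbers inside the recording, and the stim channel
# must exist; raw_fname is the fixture path.
import numpy as np
from mne import io, find_events

raw = io.Raw(raw_fname, preload=True)  # preload is required
raw.add_events(np.array([[raw.first_samp + 100, 0, 5]]), 'STI 014')
print(find_events(raw, 'STI 014')[:3])
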
+
 def test_merge_events():
     """Test event merging
     """
@@ -48,15 +75,15 @@ def test_io_events():
     """
     # Test binary fif IO
     events = read_events(fname)  # Use as the gold standard
-    write_events(op.join(tempdir, 'events.fif'), events)
-    events2 = read_events(op.join(tempdir, 'events.fif'))
+    write_events(op.join(tempdir, 'events-eve.fif'), events)
+    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
     assert_array_almost_equal(events, events2)
 
     # Test binary fif.gz IO
     events2 = read_events(fname_gz)  # Use as the gold standard
     assert_array_almost_equal(events, events2)
-    write_events(op.join(tempdir, 'events.fif.gz'), events2)
-    events2 = read_events(op.join(tempdir, 'events.fif.gz'))
+    write_events(op.join(tempdir, 'events-eve.fif.gz'), events2)
+    events2 = read_events(op.join(tempdir, 'events-eve.fif.gz'))
     assert_array_almost_equal(events, events2)
 
     # Test new format text file IO
@@ -74,18 +101,18 @@ def test_io_events():
     assert_array_almost_equal(events, events2)
 
     # Test event selection
-    a = read_events(op.join(tempdir, 'events.fif'), include=1)
-    b = read_events(op.join(tempdir, 'events.fif'), include=[1])
-    c = read_events(op.join(tempdir, 'events.fif'), exclude=[2, 3, 4, 5, 32])
-    d = read_events(op.join(tempdir, 'events.fif'), include=1, exclude=[2, 3])
+    a = read_events(op.join(tempdir, 'events-eve.fif'), include=1)
+    b = read_events(op.join(tempdir, 'events-eve.fif'), include=[1])
+    c = read_events(op.join(tempdir, 'events-eve.fif'),
+                    exclude=[2, 3, 4, 5, 32])
+    d = read_events(op.join(tempdir, 'events-eve.fif'), include=1,
+                    exclude=[2, 3])
     assert_array_equal(a, b)
     assert_array_equal(a, c)
     assert_array_equal(a, d)
 
     # Test binary file IO for 1 event
     events = read_events(fname_1)  # Use as the new gold standard
-    write_events(op.join(tempdir, 'events.fif'), events)
-    events2 = read_events(op.join(tempdir, 'events.fif'))
+    write_events(op.join(tempdir, 'events-eve.fif'), events)
+    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
     assert_array_almost_equal(events, events2)
 
     # Test text file IO for 1 event
@@ -93,12 +120,20 @@ def test_io_events():
     events2 = read_events(op.join(tempdir, 'events.eve'))
     assert_array_almost_equal(events, events2)
 
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        fname2 = op.join(tempdir, 'test-bad-name.fif')
+        write_events(fname2, events)
+        read_events(fname2)
+    assert_true(len(w) == 2)
+
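# Editor's sketch (not part of the patch): the filename-convention
# warnings tested above. Event files are expected to end in -eve.fif
# (or -eve.fif.gz / .eve) and both writer and reader warn otherwise;
# the path below is hypothetical and ``events`` is the fixture array.
import warnings
from mne import read_events, write_events

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter('always')
    write_events('/tmp/oddly-named.fif', events)
    read_events('/tmp/oddly-named.fif')
assert len(w) == 2  # one warning per call
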
 
 def test_find_events():
     """Test find events in raw file
     """
     events = read_events(fname)
-    raw = fiff.Raw(raw_fname, preload=True)
+    raw = io.Raw(raw_fname, preload=True)
     # let's test the defaulting behavior while we're at it
     extra_ends = ['', '_1']
     orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
@@ -113,7 +148,7 @@ def test_find_events():
     raw.info['sfreq'] = 1000
 
     stim_channel = 'STI 014'
-    stim_channel_idx = fiff.pick_channels(raw.info['ch_names'],
-                                          include=stim_channel)
+    stim_channel_idx = pick_channels(raw.info['ch_names'],
+                                     include=stim_channel)
 
     # test empty events channel
@@ -157,7 +192,9 @@ def test_find_events():
                         [31, 0, 5],
                         [40, 0, 6],
                         [14399, 0, 9]])
-    assert_array_equal(find_events(raw, output='step', consecutive=True),
+    assert_raises(ValueError, find_events, raw, output='step',
+                  consecutive=True)
+    assert_array_equal(find_events(raw, output='step', consecutive=True,
+                                   shortest_event=1),
                        [[10, 0, 5],
                         [20, 5, 6],
                         [30, 6, 5],
@@ -221,7 +258,7 @@ def test_find_events():
 def test_make_fixed_length_events():
     """Test making events of a fixed length
     """
-    raw = fiff.Raw(raw_fname)
+    raw = io.Raw(raw_fname)
     events = make_fixed_length_events(raw, id=1)
     assert_true(events.shape[1] == 3)
 
@@ -230,7 +267,7 @@ def test_define_events():
     """Test defining response events
     """
     events = read_events(fname)
-    raw = fiff.Raw(raw_fname)
+    raw = io.Raw(raw_fname)
     events_, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
                                       .2, 0.7, 42, 99)
     n_target = events[events[:, 2] == 5].shape[0]
diff --git a/mne/tests/test_evoked.py b/mne/tests/test_evoked.py
new file mode 100644
index 0000000..fa8bd90
--- /dev/null
+++ b/mne/tests/test_evoked.py
@@ -0,0 +1,384 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
+#         Andrew Dykstra <andrew.r.dykstra at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from copy import deepcopy
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_equal,
+                           assert_array_equal, assert_allclose)
+from nose.tools import assert_true, assert_raises, assert_not_equal
+
+from mne import (equalize_channels, pick_types, read_evoked, write_evoked,
+                 read_evokeds, write_evokeds)
+from mne.evoked import _get_peak, EvokedArray
+from mne.epochs import EpochsArray
+
+from mne.utils import _TempDir, requires_pandas, requires_nitime
+
+from mne.io.meas_info import create_info
+from mne.externals.six.moves import cPickle as pickle
+
+warnings.simplefilter('always')
+
+fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+                'test-ave.fif')
+fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+                   'test-ave.fif.gz')
+
+tempdir = _TempDir()
+
+
+def test_hash_evoked():
+    """Test evoked hashing
+    """
+    ave = read_evokeds(fname, 0)
+    ave_2 = read_evokeds(fname, 0)
+    assert_equal(hash(ave), hash(ave_2))
+    # do NOT use assert_equal here, failing output is terrible
+    assert_true(pickle.dumps(ave) == pickle.dumps(ave_2))
+
+    ave_2.data[0, 0] -= 1
+    assert_not_equal(hash(ave), hash(ave_2))
+
+
+def test_io_evoked():
+    """Test IO for evoked data (fif + gz) with integer and str args
+    """
+    ave = read_evokeds(fname, 0)
+
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
+
+    # This is not assert_array_equal because of Windows rounding errors
+    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
+    assert_array_almost_equal(ave.times, ave2.times)
+    assert_equal(ave.nave, ave2.nave)
+    assert_equal(ave._aspect_kind, ave2._aspect_kind)
+    assert_equal(ave.kind, ave2.kind)
+    assert_equal(ave.last, ave2.last)
+    assert_equal(ave.first, ave2.first)
+
+    # test compressed i/o
+    ave2 = read_evokeds(fname_gz, 0)
+    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
+
+    # test str access
+    condition = 'Left Auditory'
+    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
+    assert_raises(ValueError, read_evokeds, fname, condition,
+                  kind='standard_error')
+    ave3 = read_evokeds(fname, condition)
+    assert_array_almost_equal(ave.data, ave3.data, 19)
+
+    # test deprecation warning for read_evoked and write_evoked
+    # XXX should be deleted for 0.9 release
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        ave = read_evoked(fname, setno=0)
+        assert_true(w[0].category == DeprecationWarning)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        write_evoked(op.join(tempdir, 'evoked-ave.fif'), ave)
+        assert_true(w[0].category == DeprecationWarning)
+
+    # test read_evokeds and write_evokeds
+    types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
+    aves1 = read_evokeds(fname)
+    aves2 = read_evokeds(fname, [0, 1, 2, 3])
+    aves3 = read_evokeds(fname, types)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
+    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
+    for aves in [aves2, aves3, aves4]:
+        for [av1, av2] in zip(aves1, aves):
+            assert_array_almost_equal(av1.data, av2.data)
+            assert_array_almost_equal(av1.times, av2.times)
+            assert_equal(av1.nave, av2.nave)
+            assert_equal(av1.kind, av2.kind)
+            assert_equal(av1._aspect_kind, av2._aspect_kind)
+            assert_equal(av1.last, av2.last)
+            assert_equal(av1.first, av2.first)
+            assert_equal(av1.comment, av2.comment)
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        fname2 = op.join(tempdir, 'test-bad-name.fif')
+        write_evokeds(fname2, ave)
+        read_evokeds(fname2)
+    assert_true(len(w) == 2)
+
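# Editor's sketch (not part of the patch): the plural evoked I/O tested
# above. Conditions are selected by index or by comment string, and
# evoked files are expected to end in -ave.fif; fname is the fixture
# and the output path is hypothetical.
from mne import read_evokeds, write_evokeds

all_conditions = read_evokeds(fname)                   # list of Evoked
left = read_evokeds(fname, condition='Left Auditory')  # single Evoked
write_evokeds('/tmp/copy-ave.fif', all_conditions)
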
+
+def test_shift_time_evoked():
+    """ Test for shifting of time scale
+    """
+    # Shift backward
+    ave = read_evokeds(fname, 0)
+    ave.shift_time(-0.1, relative=True)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+
+    # Shift forward twice the amount
+    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+    ave_bshift.shift_time(0.2, relative=True)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
+
+    # Shift backward again
+    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+    ave_fshift.shift_time(-0.1, relative=True)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
+
+    ave_normal = read_evokeds(fname, 0)
+    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+
+    assert_true(np.allclose(ave_normal.data, ave_relative.data,
+                            atol=1e-16, rtol=1e-3))
+    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
+
+    assert_equal(ave_normal.last, ave_relative.last)
+    assert_equal(ave_normal.first, ave_relative.first)
+
+    # Absolute time shift
+    ave = read_evokeds(fname, 0)
+    ave.shift_time(-0.3, relative=False)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+
+    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+
+    assert_true(np.allclose(ave_normal.data, ave_absolute.data,
+                            atol=1e-16, rtol=1e-3))
+    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
+
+
+def test_evoked_resample():
+    """Test for resampling of evoked data
+    """
+    # upsample, write it out, read it in
+    ave = read_evokeds(fname, 0)
+    sfreq_normal = ave.info['sfreq']
+    ave.resample(2 * sfreq_normal)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+
+    # compare it to the original
+    ave_normal = read_evokeds(fname, 0)
+
+    # and compare the original to the downsampled upsampled version
+    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+    ave_new.resample(sfreq_normal)
+
+    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
+    assert_array_almost_equal(ave_normal.times, ave_new.times)
+    assert_equal(ave_normal.nave, ave_new.nave)
+    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
+    assert_equal(ave_normal.kind, ave_new.kind)
+    assert_equal(ave_normal.last, ave_new.last)
+    assert_equal(ave_normal.first, ave_new.first)
+
+    # for the above to work, the upsampling just about had to work, but
+    # we'll add a couple of extra checks anyway
+    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
+    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
+
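# Editor's sketch (not part of the patch): in-place Evoked resampling as
# tested above; doubling sfreq doubles the number of samples.
from mne import read_evokeds

ave = read_evokeds(fname, 0)  # fname is the fixture
n0, sfreq0 = len(ave.times), ave.info['sfreq']
ave.resample(2 * sfreq0)
assert len(ave.times) == 2 * n0
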
+
+def test_evoked_detrend():
+    """Test for detrending evoked data
+    """
+    ave = read_evokeds(fname, 0)
+    ave_normal = read_evokeds(fname, 0)
+    ave.detrend(0)
+    ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
+    picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
+    assert_true(np.allclose(ave.data[picks], ave_normal.data[picks],
+                            rtol=1e-8, atol=1e-16))
+
+
+@requires_nitime
+def test_evoked_to_nitime():
+    """ Test to_nitime """
+    ave = read_evokeds(fname, 0)
+    evoked_ts = ave.to_nitime()
+    assert_equal(evoked_ts.data, ave.data)
+
+    picks2 = [1, 2]
+    ave = read_evokeds(fname, 0)
+    evoked_ts = ave.to_nitime(picks=picks2)
+    assert_equal(evoked_ts.data, ave.data[picks2])
+
+
+@requires_pandas
+def test_as_data_frame():
+    """Test evoked Pandas exporter"""
+    ave = read_evokeds(fname, 0)
+    assert_raises(ValueError, ave.as_data_frame, picks=np.arange(400))
+    df = ave.as_data_frame()
+    assert_true((df.columns == ave.ch_names).all())
+    df = ave.as_data_frame(use_time_index=False)
+    assert_true('time' in df.columns)
+    assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
+    assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
+
+
+def test_evoked_proj():
+    """Test SSP proj operations
+    """
+    for proj in [True, False]:
+        ave = read_evokeds(fname, condition=0, proj=proj)
+        assert_true(all(p['active'] == proj for p in ave.info['projs']))
+
+        # test adding / deleting proj
+        if proj:
+            assert_raises(ValueError, ave.add_proj, [],
+                          {'remove_existing': True})
+            assert_raises(ValueError, ave.del_proj, 0)
+        else:
+            projs = deepcopy(ave.info['projs'])
+            n_proj = len(ave.info['projs'])
+            ave.del_proj(0)
+            assert_true(len(ave.info['projs']) == n_proj - 1)
+            ave.add_proj(projs, remove_existing=False)
+            assert_true(len(ave.info['projs']) == 2 * n_proj - 1)
+            ave.add_proj(projs, remove_existing=True)
+            assert_true(len(ave.info['projs']) == n_proj)
+
+    ave = read_evokeds(fname, condition=0, proj=False)
+    data = ave.data.copy()
+    ave.apply_proj()
+    assert_allclose(np.dot(ave._projector, data), ave.data)
+
+
+def test_get_peak():
+    """Test peak getter
+    """
+
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
+                  tmax=0.01)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
+    assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
+    assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
+
+    ch_idx, time_idx = evoked.get_peak(ch_type='mag')
+    assert_true(ch_idx in evoked.ch_names)
+    assert_true(time_idx in evoked.times)
+
+    ch_idx, time_idx = evoked.get_peak(ch_type='mag',
+                                       time_as_index=True)
+    assert_true(time_idx < len(evoked.times))
+
+    data = np.array([[0., 1., 2.],
+                     [0., -3., 0.]])
+
+    times = np.array([.1, .2, .3])
+
+    ch_idx, time_idx = _get_peak(data, times, mode='abs')
+    assert_equal(ch_idx, 1)
+    assert_equal(time_idx, 1)
+
+    ch_idx, time_idx = _get_peak(data * -1, times, mode='neg')
+    assert_equal(ch_idx, 0)
+    assert_equal(time_idx, 2)
+
+    ch_idx, time_idx = _get_peak(data, times, mode='pos')
+    assert_equal(ch_idx, 0)
+    assert_equal(time_idx, 2)
+
+    assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
+    assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
+
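# Editor's sketch (not part of the patch): the peak getter tested above
# returns the channel name and the peak latency (or a sample index with
# time_as_index=True), restricted to a single channel type.
from mne import read_evokeds

evoked = read_evokeds(fname, condition=0, proj=True)  # fname: fixture
ch_name, latency = evoked.get_peak(ch_type='mag', tmin=0., tmax=0.2,
                                   mode='abs')
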
+
+def test_drop_channels_mixin():
+    """Test channels-dropping functionality
+    """
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    drop_ch = evoked.ch_names[:3]
+    ch_names = evoked.ch_names[3:]
+
+    ch_names_orig = evoked.ch_names
+    dummy = evoked.drop_channels(drop_ch, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, evoked.ch_names)
+    assert_equal(len(ch_names_orig), len(evoked.data))
+
+    evoked.drop_channels(drop_ch)
+    assert_equal(ch_names, evoked.ch_names)
+    assert_equal(len(ch_names), len(evoked.data))
+
+
+def test_pick_channels_mixin():
+    """Test channel-picking functionality
+    """
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    ch_names = evoked.ch_names[:3]
+
+    ch_names_orig = evoked.ch_names
+    dummy = evoked.pick_channels(ch_names, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, evoked.ch_names)
+    assert_equal(len(ch_names_orig), len(evoked.data))
+
+    evoked.pick_channels(ch_names)
+    assert_equal(ch_names, evoked.ch_names)
+    assert_equal(len(ch_names), len(evoked.data))
+
+
+def test_equalize_channels():
+    """Test equalization of channels
+    """
+    evoked1 = read_evokeds(fname, condition=0, proj=True)
+    evoked2 = evoked1.copy()
+    ch_names = evoked1.ch_names[2:]
+    evoked1.drop_channels(evoked1.ch_names[:1])
+    evoked2.drop_channels(evoked2.ch_names[1:2])
+    my_comparison = [evoked1, evoked2]
+    equalize_channels(my_comparison)
+    for e in my_comparison:
+        assert_equal(ch_names, e.ch_names)
+
+
+def test_array_epochs():
+    """Test creating evoked from array
+    """
+
+    # creating
+    rng = np.random.RandomState(42)
+    data1 = rng.randn(20, 60)
+    sfreq = 1e3
+    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
+    types = ['eeg'] * 20
+    info = create_info(ch_names, sfreq, types)
+    evoked1 = EvokedArray(data1, info, tmin=-0.01)
+
+    # save, read, and compare evokeds
+    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
+    evoked1.save(tmp_fname)
+    evoked2 = read_evokeds(tmp_fname)[0]
+    data2 = evoked2.data
+    assert_allclose(data1, data2)
+    assert_allclose(evoked1.times, evoked2.times)
+    assert_equal(evoked1.first, evoked2.first)
+    assert_equal(evoked1.last, evoked2.last)
+    assert_equal(evoked1.kind, evoked2.kind)
+    assert_equal(evoked1.nave, evoked2.nave)
+
+    # now compare with EpochsArray (with single epoch)
+    data3 = data1[np.newaxis, :, :]
+    events = np.c_[10, 0, 1]
+    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
+    assert_allclose(evoked1.data, evoked3.data)
+    assert_allclose(evoked1.times, evoked3.times)
+    assert_equal(evoked1.first, evoked3.first)
+    assert_equal(evoked1.last, evoked3.last)
+    assert_equal(evoked1.kind, evoked3.kind)
+    assert_equal(evoked1.nave, evoked3.nave)
+
+    # test match between channels info and data
+    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
+    types = ['eeg'] * 19
+    info = create_info(ch_names, sfreq, types)
+    assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
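
# Editor's sketch (not part of the patch): the EvokedArray path tested
# above; the channel count in info must match data.shape[0] or
# ValueError is raised.
import numpy as np
from mne.evoked import EvokedArray
from mne.io.meas_info import create_info

info = create_info(['EEG %03d' % (i + 1) for i in range(4)], 1000.,
                   ['eeg'] * 4)
evoked = EvokedArray(np.zeros((4, 50)), info, tmin=-0.01)
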
diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py
index 5e929d2..2823f94 100644
--- a/mne/tests/test_filter.py
+++ b/mne/tests/test_filter.py
@@ -1,6 +1,7 @@
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_almost_equal
-from nose.tools import assert_true, assert_raises
+from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
+                           assert_array_equal)
+from nose.tools import assert_equal, assert_true, assert_raises
 import os.path as op
 import warnings
 from scipy.signal import resample as sp_resample
@@ -73,7 +74,8 @@ def test_notch_filters():
 
         if lf is None:
             set_log_file()
-            out = open(log_file).readlines()
+            with open(log_file) as fid:
+                out = fid.readlines()
             if len(out) != 2:
                 raise ValueError('Detected frequencies not logged properly')
             out = np.fromstring(out[1], sep=', ')
@@ -82,6 +84,22 @@ def test_notch_filters():
         assert_almost_equal(new_power, orig_power, tol)
 
 
+def test_resample():
+    """Test resampling"""
+    x = np.random.normal(0, 1, (10, 10, 10))
+    x_rs = resample(x, 1, 2, 10)
+    assert_equal(x.shape, (10, 10, 10))
+    assert_equal(x_rs.shape, (10, 10, 5))
+
+    x_2 = x.swapaxes(0, 1)
+    x_2_rs = resample(x_2, 1, 2, 10)
+    assert_array_equal(x_2_rs.swapaxes(0, 1), x_rs)
+
+    x_3 = x.swapaxes(0, 2)
+    x_3_rs = resample(x_3, 1, 2, 10, 0)
+    assert_array_equal(x_3_rs.swapaxes(0, 2), x_rs)
+
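# Editor's sketch (not part of the patch): the array resampler tested
# above works along the last axis by default, so a 1/2 ratio halves the
# sample count; other axes can be selected with the last argument.
import numpy as np
from mne.filter import resample

x = np.random.randn(3, 100)
assert resample(x, 1, 2, 10).shape == (3, 50)  # up=1, down=2, npad=10
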
+
 def test_filters():
     """Test low-, band-, high-pass, and band-stop filters plus resampling
     """
@@ -144,13 +162,14 @@ def test_filters():
     assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
                               bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
     # test to make sure our resampling matches scipy's
-    bp_up_dn = sp_resample(sp_resample(bp_oa, 2 * len(bp_oa), window='boxcar'),
-                           len(bp_oa), window='boxcar')
+    bp_up_dn = sp_resample(sp_resample(bp_oa, 2 * bp_oa.shape[-1], axis=-1,
+                                       window='boxcar'),
+                           bp_oa.shape[-1], window='boxcar', axis=-1)
     assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
                               bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
 
     # make sure we don't alias
-    t = np.array(range(Fs * sig_len_secs)) / float(Fs)
+    t = np.array(list(range(Fs * sig_len_secs))) / float(Fs)
     # make sinusoid close to the Nyquist frequency
     sig = np.sin(2 * np.pi * Fs / 2.2 * t)
     # signal should disappear with 2x downsampling
@@ -203,7 +222,8 @@ def test_cuda():
 
     # check to make sure we actually used CUDA
     set_log_file()
-    out = open(log_file).readlines()
+    with open(log_file) as fid:
+        out = fid.readlines()
     assert_true(sum(['Using CUDA for FFT FIR filtering' in o
                      for o in out]) == 12)
 
diff --git a/mne/tests/test_fixes.py b/mne/tests/test_fixes.py
index b39f2ed..96e85b4 100644
--- a/mne/tests/test_fixes.py
+++ b/mne/tests/test_fixes.py
@@ -1,17 +1,17 @@
 # Authors: Emmanuelle Gouillart <emmanuelle.gouillart at normalesup.org>
 #          Gael Varoquaux <gael.varoquaux at normalesup.org>
-#          Alex Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Alex Gramfort <alexandre.gramfort at telecom-paristech.fr>
 # License: BSD
 
 import numpy as np
 
-from nose.tools import assert_equal
+from nose.tools import assert_equal, assert_raises
 from numpy.testing import assert_array_equal
 from distutils.version import LooseVersion
 from scipy import signal
 
 from ..fixes import (_in1d, _tril_indices, _copysign, _unravel_index,
-                     _Counter, _unique, _bincount)
+                     _Counter, _unique, _bincount, _digitize)
 from ..fixes import _firwin2 as mne_firwin2
 from ..fixes import _filtfilt as mne_filtfilt
 
@@ -76,6 +76,19 @@ def test_in1d():
     assert_equal(_in1d(a, b).sum(), 5)
 
 
+def test_digitize():
+    """Test numpy.digitize() replacement"""
+    data = np.arange(9)
+    bins = [0, 5, 10]
+    left = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
+    right = np.array([0, 1, 1, 1, 1, 1, 2, 2, 2])
+
+    assert_array_equal(_digitize(data, bins), left)
+    assert_array_equal(_digitize(data, bins, True), right)
+    assert_raises(NotImplementedError, _digitize, data + 0.1, bins, True)
+    assert_raises(NotImplementedError, _digitize, data, [0., 5, 10], True)
+
+
 def test_tril_indices():
     """Test numpy.tril_indices() replacement"""
     il1 = _tril_indices(4)
@@ -87,7 +100,7 @@ def test_tril_indices():
                   [13, 14, 15, 16]])
 
     assert_array_equal(a[il1],
-                       np.array([1,  5,  6,  9, 10, 11, 13, 14, 15, 16]))
+                       np.array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
 
     assert_array_equal(a[il2], np.array([5, 9, 10, 13, 14, 15]))
 
diff --git a/mne/tests/test_hdf5.py b/mne/tests/test_hdf5.py
new file mode 100644
index 0000000..893dc47
--- /dev/null
+++ b/mne/tests/test_hdf5.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+from os import path as op
+from nose.tools import assert_raises, assert_true, assert_equal
+
+import numpy as np
+
+from mne._hdf5 import write_hdf5, read_hdf5
+from mne.utils import requires_pytables, _TempDir, object_diff
+
+tempdir = _TempDir()
+
+
+@requires_pytables()
+def test_hdf5():
+    """Test HDF5 IO
+    """
+    test_file = op.join(tempdir, 'test.hdf5')
+    x = dict(a=dict(b=np.zeros(3)), c=np.zeros(2, np.complex128),
+             d=[dict(e=(1, -2., 'hello', u'goodbyeu\u2764')), None])
+    write_hdf5(test_file, 1)
+    assert_equal(read_hdf5(test_file), 1)
+    assert_raises(IOError, write_hdf5, test_file, x)  # file exists
+    write_hdf5(test_file, x, overwrite=True)
+    assert_raises(IOError, read_hdf5, test_file + 'FOO')  # not found
+    xx = read_hdf5(test_file)
+    assert_true(object_diff(x, xx) == '')  # no assert_equal, ugly output
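
# Editor's sketch (not part of the patch): the private HDF5 helpers
# tested above round-trip nested dicts/lists/tuples/arrays; writing over
# an existing file needs overwrite=True. The path is hypothetical.
import numpy as np
from mne._hdf5 import write_hdf5, read_hdf5

obj = dict(a=np.arange(3), b=[1, 'two', (3., None)])
write_hdf5('/tmp/demo.hdf5', obj, overwrite=True)
assert read_hdf5('/tmp/demo.hdf5')['b'][1] == 'two'
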
diff --git a/mne/tests/test_label.py b/mne/tests/test_label.py
index b7fd56b..be93f3c 100644
--- a/mne/tests/test_label.py
+++ b/mne/tests/test_label.py
@@ -1,48 +1,56 @@
 import os
 import os.path as op
-import cPickle as pickle
+from ..externals.six.moves import cPickle as pickle
 import glob
 import warnings
 
 import numpy as np
 from numpy.testing import assert_array_equal, assert_array_almost_equal
-from nose.tools import assert_true, assert_raises
+from nose.tools import assert_equal, assert_true, assert_raises
 
 from mne.datasets import sample
 from mne import (label_time_courses, read_label, stc_to_label,
                  read_source_estimate, read_source_spaces, grow_labels,
-                 labels_from_parc, parc_from_labels)
-from mne.label import Label
-from mne.utils import requires_mne, run_subprocess, _TempDir
-from mne.fixes import in1d
+                 read_labels_from_annot, write_labels_to_annot, split_label)
+from mne.label import Label, _blend_colors
+from mne.utils import requires_mne, run_subprocess, _TempDir, requires_sklearn
+from mne.fixes import digitize, in1d, assert_is, assert_is_not
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 data_path = sample.data_path(download=False)
 subjects_dir = op.join(data_path, 'subjects')
+src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
 stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
 real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
                            'Aud-lh.label')
 real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
                               'Aud-rh.label')
-src_fname = op.join(data_path, 'MEG', 'sample',
+v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
+
+fwd_fname = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis-eeg-oct-6p-fwd.fif')
+src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
+                        'fsaverage-ico-5-src.fif')
 
-test_path = op.join(op.split(__file__)[0], '..', 'fiff', 'tests', 'data')
+test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
 label_fname = op.join(test_path, 'test-lh.label')
 label_rh_fname = op.join(test_path, 'test-rh.label')
 tempdir = _TempDir()
 
 # This code was used to generate the "fake" test labels:
-#for hemi in ['lh', 'rh']:
+# for hemi in ['lh', 'rh']:
 #    label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
 #                  hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
 #    label.save(op.join(test_path, 'test-%s.label' % hemi))
 
 
 def assert_labels_equal(l0, l1, decimal=5):
-    for attr in ['comment', 'hemi', 'subject']:
-        assert_true(getattr(l0, attr) == getattr(l1, attr))
+    for attr in ['comment', 'hemi', 'subject', 'color']:
+        attr0 = getattr(l0, attr)
+        attr1 = getattr(l1, attr)
+        msg = "label.%s: %r != %r" % (attr, attr0, attr1)
+        assert_equal(attr0, attr1, msg)
     for attr in ['vertices', 'pos', 'values']:
         a0 = getattr(l0, attr)
         a1 = getattr(l1, attr)
@@ -53,7 +61,7 @@ def test_label_subject():
     """Test label subject name extraction
     """
     label = read_label(label_fname)
-    assert_true(label.subject is None)
+    assert_is(label.subject, None)
     assert_true('unknown' in repr(label))
     label = read_label(label_fname, subject='fsaverage')
     assert_true(label.subject == 'fsaverage')
@@ -65,38 +73,72 @@ def test_label_addition():
     """
     pos = np.random.rand(10, 3)
     values = np.arange(10.) / 10
-    idx0 = range(7)
-    idx1 = range(7, 10)  # non-overlapping
-    idx2 = range(5, 10)  # overlapping
-    l0 = Label(idx0, pos[idx0], values[idx0], 'lh')
+    idx0 = list(range(7))
+    idx1 = list(range(7, 10))  # non-overlapping
+    idx2 = list(range(5, 10))  # overlapping
+    l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
     l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
-    l2 = Label(idx2, pos[idx2], values[idx2], 'lh')
+    l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
 
-    assert len(l0) == len(idx0)
+    assert_equal(len(l0), len(idx0))
 
     # adding non-overlapping labels
     l01 = l0 + l1
-    assert len(l01) == len(l0) + len(l1)
+    assert_equal(len(l01), len(l0) + len(l1))
     assert_array_equal(l01.values[:len(l0)], l0.values)
+    assert_equal(l01.color, l0.color)
 
     # adding overlapping labels
     l = l0 + l2
     i0 = np.where(l0.vertices == 6)[0][0]
     i2 = np.where(l2.vertices == 6)[0][0]
     i = np.where(l.vertices == 6)[0][0]
-    assert l.values[i] == l0.values[i0] + l2.values[i2]
-    assert l.values[0] == l0.values[0]
+    assert_equal(l.values[i], l0.values[i0] + l2.values[i2])
+    assert_equal(l.values[0], l0.values[0])
     assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
+    assert_equal(l.color, _blend_colors(l0.color, l2.color))
 
     # adding lh and rh
     l2.hemi = 'rh'
     # this now has deprecated behavior
     bhl = l0 + l2
-    assert bhl.hemi == 'both'
-    assert len(bhl) == len(l0) + len(l2)
+    assert_equal(bhl.hemi, 'both')
+    assert_equal(len(bhl), len(l0) + len(l2))
+    assert_equal(bhl.color, l.color)
+
+    bhl2 = l1 + bhl
+    assert_labels_equal(bhl2.lh, l01)
+    assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
+
+
+@sample.requires_sample_data
+def test_label_in_src():
+    """Test label in src"""
+    src = read_source_spaces(src_fname)
+    label = read_label(v1_label_fname)
+
+    # construct label from source space vertices
+    vert_in_src = np.intersect1d(label.vertices, src[0]['vertno'], True)
+    where = in1d(label.vertices, vert_in_src)
+    pos_in_src = label.pos[where]
+    values_in_src = label.values[where]
+    label_src = Label(vert_in_src, pos_in_src, values_in_src,
+                      hemi='lh').fill(src)
 
-    bhl = l1 + bhl
-    assert_labels_equal(bhl.lh, l01)
+    # check label vertices
+    vertices_status = in1d(src[0]['nearest'], label.vertices)
+    vertices_in = np.nonzero(vertices_status)[0]
+    vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
+    assert_array_equal(label_src.vertices, vertices_in)
+    assert_array_equal(in1d(vertices_out, label_src.vertices), False)
+
+    # check values
+    value_idx = digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
+    assert_array_equal(label_src.values, values_in_src[value_idx])
+
+    # test exception
+    vertices = np.append([-1], vert_in_src)
+    assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
 
 
 @sample.requires_sample_data
@@ -112,15 +154,22 @@ def test_label_io():
     """Test IO of label files
     """
     label = read_label(label_fname)
+
+    # label attributes
+    assert_equal(label.name, 'test-lh')
+    assert_is(label.subject, None)
+    assert_is(label.color, None)
+
+    # save and reload
     label.save(op.join(tempdir, 'foo'))
     label2 = read_label(op.join(tempdir, 'foo-lh.label'))
     assert_labels_equal(label, label2)
 
     # pickling
     dest = op.join(tempdir, 'foo.pickled')
-    with open(dest, 'w') as fid:
+    with open(dest, 'wb') as fid:
         pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
-    with open(dest) as fid:
+    with open(dest, 'rb') as fid:
         label2 = pickle.load(fid)
     assert_labels_equal(label, label2)
 
@@ -136,34 +185,31 @@ def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
 
 
 @sample.requires_sample_data
-def test_labels_from_parc():
+def test_read_labels_from_annot():
     """Test reading labels from FreeSurfer parcellation
     """
     # test some invalid inputs
-    assert_raises(ValueError, labels_from_parc, 'sample', hemi='bla',
+    assert_raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
                   subjects_dir=subjects_dir)
-    assert_raises(ValueError, labels_from_parc, 'sample',
+    assert_raises(ValueError, read_labels_from_annot, 'sample',
                   annot_fname='bla.annot', subjects_dir=subjects_dir)
 
     # read labels using hemi specification
-    labels_lh, colors_lh = labels_from_parc('sample', hemi='lh',
-                                            subjects_dir=subjects_dir)
+    labels_lh = read_labels_from_annot('sample', hemi='lh',
+                                       subjects_dir=subjects_dir)
     for label in labels_lh:
         assert_true(label.name.endswith('-lh'))
         assert_true(label.hemi == 'lh')
-
-    assert_true(len(labels_lh) == len(colors_lh))
+        assert_is_not(label.color, None)
 
     # read labels using annot_fname
     annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
-    labels_rh, colors_rh = labels_from_parc('sample', annot_fname=annot_fname,
-                                            subjects_dir=subjects_dir)
-
-    assert_true(len(labels_rh) == len(colors_rh))
-
+    labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
+                                       subjects_dir=subjects_dir)
     for label in labels_rh:
         assert_true(label.name.endswith('-rh'))
         assert_true(label.hemi == 'rh')
+        assert_is_not(label.color, None)
 
     # combine the lh, rh, labels and sort them
     labels_lhrh = list()
@@ -174,9 +220,7 @@ def test_labels_from_parc():
     labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
 
     # read all labels at once
-    labels_both, colors = labels_from_parc('sample', subjects_dir=subjects_dir)
-
-    assert_true(len(labels_both) == len(colors))
+    labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
 
     # we have the same result
     _assert_labels_equal(labels_lhrh, labels_both)
@@ -185,22 +229,22 @@ def test_labels_from_parc():
     assert_true(len(labels_both) == 68)
 
     # test regexp
-    label = labels_from_parc('sample', parc='aparc.a2009s', regexp='Angu',
-                             subjects_dir=subjects_dir)[0][0]
+    label = read_labels_from_annot('sample', parc='aparc.a2009s',
+                                   regexp='Angu', subjects_dir=subjects_dir)[0]
     assert_true(label.name == 'G_pariet_inf-Angular-lh')
     # silly, but real regexp:
-    label = labels_from_parc('sample', parc='aparc.a2009s',
-                             regexp='.*-.{4,}_.{3,3}-L',
-                             subjects_dir=subjects_dir)[0][0]
+    label = read_labels_from_annot('sample', 'aparc.a2009s',
+                                   regexp='.*-.{4,}_.{3,3}-L',
+                                   subjects_dir=subjects_dir)[0]
     assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
-    assert_raises(RuntimeError, labels_from_parc, 'sample', parc='aparc',
+    assert_raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
                   annot_fname=annot_fname, regexp='JackTheRipper',
                   subjects_dir=subjects_dir)
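
# Editor's sketch (not part of the patch): the renamed reader tested
# above returns a flat list of Label objects, each carrying its color,
# instead of the old (labels, colors) pair from labels_from_parc.
from mne import read_labels_from_annot

labels = read_labels_from_annot('sample', parc='aparc', hemi='lh',
                                subjects_dir=subjects_dir)  # fixture dir
print(labels[0].name, labels[0].color)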
 
 
 @sample.requires_sample_data
 @requires_mne
-def test_labels_from_parc_annot2labels():
+def test_read_labels_from_annot_annot2labels():
     """Test reading labels from parc. by comparing with mne_annot2labels
     """
 
@@ -223,7 +267,7 @@ def test_labels_from_parc_annot2labels():
 
         return labels
 
-    labels, _ = labels_from_parc('sample', subjects_dir=subjects_dir)
+    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
     labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')
 
     # we have the same result, mne does not fill pos, so ignore it
@@ -231,74 +275,167 @@ def test_labels_from_parc_annot2labels():
 
 
 @sample.requires_sample_data
-def test_parc_from_labels():
+def test_write_labels_to_annot():
     """Test writing FreeSurfer parcellation from labels"""
 
-    labels, colors = labels_from_parc('sample', subjects_dir=subjects_dir)
+    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
 
     # write left and right hemi labels:
     fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]
 
     for fname in fnames:
-        parc_from_labels(labels, colors, annot_fname=fname)
+        write_labels_to_annot(labels, annot_fname=fname)
 
     # read it back
-    labels2, colors2 = labels_from_parc('sample', subjects_dir=subjects_dir,
-                                        annot_fname=fnames[0])
-    labels22, colors22 = labels_from_parc('sample', subjects_dir=subjects_dir,
-                                          annot_fname=fnames[1])
+    labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                     annot_fname=fnames[0])
+    labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                      annot_fname=fnames[1])
     labels2.extend(labels22)
-    colors2.extend(colors22)
 
     names = [label.name for label in labels2]
 
-    for label, color in zip(labels, colors):
+    for label in labels:
         idx = names.index(label.name)
         assert_labels_equal(label, labels2[idx])
-        assert_array_almost_equal(np.array(color), np.array(colors2[idx]))
+
+    # same with label-internal colors
+    for fname in fnames:
+        write_labels_to_annot(labels, annot_fname=fname, overwrite=True)
+    labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                     annot_fname=fnames[0])
+    labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                      annot_fname=fnames[1])
+    labels3.extend(labels33)
+    names3 = [label.name for label in labels3]
+    for label in labels:
+        idx = names3.index(label.name)
+        assert_labels_equal(label, labels3[idx])
 
     # make sure we can't overwrite things
-    assert_raises(ValueError, parc_from_labels, labels, colors,
+    assert_raises(ValueError, write_labels_to_annot, labels,
                   annot_fname=fnames[0])
 
     # however, this works
-    parc_from_labels(labels, colors=None, annot_fname=fnames[0],
-                     overwrite=True)
+    write_labels_to_annot(labels, annot_fname=fnames[0], overwrite=True)
+
+    # label without color
+    labels_ = labels[:]
+    labels_[0] = labels_[0].copy()
+    labels_[0].color = None
+    write_labels_to_annot(labels_, annot_fname=fnames[0], overwrite=True)
 
-    # test some other invalid inputs
-    assert_raises(ValueError, parc_from_labels, labels[:-1], colors,
+    # duplicate color
+    labels_[0].color = labels_[2].color
+    assert_raises(ValueError, write_labels_to_annot, labels_,
                   annot_fname=fnames[0], overwrite=True)
-    colors2 = np.asarray(colors)
-    assert_raises(ValueError, parc_from_labels, labels, colors2[:, :3],
+
+    # invalid color inputs
+    labels_[0].color = (1.1, 1., 1., 1.)
+    assert_raises(ValueError, write_labels_to_annot, labels_,
                   annot_fname=fnames[0], overwrite=True)
-    colors2[0] = 1.1
-    assert_raises(ValueError, parc_from_labels, labels, colors2,
+
+    # overlapping labels
+    labels_ = labels[:]
+    cuneus_lh = labels[6]
+    precuneus_lh = labels[50]
+    labels_.append(precuneus_lh + cuneus_lh)
+    assert_raises(ValueError, write_labels_to_annot, labels_,
                   annot_fname=fnames[0], overwrite=True)
 
+    # unlabeled vertices
+    labels_lh = [label for label in labels if label.name.endswith('lh')]
+    write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
+                          overwrite=True, subjects_dir=subjects_dir)
+    labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
+                                             subjects_dir=subjects_dir)
+    assert_equal(len(labels_lh), len(labels_reloaded))
+    label0 = labels_lh[0]
+    label1 = labels_reloaded[-1]
+    assert_equal(label1.name, "unknown-lh")
+    assert_true(np.all(in1d(label0.vertices, label1.vertices)))
+
 
 @sample.requires_sample_data
+def test_split_label():
+    """Test splitting labels"""
+    aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
+                                   regexp='lingual', subjects_dir=subjects_dir)
+    lingual = aparc[0]
+
+    # split with names
+    parts = ('lingual_post', 'lingual_ant')
+    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
+
+    # check output names
+    assert_equal(post.name, parts[0])
+    assert_equal(ant.name, parts[1])
+
+    # check vertices add up
+    lingual_reconst = post + ant
+    lingual_reconst.name = lingual.name
+    lingual_reconst.comment = lingual.comment
+    lingual_reconst.color = lingual.color
+    assert_labels_equal(lingual_reconst, lingual)
+
+    # compare output of Label.split() method
+    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
+    assert_labels_equal(post1, post)
+    assert_labels_equal(ant1, ant)
+
+    # compare fs_like split with freesurfer split
+    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
+    fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
+               32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
+               71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
+               107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
+               139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
+    assert_array_equal(antmost.vertices, fs_vert)
+
+    # check default label name
+    assert_equal(antmost.name, "lingual_div40-lh")
+
+
+@sample.requires_sample_data
+@requires_sklearn
 def test_stc_to_label():
     """Test stc_to_label
     """
-    src = read_source_spaces(src_fname)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        src = read_source_spaces(fwd_fname)
+    src_bad = read_source_spaces(src_bad_fname)
     stc = read_source_estimate(stc_fname, 'sample')
     os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
-    labels1 = stc_to_label(stc, src='sample', smooth=3)
-    with warnings.catch_warnings(True) as w:  # connectedness warning
+    with warnings.catch_warnings(record=True) as w:  # connectedness warning
+        warnings.simplefilter('always')
+        labels1 = stc_to_label(stc, src='sample', smooth=3)
         labels2 = stc_to_label(stc, src=src, smooth=3)
-    assert_true(len(w) == 1)
-    assert_true(len(labels1) == len(labels2))
+    assert_true(len(w) > 0)
+    assert_equal(len(labels1), len(labels2))
     for l1, l2 in zip(labels1, labels2):
         assert_labels_equal(l1, l2, decimal=4)
 
-    with warnings.catch_warnings(True) as w:  # connectedness warning
-        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=3,
+    with warnings.catch_warnings(record=True) as w:  # connectedness warning
+        warnings.simplefilter('always')
+        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                             connected=True)
-    assert_true(len(w) == 1)
-    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=3,
+    assert_true(len(w) > 0)
+    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                   connected=True)
-    assert_true(len(labels_lh) == 1)
-    assert_true(len(labels_rh) == 1)
+    assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
+                  connected=True)
+    assert_equal(len(labels_lh), 1)
+    assert_equal(len(labels_rh), 1)
+
+    # smooth=True should reproduce the smooth=3 labels
+    with warnings.catch_warnings(record=True) as w:  # connectedness warning
+        warnings.simplefilter('always')
+        labels_patch = stc_to_label(stc, src=src, smooth=True)
+    assert_equal(len(w), 1)
+    assert_equal(len(labels_patch), len(labels1))
+    for l1, l2 in zip(labels_patch, labels1):
+        assert_labels_equal(l1, l2, decimal=4)
 
 
 @sample.requires_sample_data
@@ -338,15 +475,40 @@ def test_grow_labels():
     # these were chosen manually in mne_analyze
     should_be_in = [[49, 227], [51207, 48794]]
     hemis = [0, 1]
-    labels = grow_labels('sample', seeds, 3, hemis, n_jobs=2)
-
-    for label, seed, hemi, sh in zip(labels, seeds, hemis, should_be_in):
-        assert(np.any(label.vertices == seed))
-        assert np.all(in1d(sh, label.vertices))
-        if hemi == 0:
-            assert(label.hemi == 'lh')
-        else:
-            assert(label.hemi == 'rh')
+    names = ['aneurism', 'tumor']
+    labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, n_jobs=2,
+                         names=names)
+
+    tgt_names = ['aneurism-lh', 'tumor-rh']
+    tgt_hemis = ['lh', 'rh']
+    for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
+                                           should_be_in, tgt_names):
+        assert_true(np.any(label.vertices == seed))
+        assert_true(np.all(in1d(sh, label.vertices)))
+        assert_equal(label.hemi, hemi)
+        assert_equal(label.name, name)
+
+    # grow labels with and without overlap
+    seeds = [57532, [58887, 6304]]
+    l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
+    seeds = [57532, [58887, 6304]]
+    l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
+                           overlap=False)
+
+    # test label naming
+    assert_equal(l01.name, 'Label_0-lh')
+    assert_equal(l02.name, 'Label_1-lh')
+    assert_equal(l11.name, 'Label_0-lh')
+    assert_equal(l12.name, 'Label_1-lh')
+
+    # make sure set 1 does not overlap
+    overlap = np.intersect1d(l11.vertices, l12.vertices, True)
+    assert_array_equal(overlap, [])
+
+    # make sure both sets cover the same vertices
+    l0 = l01 + l02
+    l1 = l11 + l12
+    assert_array_equal(l1.vertices, l0.vertices)
 
 
 @sample.requires_sample_data
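
The test changes above track the 0.8 rename of labels_from_parc /
parc_from_labels to read_labels_from_annot / write_labels_to_annot, with
colors moving from a parallel list onto each Label, plus the new
split_label / Label.split. A minimal sketch of the renamed API, assuming
subjects_dir is a placeholder for a FreeSurfer subjects directory that
contains 'sample':

    from mne.label import read_labels_from_annot, write_labels_to_annot

    subjects_dir = '/path/to/freesurfer/subjects'  # placeholder

    # colors now live on each label.color instead of a separate list
    labels = read_labels_from_annot('sample', parc='aparc',
                                    subjects_dir=subjects_dir)
    # one annot file per hemisphere, as in the tests above
    write_labels_to_annot(labels, annot_fname='lh-myparc', overwrite=True)
    # new Label.split(): divide a label into named parts
    post, ant = labels[0].split(('post', 'ant'), subjects_dir=subjects_dir)
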
diff --git a/mne/tests/test_misc.py b/mne/tests/test_misc.py
index 467d569..edf8589 100644
--- a/mne/tests/test_misc.py
+++ b/mne/tests/test_misc.py
@@ -3,7 +3,7 @@ from nose.tools import assert_true
 
 from mne.misc import parse_config
 
-ave_fname = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data',
+ave_fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
                     'test.ave')
 
 
diff --git a/mne/tests/test_proj.py b/mne/tests/test_proj.py
index 2468ca7..4c67da7 100644
--- a/mne/tests/test_proj.py
+++ b/mne/tests/test_proj.py
@@ -3,32 +3,36 @@ from nose.tools import assert_true
 import warnings
 
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_allclose
+from numpy.testing import (assert_array_almost_equal, assert_allclose,
+                           assert_equal)
 
 import copy as cp
 
 import mne
 from mne.datasets import sample
-from mne.fiff import Raw, pick_types
+from mne import pick_types
+from mne.io import Raw
 from mne import compute_proj_epochs, compute_proj_evoked, compute_proj_raw
-from mne.fiff.proj import make_projector, activate_proj
+from mne.io.proj import make_projector, activate_proj
 from mne.proj import read_proj, write_proj, make_eeg_average_ref_proj
 from mne import read_events, Epochs, sensitivity_map, read_source_estimate
 from mne.utils import _TempDir
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 event_fname = op.join(base_dir, 'test-eve.fif')
-proj_fname = op.join(base_dir, 'test_proj.fif')
-proj_gz_fname = op.join(base_dir, 'test_proj.fif.gz')
+proj_fname = op.join(base_dir, 'test-proj.fif')
+proj_gz_fname = op.join(base_dir, 'test-proj.fif.gz')
 bads_fname = op.join(base_dir, 'test_bads.txt')
 
 data_path = sample.data_path(download=False)
 sample_path = op.join(data_path, 'MEG', 'sample')
 fwd_fname = op.join(sample_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
 sensmap_fname = op.join(sample_path, 'sample_audvis-%s-oct-6-fwd-sensmap-%s.w')
+
+# sample dataset should be updated to reflect mne conventions
 eog_fname = op.join(sample_path, 'sample_audvis_eog_proj.fif')
 
 tempdir = _TempDir()
@@ -38,7 +42,9 @@ tempdir = _TempDir()
 def test_sensitivity_maps():
     """Test sensitivity map computation"""
     fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)
-    proj_eog = read_proj(eog_fname)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        proj_eog = read_proj(eog_fname)
     decim = 6
     for ch_type in ['eeg', 'grad', 'mag']:
         w = read_source_estimate(sensmap_fname % (ch_type, 'lh')).data
@@ -88,9 +94,9 @@ def test_compute_proj_epochs():
 
     evoked = epochs.average()
     projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
-    write_proj(op.join(tempdir, 'proj.fif.gz'), projs)
+    write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs)
     for p_fname in [proj_fname, proj_gz_fname,
-                    op.join(tempdir, 'proj.fif.gz')]:
+                    op.join(tempdir, 'test-proj.fif.gz')]:
         projs2 = read_proj(p_fname)
 
         assert_true(len(projs) == len(projs2))
@@ -121,7 +127,7 @@ def test_compute_proj_epochs():
     # test that you can save them
     epochs.info['projs'] += projs
     evoked = epochs.average()
-    evoked.save(op.join(tempdir, 'foo.fif'))
+    evoked.save(op.join(tempdir, 'foo-ave.fif'))
 
     projs = read_proj(proj_fname)
 
@@ -135,6 +141,15 @@ def test_compute_proj_epochs():
     proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
     assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)
 
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_proj(proj_badname, projs)
+        read_proj(proj_badname)
+        print([ww.message for ww in w])
+    assert_equal(len(w), 2)
+
 
 def test_compute_proj_raw():
     """Test SSP computation on raw"""
@@ -142,7 +157,8 @@ def test_compute_proj_raw():
     raw_time = 2.5  # Do shorter amount for speed
     raw = Raw(raw_fname, preload=True).crop(0, raw_time, False)
     for ii in (0.25, 0.5, 1, 2):
-        with warnings.catch_warnings(True) as w:
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
             projs = compute_proj_raw(raw, duration=ii - 0.1, stop=raw_time,
                                      n_grad=1, n_mag=1, n_eeg=0)
             assert_true(len(w) == 1)
@@ -159,10 +175,11 @@ def test_compute_proj_raw():
         raw.save(op.join(tempdir, 'foo_%d_raw.fif' % ii), overwrite=True)
 
     # Test that purely continuous (no duration) raw projection works
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         projs = compute_proj_raw(raw, duration=None, stop=raw_time,
                                  n_grad=1, n_mag=1, n_eeg=0)
-        assert_true(len(w) == 1)
+        assert_equal(len(w), 1)
 
     # test that you can compute the projection matrix
     projs = activate_proj(projs)
@@ -179,7 +196,8 @@ def test_compute_proj_raw():
     # here to save an extra filtering (raw would have to be LP'ed to be equiv)
     raw_resamp = cp.deepcopy(raw)
     raw_resamp.resample(raw.info['sfreq'] * 2, n_jobs=2)
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         projs = compute_proj_raw(raw_resamp, duration=None, stop=raw_time,
                                  n_grad=1, n_mag=1, n_eeg=0)
     projs = activate_proj(projs)
@@ -188,7 +206,8 @@ def test_compute_proj_raw():
 
     # test with bads
     raw.load_bad_channels(bads_fname)  # adds 2 bad mag channels
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         projs = compute_proj_raw(raw, n_grad=0, n_mag=0, n_eeg=1)
 
     # test that bad channels can be excluded
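
A mechanical change repeated throughout this file: warnings.catch_warnings(True)
becomes catch_warnings(record=True) with an explicit simplefilter('always').
The record argument is keyword-only on Python 3 (this is part of the same
2/3 compatibility sweep as the fiff -> io and xrange -> range changes), and
without 'always' the warning registry can swallow repeats of an
already-seen warning, making the len(w) assertions flaky. The pattern in
isolation:

    import warnings

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')  # never deduplicate warnings
        for _ in range(2):
            warnings.warn('demo')  # stand-in for the MNE call under test
    assert len(w) == 2  # the repeat would be dropped under the default filter
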
diff --git a/mne/tests/test_report.py b/mne/tests/test_report.py
new file mode 100644
index 0000000..db3da15
--- /dev/null
+++ b/mne/tests/test_report.py
@@ -0,0 +1,119 @@
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import glob
+import warnings
+
+from nose.tools import assert_true, assert_equal, assert_raises
+
+from mne import read_evokeds
+from mne.datasets import sample
+from mne.report import Report
+from mne.io import Raw
+from mne.utils import _TempDir
+
+data_dir = sample.data_path(download=False)
+base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
+                               'data'))
+subjects_dir = op.join(data_dir, 'subjects')
+
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked1_fname = op.join(base_dir, 'test-nf-ave.fif')
+evoked2_fname = op.join(base_dir, 'test-ave.fif')
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+os.environ['MNE_REPORT_TESTING'] = 'True'
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+tempdir = _TempDir()
+
+
+def test_render_report():
+    """Test rendering -*.fif files for mne report.
+    """
+
+    report = Report(info_fname=raw_fname)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        report.parse_folder(data_path=base_dir)
+    assert_true(len(w) == 1)
+
+    # Check correct paths and filenames
+    assert_true(raw_fname in report.fnames)
+    assert_true(event_name in report.fnames)
+    assert_true(report.data_path == base_dir)
+
+    # Check if raw repr is printed correctly
+    raw = Raw(raw_fname)
+    raw_idx = [ii for ii, fname in enumerate(report.fnames)
+               if fname == raw_fname][0]
+    raw_html = report.html[raw_idx]
+    assert_true(raw_html.find(repr(raw)[1:-1]) != -1)
+    assert_true(raw_html.find(str(raw.info['sfreq'])) != -1)
+    assert_true(raw_html.find('class="raw"') != -1)
+    assert_true(raw_html.find(raw_fname) != -1)
+
+    # Check if all files were rendered in the report
+    fnames = glob.glob(op.join(base_dir, '*.fif'))
+    bad_name = 'test_ctf_comp_raw-eve.fif'
+    decrement = any(fname.endswith(bad_name) for fname in fnames)
+    fnames = [fname for fname in fnames if
+              fname.endswith(('-eve.fif', '-ave.fif', '-cov.fif',
+                              '-sol.fif', '-fwd.fif', '-inv.fif',
+                              '-src.fif', '-trans.fif', 'raw.fif',
+                              'sss.fif', '-epo.fif')) and
+              not fname.endswith(bad_name)]
+    # last file above gets created by another test, and it shouldn't be there
+
+    for fname in fnames:
+        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
+
+    assert_equal(len(report.fnames), len(fnames))
+    assert_equal(len(report.html), len(report.fnames))
+
+    evoked1 = read_evokeds(evoked1_fname)
+    evoked2 = read_evokeds(evoked2_fname)
+    assert_equal(len(report.fnames) + len(evoked1) + len(evoked2) - 2,
+                 report.initial_id - decrement)
+
+    # Check saving functionality
+    report.data_path = tempdir
+    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
+    assert_true(op.isfile(op.join(tempdir, 'report.html')))
+
+    # Check add_section functionality
+    fig = evoked1[0].plot(show=False)
+    report.add_section(figs=fig,  # test non-list input
+                       captions=['evoked response'])
+    assert_equal(len(report.html), len(fnames) + 1)
+    assert_equal(len(report.html), len(report.fnames))
+    assert_raises(ValueError, report.add_section, figs=[fig, fig],
+                  captions='H')
+
+    # Check saving same report to new filename
+    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
+    assert_true(op.isfile(op.join(tempdir, 'report2.html')))
+
+    # Check overwriting file
+    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
+                overwrite=True)
+    assert_true(op.isfile(op.join(tempdir, 'report.html')))
+
+
+@sample.requires_sample_data
+def test_render_mri():
+    """Test rendering MRI for mne report.
+    """
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=subjects_dir)
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        report.parse_folder(data_path=data_dir,
+                            pattern='*sample_audvis_raw-trans.fif')
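
mne/tests/test_report.py is new in 0.8 and drives the Report class end to
end. A minimal sketch of the workflow it exercises, with raw_fname and
base_dir as placeholders for a raw file and a folder of
MNE-conventionally named .fif files:

    import matplotlib
    matplotlib.use('Agg')  # headless backend, as in the tests
    import matplotlib.pyplot as plt
    from mne.report import Report

    raw_fname = 'test_raw.fif'  # placeholder raw file
    base_dir = '.'              # placeholder folder of *.fif files

    report = Report(info_fname=raw_fname)    # measurement info for rendering
    report.parse_folder(data_path=base_dir)  # renders -ave/-eve/-cov/... files
    fig = plt.figure()                       # any matplotlib figure works
    report.add_section(figs=fig, captions=['my figure'])
    report.save('report.html', open_browser=False, overwrite=True)
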
diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py
index 3aac3ae..6360ae5 100644
--- a/mne/tests/test_source_estimate.py
+++ b/mne/tests/test_source_estimate.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import os.path as op
 from nose.tools import assert_true, assert_raises
 import warnings
@@ -18,8 +19,9 @@ from mne.source_estimate import (spatio_temporal_tris_connectivity,
                                  compute_morph_matrix, grade_to_vertices)
 
 from mne.minimum_norm import read_inverse_operator
-from mne.label import labels_from_parc, label_sign_flip
-from mne.utils import _TempDir, requires_pandas, requires_sklearn
+from mne.label import read_labels_from_annot, label_sign_flip
+from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
+                       requires_pytables)
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
@@ -51,12 +53,13 @@ def test_volume_stc():
         stc = VolSourceEstimate(data, vertno, 0, 1)
         fname_temp = op.join(tempdir, 'temp-vl.stc')
         stc_new = stc
-        for _ in xrange(2):
+        for _ in range(2):
             stc_new.save(fname_temp)
             stc_new = read_source_estimate(fname_temp)
             assert_true(isinstance(stc_new, VolSourceEstimate))
             assert_array_equal(vertno_read, stc_new.vertno)
             assert_array_almost_equal(stc.data, stc_new.data)
+
     # now let's actually read a MNE-C processed file
     stc = read_source_estimate(fname_vol, 'sample')
     assert_true(isinstance(stc, VolSourceEstimate))
@@ -64,7 +67,7 @@ def test_volume_stc():
     assert_true('sample' in repr(stc))
     stc_new = stc
     assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
-    for _ in xrange(2):
+    for _ in range(2):
         fname_temp = op.join(tempdir, 'temp-vol.w')
         stc_new.save(fname_temp, ftype='w')
         stc_new = read_source_estimate(fname_temp)
@@ -75,17 +78,22 @@ def test_volume_stc():
     # save the stc as a nifti file and export
     try:
         import nibabel as nib
-        src = read_source_spaces(fname_vsrc)
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            src = read_source_spaces(fname_vsrc)
         vol_fname = op.join(tempdir, 'stc.nii.gz')
         stc.save_as_volume(vol_fname, src,
                            dest='surf', mri_resolution=False)
-        img = nib.load(vol_fname)
+        with warnings.catch_warnings(record=True):  # nib<->numpy
+            img = nib.load(vol_fname)
         assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
 
-        t1_img = nib.load(fname_t1)
+        with warnings.catch_warnings(record=True):  # nib<->numpy
+            t1_img = nib.load(fname_t1)
         stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
                            dest='mri', mri_resolution=True)
-        img = nib.load(vol_fname)
+        with warnings.catch_warnings(record=True):  # nib<->numpy
+            img = nib.load(vol_fname)
         assert_true(img.shape == t1_img.shape + (len(stc.times),))
         assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                   decimal=5)
@@ -97,7 +105,7 @@ def test_volume_stc():
                                   decimal=5)
 
     except ImportError:
-        print 'Save as nifti test skipped, needs NiBabel'
+        print('Save as nifti test skipped, needs NiBabel')
 
 
 @sample.requires_sample_data
@@ -106,8 +114,8 @@ def test_expand():
     """
     stc = read_source_estimate(fname, 'sample')
     assert_true('sample' in repr(stc))
-    labels_lh, _ = labels_from_parc('sample', hemi='lh',
-                                    subjects_dir=subjects_dir)
+    labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
+                                       subjects_dir=subjects_dir)
     stc_limited = stc.in_label(labels_lh[0] + labels_lh[1])
     stc_new = stc_limited.copy()
     stc_new.data.fill(0)
@@ -117,46 +125,65 @@ def test_expand():
     assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
 
 
-@sample.requires_sample_data
+def _fake_stc(n_time=10):
+    verts = [np.arange(10), np.arange(90)]
+    return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
+
+
 def test_io_stc():
     """Test IO for STC files
     """
-    stc = read_source_estimate(fname)
+    stc = _fake_stc()
     stc.save(op.join(tempdir, "tmp.stc"))
     stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
 
     assert_array_almost_equal(stc.data, stc2.data)
     assert_array_almost_equal(stc.tmin, stc2.tmin)
-    assert_true(len(stc.vertno) == len(stc2.vertno))
+    assert_equal(len(stc.vertno), len(stc2.vertno))
     for v1, v2 in zip(stc.vertno, stc2.vertno):
         assert_array_almost_equal(v1, v2)
     assert_array_almost_equal(stc.tstep, stc2.tstep)
 
 
-@sample.requires_sample_data
+@requires_pytables()
+def test_io_stc_h5():
+    """Test IO for STC files using HDF5
+    """
+    stc = _fake_stc()
+    assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
+    out_name = op.join(tempdir, 'tmp')
+    stc.save(out_name, ftype='h5')
+    stc3 = read_source_estimate(out_name)
+    stc4 = read_source_estimate(out_name + '-stc.h5')
+    assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
+    for stc_new in stc3, stc4:
+        assert_equal(stc_new.subject, stc.subject)
+        assert_array_equal(stc_new.data, stc.data)
+        assert_array_equal(stc_new.tmin, stc.tmin)
+        assert_array_equal(stc_new.tstep, stc.tstep)
+        assert_equal(len(stc_new.vertno), len(stc.vertno))
+        for v1, v2 in zip(stc_new.vertno, stc.vertno):
+            assert_array_equal(v1, v2)
+
+
 def test_io_w():
     """Test IO for w files
     """
-    w_fname = op.join(data_path, 'MEG', 'sample',
-                      'sample_audvis-meg-oct-6-fwd-sensmap')
-
+    stc = _fake_stc(n_time=1)
+    w_fname = op.join(tempdir, 'fake')
+    stc.save(w_fname, ftype='w')
     src = read_source_estimate(w_fname)
-
     src.save(op.join(tempdir, 'tmp'), ftype='w')
-
     src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
-
     assert_array_almost_equal(src.data, src2.data)
     assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
     assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
 
 
-@sample.requires_sample_data
 def test_stc_arithmetic():
     """Test arithmetic for STC files
     """
-    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
-    stc = read_source_estimate(fname)
+    stc = _fake_stc()
     data = stc.data.copy()
 
     out = list()
@@ -165,7 +192,9 @@ def test_stc_arithmetic():
 
         a += a
         a -= a
-        a /= 2 * a
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            a /= 2 * a
         a *= -a
 
         a += 2
@@ -232,10 +261,10 @@ def test_extract_label_time_course():
     n_verts = len(vertices[0]) + len(vertices[1])
 
     # get some labels
-    labels_lh, _ = labels_from_parc('sample', hemi='lh',
-                                    subjects_dir=subjects_dir)
-    labels_rh, _ = labels_from_parc('sample', hemi='rh',
-                                    subjects_dir=subjects_dir)
+    labels_lh = read_labels_from_annot('sample', hemi='lh',
+                                       subjects_dir=subjects_dir)
+    labels_rh = read_labels_from_annot('sample', hemi='rh',
+                                       subjects_dir=subjects_dir)
     labels = list()
     labels.extend(labels_lh[:5])
     labels.extend(labels_rh[:4])
@@ -243,6 +272,7 @@ def test_extract_label_time_course():
     n_labels = len(labels)
 
     label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
+    label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
 
     # compute the mean with sign flip
     label_means_flipped = np.zeros_like(label_means)
@@ -284,7 +314,7 @@ def test_extract_label_time_course():
         assert_array_equal(arr, np.zeros((1, n_times)))
 
     # test the different modes
-    modes = ['mean', 'mean_flip', 'pca_flip']
+    modes = ['mean', 'mean_flip', 'pca_flip', 'max']
 
     for mode in modes:
         label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
@@ -300,6 +330,8 @@ def test_extract_label_time_course():
                 assert_array_almost_equal(tc1, label_means)
             if mode == 'mean_flip':
                 assert_array_almost_equal(tc1, label_means_flipped)
+            if mode == 'max':
+                assert_array_almost_equal(tc1, label_maxs)
 
     # test label with very few vertices (check SVD conditionals)
     label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
@@ -327,7 +359,8 @@ def test_morph_data():
                              subjects_dir=subjects_dir)
     stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
     # make sure we can specify vertices
-    vertices_to = grade_to_vertices(subject_to, grade=3)
+    vertices_to = grade_to_vertices(subject_to, grade=3,
+                                    subjects_dir=subjects_dir)
     stc_to2 = morph_data(subject_from, subject_to, stc_from,
                          grade=vertices_to, smooth=12, buffer_size=1000,
                          subjects_dir=subjects_dir)
@@ -364,6 +397,38 @@ def test_morph_data():
     mask[6800] = False
     assert_array_almost_equal(stc_from.data[mask], stc_to6.data[mask], 5)
 
+    # Morph sparse data
+    # Make a sparse stc
+    stc_from.vertno[0] = stc_from.vertno[0][[100, 500]]
+    stc_from.vertno[1] = stc_from.vertno[1][[200]]
+    stc_from._data = stc_from._data[:3]
+
+    assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
+                  grade=5, subjects_dir=subjects_dir)
+
+    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
+                                   subjects_dir=subjects_dir)
+    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
+                              np.sort(stc_to_sparse.data.sum(axis=1)))
+    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
+    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
+    assert_equal(stc_to_sparse.subject, subject_to)
+    assert_equal(stc_to_sparse.tmin, stc_from.tmin)
+    assert_equal(stc_to_sparse.tstep, stc_from.tstep)
+
+    stc_from.vertno[0] = np.array([], dtype=np.int64)
+    stc_from._data = stc_from._data[:1]
+
+    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
+                                   subjects_dir=subjects_dir)
+    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
+                              np.sort(stc_to_sparse.data.sum(axis=1)))
+    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
+    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
+    assert_equal(stc_to_sparse.subject, subject_to)
+    assert_equal(stc_to_sparse.tmin, stc_from.tmin)
+    assert_equal(stc_to_sparse.tstep, stc_from.tstep)
+
 
 def _my_trans(data):
     """FFT that adds an additional dimension by repeating result"""
@@ -383,7 +448,7 @@ def test_transform_data():
     data = np.dot(kernel, sens_data)
 
     for idx, tmin_idx, tmax_idx in\
-            zip([None, np.arange(n_vertices / 2, n_vertices)],
+            zip([None, np.arange(n_vertices // 2, n_vertices)],
                 [None, 1], [None, 3]):
 
         if idx is None:
@@ -405,7 +470,7 @@ def test_transform_data():
 def test_transform():
     """Test applying linear (time) transform to data"""
     # make up some data
-    n_sensors, n_verts_lh, n_verts_rh, n_times = 10, 10, 10, 10
+    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
     vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
     data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
     stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
@@ -419,7 +484,7 @@ def test_transform():
     data = np.concatenate((stcs_t[0].data[:, :, None],
                            stcs_t[1].data[:, :, None]), axis=2)
     data_t = stc.transform_data(_my_trans)
-    assert_array_equal(data, data_t) # check against stc.transform_data()
+    assert_array_equal(data, data_t)  # check against stc.transform_data()
 
     # data_t.ndim > 2 & copy is False
     assert_raises(ValueError, stc.transform, _my_trans, copy=False)
@@ -428,7 +493,7 @@ def test_transform():
     tmp = deepcopy(stc)
     stc_t = stc.transform(np.abs, copy=True)
     assert_true(isinstance(stc_t, SourceEstimate))
-    assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original?
+    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?
 
     # data_t.ndim = 2 & copy is False
     times = np.round(1000 * stc.times)
@@ -492,6 +557,7 @@ def test_spatio_temporal_src_connectivity():
     # add test for source space connectivity with omitted vertices
     inverse_operator = read_inverse_operator(fname_inv)
     with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         src_ = inverse_operator['src']
         connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
         assert len(w) == 1
@@ -518,4 +584,33 @@ def test_as_data_frame():
                         if isinstance(ind, list) else [ind])
             assert_array_equal(df.values.T[ncat:], stc.data)
             # test that non-indexed data were present as categorial variables
-            df.reset_index().columns[:3] == ['subject', 'time']
+            with warnings.catch_warnings(record=True):  # pandas
+                df.reset_index().columns[:3] == ['subject', 'time']
+
+
+def test_get_peak():
+    """Test peak getter
+    """
+    n_vert, n_times = 10, 5
+    vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
+    data = np.random.randn(n_vert, n_times)
+    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
+                              subject='sample')
+
+    stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
+                                subject='sample')
+
+    for ii, stc in enumerate([stc_surf, stc_vol]):
+        assert_raises(ValueError, stc.get_peak, tmin=-100)
+        assert_raises(ValueError, stc.get_peak, tmax=90)
+        assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
+
+        vert_idx, time_idx = stc.get_peak()
+        vertno = np.concatenate(stc.vertno) if ii == 0 else stc.vertno
+        assert_true(vert_idx in vertno)
+        assert_true(time_idx in stc.times)
+
+        ch_idx, time_idx = stc.get_peak(vert_as_index=True,
+                                        time_as_index=True)
+        assert_true(vert_idx < stc.data.shape[0])
+        assert_true(time_idx < len(stc.times))
diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py
index a9a475e..458338c 100644
--- a/mne/tests/test_source_space.py
+++ b/mne/tests/test_source_space.py
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import os
 import os.path as op
 from nose.tools import assert_true, assert_raises
@@ -5,6 +7,7 @@ from nose.plugins.skip import SkipTest
 import numpy as np
 from numpy.testing import assert_array_equal, assert_allclose, assert_equal
 import warnings
+from scipy.spatial.distance import cdist
 
 from mne.datasets import sample
 from mne import (read_source_spaces, vertex_to_mni, write_source_spaces,
@@ -14,14 +17,18 @@ from mne.utils import (_TempDir, requires_fs_or_nibabel, requires_nibabel,
                        requires_freesurfer, run_subprocess,
                        requires_mne, requires_scipy_version)
 from mne.surface import _accumulate_normals, _triangle_neighbors
+from mne.source_space import _get_mgz_header
+from mne.externals.six.moves import zip
 
-from scipy.spatial.distance import cdist
+warnings.simplefilter('always')
 
 # WARNING: test_source_space is imported by forward, so download=False
 # is critical here, otherwise on first import of MNE users will have to
 # download the whole sample dataset!
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
 data_path = sample.data_path(download=False)
 subjects_dir = op.join(data_path, 'subjects')
+fname_small = op.join(base_dir, 'small-src.fif.gz')
 fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
 fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
                     'sample-5120-bem.fif')
@@ -30,6 +37,48 @@ fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
 tempdir = _TempDir()
 
 
+@requires_nibabel(vox2ras_tkr=True)
+def test_mgz_header():
+    import nibabel as nib
+    header = _get_mgz_header(fname_mri)
+    mri_hdr = nib.load(fname_mri).get_header()
+    assert_allclose(mri_hdr.get_data_shape(), header['dims'])
+    assert_allclose(mri_hdr.get_vox2ras_tkr(), header['vox2ras_tkr'])
+    assert_allclose(mri_hdr.get_ras2vox(), header['ras2vox'])
+
+
+@requires_scipy_version('0.11')
+def test_add_patch_info():
+    """Test adding patch info to source space"""
+    # let's setup a small source space
+    src = read_source_spaces(fname_small)
+    src_new = read_source_spaces(fname_small)
+    for s in src_new:
+        s['nearest'] = None
+        s['nearest_dist'] = None
+        s['pinfo'] = None
+
+    # test that no patch info is added for small dist_limit
+    try:
+        add_source_space_distances(src_new, dist_limit=0.00001)
+    except RuntimeError:  # what we throw when scipy version is wrong
+        pass
+    else:
+        assert_true(all(s['nearest'] is None for s in src_new))
+        assert_true(all(s['nearest_dist'] is None for s in src_new))
+        assert_true(all(s['pinfo'] is None for s in src_new))
+
+    # now let's use one that works
+    add_source_space_distances(src_new)
+
+    for s1, s2 in zip(src, src_new):
+        assert_array_equal(s1['nearest'], s2['nearest'])
+        assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
+        assert_equal(len(s1['pinfo']), len(s2['pinfo']))
+        for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
+            assert_array_equal(p1, p2)
+
+
 @sample.requires_sample_data
 @requires_scipy_version('0.11')
 def test_add_source_space_distances_limited():
@@ -41,7 +90,7 @@ def test_add_source_space_distances_limited():
     n_do = 200  # limit this for speed
     src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
     src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
-    out_name = op.join(tempdir, 'temp.src')
+    out_name = op.join(tempdir, 'temp-src.fif')
     try:
         add_source_space_distances(src_new, dist_limit=0.007)
     except RuntimeError:  # what we throw when scipy version is wrong
@@ -78,7 +127,7 @@ def test_add_source_space_distances():
     n_do = 20  # limit this for speed
     src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
     src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
-    out_name = op.join(tempdir, 'temp.src')
+    out_name = op.join(tempdir, 'temp-src.fif')
     add_source_space_distances(src_new)
     write_source_spaces(out_name, src_new)
     src_new = read_source_spaces(out_name)
@@ -123,11 +172,11 @@ def test_discrete_source_space():
         np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]])
         # let's try the spherical one (no bem or surf supplied)
         run_subprocess(['mne_volume_source_space', '--meters',
-                        '--pos',  temp_pos, '--src', temp_name])
+                        '--pos', temp_pos, '--src', temp_name])
         src_c = read_source_spaces(temp_name)
+        pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
         src_new = setup_volume_source_space('sample', None,
-                                            pos=dict(rr=src[0]['rr'][v],
-                                                     nn=src[0]['nn'][v]),
+                                            pos=pos_dict,
                                             subjects_dir=subjects_dir)
         _compare_source_spaces(src_c, src_new, mode='approx')
         assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
@@ -139,6 +188,10 @@ def test_discrete_source_space():
         write_source_spaces(temp_name, src_c)
         src_c2 = read_source_spaces(temp_name)
         _compare_source_spaces(src_c, src_c2)
+
+        # now do MRI
+        assert_raises(ValueError, setup_volume_source_space, 'sample',
+                      pos=pos_dict, mri=fname_mri)
     finally:
         if op.isfile(temp_name):
             os.remove(temp_name)
@@ -146,7 +199,6 @@ def test_discrete_source_space():
 
 @sample.requires_sample_data
 @requires_mne
-@requires_nibabel(vox2ras_tkr=True)
 def test_volume_source_space():
     """Test setting up volume source spaces
     """
@@ -165,7 +217,7 @@ def test_volume_source_space():
 
         # let's try the spherical one (no bem or surf supplied)
         run_subprocess(['mne_volume_source_space',
-                        '--grid',  '15.0',
+                        '--grid', '15.0',
                         '--src', temp_name,
                         '--mri', fname_mri])
         src = read_source_spaces(temp_name)
@@ -177,7 +229,7 @@ def test_volume_source_space():
         # now without MRI argument, it should give an error when we try
         # to read it
         run_subprocess(['mne_volume_source_space',
-                        '--grid',  '15.0',
+                        '--grid', '15.0',
                         '--src', temp_name])
         assert_raises(ValueError, read_source_spaces, temp_name)
     finally:
@@ -189,8 +241,8 @@ def test_volume_source_space():
 def test_triangle_neighbors():
     """Test efficient vertex neighboring triangles for surfaces"""
     this = read_source_spaces(fname)[0]
-    this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
-    for p in xrange(this['ntri']):
+    this['neighbor_tri'] = [list() for _ in range(this['np'])]
+    for p in range(this['ntri']):
         verts = this['tris'][p]
         this['neighbor_tri'][verts[0]].append(p)
         this['neighbor_tri'][verts[1]].append(p)
@@ -218,7 +270,7 @@ def test_accumulate_normals():
     # cut-and-paste from original code in surface.py:
     #    Find neighboring triangles and accumulate vertex normals
     this['nn'] = np.zeros((this['np'], 3))
-    for p in xrange(this['ntri']):
+    for p in range(this['ntri']):
         # vertex normals
         verts = this['tris'][p]
         this['nn'][verts, :] += this['tri_nn'][p, :]
@@ -237,29 +289,37 @@ def test_setup_source_space():
     fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                         'fsaverage-ico-5-src.fif')
     # first lets test some input params
-    assert_raises(ValueError, setup_source_space, 'sample', spacing='oct')
-    assert_raises(ValueError, setup_source_space, 'sample', spacing='octo')
-    assert_raises(ValueError, setup_source_space, 'sample', spacing='oct6e')
-    assert_raises(ValueError, setup_source_space, 'sample', spacing='7emm')
-    assert_raises(ValueError, setup_source_space, 'sample', spacing='alls')
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='oct',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='octo',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='oct6e',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='7emm',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='alls',
+                  add_dist=False)
     assert_raises(IOError, setup_source_space, 'sample', spacing='oct6',
-                  subjects_dir=subjects_dir)
+                  subjects_dir=subjects_dir, add_dist=False)
 
     # ico 5 (fsaverage) - write to temp file
     src = read_source_spaces(fname_ico)
     temp_name = op.join(tempdir, 'temp-src.fif')
-    with warnings.catch_warnings(True):  # sklearn equiv neighbors
+    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
+        warnings.simplefilter('always')
         src_new = setup_source_space('fsaverage', temp_name, spacing='ico5',
-                                     subjects_dir=subjects_dir)
+                                     subjects_dir=subjects_dir, add_dist=False,
+                                     overwrite=True)
     _compare_source_spaces(src, src_new, mode='approx')
 
     # oct-6 (sample) - auto filename + IO
     src = read_source_spaces(fname)
     temp_name = op.join(tempdir, 'temp-src.fif')
-    with warnings.catch_warnings(True):  # sklearn equiv neighbors
+    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
+        warnings.simplefilter('always')
         src_new = setup_source_space('sample', temp_name, spacing='oct6',
                                      subjects_dir=subjects_dir,
-                                     overwrite=True)
+                                     overwrite=True, add_dist=False)
     _compare_source_spaces(src, src_new, mode='approx')
     src_new = read_source_spaces(temp_name)
     _compare_source_spaces(src, src_new, mode='approx')
@@ -267,7 +327,7 @@ def test_setup_source_space():
     # all source points - no file writing
     src = read_source_spaces(fname_all)
     src_new = setup_source_space('sample', None, spacing='all',
-                                 subjects_dir=subjects_dir)
+                                 subjects_dir=subjects_dir, add_dist=False)
     _compare_source_spaces(src, src_new, mode='approx')
 
 
@@ -299,10 +359,19 @@ def test_write_source_space():
     """Test writing and reading of source spaces
     """
     src0 = read_source_spaces(fname, add_geom=False)
-    write_source_spaces(op.join(tempdir, 'tmp.fif'), src0)
-    src1 = read_source_spaces(op.join(tempdir, 'tmp.fif'), add_geom=False)
+    write_source_spaces(op.join(tempdir, 'tmp-src.fif'), src0)
+    src1 = read_source_spaces(op.join(tempdir, 'tmp-src.fif'), add_geom=False)
     _compare_source_spaces(src0, src1)
 
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        src_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_source_spaces(src_badname, src0)
+        read_source_spaces(src_badname)
+        print([ww.message for ww in w])
+    assert_equal(len(w), 2)
+
 
 def _compare_source_spaces(src0, src1, mode='exact'):
     """Compare two source spaces
@@ -311,19 +380,20 @@ def _compare_source_spaces(src0, src1, mode='exact'):
     """
     for s0, s1 in zip(src0, src1):
         for name in ['nuse', 'ntri', 'np', 'type', 'id']:
-            print name
+            print(name)
             assert_equal(s0[name], s1[name])
         for name in ['subject_his_id']:
             if name in s0 or name in s1:
-                print name
+                print(name)
                 assert_equal(s0[name], s1[name])
         for name in ['interpolator']:
             if name in s0 or name in s1:
-                print name
+                print(name)
                 diffs = (s0['interpolator'] - s1['interpolator']).data
-                assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.05)  # 5%
+                if len(diffs) > 0:
+                    assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.05)  # 5%
         for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
-            print name
+            print(name)
             if s0[name] is None:
                 assert_true(s1[name] is None)
             else:
@@ -339,13 +409,13 @@ def _compare_source_spaces(src0, src1, mode='exact'):
             # these fields will exist if patch info was added, these are
             # not tested in mode == 'approx'
             for name in ['nearest', 'nearest_dist']:
-                print name
+                print(name)
                 if s0[name] is None:
                     assert_true(s1[name] is None)
                 else:
                     assert_array_equal(s0[name], s1[name])
             for name in ['dist_limit']:
-                print name
+                print(name)
                 assert_true(s0[name] == s1[name])
             for name in ['dist']:
                 if s0[name] is not None:
@@ -381,7 +451,7 @@ def _compare_source_spaces(src0, src1, mode='exact'):
         if mode == 'exact':
             assert_equal(src0.info[name], src1.info[name])
         elif mode == 'approx':
-            print name
+            print(name)
             if name in src0.info:
                 assert_true(name in src1.info)
             else:
@@ -400,8 +470,8 @@ def test_vertex_to_mni():
     coords_f = np.array([[-41.28, -40.04, 18.20], [-6.05, 49.74, -18.15],
                          [-61.71, -14.55, 20.52], [21.70, -60.84, 25.02]])
     hemis = [0, 0, 0, 1]
-    for coords, subj in zip([coords_s, coords_f], ['sample', 'fsaverage']):
-        coords_2 = vertex_to_mni(vertices, hemis, subj)
+    for coords, subject in zip([coords_s, coords_f], ['sample', 'fsaverage']):
+        coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir)
         # less than 1mm error
         assert_allclose(coords, coords_2, atol=1.0)
 
@@ -416,7 +486,58 @@ def test_vertex_to_mni_fs_nibabel():
     for subject in ['sample', 'fsaverage']:
         vertices = np.random.randint(0, 100000, n_check)
         hemis = np.random.randint(0, 2, n_check)
-        coords = vertex_to_mni(vertices, hemis, subject, mode='nibabel')
-        coords_2 = vertex_to_mni(vertices, hemis, subject, mode='freesurfer')
+        coords = vertex_to_mni(vertices, hemis, subject, subjects_dir,
+                               'nibabel')
+        coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir,
+                                 'freesurfer')
         # less than 0.1 mm error
         assert_allclose(coords, coords_2, atol=0.1)
+
+
+# The following code was used to generate small-src.fif.gz.
+# Unfortunately the C code bombs when trying to add source space distances,
+# possibly due to incomplete "faking" of a smaller surface on our part here.
+"""
+# -*- coding: utf-8 -*-
+
+import os
+import numpy as np
+import mne
+
+data_path = mne.datasets.sample.data_path()
+src = mne.setup_source_space('sample', fname=None, spacing='oct5')
+hemis = ['lh', 'rh']
+fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis]
+
+vs = list()
+for s, fname in zip(src, fnames):
+    coords = s['rr'][s['vertno']]
+    vs.append(s['vertno'])
+    idx = -1 * np.ones(len(s['rr']))
+    idx[s['vertno']] = np.arange(s['nuse'])
+    faces = s['use_tris']
+    faces = idx[faces]
+    mne.write_surface(fname, coords, faces)
+
+# we need to move sphere surfaces
+spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis]
+for s in spheres:
+    os.rename(s, s + '.bak')
+try:
+    for s, v in zip(spheres, vs):
+        coords, faces = mne.read_surface(s + '.bak')
+        coords = coords[v]
+        mne.write_surface(s, coords, faces)
+    src = mne.setup_source_space('sample', fname=None, spacing='oct4',
+                                 surface='decimated')
+finally:
+    for s in spheres:
+        os.rename(s + '.bak', s)
+
+fname = 'small-src.fif'
+fname_gz = fname + '.gz'
+mne.write_source_spaces(fname, src)
+mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname,
+                          '--srcp', fname])
+mne.write_source_spaces(fname_gz, mne.read_source_spaces(fname))
+"""
diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py
index 48da08b..326d444 100644
--- a/mne/tests/test_surface.py
+++ b/mne/tests/test_surface.py
@@ -1,17 +1,19 @@
+from __future__ import print_function
 import os.path as op
 import numpy as np
-
+from nose.tools import assert_true, assert_raises
 from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                            assert_allclose, assert_equal)
 
-from nose.tools import assert_true, assert_raises
-
 from mne.datasets import sample
 from mne import (read_bem_surfaces, write_bem_surface, read_surface,
                  write_surface, decimate_surface)
 from mne.surface import (_make_morph_map, read_morph_map, _compute_nearest,
-                         fast_cross_3d)
+                         fast_cross_3d, get_head_surf,
+                         get_meg_helmet_surf)
 from mne.utils import _TempDir, requires_tvtk
+from mne.io import read_info
+from mne.transforms import _get_mri_head_t_from_trans_file
 
 data_path = sample.data_path(download=False)
 subjects_dir = op.join(data_path, 'subjects')
@@ -20,6 +22,34 @@ fname = op.join(subjects_dir, 'sample', 'bem',
 tempdir = _TempDir()
 
 
+def test_helmet():
+    """Test loading helmet surfaces
+    """
+    base_dir = op.join(op.dirname(__file__), '..', 'io')
+    fname_raw = op.join(base_dir, 'tests', 'data', 'test_raw.fif')
+    fname_kit_raw = op.join(base_dir, 'kit', 'tests', 'data',
+                            'test_bin_raw.fif')
+    fname_bti_raw = op.join(base_dir, 'bti', 'tests', 'data',
+                            'exported4D_linux_raw.fif')
+    fname_ctf_raw = op.join(base_dir, 'tests', 'data', 'test_ctf_raw.fif')
+    fname_trans = op.join(base_dir, 'tests', 'data',
+                          'sample-audvis-raw-trans.txt')
+    trans = _get_mri_head_t_from_trans_file(fname_trans)
+    for fname in [fname_raw, fname_kit_raw, fname_bti_raw, fname_ctf_raw]:
+        helmet = get_meg_helmet_surf(read_info(fname), trans)
+        assert_equal(len(helmet['rr']), 304)  # they all have 304 verts
+        assert_equal(len(helmet['rr']), len(helmet['nn']))
+
+
+ at sample.requires_sample_data
+def test_head():
+    """Test loading the head surface
+    """
+    surf_1 = get_head_surf('sample', subjects_dir=subjects_dir)
+    surf_2 = get_head_surf('sample', 'head', subjects_dir=subjects_dir)
+    assert_true(len(surf_1['rr']) < len(surf_2['rr']))  # BEM vs dense head
+
+
 def test_huge_cross():
     """Test cross product with lots of elements
     """
@@ -71,7 +101,7 @@ def test_io_bem_surfaces():
     """
     surf = read_bem_surfaces(fname, add_geom=True)
     surf = read_bem_surfaces(fname, add_geom=False)
-    print "Number of surfaces : %d" % len(surf)
+    print("Number of surfaces : %d" % len(surf))
 
     write_bem_surface(op.join(tempdir, 'bem_surf.fif'), surf[0])
     surf_read = read_bem_surfaces(op.join(tempdir, 'bem_surf.fif'),
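
test_surface.py gains coverage for two new helpers, get_head_surf and
get_meg_helmet_surf; both return surface dicts whose 'rr' and 'nn'
entries hold vertex coordinates and normals. A sketch mirroring the
tests, with the filenames as placeholders for the data files defined
there:

    from mne.io import read_info
    from mne.surface import get_head_surf, get_meg_helmet_surf
    from mne.transforms import _get_mri_head_t_from_trans_file

    subjects_dir = '/path/to/freesurfer/subjects'  # placeholder
    fname_raw = 'test_raw.fif'                     # placeholder
    fname_trans = 'sample-audvis-raw-trans.txt'    # placeholder

    head = get_head_surf('sample', subjects_dir=subjects_dir)
    trans = _get_mri_head_t_from_trans_file(fname_trans)  # text trans file
    helmet = get_meg_helmet_surf(read_info(fname_raw), trans)
    assert len(helmet['rr']) == len(helmet['nn'])  # vertices and normals
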
diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py
index a21e486..a7cdca1 100644
--- a/mne/tests/test_transforms.py
+++ b/mne/tests/test_transforms.py
@@ -1,8 +1,9 @@
 from math import pi
 import os.path as op
 
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_raises
 from numpy.testing import assert_array_equal, assert_equal, assert_allclose
+import warnings
 
 from mne.datasets import sample
 from mne import read_trans, write_trans
@@ -10,9 +11,12 @@ from mne.utils import _TempDir
 from mne.transforms import (_get_mri_head_t_from_trans_file, invert_transform,
                             rotation, rotation3d, rotation_angles)
 
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
 data_path = sample.data_path(download=False)
 fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
-fname_trans = op.join(op.split(__file__)[0], '..', 'fiff', 'tests',
+fname_eve = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
+fname_trans = op.join(op.split(__file__)[0], '..', 'io', 'tests',
                       'data', 'sample-audvis-raw-trans.txt')
 
 tempdir = _TempDir()
@@ -33,19 +37,24 @@ def test_get_mri_head_t():
 def test_io_trans():
     """Test reading and writing of trans files
     """
-    info0 = read_trans(fname)
+    trans0 = read_trans(fname)
     fname1 = op.join(tempdir, 'test-trans.fif')
-    write_trans(fname1, info0)
-    info1 = read_trans(fname1)
+    write_trans(fname1, trans0)
+    trans1 = read_trans(fname1)
 
     # check all properties
-    assert_true(info0['from'] == info1['from'])
-    assert_true(info0['to'] == info1['to'])
-    assert_array_equal(info0['trans'], info1['trans'])
-    for d0, d1 in zip(info0['dig'], info1['dig']):
-        assert_array_equal(d0['r'], d1['r'])
-        for name in ['kind', 'ident', 'coord_frame']:
-            assert_true(d0[name] == d1[name])
+    assert_true(trans0['from'] == trans1['from'])
+    assert_true(trans0['to'] == trans1['to'])
+    assert_array_equal(trans0['trans'], trans1['trans'])
+
+    # check reading non -trans.fif files
+    assert_raises(IOError, read_trans, fname_eve)
+
+    # check warning on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        fname2 = op.join(tempdir, 'trans-test-bad-name.fif')
+        write_trans(fname2, trans0)
+    assert_true(len(w) >= 1)
 
 
 def test_rotation():
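
The test_transforms.py rename from info0/info1 to trans0/trans1 reflects
that read_trans returns a coordinate transform rather than measurement
info, and the new checks cover MNE's filename conventions: reading a
non-trans file raises, and writing to a name that does not end in
-trans.fif warns. A sketch of the round-trip, assuming the read path is a
placeholder for an existing -trans.fif file:

    from mne import read_trans, write_trans

    trans = read_trans('sample_audvis_raw-trans.fif')  # placeholder path
    assert trans['trans'].shape == (4, 4)  # homogeneous head<->MRI transform
    write_trans('copy-trans.fif', trans)   # odd names now trigger a warning
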
diff --git a/mne/tests/test_utils.py b/mne/tests/test_utils.py
index 080295e..94d990d 100644
--- a/mne/tests/test_utils.py
+++ b/mne/tests/test_utils.py
@@ -1,20 +1,25 @@
-from numpy.testing import assert_equal
-from nose.tools import assert_true, assert_raises
+from numpy.testing import assert_equal, assert_array_equal
+from nose.tools import assert_true, assert_raises, assert_not_equal
+from copy import deepcopy
 import os.path as op
 import numpy as np
 import os
 import warnings
-import urllib2
+from mne.externals.six.moves import urllib
+
+from mne.utils import (set_log_level, set_log_file, _TempDir,
+                       get_config, set_config, deprecated, _fetch_file,
+                       sum_squared, requires_mem_gb, estimate_rank,
+                       _url_to_local_path, sizeof_fmt,
+                       _check_type_picks, object_hash, object_diff,
+                       requires_good_network)
+from mne.io import show_fiff
+from mne import Evoked
 
-from ..utils import (set_log_level, set_log_file, _TempDir,
-                     get_config, set_config, deprecated, _fetch_file,
-                     sum_squared, requires_mem_gb, estimate_rank,
-                     _url_to_local_path, sizeof_fmt)
-from ..fiff import Evoked, show_fiff
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
 fname_evoked = op.join(base_dir, 'test-ave.fif')
 fname_raw = op.join(base_dir, 'test_raw.fif')
 fname_log = op.join(base_dir, 'test-ave.log')
@@ -28,6 +33,40 @@ def clean_lines(lines):
     return [l if 'Reading ' not in l else 'Reading test file' for l in lines]
 
 
+def test_hash():
+    """Test dictionary hashing and comparison functions"""
+    # does hashing all of these types work:
+    # {dict, list, tuple, ndarray, str, float, int, None}
+    d0 = dict(a=dict(a=0.1, b='fo', c=1), b=[1, 'b'], c=(), d=np.ones(3))
+    d0[1] = None
+    d0[2.] = b'123'
+
+    d1 = deepcopy(d0)
+    print(object_diff(d0, d1))
+    assert_equal(object_hash(d0), object_hash(d1))
+
+    # adding a new key should change the hash
+    d1['data'] = np.ones(3, int)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    print(object_diff(d0, d1))
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1['a']['a'] = 0.11
+    object_diff(d0, d1)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    print(object_diff(d0, d1))
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1[1] = 2
+    object_diff(d0, d1)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+    # generators (and other types) not supported
+    d1[1] = (x for x in d0)
+    assert_raises(RuntimeError, object_hash, d1)
+
+
 def test_tempdir():
     """Test TempDir
     """
@@ -38,6 +77,8 @@ def test_tempdir():
 
 
 def test_estimate_rank():
+    """Test rank estimation
+    """
     data = np.eye(10)
     data[0, 0] = 0
     assert_equal(estimate_rank(data), 9)
@@ -46,12 +87,10 @@ def test_estimate_rank():
 def test_logging():
     """Test logging (to file)
     """
-    old_log_file = open(fname_log, 'r')
-    old_lines = clean_lines(old_log_file.readlines())
-    old_log_file.close()
-    old_log_file_2 = open(fname_log_2, 'r')
-    old_lines_2 = clean_lines(old_log_file_2.readlines())
-    old_log_file_2.close()
+    with open(fname_log, 'r') as old_log_file:
+        old_lines = clean_lines(old_log_file.readlines())
+    with open(fname_log_2, 'r') as old_log_file_2:
+        old_lines_2 = clean_lines(old_log_file_2.readlines())
 
     if op.isfile(test_name):
         os.remove(test_name)
@@ -59,20 +98,22 @@ def test_logging():
     set_log_file(test_name)
     set_log_level('WARNING')
     # should NOT print
-    evoked = Evoked(fname_evoked, setno=1)
-    assert_true(open(test_name).readlines() == [])
+    evoked = Evoked(fname_evoked, condition=1)
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
     # should NOT print
-    evoked = Evoked(fname_evoked, setno=1, verbose=False)
-    assert_true(open(test_name).readlines() == [])
+    evoked = Evoked(fname_evoked, condition=1, verbose=False)
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
     # should NOT print
-    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
-    assert_true(open(test_name).readlines() == [])
+    evoked = Evoked(fname_evoked, condition=1, verbose='WARNING')
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
     # SHOULD print
-    evoked = Evoked(fname_evoked, setno=1, verbose=True)
-    new_log_file = open(test_name, 'r')
-    new_lines = clean_lines(new_log_file.readlines())
+    evoked = Evoked(fname_evoked, condition=1, verbose=True)
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
     assert_equal(new_lines, old_lines)
-    new_log_file.close()
     set_log_file(None)  # Need to do this to close the old file
     os.remove(test_name)
 
@@ -80,35 +121,38 @@ def test_logging():
     set_log_file(test_name)
     set_log_level('INFO')
     # should NOT print
-    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
-    assert_true(open(test_name).readlines() == [])
+    evoked = Evoked(fname_evoked, condition=1, verbose='WARNING')
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
     # should NOT print
-    evoked = Evoked(fname_evoked, setno=1, verbose=False)
-    assert_true(open(test_name).readlines() == [])
+    evoked = Evoked(fname_evoked, condition=1, verbose=False)
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
     # SHOULD print
-    evoked = Evoked(fname_evoked, setno=1)
-    new_log_file = open(test_name, 'r')
-    old_log_file = open(fname_log, 'r')
-    new_lines = clean_lines(new_log_file.readlines())
-    assert_equal(new_lines, old_lines)
+    evoked = Evoked(fname_evoked, condition=1)
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
+    with open(fname_log, 'r') as old_log_file:
+        assert_equal(new_lines, old_lines)
     # check to make sure appending works (and as default, raises a warning)
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         set_log_file(test_name, overwrite=False)
         assert len(w) == 0
         set_log_file(test_name)
         assert len(w) == 1
-    evoked = Evoked(fname_evoked, setno=1)
-    new_log_file = open(test_name, 'r')
-    new_lines = clean_lines(new_log_file.readlines())
+    evoked = Evoked(fname_evoked, condition=1)
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
     assert_equal(new_lines, old_lines_2)
 
     # make sure overwriting works
     set_log_file(test_name, overwrite=True)
     # this line needs to be called to actually do some logging
-    evoked = Evoked(fname_evoked, setno=1)
+    evoked = Evoked(fname_evoked, condition=1)
     del evoked
-    new_log_file = open(test_name, 'r')
-    new_lines = clean_lines(new_log_file.readlines())
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
     assert_equal(new_lines, old_lines)
 
 
@@ -121,17 +165,26 @@ def test_config():
     assert_true(get_config(key) == value)
     del os.environ[key]
     # catch the warning about it being a non-standard config key
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         set_config(key, None, home_dir=tempdir)
-        assert_true(len(w) == 1)
+    assert_true(len(w) == 1)
     assert_true(get_config(key, home_dir=tempdir) is None)
     assert_raises(KeyError, get_config, key, raise_error=True)
-    with warnings.catch_warnings(True):
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
         set_config(key, value, home_dir=tempdir)
         assert_true(get_config(key, home_dir=tempdir) == value)
         set_config(key, None, home_dir=tempdir)
     if old_val is not None:
         os.environ[key] = old_val
+    # Check if get_config with no input returns all config
+    key = 'MNE_PYTHON_TESTING_KEY'
+    config = {key: value}
+    with warnings.catch_warnings(record=True):  # non-standard key
+        warnings.simplefilter('always')
+        set_config(key, value, home_dir=tempdir)
+    assert_equal(get_config(home_dir=tempdir), config)
 
 
 def test_show_fiff():
@@ -170,10 +223,12 @@ def no_mem_func():
 def test_deprecated():
     """Test deprecated function
     """
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         deprecated_func()
     assert_true(len(w) == 1)
-    with warnings.catch_warnings(True) as w:
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
         deprecated_class()
     assert_true(len(w) == 1)
 
@@ -182,10 +237,12 @@ def test_requires_mem_gb():
     """Test requires memory function
     """
     try:
-        with warnings.catch_warnings(True) as w:
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
             big_mem_func()
         assert_true(len(w) == 1)
-        with warnings.catch_warnings(True) as w:
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
             no_mem_func()
         assert_true(len(w) == 0)
     except:
@@ -199,13 +256,14 @@ def test_requires_mem_gb():
         SkipTest(msg)
 
 
+ at requires_good_network
 def test_fetch_file():
     """Test file downloading
     """
     # Skipping test if no internet connection available
     try:
-        urllib2.urlopen("http://github.com", timeout=2)
-    except urllib2.URLError:
+        urllib.request.urlopen("http://github.com", timeout=2)
+    except urllib.error.URLError:
         from nose.plugins.skip import SkipTest
         raise SkipTest('No internet connection, skipping download test.')
 
@@ -218,13 +276,13 @@ def test_fetch_file():
                       op.join(tempdir, 'test'))
         resume_name = op.join(tempdir, "download_resume")
         # touch file
-        with file(resume_name + '.part', 'w'):
+        with open(resume_name + '.part', 'w'):
             os.utime(resume_name + '.part', None)
         _fetch_file(url, resume_name, print_destination=False, resume=True)
 
 
 def test_sum_squared():
-    """Optimized sum of squares
+    """Test optimized sum of squares
     """
     X = np.random.randint(0, 50, (3, 3))
     assert_equal(np.sum(X ** 2), sum_squared(X))
@@ -243,3 +301,18 @@ def test_url_to_local_path():
     """
     assert_equal(_url_to_local_path('http://google.com/home/why.html', '.'),
                  op.join('.', 'home', 'why.html'))
+
+
+def test_check_type_picks():
+    """Test checking type integrity checks of picks
+    """
+    picks = np.arange(12)
+    assert_array_equal(picks, _check_type_picks(picks))
+    picks = list(range(12))
+    assert_array_equal(np.array(picks), _check_type_picks(picks))
+    picks = None
+    assert_array_equal(None, _check_type_picks(picks))
+    picks = ['a', 'b']
+    assert_raises(ValueError, _check_type_picks, picks)
+    picks = 'b'
+    assert_raises(ValueError, _check_type_picks, picks)
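
Many hunks in this commit replace warnings.catch_warnings(True) with the
explicit keyword form and add a simplefilter call. A small sketch of why
the filter reset matters (might_warn is a hypothetical function, not MNE
code): the default filters may ignore DeprecationWarning entirely or
suppress repeats via the once-per-location registry, making the recorded
count flaky:

    import warnings

    def might_warn():
        warnings.warn('deprecated', DeprecationWarning)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')  # record every emission
        might_warn()
        might_warn()
    assert len(w) == 2
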
diff --git a/mne/tests/test_viz.py b/mne/tests/test_viz.py
deleted file mode 100644
index 6b74c38..0000000
--- a/mne/tests/test_viz.py
+++ /dev/null
@@ -1,511 +0,0 @@
-import os.path as op
-from functools import wraps
-import numpy as np
-from numpy.testing import assert_raises, assert_equal
-import warnings
-
-from mne import fiff, read_events, Epochs, SourceEstimate, read_cov, read_proj
-from mne.layouts import read_layout
-from mne.fiff.pick import pick_channels_evoked
-from mne.viz import (plot_topo, plot_topo_tfr, plot_topo_power,
-                     plot_topo_phase_lock, plot_topo_image_epochs,
-                     plot_evoked_topomap, plot_projs_topomap,
-                     plot_sparse_source_estimates, plot_source_estimates,
-                     plot_cov, mne_analyze_colormap, plot_image_epochs,
-                     plot_connectivity_circle, circular_layout, plot_drop_log,
-                     compare_fiff, plot_source_spectrogram)
-from mne.datasets import sample
-from mne.source_space import read_source_spaces
-from mne.preprocessing import ICA
-from mne.utils import check_sklearn_version
-
-
-warnings.simplefilter('always')  # enable b/c these tests throw warnings
-
-# Set our plotters to test mode
-import matplotlib
-matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
-
-lacks_mayavi = False
-try:
-    from mayavi import mlab
-except ImportError:
-    try:
-        from enthought.mayavi import mlab
-    except ImportError:
-        lacks_mayavi = True
-requires_mayavi = np.testing.dec.skipif(lacks_mayavi, 'Requires mayavi')
-
-
-def requires_sklearn(function):
-    """Decorator to skip test if scikit-learn >= 0.12 is not available"""
-    @wraps(function)
-    def dec(*args, **kwargs):
-        if not check_sklearn_version(min_version='0.12'):
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires scikit-learn >= 0.12'
-                           % function.__name__)
-        ret = function(*args, **kwargs)
-        return ret
-    return dec
-
-if not lacks_mayavi:
-    mlab.options.backend = 'test'
-
-data_dir = sample.data_path(download=False)
-subjects_dir = op.join(data_dir, 'subjects')
-ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
-
-base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
-evoked_fname = op.join(base_dir, 'test-ave.fif')
-fname = op.join(base_dir, 'test-ave.fif')
-raw_fname = op.join(base_dir, 'test_raw.fif')
-cov_fname = op.join(base_dir, 'test-cov.fif')
-event_name = op.join(base_dir, 'test-eve.fif')
-event_id, tmin, tmax = 1, -0.2, 0.5
-n_chan = 15
-layout = read_layout('Vectorview-all')
-
-
-def _fake_click(fig, ax, point, xform='ax'):
-    """Helper to fake a click at a relative point within axes"""
-    if xform == 'ax':
-        x, y = ax.transAxes.transform_point(point)
-    elif xform == 'data':
-        x, y = ax.transData.transform_point(point)
-    else:
-        raise ValueError('unknown transform')
-    try:
-        fig.canvas.button_press_event(x, y, 1, False, None)
-    except:  # for old MPL
-        fig.canvas.button_press_event(x, y, 1, False)
-
-
-def _get_raw():
-    return fiff.Raw(raw_fname, preload=False)
-
-
-def _get_events():
-    return read_events(event_name)
-
-
-def _get_picks(raw):
-    return fiff.pick_types(raw.info, meg=True, eeg=False, stim=False,
-                           ecg=False, eog=False, exclude='bads')
-
-
-def _get_epochs():
-    raw = _get_raw()
-    events = _get_events()
-    picks = _get_picks(raw)
-    # Use a subset of channels for plotting speed
-    picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
-    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0))
-    return epochs
-
-
-def _get_epochs_delayed_ssp():
-    raw = _get_raw()
-    events = _get_events()
-    picks = _get_picks(raw)
-    reject = dict(mag=4e-12)
-    epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
-                                picks=picks, baseline=(None, 0),
-                                proj='delayed', reject=reject)
-    return epochs_delayed_ssp
-
-
-def test_plot_topo():
-    """Test plotting of ERP topography
-    """
-    # Show topography
-    evoked = _get_epochs().average()
-    plot_topo(evoked, layout)
-    warnings.simplefilter('always', UserWarning)
-    picked_evoked = pick_channels_evoked(evoked, evoked.ch_names[:3])
-
-    # test scaling
-    with warnings.catch_warnings(record=True):
-        for ylim in [dict(mag=[-600, 600]), None]:
-            plot_topo([picked_evoked] * 2, layout, ylim=ylim)
-
-        for evo in [evoked, [evoked, picked_evoked]]:
-            assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
-
-        evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
-        plot_topo(evoked_delayed_ssp, layout, proj='interactive')
-
-
-def test_plot_topo_tfr():
-    """Test plotting of TFR
-    """
-    # Make a fake dataset to plot
-    epochs = _get_epochs()
-    n_freqs = 11
-    con = np.random.randn(n_chan, n_freqs, len(epochs.times))
-    freqs = np.arange(n_freqs)
-    # Show topography of connectivity from seed
-    plot_topo_tfr(epochs, con, freqs, layout)
-    plt.close('all')
-
-
-def test_plot_topo_power():
-    """Test plotting of power
-    """
-    epochs = _get_epochs()
-    decim = 3
-    frequencies = np.arange(7, 30, 3)  # define frequencies of interest
-    power = np.abs(np.random.randn(n_chan, 7, 141))
-    phase_lock = np.random.randn(n_chan, 7, 141)
-    baseline = (None, 0)  # set the baseline for induced power
-    title = 'Induced power - MNE sample data'
-    plot_topo_power(epochs, power, frequencies, layout, baseline=baseline,
-                    mode='ratio', decim=decim, vmin=0., vmax=14, title=title)
-    title = 'Phase locking value - MNE sample data'
-    plot_topo_phase_lock(epochs, phase_lock, frequencies, layout,
-                         baseline=baseline, mode='mean', decim=decim,
-                         title=title)
-    plt.close('all')
-
-
-def test_plot_topo_image_epochs():
-    """Test plotting of epochs image topography
-    """
-    title = 'ERF images - MNE sample data'
-    epochs = _get_epochs()
-    plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
-                           colorbar=True, title=title)
-    plt.close('all')
-
-
-def test_plot_evoked():
-    """Test plotting of evoked
-    """
-    evoked = _get_epochs().average()
-    with warnings.catch_warnings(record=True):
-        evoked.plot(proj=True, hline=[1])
-        # plot with bad channels excluded
-        evoked.plot(exclude='bads')
-        evoked.plot(exclude=evoked.info['bads'])  # does the same thing
-
-        # test selective updating of dict keys is working.
-        evoked.plot(hline=[1], units=dict(mag='femto foo'))
-        evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
-        evoked_delayed_ssp.plot(proj='interactive')
-        evoked_delayed_ssp.apply_proj()
-        assert_raises(RuntimeError, evoked_delayed_ssp.plot, proj='interactive')
-        evoked_delayed_ssp.info['projs'] = []
-        assert_raises(RuntimeError, evoked_delayed_ssp.plot, proj='interactive')
-        assert_raises(RuntimeError, evoked_delayed_ssp.plot, proj='interactive',
-                      axes='foo')
-        plt.close('all')
-
-
-def test_plot_epochs():
-    """ Test plotting epochs
-    """
-    epochs = _get_epochs()
-    epochs.plot([0, 1], picks=[0, 2, 3], scalings=None, title_str='%s')
-    epochs[0].plot(picks=[0, 2, 3], scalings=None, title_str='%s')
-    # test clicking: should increase coverage on
-    # 3200-3226, 3235, 3237, 3239-3242, 3245-3255, 3260-3280
-    fig = plt.gcf()
-    fig.canvas.button_press_event(10, 10, 'left')
-    # now let's add a bad channel
-    epochs.info['bads'] = [epochs.ch_names[0]]  # include a bad one
-    epochs.plot([0, 1], picks=[0, 2, 3], scalings=None, title_str='%s')
-    epochs[0].plot(picks=[0, 2, 3], scalings=None, title_str='%s')
-    plt.close('all')
-
-
- at sample.requires_sample_data
- at requires_mayavi
-def test_plot_sparse_source_estimates():
-    """Test plotting of (sparse) source estimates
-    """
-    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
-                                            'bem', 'sample-oct-6-src.fif'))
-
-    # dense version
-    vertices = [s['vertno'] for s in sample_src]
-    n_time = 5
-    n_verts = sum(len(v) for v in vertices)
-    stc_data = np.zeros((n_verts * n_time))
-    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
-    stc_data.shape = (n_verts, n_time)
-    stc = SourceEstimate(stc_data, vertices, 1, 1)
-    colormap = mne_analyze_colormap(format='matplotlib')
-    # don't really need to test matplotlib method since it's not used now...
-    colormap = mne_analyze_colormap()
-    plot_source_estimates(stc, 'sample', colormap=colormap,
-                          config_opts={'background': (1, 1, 0)},
-                          subjects_dir=subjects_dir, colorbar=True)
-    assert_raises(TypeError, plot_source_estimates, stc, 'sample',
-                  figure='foo', hemi='both')
-
-    # now do sparse version
-    vertices = sample_src[0]['vertno']
-    n_verts = len(vertices)
-    stc_data = np.zeros((n_verts * n_time))
-    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
-    stc_data.shape = (n_verts, n_time)
-    inds = np.where(np.any(stc_data, axis=1))[0]
-    stc_data = stc_data[inds]
-    vertices = [vertices[inds], np.empty(0, dtype=np.int)]
-    stc = SourceEstimate(stc_data, vertices, 1, 1)
-    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
-                                 opacity=0.5, high_resolution=True)
-
-
-def test_plot_cov():
-    """Test plotting of covariances
-    """
-    raw = _get_raw()
-    cov = read_cov(cov_fname)
-    plot_cov(cov, raw.info, proj=True)
-    plt.close('all')
-
-
- at requires_sklearn
-def test_plot_ica_panel():
-    """Test plotting of ICA panel
-    """
-    raw = _get_raw()
-    ica_picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False,
-                                ecg=False, eog=False, exclude='bads')
-    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
-              max_pca_components=3, n_pca_components=3)
-    ica.decompose_raw(raw, picks=ica_picks)
-    ica.plot_sources_raw(raw)
-    plt.close('all')
-
-
-def test_plot_image_epochs():
-    """Test plotting of epochs image
-    """
-    epochs = _get_epochs()
-    plot_image_epochs(epochs, picks=[1, 2])
-    plt.close('all')
-
-
-def test_plot_connectivity_circle():
-    """Test plotting connectivity circle
-    """
-    node_order = ['frontalpole-lh', 'parsorbitalis-lh',
-                  'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
-                  'medialorbitofrontal-lh', 'parstriangularis-lh',
-                  'rostralanteriorcingulate-lh', 'temporalpole-lh',
-                  'parsopercularis-lh', 'caudalanteriorcingulate-lh',
-                  'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh',
-                  'caudalmiddlefrontal-lh', 'superiortemporal-lh',
-                  'parahippocampal-lh', 'middletemporal-lh',
-                  'inferiortemporal-lh', 'precentral-lh',
-                  'transversetemporal-lh', 'posteriorcingulate-lh',
-                  'fusiform-lh', 'postcentral-lh', 'bankssts-lh',
-                  'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh',
-                  'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh',
-                  'superiorparietal-lh', 'pericalcarine-lh',
-                  'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh',
-                  'lateraloccipital-rh', 'pericalcarine-rh',
-                  'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh',
-                  'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh',
-                  'supramarginal-rh', 'bankssts-rh', 'postcentral-rh',
-                  'fusiform-rh', 'posteriorcingulate-rh',
-                  'transversetemporal-rh', 'precentral-rh',
-                  'inferiortemporal-rh', 'middletemporal-rh',
-                  'parahippocampal-rh', 'superiortemporal-rh',
-                  'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh',
-                  'entorhinal-rh', 'caudalanteriorcingulate-rh',
-                  'parsopercularis-rh', 'temporalpole-rh',
-                  'rostralanteriorcingulate-rh', 'parstriangularis-rh',
-                  'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh',
-                  'lateralorbitofrontal-rh', 'parsorbitalis-rh',
-                  'frontalpole-rh']
-    label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh',
-                   'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh',
-                   'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh',
-                   'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh',
-                   'frontalpole-rh', 'fusiform-lh', 'fusiform-rh',
-                   'inferiorparietal-lh', 'inferiorparietal-rh',
-                   'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh',
-                   'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh',
-                   'lateraloccipital-lh', 'lateraloccipital-rh',
-                   'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh',
-                   'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh',
-                   'medialorbitofrontal-rh', 'middletemporal-lh',
-                   'middletemporal-rh', 'paracentral-lh', 'paracentral-rh',
-                   'parahippocampal-lh', 'parahippocampal-rh',
-                   'parsopercularis-lh', 'parsopercularis-rh',
-                   'parsorbitalis-lh', 'parsorbitalis-rh',
-                   'parstriangularis-lh', 'parstriangularis-rh',
-                   'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh',
-                   'postcentral-rh', 'posteriorcingulate-lh',
-                   'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh',
-                   'precuneus-lh', 'precuneus-rh',
-                   'rostralanteriorcingulate-lh',
-                   'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh',
-                   'rostralmiddlefrontal-rh', 'superiorfrontal-lh',
-                   'superiorfrontal-rh', 'superiorparietal-lh',
-                   'superiorparietal-rh', 'superiortemporal-lh',
-                   'superiortemporal-rh', 'supramarginal-lh',
-                   'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh',
-                   'transversetemporal-lh', 'transversetemporal-rh']
-    node_angles = circular_layout(label_names, node_order, start_pos=90)
-    con = np.random.randn(68, 68)
-    plot_connectivity_circle(con, label_names, n_lines=300,
-                             node_angles=node_angles, title='test')
-    plt.close('all')
-
-
-def test_plot_drop_log():
-    """Test plotting a drop log
-    """
-    epochs = _get_epochs()
-    epochs.drop_bad_epochs()
-    plot_drop_log(epochs.drop_log)
-    plot_drop_log([['One'], [], []])
-    plot_drop_log([['One'], ['Two'], []])
-    plot_drop_log([['One'], ['One', 'Two'], []])
-    plt.close('all')
-
-
-def test_plot_raw():
-    """Test plotting of raw data
-    """
-    raw = _get_raw()
-    events = _get_events()
-    plt.close('all')  # ensure all are closed
-    with warnings.catch_warnings(record=True):
-        fig = raw.plot(events=events, show_options=True)
-        # test mouse clicks
-        x = fig.get_axes()[0].lines[1].get_xdata().mean()
-        y = fig.get_axes()[0].lines[1].get_ydata().mean()
-        data_ax = fig.get_axes()[0]
-        _fake_click(fig, data_ax, [x, y], xform='data')  # mark a bad channel
-        _fake_click(fig, data_ax, [x, y], xform='data')  # unmark a bad channel
-        _fake_click(fig, data_ax, [0.5, 0.999])  # click elsewhere in first axes
-        _fake_click(fig, fig.get_axes()[1], [0.5, 0.5])  # change time
-        _fake_click(fig, fig.get_axes()[2], [0.5, 0.5])  # change channels
-        _fake_click(fig, fig.get_axes()[3], [0.5, 0.5])  # open SSP window
-        fig.canvas.button_press_event(1, 1, 1)  # outside any axes
-        # sadly these fail when no renderer is used (i.e., when using Agg):
-        #ssp_fig = set(plt.get_fignums()) - set([fig.number])
-        #assert_equal(len(ssp_fig), 1)
-        #ssp_fig = plt.figure(list(ssp_fig)[0])
-        #ax = ssp_fig.get_axes()[0]  # only one axis is used
-        #t = [c for c in ax.get_children() if isinstance(c,
-        #     matplotlib.text.Text)]
-        #pos = np.array(t[0].get_position()) + 0.01
-        #_fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # off
-        #_fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # on
-        # test keypresses
-        fig.canvas.key_press_event('escape')
-        fig.canvas.key_press_event('down')
-        fig.canvas.key_press_event('up')
-        fig.canvas.key_press_event('right')
-        fig.canvas.key_press_event('left')
-        fig.canvas.key_press_event('o')
-        fig.canvas.key_press_event('escape')
-        plt.close('all')
-
-
-def test_plot_raw_psds():
-    """Test plotting of raw psds
-    """
-    import matplotlib.pyplot as plt
-    raw = _get_raw()
-    # normal mode
-    raw.plot_psds(tmax=2.0)
-    # specific mode
-    picks = fiff.pick_types(raw.info, meg='mag', eeg=False)[:4]
-    raw.plot_psds(picks=picks, area_mode='range')
-    ax = plt.axes()
-    # if ax is supplied, picks must be, too:
-    assert_raises(ValueError, raw.plot_psds, ax=ax)
-    raw.plot_psds(picks=picks, ax=ax)
-    plt.close('all')
-
-
- at sample.requires_sample_data
-def test_plot_topomap():
-    """Test topomap plotting
-    """
-    # evoked
-    warnings.simplefilter('always', UserWarning)
-    with warnings.catch_warnings(record=True):
-        evoked = fiff.read_evoked(evoked_fname, 'Left Auditory',
-                                  baseline=(None, 0))
-        evoked.plot_topomap(0.1, 'mag', layout=layout)
-        plot_evoked_topomap(evoked, None, ch_type='mag')
-        times = [0.1, 0.2]
-        plot_evoked_topomap(evoked, times, ch_type='eeg')
-        plot_evoked_topomap(evoked, times, ch_type='grad')
-        plot_evoked_topomap(evoked, times, ch_type='planar1')
-        plot_evoked_topomap(evoked, times, ch_type='planar2')
-        with warnings.catch_warnings(True):  # delaunay triangulation warning
-            plot_evoked_topomap(evoked, times, ch_type='mag', layout='auto')
-        assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
-                      proj='interactive')  # projs have already been applied
-        evoked.proj = False  # let's fake it like they haven't been applied
-        plot_evoked_topomap(evoked, 0.1, 'mag', proj='interactive')
-        assert_raises(RuntimeError, plot_evoked_topomap, evoked,
-                      np.repeat(.1, 50))
-        assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
-
-        projs = read_proj(ecg_fname)
-        projs = [p for p in projs if p['desc'].lower().find('eeg') < 0]
-        plot_projs_topomap(projs)
-        plt.close('all')
-        for ch in evoked.info['chs']:
-            if ch['coil_type'] == fiff.FIFF.FIFFV_COIL_EEG:
-                if ch['eeg_loc'] is not None:
-                    ch['eeg_loc'].fill(0)
-                ch['loc'].fill(0)
-        assert_raises(RuntimeError, plot_evoked_topomap, evoked,
-                      times, ch_type='eeg')
-        
-
-
-def test_compare_fiff():
-    """Test comparing fiff files
-    """
-    compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
-    plt.close('all')
-
-
- at requires_sklearn
-def test_plot_ica_topomap():
-    """Test plotting of ICA solutions
-    """
-    raw = _get_raw()
-    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
-              max_pca_components=3, n_pca_components=3)
-    ica_picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False,
-                                ecg=False, eog=False, exclude='bads')
-    ica.decompose_raw(raw, picks=ica_picks)
-    warnings.simplefilter('always', UserWarning)
-    with warnings.catch_warnings(record=True):
-        for components in [0, [0], [0, 1], [0, 1] * 7]:
-            ica.plot_topomap(components)
-    ica.info = None
-    assert_raises(RuntimeError, ica.plot_topomap, 1)
-    plt.close('all')
-
-
- at sample.requires_sample_data
-def test_plot_source_spectrogram():
-    """Test plotting of source spectrogram
-    """
-    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
-                                            'bem', 'sample-oct-6-src.fif'))
-
-    # dense version
-    vertices = [s['vertno'] for s in sample_src]
-    n_time = 5
-    n_verts = sum(len(v) for v in vertices)
-    stc_data = np.ones((n_verts, n_time))
-    stc = SourceEstimate(stc_data, vertices, 1, 1)
-    plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
-    assert_raises(ValueError, plot_source_spectrogram, [], [])
diff --git a/mne/time_frequency/__init__.py b/mne/time_frequency/__init__.py
index 8d17898..806c44d 100644
--- a/mne/time_frequency/__init__.py
+++ b/mne/time_frequency/__init__.py
@@ -1,7 +1,7 @@
 """Time frequency analysis tools
 """
 
-from .tfr import induced_power, single_trial_power, morlet
+from .tfr import induced_power, single_trial_power, morlet, tfr_morlet
 from .psd import compute_raw_psd, compute_epochs_psd
 from .csd import CrossSpectralDensity, compute_epochs_csd
 from .ar import yule_walker, ar_raw, iir_filter_raw
diff --git a/mne/time_frequency/ar.py b/mne/time_frequency/ar.py
index 62eacd7..2feba20 100644
--- a/mne/time_frequency/ar.py
+++ b/mne/time_frequency/ar.py
@@ -1,4 +1,4 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          The statsmodels folks for AR yule_walker
 #
 # License: BSD (3-clause)
@@ -89,7 +89,7 @@ def ar_raw(raw, order, picks, tmin=None, tmax=None):
         The raw data
     order : int
         The AR model order
-    picks : array of int
+    picks : array-like of int
         The channels indices to include
     tmin : float
         The beginning of time interval in seconds.
@@ -133,7 +133,7 @@ def iir_filter_raw(raw, order, picks, tmin=None, tmax=None):
         an instance of Raw
     order : int
         order of the FIR filter
-    picks : array of int
+    picks : array-like of int
         indices of selected channels
     tmin : float
         The beginning of time interval in seconds.
diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py
index 1b4d0e3..bb40349 100644
--- a/mne/time_frequency/csd.py
+++ b/mne/time_frequency/csd.py
@@ -8,7 +8,7 @@ import copy as cp
 import numpy as np
 from scipy.fftpack import fftfreq
 
-from ..fiff.pick import pick_types
+from ..io.pick import pick_types
 from ..utils import logger, verbose
 from ..time_frequency.multitaper import (dpss_windows, _mt_spectra,
                                          _csd_from_mt, _psd_from_mt_adaptive)
diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py
index 387a9bd..1985ee3 100644
--- a/mne/time_frequency/multitaper.py
+++ b/mne/time_frequency/multitaper.py
@@ -43,16 +43,16 @@ def tridisolve(d, e, b, overwrite_b=True):
         x = b
     else:
         x = b.copy()
-    for k in xrange(1, N):
+    for k in range(1, N):
         # e^(k-1) = e(k-1) / d(k-1)
         # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)
         t = ew[k - 1]
         ew[k - 1] = t / dw[k - 1]
         dw[k] = dw[k] - t * ew[k - 1]
-    for k in xrange(1, N):
+    for k in range(1, N):
         x[k] = x[k] - ew[k - 1] * x[k - 1]
     x[N - 1] = x[N - 1] / dw[N - 1]
-    for k in xrange(N - 2, -1, -1):
+    for k in range(N - 2, -1, -1):
         x[k] = x[k] / dw[k] - ew[k] * x[k + 1]
 
     if not overwrite_b:
@@ -207,7 +207,7 @@ def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
         # find the corresponding eigenvectors via inverse iteration
         t = np.linspace(0, np.pi, N)
         dpss = np.zeros((Kmax, N), 'd')
-        for k in xrange(Kmax):
+        for k in range(Kmax):
             dpss[k] = tridi_inverse_iteration(diagonal, off_diag, w[k],
                                               x0=np.sin((k + 1) * t))
 
@@ -228,8 +228,8 @@ def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
 
     # compute autocorr using FFT (same as nitime.utils.autocorr(dpss) * N)
     rxx_size = 2 * N - 1
-    NFFT = 2 ** int(np.ceil(np.log2(rxx_size)))
-    dpss_fft = fftpack.fft(dpss, NFFT)
+    n_fft = 2 ** int(np.ceil(np.log2(rxx_size)))
+    dpss_fft = fftpack.fft(dpss, n_fft)
     dpss_rxx = np.real(fftpack.ifft(dpss_fft * dpss_fft.conj()))
     dpss_rxx = dpss_rxx[:, :N]
 
@@ -452,7 +452,8 @@ def _mt_spectra(x, dpss, sfreq, n_fft=None):
 
 @verbose
 def multitaper_psd(x, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
-                   adaptive=False, low_bias=True, n_jobs=1, verbose=None):
+                   adaptive=False, low_bias=True, n_jobs=1,
+                   normalization='length', verbose=None):
     """Compute power spectrum density (PSD) using a multi-taper method
 
     Parameters
@@ -475,6 +476,10 @@ def multitaper_psd(x, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
         bandwidth.
     n_jobs : int
         Number of parallel jobs to use (only used if adaptive=True).
+    normalization : str
+        Either "full" or "length" (default). If "full", the PSD will
+        be normalized by the sampling rate as well as the length of
+        the signal (as in nitime).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -485,6 +490,9 @@ def multitaper_psd(x, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
     freqs : array
         The frequency points in Hz of the PSD.
     """
+    if normalization not in ('length', 'full'):
+        raise ValueError('Normalization must be "length" or "full", not %s'
+                         % normalization)
     if x.ndim > 2:
         raise ValueError('x can only be 1d or 2d')
 
@@ -531,5 +539,7 @@ def multitaper_psd(x, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
         psd = psd[0, :]
 
     freqs = freqs[freq_mask]
+    if normalization == 'full':
+        psd /= sfreq
 
     return psd, freqs
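
Given the hunk above, the two normalization modes differ exactly by a
factor of the sampling rate. A quick sketch under that assumption:

    import numpy as np
    from mne.time_frequency import multitaper_psd

    x = np.random.randn(2, 1000)
    sfreq = 500.
    psd_len, freqs = multitaper_psd(x, sfreq, normalization='length')
    psd_full, _ = multitaper_psd(x, sfreq, normalization='full')
    np.testing.assert_allclose(psd_full, psd_len / sfreq)  # 'full' divides by sfreq
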
diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py
index 99a8510..bd719d5 100644
--- a/mne/time_frequency/psd.py
+++ b/mne/time_frequency/psd.py
@@ -1,35 +1,43 @@
-# Authors : Alexandre Gramfort, gramfort at nmr.mgh.harvard.edu (2011)
-#           Denis A. Engemann <d.engemann at fz-juelich.de>
+# Authors : Alexandre Gramfort, alexandre.gramfort at telecom-paristech.fr (2011)
+#           Denis A. Engemann <denis.engemann at gmail.com>
 # License : BSD 3-clause
 
+import warnings  # needed for the NFFT deprecation warning added below
 import numpy as np
 
 from ..parallel import parallel_func
-from ..fiff.proj import make_projector_info
-from ..fiff.pick import pick_types
+from ..io.proj import make_projector_info
+from ..io.pick import pick_types
 from ..utils import logger, verbose
 
 
 @verbose
 def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
-                    fmin=0, fmax=np.inf, NFFT=2048, n_jobs=1,
-                    plot=False, proj=False, verbose=None):
-    """Compute power spectral density with multi-taper
+                    fmin=0, fmax=np.inf, n_fft=2048, pad_to=None, n_overlap=0,
+                    n_jobs=1, plot=False, proj=False, NFFT=None,
+                    verbose=None):
+    """Compute power spectral density with average periodograms.
 
     Parameters
     ----------
     raw : instance of Raw
         The raw data.
-    picks : None or array of integers
+    picks : array-like of int | None
         The selection of channels to include in the computation.
         If None, take all channels.
     fmin : float
         Min frequency of interest
     fmax : float
         Max frequency of interest
-    NFFT : int
+    n_fft : int
         The length of the tapers, i.e. the windows. The smaller
         it is, the smoother the PSDs.
+    pad_to : int | None
+        The number of points to which the data segment is padded when
+        performing the FFT. If None, pad_to equals `n_fft`.
+    n_overlap : int
+        The number of points of overlap between blocks. The default value
+        is 0 (no overlap).
     n_jobs : int
         Number of CPUs to use in the computation.
     plot : bool
@@ -46,6 +53,10 @@ def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
     freqs: array of float
         The frequencies
     """
+    if NFFT is not None:
+        n_fft = NFFT
+        warnings.warn("`NFFT` is deprecated and will be removed in v0.9. "
+                      "Use `n_fft` instead")
     start, stop = raw.time_as_index([tmin, tmax])
     if picks is not None:
         data, times = raw[picks, start:(stop + 1)]
@@ -59,19 +70,20 @@ def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
         else:
             data = np.dot(proj, data)
 
-    NFFT = int(NFFT)
+    n_fft = int(n_fft)
     Fs = raw.info['sfreq']
 
-    logger.info("Effective window size : %0.3f (s)" % (NFFT / float(Fs)))
+    logger.info("Effective window size : %0.3f (s)" % (n_fft / float(Fs)))
 
     import matplotlib.pyplot as plt
     parallel, my_psd, n_jobs = parallel_func(plt.psd, n_jobs)
     fig = plt.figure()
-    out = parallel(my_psd(d, Fs=Fs, NFFT=NFFT) for d in data)
+    out = parallel(my_psd(d, Fs=Fs, NFFT=n_fft, noverlap=n_overlap,
+                          pad_to=pad_to) for d in data)
     if not plot:
         plt.close(fig)
     freqs = out[0][1]
-    psd = np.array(zip(*out)[0])
+    psd = np.array([o[0] for o in out])
 
     mask = (freqs >= fmin) & (freqs <= fmax)
     freqs = freqs[mask]
@@ -80,10 +92,11 @@ def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
     return psd, freqs
 
 
-def _compute_psd(data, fmin, fmax, Fs, n_fft, psd):
+def _compute_psd(data, fmin, fmax, Fs, n_fft, psd, n_overlap, pad_to):
     """Compute the PSD"""
-    out = [psd(d, Fs=Fs, NFFT=n_fft) for d in data]
-    psd = np.array(zip(*out)[0])
+    out = [psd(d, Fs=Fs, NFFT=n_fft, noverlap=n_overlap, pad_to=pad_to)
+           for d in data]
+    psd = np.array([o[0] for o in out])
     freqs = out[0][1]
     mask = (freqs >= fmin) & (freqs <= fmax)
     freqs = freqs[mask]
@@ -92,8 +105,8 @@ def _compute_psd(data, fmin, fmax, Fs, n_fft, psd):
 
 @verbose
 def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256,
-                       n_jobs=1, verbose=None):
-    """Compute power spectral density with multi-taper
+                       pad_to=None, n_overlap=0, n_jobs=1, verbose=None):
+    """Compute power spectral density with with average periodograms.
 
     Parameters
     ----------
@@ -103,7 +116,7 @@ def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256,
         Min time instant to consider
     tmax : float
         Max time instant to consider
-    picks : None or array of integers
+    picks : array-like of int | None
         The selection of channels to include in the computation.
         If None, take all channels.
     fmin : float
@@ -113,6 +126,12 @@ def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256,
     n_fft : int
         The length of the tapers, i.e. the windows. The smaller
         it is, the smoother the PSDs.
+    pad_to : int | None
+        The number of points to which the data segment is padded when
+        performing the FFT. If None, pad_to equals `n_fft`.
+    n_overlap : int
+        The number of points of overlap between blocks. The default value
+        is 0 (no overlap).
     n_jobs : int
         Number of CPUs to use in the computation.
     verbose : bool, str, int, or None
@@ -137,9 +156,10 @@ def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256,
     import matplotlib.pyplot as plt
     parallel, my_psd, n_jobs = parallel_func(_compute_psd, n_jobs)
     fig = plt.figure()  # threading will induce errors otherwise
-    out = parallel(my_psd(data[picks], fmin, fmax, Fs, n_fft, plt.psd)
+    out = parallel(my_psd(data[picks], fmin, fmax, Fs, n_fft, plt.psd,
+                          n_overlap, pad_to)
                    for data in epochs)
     plt.close(fig)
-    psds, freqs = zip(*out)
-
+    psds = [o[0] for o in out]
+    freqs = [o[1] for o in out]
     return np.array(psds), freqs[0]
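
A minimal call sketch for the renamed interface, assuming the 0.8
signature shown above (the file path is a placeholder; `NFFT` remains as
a deprecated alias until v0.9):

    from mne import io, pick_types
    from mne.time_frequency import compute_raw_psd

    raw = io.Raw('test_raw.fif')  # placeholder path
    picks = pick_types(raw.info, meg='mag', exclude='bads')[:2]
    psds, freqs = compute_raw_psd(raw, tmin=0, tmax=10, picks=picks,
                                  fmin=2, fmax=70, n_fft=128, n_overlap=64)
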
diff --git a/mne/time_frequency/stft.py b/mne/time_frequency/stft.py
index ff802bd..086a8b5 100644
--- a/mne/time_frequency/stft.py
+++ b/mne/time_frequency/stft.py
@@ -67,7 +67,7 @@ def stft(x, wsize, tstep=None, verbose=None):
                          'window length.')
 
     n_step = int(ceil(T / float(tstep)))
-    n_freq = wsize / 2 + 1
+    n_freq = wsize // 2 + 1
     logger.info("Number of frequencies: %d" % n_freq)
     logger.info("Number of time steps: %d" % n_step)
 
@@ -88,7 +88,7 @@ def stft(x, wsize, tstep=None, verbose=None):
     # Zero-padding and Pre-processing for edges
     xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep),
                   dtype=x.dtype)
-    xp[:, (wsize - tstep) / 2: (wsize - tstep) / 2 + T] = x
+    xp[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T] = x
     x = xp
 
     for t in range(n_step):
@@ -169,18 +169,18 @@ def istft(X, tstep=None, Tx=None):
         swin[t * tstep:t * tstep + wsize] += win ** 2
     swin = np.sqrt(swin / wsize)
 
-    fframe = np.empty((n_signals, n_win + wsize / 2 - 1), dtype=X.dtype)
+    fframe = np.empty((n_signals, n_win + wsize // 2 - 1), dtype=X.dtype)
     for t in range(n_step):
         # IFFT
         fframe[:, :n_win] = X[:, :, t]
-        fframe[:, n_win:] = np.conj(X[:, wsize / 2 - 1: 0: -1, t])
+        fframe[:, n_win:] = np.conj(X[:, wsize // 2 - 1: 0: -1, t])
         frame = ifft(fframe)
         wwin = win / swin[t * tstep:t * tstep + wsize]
         # Overlap-add
         x[:, t * tstep: t * tstep + wsize] += np.real(np.conj(frame) * wwin)
 
     # Truncation
-    x = x[:, (wsize - tstep) / 2: (wsize - tstep) / 2 + T + 1][:, :Tx].copy()
+    x = x[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1][:, :Tx].copy()
     return x
 
 
@@ -206,7 +206,7 @@ def stftfreq(wsize, sfreq=None):
     stft
     istft
     """
-    n_freq = wsize / 2 + 1
+    n_freq = wsize // 2 + 1
     freqs = fftfreq(wsize)
     freqs = np.abs(freqs[:n_freq])
     if sfreq is not None:
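
The `/` to `//` changes above matter under Python 3's true division,
where dividing two integers yields a float that cannot serve as an array
shape or index:

    wsize = 64
    print(wsize / 2 + 1)   # 33.0 on Python 3 (a float)
    print(wsize // 2 + 1)  # 33 on Python 2 and 3 alike
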
diff --git a/mne/time_frequency/tests/test_ar.py b/mne/time_frequency/tests/test_ar.py
index a8dcf2a..70ea665 100644
--- a/mne/time_frequency/tests/test_ar.py
+++ b/mne/time_frequency/tests/test_ar.py
@@ -3,15 +3,15 @@ import numpy as np
 from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true
 
-from mne import fiff
+from mne import io, pick_types
 from mne.time_frequency import yule_walker, ar_raw
-from mne.utils import requires_statsmodels
+from mne.utils import requires_statsmodels, requires_patsy
 
 
-raw_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data',
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
 
-
+ at requires_patsy
 @requires_statsmodels
 def test_yule_walker():
     """Test Yule-Walker against statsmodels
@@ -27,10 +27,10 @@ def test_yule_walker():
 def test_ar_raw():
     """Test fitting AR model on raw data
     """
-    raw = fiff.Raw(raw_fname)
+    raw = io.Raw(raw_fname)
 
     # picks MEG gradiometers
-    picks = fiff.pick_types(raw.info, meg='grad', exclude='bads')
+    picks = pick_types(raw.info, meg='grad', exclude='bads')
 
     picks = picks[:2]
 
diff --git a/mne/time_frequency/tests/test_csd.py b/mne/time_frequency/tests/test_csd.py
index ba5c574..68cb9a0 100644
--- a/mne/time_frequency/tests/test_csd.py
+++ b/mne/time_frequency/tests/test_csd.py
@@ -3,14 +3,16 @@ from nose.tools import (assert_raises, assert_equal, assert_almost_equal,
                         assert_true)
 from numpy.testing import assert_array_equal
 from os import path as op
+import warnings
 
 import mne
 
-from mne.fiff import Raw
+from mne.io import Raw
 from mne.utils import sum_squared
 from mne.time_frequency import compute_epochs_csd, induced_power
 
-base_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+warnings.simplefilter('always')
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 event_fname = op.join(base_dir, 'test-eve.fif')
 
@@ -21,7 +23,7 @@ def _get_data():
     raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels
 
     # Set picks
-    picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
+    picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
                                 stim=False, exclude='bads')
 
     # Read several epochs
@@ -72,8 +74,10 @@ def test_compute_epochs_csd():
 
     # Computing induced power for comparison
     epochs.crop(tmin=0.04, tmax=0.15)
-    power, _ = induced_power(epochs.get_data(), epochs.info['sfreq'], [10],
-                             n_cycles=0.6)
+    with warnings.catch_warnings(record=True):  # deprecation
+        warnings.simplefilter('always')
+        power, _ = induced_power(epochs.get_data(), epochs.info['sfreq'], [10],
+                                 n_cycles=0.6)
     power = np.mean(power, 2)
 
     # Maximum PSD should occur for specific channel
diff --git a/mne/time_frequency/tests/test_multitaper.py b/mne/time_frequency/tests/test_multitaper.py
index a6196c1..1dd04f2 100644
--- a/mne/time_frequency/tests/test_multitaper.py
+++ b/mne/time_frequency/tests/test_multitaper.py
@@ -1,5 +1,7 @@
 import numpy as np
+from nose.tools import assert_raises
 from numpy.testing import assert_array_almost_equal
+from distutils.version import LooseVersion
 
 from mne.time_frequency import dpss_windows, multitaper_psd
 from mne.utils import requires_nitime
@@ -20,8 +22,10 @@ def test_dpss_windows():
     assert_array_almost_equal(dpss, dpss_ni)
     assert_array_almost_equal(eigs, eigs_ni)
 
-    dpss, eigs = dpss_windows(N, half_nbw, Kmax, interp_from=200, low_bias=False)
-    dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax, interp_from=200)
+    dpss, eigs = dpss_windows(N, half_nbw, Kmax, interp_from=200,
+                              low_bias=False)
+    dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax,
+                                                  interp_from=200)
 
     assert_array_almost_equal(dpss, dpss_ni)
     assert_array_almost_equal(eigs, eigs_ni)
@@ -35,11 +39,15 @@ def test_multitaper_psd():
     n_times = 1000
     x = np.random.randn(5, n_times)
     sfreq = 500
+    assert_raises(ValueError, multitaper_psd, x, sfreq, normalization='foo')
+    ni_5 = (LooseVersion(ni.__version__) >= LooseVersion('0.5'))
+    norm = 'full' if ni_5 else 'length'
 
     for adaptive, n_jobs in zip((False, True, True), (1, 1, 2)):
-        psd, freqs = multitaper_psd(x, sfreq, adaptive=adaptive, n_jobs=n_jobs)
+        psd, freqs = multitaper_psd(x, sfreq, adaptive=adaptive, n_jobs=n_jobs,
+                                    normalization=norm)
         freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(x, sfreq,
-                              adaptive=adaptive, jackknife=False)
+            adaptive=adaptive, jackknife=False)
 
         # for some reason nitime returns n_times + 1 frequency points
         # causing the value at 0 to be different
diff --git a/mne/time_frequency/tests/test_psd.py b/mne/time_frequency/tests/test_psd.py
index c7677e3..37a06b0 100644
--- a/mne/time_frequency/tests/test_psd.py
+++ b/mne/time_frequency/tests/test_psd.py
@@ -3,12 +3,12 @@ import os.path as op
 from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true
 
-from mne import fiff
+from mne import io, pick_types
 from mne import Epochs
 from mne import read_events
 from mne.time_frequency import compute_raw_psd, compute_epochs_psd
 
-base_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 event_fname = op.join(base_dir, 'test-eve.fif')
 
@@ -16,24 +16,24 @@ event_fname = op.join(base_dir, 'test-eve.fif')
 def test_psd():
     """Test PSD estimation
     """
-    raw = fiff.Raw(raw_fname)
+    raw = io.Raw(raw_fname)
 
     exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
 
     # picks MEG gradiometers
-    picks = fiff.pick_types(raw.info, meg='mag', eeg=False, stim=False,
+    picks = pick_types(raw.info, meg='mag', eeg=False, stim=False,
                             exclude=exclude)
 
     picks = picks[:2]
 
     tmin, tmax = 0, 10  # use the first 10 s of data
     fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
-    NFFT = 128  # the FFT size (NFFT). Ideally a power of 2
+    n_fft = 128  # the FFT size. Ideally a power of 2
     psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
-                                  fmin=fmin, fmax=fmax, NFFT=NFFT, n_jobs=1,
+                                  fmin=fmin, fmax=fmax, n_fft=n_fft, n_jobs=1,
                                   proj=False)
     psds_proj, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
-                                       fmin=fmin, fmax=fmax, NFFT=NFFT,
+                                       fmin=fmin, fmax=fmax, n_fft=n_fft,
                                        n_jobs=1, proj=True)
 
     assert_array_almost_equal(psds, psds_proj)
@@ -45,12 +45,12 @@ def test_psd():
 def test_psd_epochs():
     """Test PSD estimation on epochs
     """
-    raw = fiff.Raw(raw_fname)
+    raw = io.Raw(raw_fname)
 
     exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
 
     # picks MEG gradiometers
-    picks = fiff.pick_types(raw.info, meg='mag', eeg=False, stim=False,
+    picks = pick_types(raw.info, meg='mag', eeg=False, stim=False,
                             exclude=exclude)
 
     picks = picks[:2]
@@ -62,7 +62,7 @@ def test_psd_epochs():
     raw.info['bads'] += ['MEG 2443']  # bads
 
     # picks MEG gradiometers
-    picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+    picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
                             stim=False, include=include, exclude='bads')
 
     events = read_events(event_fname)
@@ -71,7 +71,7 @@ def test_psd_epochs():
                     reject=dict(grad=4000e-13, eog=150e-6), proj=False,
                     preload=True)
 
-    picks = fiff.pick_types(epochs.info, meg='grad', eeg=False, eog=True,
+    picks = pick_types(epochs.info, meg='grad', eeg=False, eog=True,
                             stim=False, include=include, exclude='bads')
     psds, freqs = compute_epochs_psd(epochs[:1], fmin=2, fmax=300, n_fft=n_fft,
                                      picks=picks)
diff --git a/mne/time_frequency/tests/test_stft.py b/mne/time_frequency/tests/test_stft.py
index 59b3789..e7eca49 100644
--- a/mne/time_frequency/tests/test_stft.py
+++ b/mne/time_frequency/tests/test_stft.py
@@ -31,7 +31,7 @@ def test_stft():
 
         # norm conservation thanks to tight frame property
         assert_almost_equal(np.sqrt(stft_norm2(X)),
-                            map(linalg.norm, x), decimal=2)
+                            [linalg.norm(xx) for xx in x], decimal=2)
 
         # Try with empty array
         x = np.zeros((0, T))
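
For context, a self-contained round-trip sketch of the tight-frame
property this test relies on, assuming the 0.8 stft/istft signatures:

    import numpy as np
    from mne.time_frequency.stft import stft, istft

    T = 253
    x = np.random.randn(2, T)
    wsize = 128  # window size in samples
    X = stft(x, wsize, tstep=wsize // 2)      # complex, shape (2, wsize // 2 + 1, n_step)
    x_rec = istft(X, tstep=wsize // 2, Tx=T)  # near-perfect reconstruction
    np.testing.assert_allclose(x, x_rec, atol=1e-10)
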
diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py
index ccce51a..653e43a 100644
--- a/mne/time_frequency/tests/test_tfr.py
+++ b/mne/time_frequency/tests/test_tfr.py
@@ -1,15 +1,15 @@
 import numpy as np
 import os.path as op
 from numpy.testing import assert_array_almost_equal
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_false, assert_equal
 
-from mne import fiff, Epochs, read_events
-from mne.time_frequency import induced_power, single_trial_power
-from mne.time_frequency.tfr import cwt_morlet, morlet
+from mne import io, Epochs, read_events, pick_types
+from mne.time_frequency import single_trial_power
+from mne.time_frequency.tfr import cwt_morlet, morlet, tfr_morlet
 
-raw_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data',
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
-event_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+event_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                       'data', 'test-eve.fif')
 
 
@@ -31,14 +31,14 @@ def test_time_frequency():
     tmax = 0.5
 
     # Setup for reading the raw data
-    raw = fiff.Raw(raw_fname)
+    raw = io.Raw(raw_fname)
     events = read_events(event_fname)
 
     include = []
     exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
 
     # picks MEG gradiometers
-    picks = fiff.pick_types(raw.info, meg='grad', eeg=False,
+    picks = pick_types(raw.info, meg='grad', eeg=False,
                             stim=False, include=include, exclude=exclude)
 
     picks = picks[:2]
@@ -46,30 +46,48 @@ def test_time_frequency():
                     baseline=(None, 0))
     data = epochs.get_data()
     times = epochs.times
+    nave = len(data)
 
-    frequencies = np.arange(6, 20, 5)  # define frequencies of interest
-    Fs = raw.info['sfreq']  # sampling in Hz
-    n_cycles = frequencies / float(4)
-    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
-                                      n_cycles=n_cycles, use_fft=True)
+    freqs = np.arange(6, 20, 5)  # define frequencies of interest
+    n_cycles = freqs / 4.
+
+    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
+                            use_fft=True, return_itc=True)
+
+    print(itc)  # test repr
+    print(itc.ch_names)  # test property
+    itc = itc + power  # test add
+    itc = itc - power  # test sub
+    itc -= power
+    itc += power
 
-    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
-    assert_true(power.shape == phase_lock.shape)
-    assert_true(np.sum(phase_lock >= 1) == 0)
-    assert_true(np.sum(phase_lock <= 0) == 0)
+    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
 
-    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
-                                      n_cycles=2, use_fft=False)
+    assert_true('meg' in power)
+    assert_true('grad' in power)
+    assert_false('mag' in power)
+    assert_false('eeg' in power)
 
-    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
-    assert_true(power.shape == phase_lock.shape)
-    assert_true(np.sum(phase_lock >= 1) == 0)
-    assert_true(np.sum(phase_lock <= 0) == 0)
+    assert_equal(power.nave, nave)
+    assert_equal(itc.nave, nave)
+    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
+    assert_true(power.data.shape == itc.data.shape)
+    assert_true(np.sum(itc.data >= 1) == 0)
+    assert_true(np.sum(itc.data <= 0) == 0)
 
-    tfr = cwt_morlet(data[0], Fs, frequencies, use_fft=True, n_cycles=2)
-    assert_true(tfr.shape == (len(picks), len(frequencies), len(times)))
+    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
+                            return_itc=True)
+
+    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
+    assert_true(power.data.shape == itc.data.shape)
+    assert_true(np.sum(itc.data >= 1) == 0)
+    assert_true(np.sum(itc.data <= 0) == 0)
+
+    Fs = raw.info['sfreq']  # sampling in Hz
+    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
+    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
 
-    single_power = single_trial_power(data, Fs, frequencies, use_fft=False,
+    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
                                       n_cycles=2)
 
-    assert_array_almost_equal(np.mean(single_power), power)
+    assert_array_almost_equal(np.mean(single_power), power.data)
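The test updates above track the 0.8 API migration: `mne.fiff` becomes `mne.io`,
and `pick_types` is now exposed at the package top level. A minimal sketch of the
new import pattern (the local `test_raw.fif` path is an assumption; any raw FIF
file works):

    # 0.8-style imports; previously: from mne import fiff; fiff.Raw(...)
    from mne import io, pick_types

    raw = io.Raw('test_raw.fif')  # hypothetical local copy of the test file
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       exclude='bads')  # gradiometers only, skip bad channels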
diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py
index 02b2515..6a16a9e 100644
--- a/mne/time_frequency/tfr.py
+++ b/mne/time_frequency/tfr.py
@@ -1,20 +1,25 @@
 """A module which implements the continuous wavelet transform
 with complex Morlet wavelets.
 
-Author : Alexandre Gramfort, gramfort at nmr.mgh.harvard.edu (2011)
+Author : Alexandre Gramfort, alexandre.gramfort at telecom-paristech.fr (2011)
 License : BSD 3-clause
 
 inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
 """
 
 from math import sqrt
+from copy import deepcopy
 import numpy as np
 from scipy import linalg
 from scipy.fftpack import fftn, ifftn
 
+from ..fixes import partial
 from ..baseline import rescale
 from ..parallel import parallel_func
 from ..utils import logger, verbose
+from ..channels import ContainsMixin, PickDropChannelsMixin
+from ..io.pick import pick_info, pick_types
+from ..utils import deprecated
 
 
 def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
@@ -75,10 +80,11 @@ def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
 
 
 def _centered(arr, newsize):
+    """Aux Function to center data"""
     # Return the center newsize portion of the array.
     newsize = np.asarray(newsize)
     currsize = np.array(arr.shape)
-    startind = (currsize - newsize) / 2
+    startind = (currsize - newsize) // 2
     endind = startind + newsize
     myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
     return arr[tuple(myslice)]
@@ -334,8 +340,8 @@ def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     return power
 
 
-def induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
-                  decim=1, n_jobs=1, zero_mean=False):
+def _induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
+                   decim=1, n_jobs=1, zero_mean=False):
     """Compute time induced power and inter-trial phase-locking factor
 
     The time frequency decomposition is done with Morlet wavelets
@@ -398,3 +404,510 @@ def induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     psd /= n_epochs
     plf = np.abs(plf) / n_epochs
     return psd, plf
+
+
+@deprecated("induced_power will be removed in release 0.9. Use "
+            "tfr_morlet instead.")
+def induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
+                  decim=1, n_jobs=1, zero_mean=False):
+    """Compute time induced power and inter-trial phase-locking factor
+
+    The time frequency decomposition is done with Morlet wavelets
+
+    Parameters
+    ----------
+    data : array
+        3D array of shape [n_epochs, n_channels, n_times]
+    Fs : float
+        sampling Frequency
+    frequencies : array
+        Array of frequencies of interest
+    use_fft : bool
+        Compute transform with fft based convolutions or temporal
+        convolutions.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    decim : int
+        Temporal decimation factor
+    n_jobs : int
+        The number of CPUs used in parallel. All CPUs are used in -1.
+        Requires joblib package.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+
+    Returns
+    -------
+    power : 3D array
+        Induced power (Channels x Frequencies x Timepoints).
+        Squared amplitude of time-frequency coefficients.
+    phase_lock : 3D array
+        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints).
+    """
+    return _induced_power(data, Fs, frequencies, use_fft=use_fft,
+                          n_cycles=n_cycles, decim=decim, n_jobs=n_jobs,
+                          zero_mean=zero_mean)
+
+
+def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
+                 baseline, vmin, vmax, dB):
+    """Aux Function to prepare tfr computation"""
+    from ..viz.utils import _setup_vmin_vmax
+
+    if mode is not None and baseline is not None:
+        logger.info("Applying baseline correction '%s' during %s" %
+                    (mode, baseline))
+        data = rescale(data.copy(), times, baseline, mode)
+
+    # crop time
+    itmin, itmax = None, None
+    if tmin is not None:
+        itmin = np.where(times >= tmin)[0][0]
+    if tmax is not None:
+        itmax = np.where(times <= tmax)[0][-1]
+
+    times = times[itmin:itmax]
+
+    # crop freqs
+    ifmin, ifmax = None, None
+    if fmin is not None:
+        ifmin = np.where(freqs >= fmin)[0][0]
+    if fmax is not None:
+        ifmax = np.where(freqs <= fmax)[0][-1]
+
+    freqs = freqs[ifmin:ifmax]
+
+    times *= 1e3
+    if dB:
+        data = 20 * np.log10(data)
+
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+    return data, times, freqs, vmin, vmax
+
+
+# XXX : todo IO of TFRs
+class AverageTFR(ContainsMixin, PickDropChannelsMixin):
+    """Container for Time-Frequency data
+
+    Can, for example, store induced power at the sensor level or
+    intertrial coherence.
+
+    Parameters
+    ----------
+    info : Info
+        The measurement info.
+    data : ndarray, shape (n_channels, n_freqs, n_times)
+        The data.
+    times : ndarray, shape (n_times,)
+        The time values in seconds.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    nave : int
+        The number of averaged TFRs.
+
+    Attributes
+    ----------
+    ch_names : list
+        The names of the channels.
+    """
+    @verbose
+    def __init__(self, info, data, times, freqs, nave, verbose=None):
+        self.info = info
+        if data.ndim != 3:
+            raise ValueError('data should be 3d. Got %d.' % data.ndim)
+        n_channels, n_freqs, n_times = data.shape
+        if n_channels != len(info['chs']):
+            raise ValueError("Number of channels and data size don't match"
+                             " (%d != %d)." % (n_channels, len(info['chs'])))
+        if n_freqs != len(freqs):
+            raise ValueError("Number of frequencies and data size don't match"
+                             " (%d != %d)." % (n_freqs, len(freqs)))
+        if n_times != len(times):
+            raise ValueError("Number of times and data size don't match"
+                             " (%d != %d)." % (n_times, len(times)))
+        self.data = data
+        self.times = times
+        self.freqs = freqs
+        self.nave = nave
+
+    @property
+    def ch_names(self):
+        return self.info['ch_names']
+
+    @verbose
+    def plot(self, picks, baseline=None, mode='mean', tmin=None, tmax=None,
+             fmin=None, fmax=None, vmin=None, vmax=None, cmap='RdBu_r',
+             dB=False, colorbar=True, show=True, verbose=None):
+        """Plot TFRs in a topography with images
+
+        Parameters
+        ----------
+        picks : array-like of int
+            The indices of the channels to plot.
+        baseline : None (default) or tuple of length 2
+            The time interval to apply baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or zscore (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline)).
+            If None no baseline correction is applied.
+        tmin : None | float
+            The first time instant to display. If None the first time point
+            available is used.
+        tmax : None | float
+            The last time instant to display. If None the last time point
+            available is used.
+        fmin : None | float
+            The first frequency to display. If None the first frequency
+            available is used.
+        fmax : None | float
+            The last frequency to display. If None the last frequency
+            available is used.
+        vmin : float | None
+            The minimum value on the color scale. If vmin is None, the data
+            minimum value is used.
+        vmax : float | None
+            The maximum value on the color scale. If vmax is None, the data
+            maximum value is used.
+        cmap : matplotlib colormap | str
+            The colormap to use. Defaults to 'RdBu_r'.
+        dB : bool
+            If True, 20*log10 is applied to the data to get dB.
+        colorbar : bool
+            If True, a colorbar will be added to the plot.
+        show : bool
+            Call pyplot.show() at the end.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        """
+        from ..viz.topo import _imshow_tfr
+        import matplotlib.pyplot as plt
+        times, freqs = self.times.copy(), self.freqs.copy()
+        data = self.data[picks]
+
+        data, times, freqs, vmin, vmax = \
+            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
+                         baseline, vmin, vmax, dB)
+
+        tmin, tmax = times[0], times[-1]
+
+        for k in range(len(data)):
+            plt.figure()
+            _imshow_tfr(plt, 0, tmin, tmax, vmin, vmax, ylim=None,
+                        tfr=data[k: k + 1], freq=freqs, x_label='Time (ms)',
+                        y_label='Frequency (Hz)', colorbar=colorbar,
+                        picker=False, cmap=cmap)
+
+        if show:
+            plt.show()
+
+    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
+                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
+                  layout=None, cmap='RdBu_r', title=None, dB=False,
+                  colorbar=True, layout_scale=0.945, show=True):
+        """Plot TFRs in a topography with images
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            The indices of the channels to plot. If None all available
+            channels are displayed.
+        baseline : None (default) or tuple of length 2
+            The time interval to apply baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or zscore (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline)).
+            If None no baseline correction is applied.
+        tmin : None | float
+            The first time instant to display. If None the first time point
+            available is used.
+        tmax : None | float
+            The last time instant to display. If None the last time point
+            available is used.
+        fmin : None | float
+            The first frequency to display. If None the first frequency
+            available is used.
+        fmax : None | float
+            The last frequency to display. If None the last frequency
+            available is used.
+        vmin : float | None
+            The minimum value on the color scale. If vmin is None, the data
+            minimum value is used.
+        vmax : float | None
+            The maximum value on the color scale. If vmax is None, the data
+            maximum value is used.
+        layout : Layout | None
+            Layout instance specifying sensor positions. If possible, the
+            correct layout is inferred from the data.
+        cmap : matplotlib colormap | str
+            The colormap to use. Defaults to 'RdBu_r'.
+        title : str
+            Title of the figure.
+        dB : bool
+            If True, 20*log10 is applied to the data to get dB.
+        colorbar : bool
+            If True, a colorbar will be added to the plot.
+        layout_scale : float
+            Scaling factor for adjusting the relative size of the layout
+            on the canvas.
+        show : bool
+            Call pyplot.show() at the end.
+        """
+        from ..viz.topo import _imshow_tfr, _plot_topo
+        times = self.times.copy()
+        freqs = self.freqs
+        data = self.data
+        info = self.info
+
+        if picks is not None:
+            data = data[picks]
+            info = pick_info(info, picks)
+
+        data, times, freqs, vmin, vmax = \
+            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
+                         mode, baseline, vmin, vmax, dB)
+
+        if layout is None:
+            from mne.layouts.layout import find_layout
+            layout = find_layout(self.info)
+
+        imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
+
+        fig = _plot_topo(info=info, times=times,
+                         show_func=imshow, layout=layout,
+                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                         layout_scale=layout_scale, title=title, border='w',
+                         x_label='Time (ms)', y_label='Frequency (Hz)')
+
+        if show:
+            import matplotlib.pyplot as plt
+            plt.show()
+
+        return fig
+
+    def _check_compat(self, tfr):
+        """checks that self and tfr have the same time-frequency ranges"""
+        assert np.all(tfr.times == self.times)
+        assert np.all(tfr.freqs == self.freqs)
+
+    def __add__(self, tfr):
+        self._check_compat(tfr)
+        out = self.copy()
+        out.data += tfr.data
+        return out
+
+    def __iadd__(self, tfr):
+        self._check_compat(tfr)
+        self.data += tfr.data
+        return self
+
+    def __sub__(self, tfr):
+        self._check_compat(tfr)
+        out = self.copy()
+        out.data -= tfr.data
+        return out
+
+    def __isub__(self, tfr):
+        self._check_compat(tfr)
+        self.data -= tfr.data
+        return self
+
+    def copy(self):
+        """Return a copy of the instance."""
+        return deepcopy(self)
+
+    def __repr__(self):
+        s = "time : [%f, %f]" % (self.times[0], self.times[-1])
+        s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
+        s += ", nave : %d" % self.nave
+        s += ', channels : %d' % self.data.shape[0]
+        return "<AverageTFR  |  %s>" % s
+
+    def apply_baseline(self, baseline, mode='mean'):
+        """Baseline correct the data
+
+        Parameters
+        ----------
+        baseline : tuple or list of length 2
+            The time interval to apply rescaling / baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or z-score (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline))
+            If None, no baseline correction will be performed.
+        """
+        self.data = rescale(self.data, self.times, baseline, mode, copy=False)
+
+    def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
+                     ch_type='mag', baseline=None, mode='mean',
+                     layout=None, vmin=None, vmax=None, cmap='RdBu_r',
+                     sensors='k,', colorbar=True, unit=None, res=64, size=2,
+                     format='%1.1e', show_names=False, title=None,
+                     axes=None, show=True):
+        """Plot topographic maps of time-frequency intervals of TFR data
+
+        Parameters
+        ----------
+        tmin : None | float
+            The first time instant to display. If None the first time point
+            available is used.
+        tmax : None | float
+            The last time instant to display. If None the last time point
+            available is used.
+        fmin : None | float
+            The first frequency to display. If None the first frequency
+            available is used.
+        fmax : None | float
+            The last frequency to display. If None the last frequency
+            available is used.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+        baseline : tuple or list of length 2
+            The time interval to apply rescaling / baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or z-score (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline))
+            If None, no baseline correction will be performed.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout
+            file is inferred from the data; if no appropriate layout file was
+            found, the layout is automatically generated from the sensor
+            locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses).
+        colorbar : bool
+            Plot a colorbar.
+        unit : str | None
+            The unit of the channel type used for colorbar labels.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        format : str
+            String format for colorbar values.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        axes : instance of Axes | None
+            The axes to plot to. If None the axes is defined automatically.
+        show : bool
+            Call pyplot.show() at the end.
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure containing the topography.
+        """
+        from ..viz import plot_tfr_topomap
+        return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
+                                fmax=fmax, ch_type=ch_type, baseline=baseline,
+                                mode=mode, layout=layout, vmin=vmin, vmax=vmax,
+                                cmap=cmap, sensors=sensors, colorbar=colorbar,
+                                unit=unit, res=res, size=size, format=format,
+                                show_names=show_names, title=title, axes=axes,
+                                show=show)
+
+
+def tfr_morlet(epochs, freqs, n_cycles, use_fft=False,
+               return_itc=True, decim=1, n_jobs=1):
+    """Compute Time-Frequency Representation (TFR) using Morlet wavelets
+
+    Parameters
+    ----------
+    epochs : Epochs
+        The epochs.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    n_cycles : float | ndarray, shape (n_freqs,)
+        The number of cycles globally or for each frequency.
+    use_fft : bool
+        If True, use FFT-based convolution; otherwise use temporal
+        convolution.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+    decim : int
+        The decimation factor on the time axis, useful to reduce memory
+        usage.
+    n_jobs : int
+        The number of jobs to run in parallel.
+
+    Returns
+    -------
+    power : AverageTFR
+        The averaged power.
+    itc : AverageTFR
+        The intertrial coherence (ITC). Only returned if return_itc
+        is True.
+    """
+    data = epochs.get_data()
+    picks = pick_types(epochs.info, meg=True, eeg=True)
+    info = pick_info(epochs.info, picks)
+    data = data[:, picks, :]
+    power, itc = _induced_power(data, Fs=info['sfreq'], frequencies=freqs,
+                                n_cycles=n_cycles, n_jobs=n_jobs,
+                                use_fft=use_fft, decim=decim,
+                                zero_mean=True)
+    times = epochs.times[::decim].copy()
+    nave = len(data)
+    out = AverageTFR(info, power, times, freqs, nave)
+    if return_itc:
+        out = (out, AverageTFR(info, itc, times, freqs, nave))
+    return out
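Taken together, the additions above replace the array-in/array-out
`induced_power` with an object-oriented workflow. A short sketch, assuming
`epochs` is an existing mne.Epochs instance with MEG channels:

    import numpy as np
    from mne.time_frequency import tfr_morlet

    freqs = np.arange(6, 30, 3)  # frequencies of interest in Hz
    n_cycles = freqs / 4.        # one value per frequency

    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True, decim=3)
    power.apply_baseline(baseline=(None, 0), mode='logratio')
    print(power)                             # AverageTFR repr: nave, channels
    power.plot_topo(title='Induced power')   # one TFR image per sensor
    itc.plot(picks=[0])                      # single-channel TFR image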
diff --git a/mne/transforms.py b/mne/transforms.py
index 55206fd..38278ec 100644
--- a/mne/transforms.py
+++ b/mne/transforms.py
@@ -1,19 +1,20 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Christian Brodbeck <christianbrodbeck at nyu.edu>
 #
 # License: BSD (3-clause)
 
+import os
+import glob
 import numpy as np
 from numpy import sin, cos
 from scipy import linalg
 
-from .fiff import FIFF
-from .fiff.open import fiff_open
-from .fiff.tag import read_tag, find_tag
-from .fiff.tree import dir_tree_find
-from .fiff.write import (start_file, end_file, start_block, end_block,
-                         write_coord_trans, write_dig_point, write_int)
-from .utils import logger
+from .io.constants import FIFF
+from .io.open import fiff_open
+from .io.tag import read_tag
+from .io.write import start_file, end_file, write_coord_trans
+from .utils import check_fname, logger
+from .externals.six import string_types
 
 
 # transformation from anterior/left/superior coordinate system to
@@ -26,24 +27,23 @@ als_ras_trans_mm = als_ras_trans * [0.001, 0.001, 0.001, 1]
 
 def _coord_frame_name(cframe):
     """Map integers to human-readable names"""
-    types = [FIFF.FIFFV_COORD_UNKNOWN, FIFF.FIFFV_COORD_DEVICE,
-             FIFF.FIFFV_COORD_ISOTRAK, FIFF.FIFFV_COORD_HPI,
-             FIFF.FIFFV_COORD_HEAD, FIFF.FIFFV_COORD_MRI,
-             FIFF.FIFFV_MNE_COORD_MRI_VOXEL, FIFF.FIFFV_COORD_MRI_SLICE,
-             FIFF.FIFFV_COORD_MRI_DISPLAY, FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
-             FIFF.FIFFV_MNE_COORD_CTF_HEAD, FIFF.FIFFV_MNE_COORD_RAS,
-             FIFF.FIFFV_MNE_COORD_MNI_TAL, FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ,
-             FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ, -1]
-    strs = ['unknown', 'MEG device', 'isotrak', 'hpi', 'head',
-            'MRI (surface RAS)', 'MRI voxel', 'MRI slice', 'MRI display',
-            'CTF MEG device', 'CTF/4D/KIT head', 'RAS (non-zero origin)',
-            'MNI Talairach', 'Talairach (MNI z > 0)', 'Talairach (MNI z < 0)',
-            'unknown']
-    assert len(types) == len(strs)
-    for t, s in zip(types, strs):
-        if cframe == t:
-            return s
-    return strs[-1]
+    types = {FIFF.FIFFV_COORD_UNKNOWN: 'unknown',
+             FIFF.FIFFV_COORD_DEVICE: 'MEG device',
+             FIFF.FIFFV_COORD_ISOTRAK: 'isotrak',
+             FIFF.FIFFV_COORD_HPI: 'hpi',
+             FIFF.FIFFV_COORD_HEAD: 'head',
+             FIFF.FIFFV_COORD_MRI: 'MRI (surface RAS)',
+             FIFF.FIFFV_MNE_COORD_MRI_VOXEL: 'MRI voxel',
+             FIFF.FIFFV_COORD_MRI_SLICE: 'MRI slice',
+             FIFF.FIFFV_COORD_MRI_DISPLAY: 'MRI display',
+             FIFF.FIFFV_MNE_COORD_CTF_DEVICE: 'CTF MEG device',
+             FIFF.FIFFV_MNE_COORD_CTF_HEAD: 'CTF/4D/KIT head',
+             FIFF.FIFFV_MNE_COORD_RAS: 'RAS (non-zero origin)',
+             FIFF.FIFFV_MNE_COORD_MNI_TAL: 'MNI Talairach',
+             FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ: 'Talairach (MNI z > 0)',
+             FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ: 'Talairach (MNI z < 0)',
+             -1: 'unknown'}
+    return types.get(cframe, 'unknown')
 
 
 def _print_coord_trans(t, prefix='Coordinate transformation: '):
@@ -54,6 +54,24 @@ def _print_coord_trans(t, prefix='Coordinate transformation: '):
                     (tt[0], tt[1], tt[2], 1000 * tt[3]))
 
 
+def _find_trans(subject, subjects_dir=None):
+    if subject is None:
+        if 'SUBJECT' in os.environ:
+            subject = os.environ['SUBJECT']
+        else:
+            raise ValueError('SUBJECT environment variable not set')
+
+    trans_fnames = glob.glob(os.path.join(subjects_dir, subject,
+                                          '*-trans.fif'))
+    if len(trans_fnames) < 1:
+        raise RuntimeError('Could not find the transformation for '
+                           '{subject}'.format(subject=subject))
+    elif len(trans_fnames) > 1:
+        raise RuntimeError('Found multiple transformations for '
+                           '{subject}'.format(subject=subject))
+    return trans_fnames[0]
+
+
 def apply_trans(trans, pts, move=True):
     """Apply a transform matrix to an array of points
 
@@ -73,6 +91,8 @@ def apply_trans(trans, pts, move=True):
     """
     trans = np.asarray(trans)
     pts = np.asarray(pts)
+    if pts.size == 0:
+        return pts.copy()
 
     # apply rotation & scale
     if pts.ndim == 1:
@@ -248,68 +268,42 @@ def read_trans(fname):
 
     Returns
     -------
-    info : dict
-        The contents of the trans file.
-    """
-    info = {}
-    fid, tree, _ = fiff_open(fname)
-    block = dir_tree_find(tree, FIFF.FIFFB_MNE)[0]
-
-    tag = find_tag(fid, block, FIFF.FIFF_COORD_TRANS)
-    info.update(tag.data)
-
-    isotrak = dir_tree_find(block, FIFF.FIFFB_ISOTRAK)
-    isotrak = isotrak[0]
+    trans : dict
+        The transformation dictionary from the fif file.
 
-    tag = find_tag(fid, isotrak, FIFF.FIFF_MNE_COORD_FRAME)
-    if tag is None:
-        coord_frame = 0
-    else:
-        coord_frame = int(tag.data)
+    Notes
+    -----
+    The trans dictionary has the following structure:
+    trans = {'from': int, 'to': int, 'trans': numpy.ndarray <4x4>}
+    """
+    fid, tree, directory = fiff_open(fname)
 
-    info['dig'] = dig = []
-    for k in range(isotrak['nent']):
-        kind = isotrak['directory'][k].kind
-        pos = isotrak['directory'][k].pos
-        if kind == FIFF.FIFF_DIG_POINT:
-            tag = read_tag(fid, pos)
-            tag.data['coord_frame'] = coord_frame
-            dig.append(tag.data)
+    with fid:
+        for t in directory:
+            if t.kind == FIFF.FIFF_COORD_TRANS:
+                tag = read_tag(fid, t.pos)
+                break
+        else:
+            raise IOError('This does not seem to be a -trans.fif file.')
 
-    fid.close()
-    return info
+    trans = tag.data
+    return trans
 
 
-def write_trans(fname, info):
+def write_trans(fname, trans):
     """Write a -trans.fif file
 
     Parameters
     ----------
     fname : str
-        The name of the file.
-    info : dict
+        The name of the file, which should end in '-trans.fif'.
+    trans : dict
         Trans file data, as returned by read_trans.
     """
-    fid = start_file(fname)
-    start_block(fid, FIFF.FIFFB_MNE)
-
-    write_coord_trans(fid, info)
-
-    dig = info['dig']
-    if dig:
-        start_block(fid, FIFF.FIFFB_ISOTRAK)
+    check_fname(fname, 'trans', ('-trans.fif', '-trans.fif.gz'))
 
-        coord_frames = set(d['coord_frame'] for d in dig)
-        if len(coord_frames) > 1:
-            raise ValueError("dig points in different coord_frames")
-        coord_frame = coord_frames.pop()
-        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
-
-        for d in dig:
-            write_dig_point(fid, d)
-        end_block(fid, FIFF.FIFFB_ISOTRAK)
-
-    end_block(fid, FIFF.FIFFB_MNE)
+    fid = start_file(fname)
+    write_coord_trans(fid, trans)
     end_file(fid)
 
 
@@ -321,15 +315,21 @@ def invert_transform(trans):
     return itrans
 
 
-def transform_source_space_to(src, dest, trans):
-    """Transform source space data to the desired coordinate system
+_frame_dict = dict(meg=FIFF.FIFFV_COORD_DEVICE,
+                   mri=FIFF.FIFFV_COORD_MRI,
+                   head=FIFF.FIFFV_COORD_HEAD)
+
+
+def transform_surface_to(surf, dest, trans):
+    """Transform surface to the desired coordinate system
 
     Parameters
     ----------
     src : dict
-        Source space.
-    dest : int
-        Destination coordinate system (one of mne.fiff.FIFF.FIFFV_COORD_...).
+        Surface.
+    dest : 'meg' | 'mri' | 'head' | int
+        Destination coordinate system. Can be an integer for using
+        FIFF types.
     trans : dict
         Transformation.
 
@@ -338,22 +338,24 @@ def transform_source_space_to(src, dest, trans):
     res : dict
         Transformed source space. Data are modified in-place.
     """
-
-    if src['coord_frame'] == dest:
-        return src
-
-    if trans['to'] == src['coord_frame'] and trans['from'] == dest:
+    if isinstance(dest, string_types):
+        if dest not in _frame_dict:
+            raise KeyError('dest must be one of %s, not "%s"'
+                           % (list(_frame_dict.keys()), dest))
+        dest = _frame_dict[dest]  # convert to integer
+    if surf['coord_frame'] == dest:
+        return surf
+
+    if trans['to'] == surf['coord_frame'] and trans['from'] == dest:
         trans = invert_transform(trans)
-    elif trans['from'] != src['coord_frame'] or trans['to'] != dest:
+    elif trans['from'] != surf['coord_frame'] or trans['to'] != dest:
         raise ValueError('Cannot transform the source space using this '
                          'coordinate transformation')
 
-    t = trans['trans'][:3, :]
-    src['coord_frame'] = dest
-
-    src['rr'] = np.dot(np.c_[src['rr'], np.ones((src['np'], 1))], t.T)
-    src['nn'] = np.dot(np.c_[src['nn'], np.zeros((src['np'], 1))], t.T)
-    return src
+    surf['coord_frame'] = dest
+    surf['rr'] = apply_trans(trans['trans'], surf['rr'])
+    surf['nn'] = apply_trans(trans['trans'], surf['nn'], move=False)
+    return surf
 
 
 def transform_coordinates(filename, pos, orig, dest):
@@ -402,7 +404,7 @@ def transform_coordinates(filename, pos, orig, dest):
             tag = read_tag(fid, d.pos)
             trans = tag.data
             if (trans['from'] == FIFF.FIFFV_COORD_MRI and
-                trans['to'] == FIFF.FIFFV_COORD_HEAD):
+                    trans['to'] == FIFF.FIFFV_COORD_HEAD):
                 T0 = invert_transform(trans)
             elif (trans['from'] == FIFF.FIFFV_COORD_MRI and
                   trans['to'] == FIFF.FIFFV_MNE_COORD_RAS):
@@ -447,7 +449,7 @@ def transform_coordinates(filename, pos, orig, dest):
             pos = np.dot(np.dot(T2['trans'], T1['trans']), pos)
             if dest != FIFF.FIFFV_MNE_COORD_MNI_TAL:
                 if dest == FIFF.FIFFV_MNE_COORD_FS_TAL:
-                    for k in xrange(n_points):
+                    for k in range(n_points):
                         if pos[2, k] > 0:
                             pos[:, k] = np.dot(T3plus['trans'], pos[:, k])
                         else:
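The reworked transform I/O returns a plain transformation dict rather than a
measurement-info-like structure. A sketch of the round trip, assuming a
'sample-trans.fif' produced by coregistration exists locally:

    import numpy as np
    from mne.transforms import (read_trans, write_trans, invert_transform,
                                apply_trans)

    trans = read_trans('sample-trans.fif')  # dict: 'from', 'to', 'trans'
    print(trans['trans'].shape)             # (4, 4) affine matrix
    head_mri_t = invert_transform(trans)    # swaps 'from' and 'to'

    pts = apply_trans(trans['trans'], np.zeros((0, 3)))  # empty input now OK
    write_trans('copy-trans.fif', trans)    # name must end in -trans.fif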
diff --git a/mne/utils.py b/mne/utils.py
index 54488cd..05b9828 100644
--- a/mne/utils.py
+++ b/mne/utils.py
@@ -1,11 +1,11 @@
 """Some utility functions"""
+from __future__ import print_function
 
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
 import warnings
-import numpy as np
 import logging
 from distutils.version import LooseVersion
 import os
@@ -20,15 +20,20 @@ import tempfile
 import shutil
 from shutil import rmtree
 import atexit
-from math import log
+from math import log, ceil
 import json
-import urllib
-import urllib2
 import ftplib
-import urlparse
+import hashlib
+
+import numpy as np
 import scipy
 from scipy import linalg
 
+
+from .externals.six.moves import urllib
+from .externals.six import string_types, StringIO, BytesIO
+from .externals.decorator import decorator
+
 logger = logging.getLogger('mne')  # one selection here used across mne-python
 logger.propagate = False  # don't propagate (in case of multiple imports)
 
@@ -36,6 +41,112 @@ logger.propagate = False  # don't propagate (in case of multiple imports)
 ###############################################################################
 # RANDOM UTILITIES
 
+def _sort_keys(x):
+    """Sort and return keys of dict"""
+    keys = list(x.keys())  # note: not thread-safe
+    idx = np.argsort([str(k) for k in keys])
+    keys = [keys[ii] for ii in idx]
+    return keys
+
+
+def object_hash(x, h=None):
+    """Hash a reasonable python object
+
+    Parameters
+    ----------
+    x : object
+        Object to hash. Can be anything comprised of nested versions of:
+        {dict, list, tuple, ndarray, str, bytes, float, int, None}.
+    h : hashlib HASH object | None
+        Optional, object to add the hash to. None creates an MD5 hash.
+
+    Returns
+    -------
+    digest : int
+        The digest resulting from the hash.
+    """
+    if h is None:
+        h = hashlib.md5()
+    if isinstance(x, dict):
+        keys = _sort_keys(x)
+        for key in keys:
+            object_hash(key, h)
+            object_hash(x[key], h)
+    elif isinstance(x, (list, tuple)):
+        h.update(str(type(x)).encode('utf-8'))
+        for xx in x:
+            object_hash(xx, h)
+    elif isinstance(x, bytes):
+        # must come before "str" below
+        h.update(x)
+    elif isinstance(x, (string_types, float, int, type(None))):
+        h.update(str(type(x)).encode('utf-8'))
+        h.update(str(x).encode('utf-8'))
+    elif isinstance(x, np.ndarray):
+        x = np.asarray(x)
+        h.update(str(x.shape).encode('utf-8'))
+        h.update(str(x.dtype).encode('utf-8'))
+        h.update(x.tostring())
+    else:
+        raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
+    return int(h.hexdigest(), 16)
+
+
+def object_diff(a, b, pre=''):
+    """Compute all differences between two python variables
+
+    Parameters
+    ----------
+    a : object
+        Currently supported: dict, list, tuple, ndarray, int, str, bytes,
+        float, StringIO, BytesIO.
+    b : object
+        Must be same type as a.
+    pre : str
+        String to prepend to each line.
+
+    Returns
+    -------
+    diffs : str
+        A string representation of the differences.
+    """
+    out = ''
+    if type(a) != type(b):
+        out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
+    elif isinstance(a, dict):
+        k1s = _sort_keys(a)
+        k2s = _sort_keys(b)
+        m1 = set(k2s) - set(k1s)
+        if len(m1):
+            out += pre + ' a missing keys %s\n' % (m1)
+        for key in k1s:
+            if key not in k2s:
+                out += pre + ' b missing key %s\n' % key
+            else:
+                out += object_diff(a[key], b[key], pre + 'a[%s]' % repr(key))
+    elif isinstance(a, (list, tuple)):
+        if len(a) != len(b):
+            out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
+        else:
+            for xx1, xx2 in zip(a, b):
+                out += object_diff(xx1, xx2, pre=pre)
+    elif isinstance(a, (string_types, int, float, bytes)):
+        if a != b:
+            out += pre + ' value mismatch (%s, %s)\n' % (a, b)
+    elif a is None:
+        if b is not None:
+            out += pre + ' a is None, b is not (%s)\n' % (b)
+    elif isinstance(a, np.ndarray):
+        if not np.array_equal(a, b):
+            out += pre + ' array mismatch\n'
+    elif isinstance(a, (StringIO, BytesIO)):
+        if a.getvalue() != b.getvalue():
+            out += pre + ' StringIO mismatch\n'
+    else:
+        raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
+    return out
+
+
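A small sketch of the two new helpers on nested plain-Python/numpy objects:

    import numpy as np
    from mne.utils import object_hash, object_diff

    a = dict(name='raw', data=np.arange(3), tags=['meg', 'eeg'])
    b = dict(name='raw', data=np.arange(3), tags=['meg', 'grad'])

    assert object_hash(a) == object_hash(dict(a))  # content-based digest
    print(object_diff(a, b))  # reports the mismatch in the 'tags' entry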
 def check_random_state(seed):
     """Turn seed into a np.random.RandomState instance
 
@@ -57,7 +168,7 @@ def check_random_state(seed):
 def split_list(l, n):
     """split list in n (approx) equal pieces"""
     n = int(n)
-    sz = len(l) / n
+    sz = len(l) // n
     for i in range(n - 1):
         yield l[i * sz:(i + 1) * sz]
     yield l[(n - 1) * sz:]
@@ -73,7 +184,7 @@ def create_chunks(sequence, size):
     size : int
         The chunksize to be returned
     """
-    return (sequence[p:p + size] for p in xrange(0, len(sequence), size))
+    return (sequence[p:p + size] for p in range(0, len(sequence), size))
 
 
 def sum_squared(X):
@@ -93,6 +204,25 @@ def sum_squared(X):
     return np.dot(X_flat, X_flat)
 
 
+def check_fname(fname, filetype, endings):
+    """Enforce MNE filename conventions
+
+    Parameters
+    ----------
+    fname : str
+        Name of the file.
+    filetype : str
+        Type of file. e.g., ICA, Epochs etc.
+    endings : tuple
+        Acceptable endings for the filename.
+    """
+    print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
+    if not fname.endswith(endings):
+        warnings.warn('This filename does not conform to mne naming convention'
+                      's. All %s files should end with '
+                      '%s' % (filetype, print_endings))
+
+
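check_fname deliberately warns instead of raising, so nonconforming names are
still accepted. A sketch:

    import warnings
    from mne.utils import check_fname

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        check_fname('decomposition.fif', 'ICA', ('-ica.fif', '-ica.fif.gz'))
    print(len(w))  # 1: the name ends in neither -ica.fif nor -ica.fif.gz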
 class WrapStdOut(object):
     """Ridiculous class to work around how doctest captures stdout"""
     def __getattr__(self, name):
@@ -167,6 +297,41 @@ def estimate_rank(data, tol=1e-4, return_singular=False,
         return rank
 
 
+def _reject_data_segments(data, reject, flat, decim, info, tstep):
+    """Reject data segments using peak-to-peak amplitude
+    """
+    from .epochs import _is_good
+    from .io.pick import channel_indices_by_type
+
+    data_clean = np.empty_like(data)
+    idx_by_type = channel_indices_by_type(info)
+    step = int(ceil(tstep * info['sfreq']))
+    if decim is not None:
+        step = int(ceil(step / float(decim)))
+    this_start = 0
+    this_stop = 0
+    drop_inds = []
+    for first in range(0, data.shape[1], step):
+        last = first + step
+        data_buffer = data[:, first:last]
+        if data_buffer.shape[1] < (last - first):
+            break  # end of the time segment
+        if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
+                    flat, ignore_chs=info['bads']):
+            this_stop = this_start + data_buffer.shape[1]
+            data_clean[:, this_start:this_stop] = data_buffer
+            this_start += data_buffer.shape[1]
+        else:
+            logger.info("Artifact detected in [%d, %d]" % (first, last))
+            drop_inds.append((first, last))
+    data = data_clean[:, :this_stop]
+    if not data.any():
+        raise RuntimeError('No clean segment found. Please '
+                           'consider updating your rejection '
+                           'thresholds.')
+    return data, drop_inds
+
+
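The helper walks the data in windows of tstep seconds and keeps only windows
whose peak-to-peak amplitude passes _is_good. A numpy-only illustration of
that windowing logic, with a fixed threshold standing in for the per-channel-
type reject/flat dicts:

    import numpy as np

    rng = np.random.RandomState(0)
    data = rng.randn(2, 1000) * 1e-13  # 2 channels, gradiometer-like scale
    data[0, 400:420] += 5e-12          # inject an artifact
    step, thresh = 200, 4000e-15       # window length, peak-to-peak limit

    keep = []
    for first in range(0, data.shape[1], step):
        seg = data[:, first:first + step]
        if seg.shape[1] < step:
            break                      # end of the time segment
        if (seg.max(axis=1) - seg.min(axis=1)).max() < thresh:
            keep.append(seg)           # window is clean
    clean = np.concatenate(keep, axis=1)  # 4 of 5 windows survive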
 def run_subprocess(command, *args, **kwargs):
     """Run command using subprocess.Popen
 
@@ -343,50 +508,48 @@ class deprecated(object):
         return newdoc
 
 
-def verbose(function):
-    """Decorator to allow functions to override default log level
+@decorator
+def verbose(function, *args, **kwargs):
+    """Improved verbose decorator to allow functions to override log-level
 
-    Do not call this function directly to set the global verbosity level,
-    instead use set_log_level().
+    Do not call this directly to set the global verbosity level; instead use
+    set_log_level().
 
-    Parameters (to decorated function)
-    ----------------------------------
-    verbose : bool, str, int, or None
-        The level of messages to print. If a str, it can be either DEBUG,
-        INFO, WARNING, ERROR, or CRITICAL. Note that these are for
-        convenience and are equivalent to passing in logging.DEBUG, etc.
-        For bool, True is the same as 'INFO', False is the same as 'WARNING'.
-        None defaults to using the current log level [e.g., set using
-        mne.set_log_level()].
+    Parameters
+    ----------
+    function : function
+        Function to be decorated by setting the verbosity level.
+
+    Returns
+    -------
+    dec : function
+        The decorated function.
     """
     arg_names = inspect.getargspec(function).args
-    # this wrap allows decorated functions to be pickled (e.g., for parallel)
 
-    @wraps(function)
-    def dec(*args, **kwargs):
-        # Check if the first arg is "self", if it has verbose, make it default
-        if len(arg_names) > 0 and arg_names[0] == 'self':
-            default_level = getattr(args[0], 'verbose', None)
-        else:
-            default_level = None
-        verbose_level = kwargs.get('verbose', default_level)
-        if verbose_level is not None:
-            old_level = set_log_level(verbose_level, True)
-            # set it back if we get an exception
-            try:
-                ret = function(*args, **kwargs)
-            except:
-                set_log_level(old_level)
-                raise
-            set_log_level(old_level)
-            return ret
-        else:
-            return function(*args, **kwargs)
+    if len(arg_names) > 0 and arg_names[0] == 'self':
+        default_level = getattr(args[0], 'verbose', None)
+    else:
+        default_level = None
 
-    # set __wrapped__ attribute so ?? in IPython gets the right source
-    dec.__wrapped__ = function
+    if 'verbose' in arg_names:
+        verbose_level = args[arg_names.index('verbose')]
+    else:
+        verbose_level = default_level
 
-    return dec
+    if verbose_level is not None:
+        old_level = set_log_level(verbose_level, True)
+        # set it back if we get an exception
+        try:
+            ret = function(*args, **kwargs)
+        except:
+            set_log_level(old_level)
+            raise
+        set_log_level(old_level)
+        return ret
+    else:
+        ret = function(*args, **kwargs)
+        return ret
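Since the externals.decorator module binds all arguments positionally before
the wrapper runs, the per-call verbose value can be read by position. Usage is
unchanged; a sketch, relying on mne's default logging handler:

    from mne.utils import logger, verbose

    @verbose
    def make_noise(n, verbose=None):
        logger.info('generating %d samples' % n)
        return [0.] * n

    make_noise(5, verbose='WARNING')  # INFO message suppressed for this call
    make_noise(5, verbose=True)       # INFO message printed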
 
 
 def has_command_line_tools():
@@ -431,6 +594,18 @@ requires_fs_or_nibabel = np.testing.dec.skipif(not has_nibabel() and
                                                'Freesurfer')
 
 
+def has_neuromag2ft():
+    """Aux function"""
+    return 'NEUROMAG2FT_ROOT' in os.environ
+
+
+requires_neuromag2ft = np.testing.dec.skipif(not has_neuromag2ft(),
+                                             'Requires neuromag2ft')
+
+
 def requires_nibabel(vox2ras_tkr=False):
     """Aux function"""
     if vox2ras_tkr:
@@ -523,7 +698,7 @@ def requires_statsmodels(function):
     def dec(*args, **kwargs):
         skip = False
         try:
-            from tvtk.api import tvtk  # analysis:ignore
+            import statsmodels  # noqa, analysis:ignore
         except ImportError:
             skip = True
 
@@ -538,6 +713,72 @@ def requires_statsmodels(function):
     return dec
 
 
+def requires_patsy(function):
+    """
+    Decorator to skip test if patsy is not available. Patsy should be a
+    statsmodels dependency but apparently it's possible to install statsmodels
+    without it.
+    """
+    @wraps(function)
+    def dec(*args, **kwargs):
+        skip = False
+        try:
+            import patsy  # noqa, analysis:ignore
+        except ImportError:
+            skip = True
+
+        if skip is True:
+            from nose.plugins.skip import SkipTest
+            raise SkipTest('Test %s skipped, requires patsy'
+                           % function.__name__)
+        ret = function(*args, **kwargs)
+
+        return ret
+
+    return dec
+
+
+def requires_sklearn(function):
+    """Decorator to skip test if sklearn is not available"""
+    @wraps(function)
+    def dec(*args, **kwargs):
+        required_version = '0.14'
+        skip = False
+        try:
+            import sklearn
+            version = LooseVersion(sklearn.__version__)
+            if version < required_version:
+                skip = True
+        except ImportError:
+            skip = True
+
+        if skip is True:
+            from nose.plugins.skip import SkipTest
+            raise SkipTest('Test %s skipped, requires sklearn (version >= %s)'
+                           % (function.__name__, required_version))
+        ret = function(*args, **kwargs)
+
+        return ret
+
+    return dec
+
+
+def requires_good_network(function):
+    """Helper for testing"""
+
+    @wraps(function)
+    def dec(*args, **kwargs):
+        if int(os.environ.get('MNE_SKIP_NETWORK_TESTS', 0)):
+            from nose.plugins.skip import SkipTest
+            raise SkipTest('Test %s skipped, requires a good network '
+                           'connection' % function.__name__)
+        ret = function(*args, **kwargs)
+
+        return ret
+
+    return dec
+
+
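requires_sklearn moves from a simple import check to a version-aware decorator
(sklearn >= 0.14); usage in a test module stays a one-liner:

    from nose.tools import assert_true
    from mne.utils import requires_sklearn

    @requires_sklearn
    def test_needs_sklearn():
        from sklearn.svm import SVC  # only runs when sklearn >= 0.14 is there
        assert_true(SVC is not None)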
 def make_skipper_dec(module, skip_str):
     """Helper to make skipping decorators"""
     skip = False
@@ -548,7 +789,6 @@ def make_skipper_dec(module, skip_str):
     return np.testing.dec.skipif(skip, skip_str)
 
 
-requires_sklearn = make_skipper_dec('sklearn', 'scikit-learn not installed')
 requires_nitime = make_skipper_dec('nitime', 'nitime not installed')
 requires_traits = make_skipper_dec('traits', 'traits not installed')
 
@@ -561,8 +801,24 @@ def _mne_fs_not_in_env():
 requires_mne_fs_in_env = np.testing.dec.skipif(_mne_fs_not_in_env)
 
 
+def _check_mayavi_version(min_version='4.3.0'):
+    """Raise a RuntimeError if the required version of mayavi is not available
+
+    Parameters
+    ----------
+    min_version : str
+        The version string. Anything that matches
+        ``'(\\d+ | [a-z]+ | \\.)'``
+    """
+    import mayavi
+    require_mayavi = LooseVersion(min_version)
+    has_mayavi = LooseVersion(mayavi.__version__)
+    if has_mayavi < require_mayavi:
+        raise RuntimeError("Need mayavi >= %s" % require_mayavi)
+
+
 def check_sklearn_version(min_version):
-    """ Check minimum sklearn version required
+    """Check minimum sklearn version required
 
     Parameters
     ----------
@@ -582,7 +838,7 @@ def check_sklearn_version(min_version):
 
 
 def check_scipy_version(min_version):
-    """ Check minimum sklearn version required
+    """Check minimum sklearn version required
 
     Parameters
     ----------
@@ -601,6 +857,25 @@ def requires_scipy_version(min_version):
                                  % min_version)
 
 
+def _check_pytables():
+    """Helper to error if Pytables is not found"""
+    try:
+        import tables as tb
+    except ImportError:
+        raise ImportError('pytables could not be imported')
+    return tb
+
+
+def requires_pytables():
+    """Helper for testing"""
+    have = True
+    try:
+        _check_pytables()
+    except ImportError:
+        have = False
+    return np.testing.dec.skipif(not have, 'Requires pytables')
+
+
 ###############################################################################
 # LOGGING
 
@@ -626,7 +901,7 @@ def set_log_level(verbose=None, return_old_level=False):
             verbose = 'INFO'
         else:
             verbose = 'WARNING'
-    if isinstance(verbose, basestring):
+    if isinstance(verbose, string_types):
         verbose = verbose.upper()
         logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
                              WARNING=logging.WARNING, ERROR=logging.ERROR,
@@ -708,6 +983,29 @@ def get_subjects_dir(subjects_dir=None, raise_error=False):
     return subjects_dir
 
 
+def _get_extra_data_path(home_dir=None):
+    """Get path to extra data (config, tables, etc.)"""
+    if home_dir is None:
+        # this has been checked on OSX64, Linux64, and Win32
+        if 'nt' == os.name.lower():
+            home_dir = os.getenv('APPDATA')
+        else:
+            # This is a more robust way of getting the user's home folder on
+            # Linux platforms (not sure about OSX, Unix or BSD) than checking
+            # the HOME environment variable. If the user is running some sort
+            # of script that isn't launched via the command line (e.g. a script
+            # launched via Upstart) then the HOME environment variable will
+            # not be set.
+            home_dir = os.path.expanduser('~')
+
+        if home_dir is None:
+            raise ValueError('mne-python config file path could '
+                             'not be determined, please report this '
+                             'error to mne-python developers')
+
+    return op.join(home_dir, '.mne')
+
+
 def get_config_path(home_dir=None):
     """Get path to standard mne-python config file
 
@@ -722,19 +1020,11 @@ def get_config_path(home_dir=None):
     config_path : str
         The path to the mne-python configuration file. On windows, this
         will be '%APPDATA%\.mne\mne-python.json'. On every other
-        system, this will be $HOME/.mne/mne-python.json.
+        system, this will be ~/.mne/mne-python.json.
     """
-    if home_dir is None:
-        # this has been checked on OSX64, Linux64, and Win32
-        home_dir = os.getenv('APPDATA' if 'nt' == os.name.lower() else 'HOME',
-                             None)
-
-    if home_dir is None:
-        raise ValueError('mne-python config file path could '
-                         'not be determined, please report this '
-                         'error to mne-python developers')
-
-    return op.join(home_dir, '.mne', 'mne-python.json')
+    val = op.join(_get_extra_data_path(home_dir=home_dir),
+                  'mne-python.json')
+    return val
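Both the config file and any future auxiliary data now live under a single
per-user directory resolved by _get_extra_data_path; on POSIX systems:

    from mne.utils import get_config_path
    print(get_config_path())  # e.g. /home/user/.mne/mne-python.json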
 
 
 def set_cache_dir(cache_dir):
@@ -762,12 +1052,12 @@ def set_memmap_min_size(memmap_min_size):
     Parameters
     ----------
     memmap_min_size: str or None
-        Threshold on the minimum size of arrays that triggers automated memmory
+        Threshold on the minimum size of arrays that triggers automated memory
         mapping for parallel processing, e.g., '1M' for 1 megabyte.
         Use None to disable memmaping of large arrays.
     """
     if memmap_min_size is not None:
-        if not isinstance(memmap_min_size, basestring):
+        if not isinstance(memmap_min_size, string_types):
             raise ValueError('\'memmap_min_size\' has to be a string.')
         if memmap_min_size[-1] not in ['K', 'M', 'G']:
             raise ValueError('The size has to be given in kilo-, mega-, or '
@@ -780,9 +1070,11 @@ def set_memmap_min_size(memmap_min_size):
 known_config_types = [
     'MNE_BROWSE_RAW_SIZE',
     'MNE_CUDA_IGNORE_PRECISION',
+    'MNE_DATA',
     'MNE_DATASETS_MEGSIM_PATH',
     'MNE_DATASETS_SAMPLE_PATH',
     'MNE_DATASETS_SPM_FACE_PATH',
+    'MNE_DATASETS_EEGBCI_PATH',
     'MNE_LOGGING_LEVEL',
     'MNE_USE_CUDA',
     'SUBJECTS_DIR',
@@ -798,14 +1090,15 @@ known_config_wildcards = [
     ]
 
 
-def get_config(key, default=None, raise_error=False, home_dir=None):
+def get_config(key=None, default=None, raise_error=False, home_dir=None):
     """Read mne(-python) preference from env, then mne-python config
 
     Parameters
     ----------
-    key : str
+    key : None | str
         The preference key to look for. The os evironment is searched first,
         then the mne-python config file is parsed.
+        If None, all the config parameters stored in the config file are
+        returned.
     default : str | None
         Value to return if the key is not found.
     raise_error : bool
@@ -817,15 +1110,15 @@ def get_config(key, default=None, raise_error=False, home_dir=None):
 
     Returns
     -------
-    value : str | None
+    value : dict | str | None
         The preference key value.
     """
 
-    if not isinstance(key, basestring):
+    if key is not None and not isinstance(key, string_types):
         raise ValueError('key must be a string')
 
     # first, check to see if key is in env
-    if key in os.environ:
+    if key is not None and key in os.environ:
         return os.environ[key]
 
     # second, look for it in mne-python config file
@@ -836,16 +1129,19 @@ def get_config(key, default=None, raise_error=False, home_dir=None):
     else:
         with open(config_path, 'r') as fid:
             config = json.load(fid)
-        key_found = True if key in config else False
+            if key is None:
+                return config
+        key_found = key in config
         val = config.get(key, default)
 
     if not key_found and raise_error is True:
         meth_1 = 'os.environ["%s"] = VALUE' % key
         meth_2 = 'mne.utils.set_config("%s", VALUE)' % key
         raise KeyError('Key "%s" not found in environment or in the '
-                       'mne-python config file:\n%s\nTry either:\n'
-                       '    %s\nfor a temporary solution, or:\n'
-                       '    %s\nfor a permanent one. You can also '
+                       'mne-python config file: %s '
+                       'Try either:'
+                       ' %s for a temporary solution, or:'
+                       ' %s for a permanent one. You can also '
                        'set the environment variable before '
                        'running python.'
                        % (key, config_path, meth_1, meth_2))
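
The lookup order implemented above is: OS environment first, then the JSON
config file, with key=None returning the whole file-backed dict. A short
sketch (the key name is just an illustration from known_config_types):

    import os
    from mne.utils import get_config

    os.environ['MNE_LOGGING_LEVEL'] = 'WARNING'
    print(get_config('MNE_LOGGING_LEVEL'))  # env wins -> 'WARNING'
    print(get_config())                     # key=None -> dict of all stored settings
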
@@ -866,11 +1162,11 @@ def set_config(key, value, home_dir=None):
         The folder that contains the .mne config folder.
         If None, it is found automatically.
     """
-    if not isinstance(key, basestring):
+    if not isinstance(key, string_types):
         raise ValueError('key must be a string')
     # While JSON allow non-string types, we allow users to override config
     # settings using env, which are strings, so we enforce that here
-    if not isinstance(value, basestring) and value is not None:
+    if not isinstance(value, string_types) and value is not None:
         raise ValueError('value must be a string or None')
     if key not in known_config_types and not \
             any(k in key for k in known_config_wildcards):
@@ -890,8 +1186,9 @@ def set_config(key, value, home_dir=None):
     else:
         config[key] = value
 
-    # Write all values
-    directory = op.split(config_path)[0]
+    # Write all values. This may fail if the default directory is not
+    # writeable.
+    directory = op.dirname(config_path)
     if not op.isdir(directory):
         os.mkdir(directory)
     with open(config_path, 'w') as fid:
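
A corresponding write-path sketch (set_config persists to the JSON file and,
per the docstring above, only accepts string keys/values; passing None for the
value is assumed here to remove the key, matching the elided branch):

    from mne.utils import set_config, get_config

    set_config('MNE_DATA', '/tmp/mne_data')       # written to mne-python.json
    assert get_config('MNE_DATA') == '/tmp/mne_data'
    set_config('MNE_DATA', None)                  # remove the setting again
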
@@ -1011,7 +1308,7 @@ class ProgressBar(object):
         self.update(self.cur_value, mesg)
 
 
-class _HTTPResumeURLOpener(urllib.FancyURLopener):
+class _HTTPResumeURLOpener(urllib.request.FancyURLopener):
     """Create sub-class in order to overide error 206.
 
     This error means a partial file is being sent, which is ok in this case.
@@ -1032,7 +1329,7 @@ def _chunk_read(response, local_file, chunk_size=65536, initial_size=0):
 
     Parameters
     ----------
-    response: urllib.addinfourl
+    response: urllib.response.addinfourl
         Response to the download request in order to get file size.
     local_file: file
         Hard disk file where data should be written.
@@ -1047,7 +1344,7 @@ def _chunk_read(response, local_file, chunk_size=65536, initial_size=0):
     bytes_so_far = initial_size
     # Returns only amount left to download when resuming, not the size of the
     # entire file
-    total_size = int(response.info().getheader('Content-Length').strip())
+    total_size = int(response.headers['Content-Length'].strip())
     total_size += initial_size
 
     progress = ProgressBar(total_size, initial_value=bytes_so_far,
@@ -1066,14 +1363,17 @@ def _chunk_read_ftp_resume(url, temp_file_name, local_file):
     # Adapted from: https://pypi.python.org/pypi/fileDownloader.py
     # but with changes
 
-    parsed_url = urlparse.urlparse(url)
+    parsed_url = urllib.parse.urlparse(url)
     file_name = os.path.basename(parsed_url.path)
     server_path = parsed_url.path.replace(file_name, "")
-    unquoted_server_path = urllib.unquote(server_path)
+    unquoted_server_path = urllib.parse.unquote(server_path)
     local_file_size = os.path.getsize(temp_file_name)
 
     data = ftplib.FTP()
-    data.connect(parsed_url.hostname, parsed_url.port)
+    if parsed_url.port is not None:
+        data.connect(parsed_url.hostname, parsed_url.port)
+    else:
+        data.connect(parsed_url.hostname)
     data.login()
     if len(server_path) > 1:
         data.cwd(unquoted_server_path)
@@ -1087,6 +1387,7 @@ def _chunk_read_ftp_resume(url, temp_file_name, local_file):
     # chunk and will write it to file and update the progress bar
     chunk_write = lambda chunk: _chunk_write(chunk, local_file, progress)
     data.retrbinary(down_cmd, chunk_write)
+    data.close()
 
 
 def _chunk_write(chunk, local_file, progress):
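
The new port guard above is needed because urllib.parse.urlparse() returns
None for the port when the URL does not specify one, while ftplib expects
either an int or no argument at all. A standalone sketch of the same logic
(the URL is hypothetical):

    import ftplib
    from urllib.parse import urlparse

    parsed = urlparse('ftp://ftp.example.com/pub/file.dat')
    ftp = ftplib.FTP()
    if parsed.port is not None:
        ftp.connect(parsed.hostname, parsed.port)  # explicit port in URL
    else:
        ftp.connect(parsed.hostname)               # default port 21
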
@@ -1118,14 +1419,17 @@ def _fetch_file(url, file_name, print_destination=True, resume=True):
     initial_size = 0
     try:
         # Checking file size and displaying it alongside the download url
-        u = urllib2.urlopen(url)
-        file_size = int(u.info().getheaders("Content-Length")[0])
-        print 'Downloading data from %s (%s)' % (url, sizeof_fmt(file_size))
+        u = urllib.request.urlopen(url)
+        try:
+            file_size = int(u.headers['Content-Length'].strip())
+        finally:
+            del u
+        print('Downloading data from %s (%s)' % (url, sizeof_fmt(file_size)))
         # Downloading data
         if resume and os.path.exists(temp_file_name):
             local_file = open(temp_file_name, "ab")
             # Resuming HTTP and FTP downloads requires different procedures
-            scheme = urlparse.urlparse(url).scheme
+            scheme = urllib.parse.urlparse(url).scheme
             if scheme == 'http':
                 url_opener = _HTTPResumeURLOpener()
                 local_file_size = os.path.getsize(temp_file_name)
@@ -1133,35 +1437,35 @@ def _fetch_file(url, file_name, print_destination=True, resume=True):
                 url_opener.addheader("Range", "bytes=%s-" % (local_file_size))
                 try:
                     data = url_opener.open(url)
-                except urllib2.HTTPError:
+                except urllib.request.HTTPError:
                     # There is a problem that may be due to resuming, some
                     # servers may not support the "Range" header. Switch back
                     # to complete download method
-                    print 'Resuming download failed. Attempting to restart '\
-                          'downloading the entire file.'
+                    print('Resuming download failed. Attempting to restart '
+                          'downloading the entire file.')
                     _fetch_file(url, file_name, resume=False)
-                _chunk_read(data, local_file, initial_size=local_file_size)
+                else:
+                    _chunk_read(data, local_file, initial_size=local_file_size)
+                    del data  # should auto-close
             else:
                 _chunk_read_ftp_resume(url, temp_file_name, local_file)
         else:
             local_file = open(temp_file_name, "wb")
-            data = urllib2.urlopen(url)
-            _chunk_read(data, local_file, initial_size=initial_size)
+            data = urllib.request.urlopen(url)
+            try:
+                _chunk_read(data, local_file, initial_size=initial_size)
+            finally:
+                del data  # should auto-close
         # temp file must be closed prior to the move
         if not local_file.closed:
             local_file.close()
         shutil.move(temp_file_name, file_name)
         if print_destination is True:
             stdout.write('File saved as %s.\n' % file_name)
-    except urllib2.HTTPError, e:
-        print 'Error while fetching file %s.' \
-            ' Dataset fetching aborted.' % url
-        print "HTTP Error:", e, url
-        raise
-    except urllib2.URLError, e:
-        print 'Error while fetching file %s.' \
-            ' Dataset fetching aborted.' % url
-        print "URL Error:", e, url
+    except Exception as e:
+        logger.error('Error while fetching file %s.'
+                     ' Dataset fetching aborted.' % url)
+        logger.error("Error: %s", e)
         raise
     finally:
         if local_file is not None:
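
Beyond the print() and exception-syntax changes, the hunk above also switches
from urllib2 to urllib.request, where Content-Length is read from the
response's headers mapping rather than via getheaders(). A sketch
(hypothetical URL; network access required):

    from urllib.request import urlopen

    u = urlopen('https://example.com/data.bin')
    try:
        file_size = int(u.headers['Content-Length'].strip())
    finally:
        u.close()
    print('Download size: %d bytes' % file_size)
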
@@ -1171,13 +1475,14 @@ def _fetch_file(url, file_name, print_destination=True, resume=True):
 
 def sizeof_fmt(num):
     """Turn number of bytes into human-readable str"""
-    unit_list = zip(['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'],
-                    [0, 0, 1, 2, 2, 2])
+    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
+    decimals = [0, 0, 1, 2, 2, 2]
     """Human friendly file size"""
     if num > 1:
-        exponent = min(int(log(num, 1024)), len(unit_list) - 1)
+        exponent = min(int(log(num, 1024)), len(units) - 1)
         quotient = float(num) / 1024 ** exponent
-        unit, num_decimals = unit_list[exponent]
+        unit = units[exponent]
+        num_decimals = decimals[exponent]
         format_string = '{0:.%sf} {1}' % (num_decimals)
         return format_string.format(quotient, unit)
     if num == 0:
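
With the units/decimals split above, the formatting behaves e.g. as follows
(a sketch; assumes the elided tail of the function is unchanged):

    from mne.utils import sizeof_fmt

    print(sizeof_fmt(2048))       # exponent 1, 0 decimals -> '2 kB'
    print(sizeof_fmt(123456789))  # exponent 2, 1 decimal  -> '117.7 MB'
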
@@ -1188,17 +1493,41 @@ def sizeof_fmt(num):
 
 def _url_to_local_path(url, path):
     """Mirror a url path in a local destination (keeping folder structure)"""
-    destination = urlparse.urlparse(url).path
+    destination = urllib.parse.urlparse(url).path
     # First char should be '/', and it needs to be discarded
     if len(destination) < 2 or destination[0] != '/':
         raise ValueError('Invalid URL')
-    destination = os.path.join(path, urllib2.url2pathname(destination)[1:])
+    destination = os.path.join(path,
+                               urllib.request.url2pathname(destination)[1:])
     return destination
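
A sketch of the mirroring behaviour on a POSIX system (private helper, shown
only for illustration; the URL is hypothetical):

    from mne.utils import _url_to_local_path

    dest = _url_to_local_path('https://example.com/a/b/file.fif', '/tmp/mirror')
    print(dest)  # -> '/tmp/mirror/a/b/file.fif'
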
 
 
+def _get_stim_channel(stim_channel):
+    """Helper to determine the appropriate stim_channel"""
+    if stim_channel is not None:
+        if not isinstance(stim_channel, list):
+            if not isinstance(stim_channel, string_types):
+                raise ValueError('stim_channel must be a str, list, or None')
+            stim_channel = [stim_channel]
+        if not all([isinstance(s, string_types) for s in stim_channel]):
+            raise ValueError('stim_channel list must contain all strings')
+        return stim_channel
+
+    stim_channel = list()
+    ch_count = 0
+    ch = get_config('MNE_STIM_CHANNEL')
+    while ch is not None:
+        stim_channel.append(ch)
+        ch_count += 1
+        ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
+    if ch_count == 0:
+        stim_channel = ['STI 014']
+    return stim_channel
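
The loop above lets users chain trigger channels via MNE_STIM_CHANNEL plus
numbered MNE_STIM_CHANNEL_1, MNE_STIM_CHANNEL_2, ... settings, falling back to
the Neuromag default 'STI 014'. A sketch using environment overrides (which
get_config() consults first):

    import os
    from mne.utils import _get_stim_channel  # private helper

    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    os.environ['MNE_STIM_CHANNEL_1'] = 'STI 015'
    print(_get_stim_channel(None))  # -> ['STI 014', 'STI 015']
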
+
+
 def _check_fname(fname, overwrite):
     """Helper to check for file existence"""
-    if not isinstance(fname, basestring):
+    if not isinstance(fname, string_types):
         raise TypeError('file name is not a string')
     if op.isfile(fname):
         if not overwrite:
@@ -1211,12 +1540,12 @@ def _check_fname(fname, overwrite):
 def _check_subject(class_subject, input_subject, raise_error=True):
     """Helper to get subject name from class"""
     if input_subject is not None:
-        if not isinstance(input_subject, basestring):
+        if not isinstance(input_subject, string_types):
             raise ValueError('subject input must be a string')
         else:
             return input_subject
     elif class_subject is not None:
-        if not isinstance(class_subject, basestring):
+        if not isinstance(class_subject, string_types):
             raise ValueError('Neither subject input nor class subject '
                              'attribute was a string')
         else:
@@ -1259,7 +1588,7 @@ def _clean_names(names, remove_whitespace=False, before_dash=True):
 
     Usage
     -----
-    # for new VectorView (only inside layout) 
+    # for new VectorView (only inside layout)
     ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
 
     # for CTF
@@ -1275,3 +1604,30 @@ def _clean_names(names, remove_whitespace=False, before_dash=True):
         cleaned.append(name)
 
     return cleaned
+
+
+def clean_warning_registry():
+    """Safe way to reset warnings """
+    warnings.resetwarnings()
+    reg = "__warningregistry__"
+    bad_names = ['MovedModule']  # this is in six.py, and causes bad things
+    for mod in list(sys.modules.values()):
+        if mod.__class__.__name__ not in bad_names and hasattr(mod, reg):
+            getattr(mod, reg).clear()
+
+
+def _check_type_picks(picks):
+    """helper to guarantee type integrity of picks"""
+    err_msg = 'picks must be None, a list or an array of integers'
+    if picks is None:
+        pass
+    elif isinstance(picks, list):
+        if not all([isinstance(i, int) for i in picks]):
+            raise ValueError(err_msg)
+        picks = np.array(picks)
+    elif isinstance(picks, np.ndarray):
+        if not picks.dtype.kind == 'i':
+            raise ValueError(err_msg)
+    else:
+        raise ValueError(err_msg)
+    return picks
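
Usage sketch for the new picks guard (private helper, for illustration):

    import numpy as np
    from mne.utils import _check_type_picks

    print(_check_type_picks([0, 1, 2]))   # list of ints -> [0 1 2] as ndarray
    print(_check_type_picks(None))        # None passes through unchanged
    # _check_type_picks(np.array([0.5])) would raise ValueError (non-integer dtype)
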
diff --git a/mne/viz.py b/mne/viz.py
deleted file mode 100644
index 758ad06..0000000
--- a/mne/viz.py
+++ /dev/null
@@ -1,3460 +0,0 @@
-"""Functions to plot M/EEG data e.g. topographies
-"""
-
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
-#          Denis Engemann <d.engemann at fz-juelich.de>
-#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Eric Larson <larson.eric.d at gmail.com>
-#
-# License: Simplified BSD
-import os
-import warnings
-from itertools import cycle
-from functools import partial
-from copy import deepcopy
-import math
-from distutils.version import LooseVersion
-
-import difflib
-import tempfile
-import webbrowser
-
-import copy
-import inspect
-import numpy as np
-from scipy import linalg
-from scipy import ndimage
-from matplotlib import delaunay
-from warnings import warn
-from collections import deque
-
-# XXX : don't import pyplot here or you will break the doc
-
-from .fixes import tril_indices, Counter
-from .baseline import rescale
-from .utils import (get_subjects_dir, get_config, set_config, _check_subject,
-                    logger, verbose)
-from .fiff import show_fiff, FIFF
-from .fiff.pick import channel_type, pick_types
-from .fiff.proj import make_projector, setup_proj
-from .fixes import normalize_colors
-from .utils import create_chunks, _clean_names
-from .time_frequency import compute_raw_psd
-
-COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
-          '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
-
-
-DEFAULTS = dict(color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
-                           emg='k', ref_meg='steelblue', misc='k', stim='k',
-                           resp='k', chpi='k', exci='k', ias='k', syst='k'),
-                units=dict(eeg='uV', grad='fT/cm', mag='fT', misc='AU'),
-                scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0),
-                scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
-                                       eog=150e-6, ecg=5e-4, emg=1e-3,
-                                       ref_meg=1e-12, misc=1e-3,
-                                       stim=1, resp=1, chpi=1e-4, exci=1,
-                                       ias=1, syst=1),
-                ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
-                          eeg=(-200., 200.), misc=(-5., 5.)),
-                titles=dict(eeg='EEG', grad='Gradiometers',
-                            mag='Magnetometers', misc='misc'))
-
-
-def _mutable_defaults(*mappings):
-    """ To avoid dicts as default keyword arguments
-
-    Use this function instead to resolve default dict values.
-    Example usage:
-    scalings, units = _mutable_defaults(('scalings', scalings),
-                                        ('units', units))
-    """
-    out = []
-    for k, v in mappings:
-        this_mapping = DEFAULTS[k]
-        if v is not None:
-            this_mapping = deepcopy(DEFAULTS[k])
-            this_mapping.update(v)
-        out += [this_mapping]
-    return out
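
Usage sketch for the helper above (given the DEFAULTS/_mutable_defaults
definitions just shown; note that each mapping is passed as its own 2-tuple):

    scalings, units = _mutable_defaults(('scalings', None),
                                        ('units', dict(eeg='V')))
    # scalings is DEFAULTS['scalings'] itself (no copy when no override given);
    # units is a deep copy of DEFAULTS['units'] with eeg replaced by 'V'
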
-
-
-def _check_delayed_ssp(container):
-    """ Aux function to be used for interactive SSP selection
-    """
-    if container.proj is True:
-        raise RuntimeError('Projs are already applied. Please initialize'
-                           ' the data with proj set to False.')
-    elif len(container.info['projs']) < 1:
-        raise RuntimeError('No projs found in evoked.')
-
-
-def tight_layout(pad=1.2, h_pad=None, w_pad=None):
-    """ Adjust subplot parameters to give specified padding.
-
-    Note. For plotting please use this function instead of plt.tight_layout
-
-    Parameters
-    ----------
-    pad : float
-        padding between the figure edge and the edges of subplots, as a
-        fraction of the font-size.
-    h_pad, w_pad : float
-        padding (height/width) between edges of adjacent subplots.
-        Defaults to `pad_inches`.
-    """
-    import matplotlib.pyplot as plt
-    try:
-        fig = plt.gcf()
-        fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
-        fig.canvas.draw()
-    except:
-        msg = ('Matplotlib function \'tight_layout\'%s.'
-               ' Skipping subplot adjustment.')
-        if not hasattr(plt, 'tight_layout'):
-            case = ' is not available'
-        else:
-            case = (' is not supported by your backend: `%s`'
-                    % plt.get_backend())
-        warn(msg % case)
-
-
-def _plot_topo(info=None, times=None, show_func=None, layout=None,
-               decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
-               border='none', cmap=None, layout_scale=None, title=None,
-               x_label=None, y_label=None, vline=None):
-    """Helper function to plot on sensor layout"""
-    import matplotlib.pyplot as plt
-    orig_facecolor = plt.rcParams['axes.facecolor']
-    orig_edgecolor = plt.rcParams['axes.edgecolor']
-    try:
-        if cmap is None:
-            cmap = plt.cm.jet
-        ch_names = _clean_names(info['ch_names'])
-        plt.rcParams['axes.facecolor'] = 'k'
-        fig = plt.figure(facecolor='k')
-        pos = layout.pos.copy()
-        tmin, tmax = times[0], times[-1]
-        if colorbar:
-            pos[:, :2] *= layout_scale
-            plt.rcParams['axes.edgecolor'] = 'k'
-            norm = normalize_colors(vmin=vmin, vmax=vmax)
-            sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
-            sm.set_array(np.linspace(vmin, vmax))
-            ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg='k')
-            cb = fig.colorbar(sm, ax=ax)
-            cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
-            plt.setp(cb_yticks, color='w')
-        plt.rcParams['axes.edgecolor'] = border
-        for idx, name in enumerate(layout.names):
-            if name in ch_names:
-                ax = plt.axes(pos[idx], axisbg='k')
-                ch_idx = ch_names.index(name)
-                # hack to include channel idx and name, to use in callback
-                ax.__dict__['_mne_ch_name'] = name
-                ax.__dict__['_mne_ch_idx'] = ch_idx
-
-                if layout.kind == 'Vectorview-all' and ylim is not None:
-                    this_type = {'mag': 0, 'grad': 1}[channel_type(info,
-                                                                   ch_idx)]
-                    ylim_ = [v[this_type] if _check_vlim(v) else
-                             v for v in ylim]
-                else:
-                    ylim_ = ylim
-
-                show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
-                          vmax=vmax, ylim=ylim_)
-
-                if ylim_ and not any(v is None for v in ylim_):
-                    plt.ylim(*ylim_)
-                plt.xticks([], ())
-                plt.yticks([], ())
-
-        # register callback
-        callback = partial(_plot_topo_onpick, show_func=show_func, tmin=tmin,
-                           tmax=tmax, vmin=vmin, vmax=vmax, ylim=ylim,
-                           colorbar=colorbar, title=title, x_label=x_label,
-                           y_label=y_label,
-                           vline=vline)
-
-        fig.canvas.mpl_connect('pick_event', callback)
-        if title is not None:
-            plt.figtext(0.03, 0.9, title, color='w', fontsize=19)
-
-    finally:
-        # Revert global pyplot config
-        plt.rcParams['axes.facecolor'] = orig_facecolor
-        plt.rcParams['axes.edgecolor'] = orig_edgecolor
-
-    return fig
-
-
-def _plot_topo_onpick(event, show_func=None, tmin=None, tmax=None,
-                      vmin=None, vmax=None, ylim=None, colorbar=False,
-                      title=None, x_label=None, y_label=None, vline=None):
-    """Onpick callback that shows a single channel in a new figure"""
-
-    # make sure that the swipe gesture in OS-X doesn't open many figures
-    if event.mouseevent.inaxes is None or event.mouseevent.button != 1:
-        return
-
-    artist = event.artist
-    try:
-        import matplotlib.pyplot as plt
-        ch_idx = artist.axes._mne_ch_idx
-        fig, ax = plt.subplots(1)
-        ax.set_axis_bgcolor('k')
-        show_func(plt, ch_idx, tmin, tmax, vmin, vmax, ylim=ylim,
-                  vline=vline)
-        if colorbar:
-            plt.colorbar()
-        if title is not None:
-            plt.title(title + ' ' + artist.axes._mne_ch_name)
-        else:
-            plt.title(artist.axes._mne_ch_name)
-        if x_label is not None:
-            plt.xlabel(x_label)
-        if y_label is not None:
-            plt.ylabel(y_label)
-    except Exception as err:
-        # matplotlib silently ignores exceptions in event handlers, so we print
-        # it here to know what went wrong
-        print err
-        raise err
-
-
-def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, tfr=None,
-                freq=None, vline=None):
-    """ Aux function to show time-freq map on topo """
-    extent = (tmin, tmax, freq[0], freq[-1])
-    ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
-              vmin=vmin, vmax=vmax, picker=True)
-
-
-def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
-                     times, vline=None):
-    """ Aux function to show time series on topo """
-    picker_flag = False
-    for data_, color_ in zip(data, color):
-        if not picker_flag:
-            # use large tol for picker so we can click anywhere in the axes
-            ax.plot(times, data_[ch_idx], color_, picker=1e9)
-            picker_flag = True
-        else:
-            ax.plot(times, data_[ch_idx], color_)
-    if vline:
-        import matplotlib.pyplot as plt
-        [plt.axvline(x, color='w', linewidth=0.5) for x in vline]
-
-
-def _check_vlim(vlim):
-    """AUX function"""
-    return not np.isscalar(vlim) and vlim is not None
-
-
-def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
-              border='none', ylim=None, scalings=None, title=None, proj=False,
-              vline=[0.0]):
-    """Plot 2D topography of evoked responses.
-
-    Clicking on the plot of an individual sensor opens a new figure showing
-    the evoked response for the selected sensor.
-
-    Parameters
-    ----------
-    evoked : list of Evoked | Evoked
-        The evoked response to plot.
-    layout : instance of Layout | None
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    layout_scale: float
-        Scaling factor for adjusting the relative size of the layout
-        on the canvas
-    color : list of color objects | color object | None
-        Everything matplotlib accepts to specify colors. If not list-like,
-        the color specified will be repeated. If None, colors are
-        automatically drawn.
-    border : str
-        matplotlib borders style to be used for each sensor plot.
-    scalings : dict | None
-        The scalings of the channel types to be applied for plotting. If None,
-        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
-    ylim : dict | None
-        ylim for plots. The value determines the upper and lower subplot
-        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
-        mag, grad, misc. If None, the ylim parameter for each channel is
-        determined by the maximum absolute peak.
-    proj : bool | 'interactive'
-        If true SSP projections are applied before display. If 'interactive',
-        a check box for reversible selection of SSP projection vectors will
-        be shown.
-    title : str
-        Title of the figure.
-    vline : list of floats | None
-        The values at which to show a vertical line.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        Images of evoked responses at sensor locations
-    """
-
-    if type(evoked) not in (tuple, list):
-        evoked = [evoked]
-
-    if type(color) in (tuple, list):
-        if len(color) != len(evoked):
-            raise ValueError('Lists of evoked objects and colors'
-                             ' must have the same length')
-    elif color is None:
-        colors = ['w'] + COLORS
-        stop = (slice(len(evoked)) if len(evoked) < len(colors)
-                else slice(len(colors)))
-        color = cycle(colors[stop])
-        if len(evoked) > len(colors):
-            warnings.warn('More evoked objects than colors available. '
-                          'You should pass a list of unique colors.')
-    else:
-        color = cycle([color])
-
-    times = evoked[0].times
-    if not all([(e.times == times).all() for e in evoked]):
-        raise ValueError('All evoked.times must be the same')
-
-    info = evoked[0].info
-    ch_names = evoked[0].ch_names
-    if not all([e.ch_names == ch_names for e in evoked]):
-        raise ValueError('All evoked.picks must be the same')
-    ch_names = _clean_names(ch_names)
-
-    if layout is None:
-        from .layouts.layout import find_layout
-        layout = find_layout(info)
-
-    # XXX. at the moment we are committed to 1- / 2-sensor-types layouts
-    chs_in_layout = set(layout.names) & set(ch_names)
-    types_used = set(channel_type(info, ch_names.index(ch))
-                     for ch in chs_in_layout)
-    # one check for all vendors
-    meg_types = ['mag'], ['grad'], ['mag', 'grad'],
-    is_meg = any(types_used == set(k) for k in meg_types)
-    if is_meg:
-        types_used = list(types_used)[::-1]  # -> restore kwarg order
-        picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
-                 for kk in types_used]
-    else:
-        types_used_kwargs = dict((t, True) for t in types_used)
-        picks = [pick_types(info, meg=False, **types_used_kwargs)]
-    assert isinstance(picks, list) and len(types_used) == len(picks)
-
-    scalings = _mutable_defaults(('scalings', scalings))[0]
-    evoked = [e.copy() for e in evoked]
-    for e in evoked:
-        for pick, t in zip(picks, types_used):
-            e.data[pick] = e.data[pick] * scalings[t]
-
-    if proj is True and all([e.proj is not True for e in evoked]):
-        evoked = [e.apply_proj() for e in evoked]
-    elif proj == 'interactive':  # let it fail early.
-        for e in evoked:
-            _check_delayed_ssp(e)
-
-    plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
-                       color=color, times=times, vline=vline)
-
-    if ylim is None:
-        set_ylim = lambda x: np.abs(x).max()
-        ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
-        ymax = np.array(ylim_)
-        ylim_ = (-ymax, ymax)
-    elif isinstance(ylim, dict):
-        ylim_ = _mutable_defaults(('ylim', ylim))[0]
-        ylim_ = [ylim_[kk] for kk in types_used]
-        ylim_ = zip(*[np.array(yl) for yl in ylim_])
-    else:
-        raise ValueError('ylim must be None or a dict')
-
-    fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
-                     decim=1, colorbar=False, ylim=ylim_, cmap=None,
-                     layout_scale=layout_scale, border=border, title=title,
-                     x_label='Time (s)', vline=vline)
-
-    if proj == 'interactive':
-        for e in evoked:
-            _check_delayed_ssp(e)
-        params = dict(evokeds=evoked, times=times,
-                      plot_update_proj_callback=_plot_update_evoked_topo,
-                      projs=evoked[0].info['projs'], fig=fig)
-        _draw_proj_checkbox(None, params)
-
-    return fig
-
-
-def _plot_update_evoked_topo(params, bools):
-    """Helper function to update topo sensor plots"""
-    evokeds, times, fig = [params[k] for k in 'evokeds', 'times', 'fig']
-
-    projs = [proj for ii, proj in enumerate(params['projs'])
-             if ii in np.where(bools)[0]]
-
-    params['proj_bools'] = bools
-    evokeds = [e.copy() for e in evokeds]
-    for e in evokeds:
-        e.info['projs'] = []
-        e.add_proj(projs)
-        e.apply_proj()
-
-    # make sure to only modify the time courses, not the ticks
-    axes = fig.get_axes()
-    n_lines = len(axes[0].lines)
-    n_diff = len(evokeds) - n_lines
-    ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
-    for ax in axes:
-        lines = ax.lines[ax_slice]
-        for line, evoked in zip(lines, evokeds):
-            line.set_data(times, evoked.data[ax._mne_ch_idx])
-
-    fig.canvas.draw()
-
-
-def plot_topo_tfr(epochs, tfr, freq, layout=None, colorbar=True, vmin=None,
-                  vmax=None, cmap=None, layout_scale=0.945, title=None):
-    """Plot time-frequency data on sensor layout
-
-    Clicking on the time-frequency map of an individual sensor opens a
-    new figure showing the time-frequency map of the selected sensor.
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs used to generate the power
-    tfr : 3D-array shape=(n_sensors, n_freqs, n_times)
-        The time-frequency data. Must have the same channels as Epochs.
-    freq : array-like
-        Frequencies of interest as passed to induced_power
-    layout : instance of Layout | None
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    colorbar : bool
-        If true, colorbar will be added to the plot
-    vmin : float
-        Minimum value mapped to lowermost color
-    vmax : float
-        Maximum value mapped to uppermost color
-    cmap : instance of matplotlib.pyplot.colormap
-        Colors to be mapped to the values
-    layout_scale : float
-        Scaling factor for adjusting the relative size of the layout
-        on the canvas
-    title : str
-        Title of the figure.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        Images of time-frequency data at sensor locations
-    """
-
-    if vmin is None:
-        vmin = tfr.min()
-    if vmax is None:
-        vmax = tfr.max()
-
-    if layout is None:
-        from .layouts.layout import find_layout
-        layout = find_layout(epochs.info)
-
-    tfr_imshow = partial(_imshow_tfr, tfr=tfr.copy(), freq=freq)
-
-    fig = _plot_topo(info=epochs.info, times=epochs.times,
-                     show_func=tfr_imshow, layout=layout, border='w',
-                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                     layout_scale=layout_scale, title=title,
-                     x_label='Time (s)', y_label='Frequency (Hz)')
-
-    return fig
-
-
-def plot_topo_power(epochs, power, freq, layout=None, baseline=None,
-                    mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,
-                    cmap=None, layout_scale=0.945, dB=True, title=None):
-    """Plot induced power on sensor layout
-
-    Clicking on the induced power map of an individual sensor opens a
-    new figure showing the induced power map of the selected sensor.
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs used to generate the power
-    power : 3D-array
-        First return value from mne.time_frequency.induced_power
-    freq : array-like
-        Frequencies of interest as passed to induced_power
-    layout : instance of Layout | None
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    baseline : tuple or list of length 2
-        The time interval to apply rescaling / baseline correction.
-        If None do not apply it. If baseline is (a, b)
-        the interval is between "a (s)" and "b (s)".
-        If a is None the beginning of the data is used
-        and if b is None then b is set to the end of the interval.
-        If baseline is equal to (None, None) all the time
-        interval is used.
-    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
-        Do baseline correction with ratio (power is divided by mean
-        power during baseline) or z-score (power is divided by standard
-        deviation of power during baseline after subtracting the mean,
-        power = [power - mean(power_baseline)] / std(power_baseline))
-        If None, no baseline correction will be performed.
-    decim : integer
-        Increment for selecting each nth time slice
-    colorbar : bool
-        If true, colorbar will be added to the plot
-    vmin : float
-        Minimum value mapped to lowermost color
-    vmax : float
-        Maximum value mapped to uppermost color
-    cmap : instance of matplotlib.pyplot.colormap
-        Colors to be mapped to the values
-    layout_scale : float
-        Scaling factor for adjusting the relative size of the layout
-        on the canvas
-    dB : bool
-        If True, log10 will be applied to the data.
-    title : str
-        Title of the figure.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        Images of induced power at sensor locations
-    """
-    times = epochs.times[::decim] * 1e3
-    if mode is not None:
-        if baseline is None:
-            baseline = epochs.baseline
-        power = rescale(power.copy(), times, baseline, mode)
-    if dB:
-        power = 20 * np.log10(power)
-    if vmin is None:
-        vmin = power.min()
-    if vmax is None:
-        vmax = power.max()
-    if layout is None:
-        from .layouts.layout import find_layout
-        layout = find_layout(epochs.info)
-
-    power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)
-
-    fig = _plot_topo(info=epochs.info, times=times,
-                     show_func=power_imshow, layout=layout, decim=decim,
-                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                     layout_scale=layout_scale, title=title, border='w',
-                     x_label='Time (s)', y_label='Frequency (Hz)')
-
-    return fig
-
-
-def plot_topo_phase_lock(epochs, phase, freq, layout=None, baseline=None,
-                         mode='mean', decim=1, colorbar=True, vmin=None,
-                         vmax=None, cmap=None, layout_scale=0.945,
-                         title=None):
-    """Plot phase locking values (PLV) on sensor layout
-
-    Clicking on the PLV map of an individual sensor opens a new figure
-    showing the PLV map of the selected sensor.
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs used to generate the phase locking value
-    phase : 3D-array
-        Phase locking value, second return value from
-        mne.time_frequency.induced_power.
-    freq : array-like
-        Frequencies of interest as passed to induced_power
-    layout : instance of Layout | None
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    baseline : tuple or list of length 2
-        The time interval to apply rescaling / baseline correction.
-        If None do not apply it. If baseline is (a, b)
-        the interval is between "a (s)" and "b (s)".
-        If a is None the beginning of the data is used
-        and if b is None then b is set to the end of the interval.
-        If baseline is equal to (None, None) all the time
-        interval is used.
-    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
-        Do baseline correction with ratio (phase is divided by mean
-        phase during baseline) or z-score (phase is divided by standard
-        deviation of phase during baseline after subtracting the mean,
-        phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
-        If None, no baseline correction will be performed.
-    decim : integer
-        Increment for selecting each nth time slice
-    colorbar : bool
-        If true, colorbar will be added to the plot
-    vmin : float
-        Minimum value mapped to lowermost color
-    vmax : float
-        Maximum value mapped to uppermost color
-    cmap : instance of matplotlib.pyplot.colormap
-        Colors to be mapped to the values
-    layout_scale : float
-        Scaling factor for adjusting the relative size of the layout
-        on the canvas.
-    title : str
-        Title of the figure.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        Phase lock images at sensor locations
-    """
-    times = epochs.times[::decim] * 1e3
-    if mode is not None:
-        if baseline is None:
-            baseline = epochs.baseline
-        phase = rescale(phase.copy(), times, baseline, mode)
-    if vmin is None:
-        vmin = phase.min()
-    if vmax is None:
-        vmax = phase.max()
-    if layout is None:
-        from .layouts.layout import find_layout
-        layout = find_layout(epochs.info)
-
-    phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)
-
-    fig = _plot_topo(info=epochs.info, times=times,
-                     show_func=phase_imshow, layout=layout, decim=decim,
-                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                     layout_scale=layout_scale, title=title, border='w',
-                     x_label='Time (s)', y_label='Frequency (Hz)')
-
-    return fig
-
-
-def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
-                     data=None, epochs=None, sigma=None,
-                     order=None, scalings=None, vline=None):
-    """Aux function to plot erfimage on sensor topography"""
-
-    this_data = data[:, ch_idx, :].copy()
-    ch_type = channel_type(epochs.info, ch_idx)
-    if ch_type not in scalings:
-        raise KeyError('%s channel type not in scalings' % ch_type)
-    this_data *= scalings[ch_type]
-
-    if callable(order):
-        order = order(epochs.times, this_data)
-
-    if order is not None:
-        this_data = this_data[order]
-
-    this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
-
-    ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
-              origin='lower', vmin=vmin, vmax=vmax, picker=True)
-
-
-def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
-                           vmax=None, colorbar=True, order=None, cmap=None,
-                           layout_scale=.95, title=None, scalings=None):
-    """Plot Event Related Potential / Fields image on topographies
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs.
-    layout: instance of Layout
-        System specific sensor positions.
-    sigma : float
-        The standard deviation of the Gaussian smoothing to apply along
-        the epoch axis to apply in the image.
-    vmin : float
-        The min value in the image. The unit is uV for EEG channels,
-        fT for magnetometers and fT/cm for gradiometers.
-    vmax : float
-        The max value in the image. The unit is uV for EEG channels,
-        fT for magnetometers and fT/cm for gradiometers.
-    colorbar : bool
-        Whether to display a colorbar.
-    order : None | array of int | callable
-        If not None, order is used to reorder the epochs on the y-axis
-        of the image. If it's an array of int it should be of length
-        the number of good epochs. If it's a callable the arguments
-        passed are the times vector and the data as 2d array
-        (data.shape[1] == len(times)).
-    cmap : instance of matplotlib.pyplot.colormap
-        Colors to be mapped to the values.
-    layout_scale: float
-        scaling factor for adjusting the relative size of the layout
-        on the canvas.
-    title : str
-        Title of the figure.
-    scalings : dict | None
-        The scalings of the channel types to be applied for plotting. If
-        None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
-
-    Returns
-    -------
-    fig : instance of matplotlib figure
-        Figure distributing one image per channel across sensor topography.
-    """
-    scalings = _mutable_defaults(('scalings', scalings))[0]
-    data = epochs.get_data()
-    if vmin is None:
-        vmin = data.min()
-    if vmax is None:
-        vmax = data.max()
-    if layout is None:
-        from .layouts.layout import find_layout
-        layout = find_layout(epochs.info)
-
-    erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
-                         data=data, epochs=epochs, sigma=sigma)
-
-    fig = _plot_topo(info=epochs.info, times=epochs.times,
-                     show_func=erf_imshow, layout=layout, decim=1,
-                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                     layout_scale=layout_scale, title=title,
-                     border='w', x_label='Time (s)', y_label='Epoch')
-
-    return fig
-
-
-def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
-                        vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
-                        scale=None, unit=None, res=256, size=1, format='%3.1f',
-                        proj=False, show=True):
-    """Plot topographic maps of specific time points of evoked data
-
-    Parameters
-    ----------
-    evoked : Evoked
-        The Evoked object.
-    times : float | array of floats | None.
-        The time point(s) to plot. If None, 10 topographies will be shown
-        with a regular time spacing between the first and last time instant.
-    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
-        The channel type to plot. For 'grad', the gradiometers are collected in
-        pairs and the RMS for each pair is plotted.
-    layout : None | Layout
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    vmax : scalar
-        The value specifying the range of the color scale (-vmax to +vmax). If
-        None, the largest absolute value in the data is used.
-    cmap : matplotlib colormap
-        Colormap.
-    sensors : bool | str
-        Add markers for sensor locations to the plot. Accepts matplotlib plot
-        format string (e.g., 'r+' for red plusses).
-    colorbar : bool
-        Plot a colorbar.
-    scale : float | None
-        Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
-        for grad and 1e15 for mag.
-    unit : str | None
-        The unit of the channel type used for colorbar labels. If
-        scale is None, the unit is determined automatically.
-    res : int
-        The resolution of the topomap image (n pixels along each side).
-    size : float
-        Side length per topomap in inches.
-    format : str
-        String format for colorbar values.
-    proj : bool | 'interactive'
-        If true SSP projections are applied before display. If 'interactive',
-        a check box for reversible selection of SSP projection vectors will
-        be shown.
-    show : bool
-        Call pyplot.show() at the end.
-    """
-    import matplotlib.pyplot as plt
-
-    if scale is None:
-        if ch_type.startswith('planar'):
-            key = 'grad'
-        else:
-            key = ch_type
-        scale = DEFAULTS['scalings'][key]
-        unit = DEFAULTS['units'][key]
-
-    if times is None:
-        times = np.linspace(evoked.times[0], evoked.times[-1], 10)
-    elif np.isscalar(times):
-        times = [times]
-    if len(times) > 20:
-        raise RuntimeError('Too many plots requested. Please pass fewer '
-                           'than 20 time instants.')
-    tmin, tmax = evoked.times[[0, -1]]
-    for t in times:
-        if not tmin <= t <= tmax:
-            raise ValueError('Times should be between %0.3f and %0.3f. (Got '
-                             '%0.3f).' % (tmin, tmax, t))
-
-    picks, pos, merge_grads = _prepare_topo_plot(evoked, ch_type, layout)
-
-    n = len(times)
-    nax = n + bool(colorbar)
-    width = size * nax
-    height = size * 1. + max(0, 0.1 * (3 - size))
-    fig = plt.figure(figsize=(width, height))
-    w_frame = plt.rcParams['figure.subplot.wspace'] / (2 * nax)
-    top_frame = max(.05, .2 / size)
-    fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0,
-                        top=1 - top_frame)
-    time_idx = [np.where(evoked.times >= t)[0][0] for t in times]
-
-    if proj is True and evoked.proj is not True:
-        data = evoked.copy().apply_proj().data
-    else:
-        data = evoked.data
-
-    data = data[np.ix_(picks, time_idx)] * scale
-    if merge_grads:
-        from .layouts.layout import _merge_grad_data
-        data = _merge_grad_data(data)
-    vmax = vmax or np.max(np.abs(data))
-    images = []
-    for i, t in enumerate(times):
-        plt.subplot(1, nax, i + 1)
-        images.append(plot_topomap(data[:, i], pos, vmax=vmax, cmap=cmap,
-                      sensors=sensors, res=res))
-        plt.title('%i ms' % (t * 1000))
-
-    if colorbar:
-        cax = plt.subplot(1, n + 1, n + 1)
-        plt.colorbar(cax=cax, ticks=[-vmax, 0, vmax], format=format)
-        # resize the colorbar (by default the color fills the whole axes)
-        cpos = cax.get_position()
-        cpos.x0 = 1 - (.7 + .1 / size) / nax
-        cpos.x1 = cpos.x0 + .1 / nax
-        cpos.y0 = .1
-        cpos.y1 = .7
-        cax.set_position(cpos)
-        if unit is not None:
-            cax.set_title(unit)
-
-    if proj == 'interactive':
-        _check_delayed_ssp(evoked)
-        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
-                      picks=picks, images=images, time_idx=time_idx,
-                      scale=scale, merge_grads=merge_grads, res=res, pos=pos,
-                      plot_update_proj_callback=_plot_update_evoked_topomap)
-        _draw_proj_checkbox(None, params)
-
-    if show:
-        plt.show()
-
-    return fig
-
-
-def _plot_update_evoked_topomap(params, bools):
-    """ Helper to update topomaps """
-    projs = [proj for ii, proj in enumerate(params['projs'])
-             if ii in np.where(bools)[0]]
-
-    params['proj_bools'] = bools
-    new_evoked = params['evoked'].copy()
-    new_evoked.info['projs'] = []
-    new_evoked.add_proj(projs)
-    new_evoked.apply_proj()
-
-    data = new_evoked.data[np.ix_(params['picks'],
-                                  params['time_idx'])] * params['scale']
-    if params['merge_grads']:
-        from .layouts.layout import _merge_grad_data
-        data = _merge_grad_data(data)
-
-    pos = np.asarray(params['pos'])
-    pos_x = pos[:, 0]
-    pos_y = pos[:, 1]
-    xmin, xmax = pos_x.min(), pos_x.max()
-    ymin, ymax = pos_y.min(), pos_y.max()
-    triang = delaunay.Triangulation(pos_x, pos_y)
-    x = np.linspace(xmin, xmax, params['res'])
-    y = np.linspace(ymin, ymax, params['res'])
-    xi, yi = np.meshgrid(x, y)
-
-    for ii, im in enumerate(params['images']):
-        interp = triang.linear_interpolator(data[:, ii])
-        im_ = interp[yi.min():yi.max():complex(0, yi.shape[0]),
-                     xi.min():xi.max():complex(0, xi.shape[1])]
-        im_ = np.ma.masked_array(im_, np.isnan(im_))
-        im.set_data(im_)
-    params['fig'].canvas.draw()
-
-
-def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
-                       colorbar=False, res=256, size=1, show=True):
-    """Plot topographic maps of SSP projections
-
-    Parameters
-    ----------
-    projs : list of Projection
-        The projections
-    layout : None | Layout | list of Layout
-        Layout instance specifying sensor positions (does not need to be
-        specified for Neuromag data). Or a list of Layout if projections
-        are from different sensor types.
-    cmap : matplotlib colormap
-        Colormap.
-    sensors : bool | str
-        Add markers for sensor locations to the plot. Accepts matplotlib plot
-        format string (e.g., 'r+' for red plusses).
-    colorbar : bool
-        Plot a colorbar.
-    res : int
-        The resolution of the topomap image (n pixels along each side).
-    size : scalar
-        Side length of the topomaps in inches (only applies when plotting
-        multiple topomaps at a time).
-    show : bool
-        Show figures if True
-
-    Returns
-    -------
-    fig : instance of matplotlib figure
-        Figure distributing one image per channel across sensor topography.
-    """
-    import matplotlib.pyplot as plt
-
-    if layout is None:
-        from .layouts import read_layout
-        layout = read_layout('Vectorview-all')
-
-    if not isinstance(layout, list):
-        layout = [layout]
-
-    n_projs = len(projs)
-    nrows = math.floor(math.sqrt(n_projs))
-    ncols = math.ceil(n_projs / nrows)
-
-    fig = plt.gcf()
-    fig.clear()
-    for k, proj in enumerate(projs):
-
-        ch_names = _clean_names(proj['data']['col_names'])
-        data = proj['data']['data'].ravel()
-
-        idx = []
-        for l in layout:
-            is_vv = l.kind.startswith('Vectorview')
-            if is_vv:
-                from .layouts.layout import _pair_grad_sensors_from_ch_names
-                grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
-                if grad_pairs:
-                    ch_names = [ch_names[i] for i in grad_pairs]
-
-            idx = [l.names.index(c) for c in ch_names if c in l.names]
-            if len(idx) == 0:
-                continue
-
-            pos = l.pos[idx]
-            if is_vv and grad_pairs:
-                from .layouts.layout import _merge_grad_data
-                shape = (len(idx) / 2, 2, -1)
-                pos = pos.reshape(shape).mean(axis=1)
-                data = _merge_grad_data(data[grad_pairs]).ravel()
-
-            break
-
-        ax = plt.subplot(nrows, ncols, k + 1)
-        ax.set_title(proj['desc'])
-        if len(idx):
-            plot_topomap(data, pos, vmax=None, cmap=cmap,
-                         sensors=sensors, res=res)
-            if colorbar:
-                plt.colorbar()
-        else:
-            raise RuntimeError('Cannot find a proper layout for projection %s'
-                               % proj['desc'])
-    fig = ax.get_figure()
-    if show and plt.get_backend() != 'agg':
-        fig.show()
-    
-    return fig
-
-
-def plot_topomap(data, pos, vmax=None, cmap='RdBu_r', sensors='k,', res=100,
-                 axis=None):
-    """Plot a topographic map as image
-
-    Parameters
-    ----------
-    data : array, length = n_points
-        The data values to plot.
-    pos : array, shape = (n_points, 2)
-        For each data point, the x and y coordinates.
-    vmax : scalar
-        The value specifying the range of the color scale (-vmax to +vmax). If
-        None, the largest absolute value in the data is used.
-    cmap : matplotlib colormap
-        Colormap.
-    sensors : bool | str
-        Add markers for sensor locations to the plot. Accepts matplotlib plot
-        format string (e.g., 'r+' for red plusses).
-    res : int
-        The resolution of the topomap image (n pixels along each side).
-    axis : instance of Axes | None
-        The axis to plot to. If None, the current axis will be used.
-    """
-    import matplotlib.pyplot as plt
-
-    data = np.asarray(data)
-    pos = np.asarray(pos)
-    if data.ndim > 1:
-        err = ("Data needs to be array of shape (n_sensors,); got shape "
-               "%s." % str(data.shape))
-        raise ValueError(err)
-    elif len(data) != len(pos):
-        err = ("Data and pos need to be of same length. Got data of shape %s, "
-               "pos of shape %s." % (str(), str()))
-
-    axes = plt.gca()
-    axes.set_frame_on(False)
-
-    vmax = vmax or np.abs(data).max()
-
-    plt.xticks(())
-    plt.yticks(())
-
-    pos_x = pos[:, 0]
-    pos_y = pos[:, 1]
-    ax = axis if axis else plt
-    if sensors:
-        if sensors is True:
-            sensors = 'k,'
-        ax.plot(pos_x, pos_y, sensors)
-
-    xmin, xmax = pos_x.min(), pos_x.max()
-    ymin, ymax = pos_y.min(), pos_y.max()
-    if any([not pos_y.any(), not pos_x.any()]):
-        raise RuntimeError('No position information found, cannot compute '
-                           'geometries for topomap.')
-
-    triang = delaunay.Triangulation(pos_x, pos_y)
-    interp = triang.linear_interpolator(data)
-    x = np.linspace(xmin, xmax, res)
-    y = np.linspace(ymin, ymax, res)
-    xi, yi = np.meshgrid(x, y)
-
-    im = interp[yi.min():yi.max():complex(0, yi.shape[0]),
-                xi.min():xi.max():complex(0, xi.shape[1])]
-    im = np.ma.masked_array(im, np.isnan(im))
-    im = ax.imshow(im, cmap=cmap, vmin=-vmax, vmax=vmax, origin='lower',
-                   aspect='equal', extent=(xmin, xmax, ymin, ymax))
-    return im
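
Since plot_topomap() only needs plain arrays, a self-contained sketch with
synthetic sensor positions is possible (random data, purely illustrative; in
0.8 the function lives in the new mne.viz package):

    import numpy as np
    import matplotlib.pyplot as plt
    from mne.viz import plot_topomap

    rng = np.random.RandomState(42)
    pos = rng.uniform(-0.5, 0.5, (32, 2))  # fake 2D sensor positions
    data = rng.randn(32)                   # fake sensor values
    plot_topomap(data, pos, sensors='k,', res=64)
    plt.show()
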
-
-
-def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
-                ylim=None, proj=False, xlim='tight', hline=None, units=None,
-                scalings=None, titles=None, axes=None):
-    """Plot evoked data
-
-    Note: If bad channels are not excluded they are shown in red.
-
-    Parameters
-    ----------
-    evoked : instance of Evoked
-        The evoked data
-    picks : None | array-like of int
-        The indices of channels to plot. If None show all.
-    exclude : list of str | 'bads'
-        Channels names to exclude from being shown. If 'bads', the
-        bad channels are excluded.
-    unit : bool
-        Scale plot with channel (SI) unit.
-    show : bool
-        Call pyplot.show() as the end or not.
-    ylim : dict | None
-        ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
-        Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
-        for each channel equals the pyplot default.
-    xlim : 'tight' | tuple | None
-        xlim for plots.
-    proj : bool | 'interactive'
-        If true SSP projections are applied before display. If 'interactive',
-        a check box for reversible selection of SSP projection vectors will
-        be shown.
-    hline : list of floats | None
-        The values at which to show a horizontal line.
-    units : dict | None
-        The units of the channel types used for axes labels. If None,
-        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
-    scalings : dict | None
-        The scalings of the channel types to be applied for plotting. If None,
-        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
-    titles : dict | None
-        The titles associated with the channels. If None, defaults to
-        `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
-    axes : instance of Axes | list | None
-        The axes to plot to. If list, the list must be a list of Axes of
-        the same length as the number of channel types. If instance of
-        Axes, there must be only one channel type plotted.
-    """
-    import matplotlib.pyplot as plt
-    if axes is not None and proj == 'interactive':
-        raise RuntimeError('Currently only single axis figures are supported'
-                           ' for interactive SSP selection.')
-
-    scalings, titles, units = _mutable_defaults(('scalings', scalings),
-                                                ('titles', titles),
-                                                ('units', units))
-
-    channel_types = set(key for d in [scalings, titles, units] for key in d)
-    if picks is None:
-        picks = range(evoked.info['nchan'])
-
-    bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
-                  if ch in evoked.ch_names]
-    if len(exclude) > 0:
-        if isinstance(exclude, basestring) and exclude == 'bads':
-            exclude = bad_ch_idx
-        elif (isinstance(exclude, list)
-              and all([isinstance(ch, basestring) for ch in exclude])):
-            exclude = [evoked.ch_names.index(ch) for ch in exclude]
-        else:
-            raise ValueError('exclude has to be a list of channel names or '
-                             '"bads"')
-
-        picks = list(set(picks).difference(exclude))
-
-    types = [channel_type(evoked.info, idx) for idx in picks]
-    n_channel_types = 0
-    ch_types_used = []
-    for t in channel_types:
-        if t in types:
-            n_channel_types += 1
-            ch_types_used.append(t)
-
-    axes_init = axes  # remember if axes were given as input
-
-    fig = None
-    if axes is None:
-        fig, axes = plt.subplots(n_channel_types, 1)
-    if isinstance(axes, plt.Axes):
-        axes = [axes]
-    elif isinstance(axes, np.ndarray):
-        axes = list(axes)
-
-    if axes_init is not None:
-        fig = axes[0].get_figure()
-
-    if len(axes) != n_channel_types:
-        raise ValueError('Number of axes (%g) must match number of channel '
-                         'types (%g)' % (len(axes), n_channel_types))
-
-    # instead of projecting during each iteration let's use the mixin here.
-    if proj is True and evoked.proj is not True:
-        evoked = evoked.copy()
-        evoked.apply_proj()
-
-    times = 1e3 * evoked.times  # time in milliseconds
-    for ax, t in zip(axes, ch_types_used):
-        ch_unit = units[t]
-        this_scaling = scalings[t]
-        if unit is False:
-            this_scaling = 1.0
-            ch_unit = 'NA'  # no unit
-        idx = [picks[i] for i in range(len(picks)) if types[i] == t]
-        if len(idx) > 0:
-            if any([i in bad_ch_idx for i in idx]):
-                colors = ['k'] * len(idx)
-                for i in bad_ch_idx:
-                    if i in idx:
-                        colors[idx.index(i)] = 'r'
-
-                ax._get_lines.color_cycle = iter(colors)
-            else:
-                ax._get_lines.color_cycle = cycle(['k'])
-
-            D = this_scaling * evoked.data[idx, :]
-            ax.plot(times, D.T)
-            if xlim is not None:
-                if xlim == 'tight':
-                    xlim = (times[0], times[-1])
-                ax.set_xlim(xlim)
-            if ylim is not None and t in ylim:
-                ax.set_ylim(ylim[t])
-            ax.set_title(titles[t] + ' (%d channel%s)' % (
-                         len(D), 's' if len(D) > 1 else ''))
-            ax.set_xlabel('time (ms)')
-            ax.set_ylabel('data (%s)' % ch_unit)
-
-            if hline is not None:
-                for h in hline:
-                    ax.axhline(h, color='r', linestyle='--', linewidth=2)
-
-    if axes_init is None:
-        plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
-        tight_layout()
-
-    if proj == 'interactive':
-        _check_delayed_ssp(evoked)
-        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
-                      axes=axes, types=types, units=units, scalings=scalings,
-                      unit=unit, ch_types_used=ch_types_used, picks=picks,
-                      plot_update_proj_callback=_plot_update_evoked)
-        _draw_proj_checkbox(None, params)
-
-    if show and plt.get_backend() != 'agg':
-        fig.show()
-        fig.canvas.draw()  # for axes plots update axes.
-
-    return fig
-
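-# A minimal usage sketch, not part of the original module (assumes the
-# 0.8-era mne.read_evokeds reader; the file name is a placeholder for any
-# averaged FIF file):
-#
-#     >>> import mne
-#     >>> evoked = mne.read_evokeds('sample-ave.fif', condition=0)
-#     >>> fig = mne.viz.plot_evoked(evoked, proj=True, hline=[0.])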
-
-def _plot_update_evoked(params, bools):
-    """ update the plot evoked lines
-    """
-    picks, evoked = [params[k] for k in ('picks', 'evoked')]
-    times = evoked.times * 1e3
-    projs = [proj for ii, proj in enumerate(params['projs'])
-             if ii in np.where(bools)[0]]
-    params['proj_bools'] = bools
-    new_evoked = evoked.copy()
-    new_evoked.info['projs'] = []
-    new_evoked.add_proj(projs)
-    new_evoked.apply_proj()
-    for ax, t in zip(params['axes'], params['ch_types_used']):
-        this_scaling = params['scalings'][t]
-        idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
-        D = this_scaling * new_evoked.data[idx, :]
-        for line, di in zip(ax.lines, D):
-            line.set_data(times, di)
-    params['fig'].canvas.draw()
-
-
-def _draw_proj_checkbox(event, params, draw_current_state=True):
-    """Toggle options (projectors) dialog"""
-    import matplotlib.pyplot as plt
-    import matplotlib as mpl
-    projs = params['projs']
-    # turn on options dialog
-    fig_proj = figure_nobar()
-    fig_proj.canvas.set_window_title('SSP projection vectors')
-    ax_temp = plt.axes((0, 0, 1, 1))
-    ax_temp.get_yaxis().set_visible(False)
-    ax_temp.get_xaxis().set_visible(False)
-    fig_proj.add_axes(ax_temp)
-    labels = [p['desc'] for p in projs]
-    actives = [p['active'] for p in projs] if draw_current_state else \
-              [True] * len(params['projs'])
-    proj_checks = mpl.widgets.CheckButtons(ax_temp, labels=labels,
-                                           actives=actives)
-    # change already-applied projectors to red
-    for ii, p in enumerate(projs):
-        if p['active'] is True:
-            for x in proj_checks.lines[ii]:
-                x.set_color('r')
-    # make minimal size
-    width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
-    height = len(projs) / 6.0 + 0.5
-    # have to try/catch when there's no toolbar
-    try:
-        fig_proj.set_size_inches((width, height), forward=True)
-    except Exception:
-        pass
-    # pass key presses from option dialog over
-    proj_checks.on_clicked(partial(_toggle_proj, params=params))
-    params['proj_checks'] = proj_checks
-    # this should work for non-test cases
-    try:
-        fig_proj.show()
-    except Exception:
-        pass
-
-
-def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
-                                 fontsize=18, bgcolor=(.05, 0, .1),
-                                 opacity=0.2, brain_color=(0.7,) * 3,
-                                 show=True, high_resolution=False,
-                                 fig_name=None, fig_number=None, labels=None,
-                                 modes=['cone', 'sphere'],
-                                 scale_factors=[1, 0.6],
-                                 verbose=None, **kwargs):
-    """Plot source estimates obtained with sparse solver
-
-    Active dipoles are represented in a "Glass" brain.
-    If the same source is active in multiple source estimates it is
-    displayed with a sphere, otherwise with a cone in 3D.
-
-    Parameters
-    ----------
-    src : dict
-        The source space
-    stcs : instance of SourceEstimate or list of instances of SourceEstimate
-        The source estimates (up to 3)
-    colors : list
-        List of colors
-    linewidth : int
-        Line width in 2D plot
-    fontsize : int
-        Font size
-    bgcolor : tuple of length 3
-        Background color in 3D
-    opacity : float in [0, 1]
-        Opacity of brain mesh
-    brain_color : tuple of length 3
-        Brain color
-    show : bool
-        Show figures if True
-    fig_name : str | None
-        Mayavi figure name.
-    fig_number : int | None
-        Pylab figure number.
-    labels : ndarray or list of ndarrays
-        Labels to show sources in clusters. Sources with the same
-        label and the waveforms within each cluster are presented in
-        the same color. labels should be a list of ndarrays when
-        stcs is a list, i.e. one label for each stc.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-    kwargs : kwargs
-        Keyword arguments to pass to mlab.triangular_mesh
-    """
-    if not isinstance(stcs, list):
-        stcs = [stcs]
-    if labels is not None and not isinstance(labels, list):
-        labels = [labels]
-
-    if colors is None:
-        colors = COLORS
-
-    linestyles = ['-', '--', ':']
-
-    # Show 3D
-    lh_points = src[0]['rr']
-    rh_points = src[1]['rr']
-    points = np.r_[lh_points, rh_points]
-
-    lh_normals = src[0]['nn']
-    rh_normals = src[1]['nn']
-    normals = np.r_[lh_normals, rh_normals]
-
-    if high_resolution:
-        use_lh_faces = src[0]['tris']
-        use_rh_faces = src[1]['tris']
-    else:
-        use_lh_faces = src[0]['use_tris']
-        use_rh_faces = src[1]['use_tris']
-
-    use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
-
-    points *= 170
-
-    vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
-               for stc in stcs]
-    unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
-
-    try:
-        from mayavi import mlab
-    except ImportError:
-        from enthought.mayavi import mlab
-
-    from matplotlib.colors import ColorConverter
-    color_converter = ColorConverter()
-
-    f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
-    mlab.clf()
-    if mlab.options.backend != 'test':
-        f.scene.disable_render = True
-    surface = mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
-                                   use_faces, color=brain_color,
-                                   opacity=opacity, **kwargs)
-
-    import matplotlib.pyplot as plt
-    # Show time courses
-    plt.figure(fig_number)
-    plt.clf()
-
-    colors = cycle(colors)
-
-    logger.info("Total number of active sources: %d" % len(unique_vertnos))
-
-    if labels is not None:
-        colors = [colors.next() for _ in
-                  range(np.unique(np.concatenate(labels).ravel()).size)]
-
-    for idx, v in enumerate(unique_vertnos):
-        # get indices of stcs it belongs to
-        ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
-        is_common = len(ind) > 1
-
-        if labels is None:
-            c = colors.next()
-        else:
-            # if vertex is in different stcs then take label from first one
-            c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
-
-        mode = modes[1] if is_common else modes[0]
-        scale_factor = scale_factors[1] if is_common else scale_factors[0]
-
-        if (isinstance(scale_factor, (np.ndarray, list, tuple))
-            and len(unique_vertnos) == len(scale_factor)):
-            scale_factor = scale_factor[idx]
-
-        x, y, z = points[v]
-        nx, ny, nz = normals[v]
-        mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
-                      mode=mode, scale_factor=scale_factor)
-
-        for k in ind:
-            vertno = vertnos[k]
-            mask = (vertno == v)
-            assert np.sum(mask) == 1
-            linestyle = linestyles[k]
-            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
-                     c=c, linewidth=linewidth, linestyle=linestyle)
-
-    plt.xlabel('Time (ms)', fontsize=18)
-    plt.ylabel('Source amplitude (nAm)', fontsize=18)
-
-    if fig_name is not None:
-        plt.title(fig_name)
-
-    if show:
-        plt.show()
-
-    surface.actor.property.backface_culling = True
-    surface.actor.property.shading = True
-
-    return surface
-
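-# A hedged usage sketch: visualize the dipoles found by a sparse solver
-# such as mixed_norm. The `evoked`, `forward` and `noise_cov` names are
-# assumptions standing in for previously loaded objects:
-#
-#     >>> from mne.inverse_sparse import mixed_norm
-#     >>> stc = mixed_norm(evoked, forward, noise_cov, alpha=50.)
-#     >>> plot_sparse_source_estimates(forward['src'], stc, opacity=0.1)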
-
- at verbose
-def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
-             show=True, verbose=None):
-    """Plot Covariance data
-
-    Parameters
-    ----------
-    cov : instance of Covariance
-        The covariance matrix.
-    info : dict
-        Measurement info.
-    exclude : list of string | str
-        List of channels to exclude. If empty do not exclude any channel.
-        If 'bads', exclude info['bads'].
-    colorbar : bool
-        Show colorbar or not.
-    proj : bool
-        Apply projections or not.
-    show : bool
-        Call pyplot.show() at the end or not.
-    show_svd : bool
-        Plot also singular values of the noise covariance for each sensor
-        type. We show square roots, i.e. standard deviations.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-    """
-    if exclude == 'bads':
-        exclude = info['bads']
-    ch_names = [n for n in cov.ch_names if n not in exclude]
-    ch_idx = [cov.ch_names.index(n) for n in ch_names]
-    info_ch_names = info['ch_names']
-    sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
-                         exclude=exclude)
-    sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
-                         exclude=exclude)
-    sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
-                          exclude=exclude)
-    idx_eeg = [ch_names.index(info_ch_names[c])
-               for c in sel_eeg if info_ch_names[c] in ch_names]
-    idx_mag = [ch_names.index(info_ch_names[c])
-               for c in sel_mag if info_ch_names[c] in ch_names]
-    idx_grad = [ch_names.index(info_ch_names[c])
-                for c in sel_grad if info_ch_names[c] in ch_names]
-
-    idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
-                 (idx_grad, 'Gradiometers', 'fT/cm', 1e13),
-                 (idx_mag, 'Magnetometers', 'fT', 1e15)]
-    idx_names = [(idx, name, unit, scaling)
-                 for idx, name, unit, scaling in idx_names if len(idx) > 0]
-
-    C = cov.data[ch_idx][:, ch_idx]
-
-    if proj:
-        projs = copy.deepcopy(info['projs'])
-
-        #   Activate the projection items
-        for p in projs:
-            p['active'] = True
-
-        P, ncomp, _ = make_projector(projs, ch_names)
-        if ncomp > 0:
-            logger.info('    Created an SSP operator (subspace dimension'
-                        ' = %d)' % ncomp)
-            C = np.dot(P, np.dot(C, P.T))
-        else:
-            logger.info('    The projection vectors do not apply to these '
-                        'channels.')
-
-    import matplotlib.pyplot as plt
-
-    plt.figure(figsize=(2.5 * len(idx_names), 2.7))
-    for k, (idx, name, _, _) in enumerate(idx_names):
-        plt.subplot(1, len(idx_names), k + 1)
-        plt.imshow(C[idx][:, idx], interpolation="nearest")
-        plt.title(name)
-    plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
-    tight_layout()
-
-    if show_svd:
-        plt.figure()
-        for k, (idx, name, unit, scaling) in enumerate(idx_names):
-            _, s, _ = linalg.svd(C[idx][:, idx])
-            plt.subplot(1, len(idx_names), k + 1)
-            plt.ylabel('Noise std (%s)' % unit)
-            plt.xlabel('Eigenvalue index')
-            plt.semilogy(np.sqrt(s) * scaling)
-            plt.title(name)
-            tight_layout()
-
-    if show:
-        plt.show()
-
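-# A hedged usage sketch (the file names are placeholders for a noise
-# covariance and a matching raw measurement):
-#
-#     >>> import mne
-#     >>> cov = mne.read_cov('sample-cov.fif')
-#     >>> raw = mne.io.Raw('sample_raw.fif')
-#     >>> mne.viz.plot_cov(cov, raw.info, exclude='bads', show_svd=True)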
-
-def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
-                          colormap='hot', time_label='time=%0.2f ms',
-                          smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
-                          transparent=True, alpha=1.0, time_viewer=False,
-                          config_opts={}, subjects_dir=None, figure=None,
-                          views='lat', colorbar=True):
-    """Plot SourceEstimates with PySurfer
-
-    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
-    which will automatically be set by this function. Plotting multiple
-    SourceEstimates with different values for subjects_dir will cause
-    PySurfer to use the wrong FreeSurfer surfaces when using methods of
-    the returned Brain object. It is therefore recommended to set the
-    SUBJECTS_DIR environment variable or always use the same value for
-    subjects_dir (within the same Python session).
-
-    Parameters
-    ----------
-    stc : SourceEstimates
-        The source estimates to plot.
-    subject : str | None
-        The subject name corresponding to FreeSurfer environment
-        variable SUBJECT. If None stc.subject will be used. If that
-        is None, the environment will be used.
-    surface : str
-        The type of surface (inflated, white etc.).
-    hemi : str, 'lh' | 'rh' | 'split' | 'both'
-        The hemisphere to display. Using 'both' or 'split' requires
-        PySurfer version 0.4 or above.
-    colormap : str
-        The type of colormap to use.
-    time_label : str
-        How to print info about the time instant visualized.
-    smoothing_steps : int
-        The amount of smoothing.
-    fmin : float
-        The minimum value to display.
-    fmid : float
-        The middle value on the colormap.
-    fmax : float
-        The maximum value for the colormap.
-    transparent : bool
-        If True, use a linear transparency between fmin and fmid.
-    alpha : float
-        Alpha value to apply globally to the overlay.
-    time_viewer : bool
-        Display time viewer GUI.
-    config_opts : dict
-        Keyword arguments for Brain initialization.
-        See pysurfer.viz.Brain.
-    subjects_dir : str
-        The path to the freesurfer subjects reconstructions.
-        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
-    figure : instance of mayavi.core.scene.Scene | list | int | None
-        If None, a new figure will be created. If multiple views or a
-        split view is requested, this must be a list of the appropriate
-        length. If int is provided it will be used to identify the Mayavi
-        figure by its id or create a new figure with the given id.
-    views : str | list
-        View to use. See surfer.Brain().
-    colorbar : bool
-        If True, display colorbar on scene.
-
-    Returns
-    -------
-    brain : Brain
-        An instance of surfer.viz.Brain from PySurfer.
-    """
-    import surfer
-    from surfer import Brain, TimeViewer
-
-    if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
-        raise NotImplementedError('hemi type "%s" not supported with your '
-                                  'version of pysurfer. Please upgrade to '
-                                  'version 0.4 or higher.' % hemi)
-
-    try:
-        import mayavi
-        from mayavi import mlab
-    except ImportError:
-        from enthought import mayavi
-        from enthought.mayavi import mlab
-
-    # import here to avoid circular import problem
-    from .source_estimate import SourceEstimate
-
-    if not isinstance(stc, SourceEstimate):
-        raise ValueError('stc has to be a surface source estimate')
-
-    if hemi not in ['lh', 'rh', 'split', 'both']:
-        raise ValueError('hemi has to be either "lh", "rh", "split", '
-                         'or "both"')
-
-    n_split = 2 if hemi == 'split' else 1
-    n_views = 1 if isinstance(views, basestring) else len(views)
-    if figure is not None:
-        # use figure with specified id or create new figure
-        if isinstance(figure, int):
-            figure = mlab.figure(figure, size=(600, 600))
-        # make sure it is of the correct type
-        if not isinstance(figure, list):
-            figure = [figure]
-        if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
-            raise TypeError('figure must be a mayavi scene or list of scenes')
-        # make sure we have the right number of figures
-        n_fig = len(figure)
-        if not n_fig == n_split * n_views:
-            raise RuntimeError('`figure` must be a list with the same '
-                               'number of elements as PySurfer plots that '
-                               'will be created (%s)' % (n_split * n_views))
-
-    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
-
-    subject = _check_subject(stc.subject, subject, False)
-    if subject is None:
-        if 'SUBJECT' in os.environ:
-            subject = os.environ['SUBJECT']
-        else:
-            raise ValueError('SUBJECT environment variable not set')
-
-    if hemi in ['both', 'split']:
-        hemis = ['lh', 'rh']
-    else:
-        hemis = [hemi]
-
-    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
-    args = inspect.getargspec(Brain.__init__)[0]
-    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
-                  subjects_dir=subjects_dir)
-    if 'views' in args:
-        kwargs['views'] = views
-    else:
-        logger.info('PySurfer does not support "views" argument, please '
-                    'consider updating to a newer version (0.4 or later)')
-    brain = Brain(subject, hemi, surface, **kwargs)
-    for hemi in hemis:
-        hemi_idx = 0 if hemi == 'lh' else 1
-        if hemi_idx == 0:
-            data = stc.data[:len(stc.vertno[0])]
-        else:
-            data = stc.data[len(stc.vertno[0]):]
-        vertices = stc.vertno[hemi_idx]
-        time = 1e3 * stc.times
-        brain.add_data(data, colormap=colormap, vertices=vertices,
-                       smoothing_steps=smoothing_steps, time=time,
-                       time_label=time_label, alpha=alpha, hemi=hemi,
-                       colorbar=colorbar)
-
-        # scale colormap and set time (index) to display
-        brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
-                                  transparent=transparent)
-
-    if time_viewer:
-        TimeViewer(brain)
-
-    return brain
-
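-# A hedged usage sketch (requires PySurfer; the subject name and file stem
-# are placeholders):
-#
-#     >>> import mne
-#     >>> stc = mne.read_source_estimate('sample_audvis-meg')
-#     >>> brain = mne.viz.plot_source_estimates(stc, subject='sample',
-#     ...                                       hemi='both', time_viewer=True)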
-
-def _plot_ica_panel_onpick(event, sources=None, ylims=None):
-    """Onpick callback for plot_ica_panel"""
-
-    # make sure that the swipe gesture in OS-X doesn't open many figures
-    if event.mouseevent.inaxes is None or event.mouseevent.button != 1:
-        return
-
-    artist = event.artist
-    try:
-        import matplotlib.pyplot as plt
-        plt.figure()
-        src_idx = artist._mne_src_idx
-        component = artist._mne_component
-        plt.plot(sources[src_idx], 'r')
-        plt.ylim(ylims)
-        plt.grid(linestyle='-', color='gray', linewidth=.25)
-        plt.title(component)
-    except Exception as err:
-        # matplotlib silently ignores exceptions in event handlers, so we print
-        # it here to know what went wrong
-        print err
-        raise
-
-
- at verbose
-def plot_ica_panel(sources, start=None, stop=None, n_components=None,
-                   source_idx=None, ncol=3, nrow=10, verbose=None,
-                   title=None, show=True):
-    """Create panel plots of ICA sources
-
-    Clicking on the plot of an individual source opens a new figure showing
-    the source.
-
-    Parameters
-    ----------
-    sources : ndarray
-        Sources as drawn from ica.get_sources.
-    start : int
-        x-axis start index. If None, from the beginning.
-    stop : int
-        x-axis stop index. If None, to the end.
-    n_components : int
-        Number of components fitted.
-    source_idx : array-like
-        Indices for subsetting the sources.
-    ncol : int
-        Number of panel-columns.
-    nrow : int
-        Number of panel-rows.
-    title : str
-        The figure title. If None a default is provided.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-    show : bool
-        If True, plot will be shown, else just the figure is returned.
-
-    Returns
-    -------
-    fig : instance of pyplot.Figure
-    """
-    import matplotlib.pyplot as plt
-
-    if source_idx is None:
-        source_idx = np.arange(len(sources))
-    else:
-        source_idx = np.array(source_idx)
-
-    for param, value in (('nrow', nrow), ('n_components', n_components)):
-        if value is not None:
-            warnings.warn('The `%s` parameter is deprecated and will be '
-                          'removed in MNE-Python 0.8' % param,
-                          DeprecationWarning)
-
-    n_components = len(sources)
-    sources = sources[source_idx, start:stop]
-    ylims = sources.min(), sources.max()
-    xlims = np.arange(sources.shape[-1])[[0, -1]]
-    fig, axes = _prepare_trellis(n_components, ncol)
-    if title is None:
-        fig.suptitle('MEG signal decomposition'
-                     ' -- %i components.' % n_components, size=16)
-    elif title:
-        fig.suptitle(title, size=16)
-
-    plt.subplots_adjust(wspace=0.05, hspace=0.05)
-
-    for idx, (ax, source) in enumerate(zip(axes, sources)):
-        ax.grid(linestyle='-', color='gray', linewidth=.25)
-        component = '[%i]' % idx
-
-        # plot + embed idx and component name to use in callback
-        line = ax.plot(source, linewidth=0.5, color='red', picker=1e9)[0]
-        vars(line)['_mne_src_idx'] = idx
-        vars(line)['_mne_component'] = component
-        ax.set_xlim(xlims)
-        ax.set_ylim(ylims)
-        ax.text(0.05, .95, component, transform=ax.transAxes,
-                verticalalignment='top')
-        plt.setp(ax.get_xticklabels(), visible=False)
-        plt.setp(ax.get_yticklabels(), visible=False)
-    # register callback
-    callback = partial(_plot_ica_panel_onpick, sources=sources, ylims=ylims)
-    fig.canvas.mpl_connect('pick_event', callback)
-
-    if show:
-        plt.show()
-
-    return fig
-
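-# A hedged usage sketch (assumes a fitted ICA instance `ica` and a Raw
-# instance `raw`; get_sources_raw is the 0.x-era accessor assumed here):
-#
-#     >>> sources = ica.get_sources_raw(raw, start=0, stop=1000)
-#     >>> fig = plot_ica_panel(sources, ncol=3)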
-
-def plot_ica_topomap(ica, source_idx, ch_type='mag', res=500, layout=None,
-                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
-                     show=True):
-    """ Plot topographic map from ICA component.
-
-    Parameters
-    ----------
-    ica : instance of mne.preprocessing.ICA
-        The ica object to plot from.
-    source_idx : int | array-like
-        The indices of the sources to be plotted.
-    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
-        The channel type to plot. For 'grad', the gradiometers are collected in
-        pairs and the RMS for each pair is plotted.
-    layout : None | Layout
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    vmax : scalar
-        The value specifying the range of the color scale (-vmax to +vmax). If
-        None, the largest absolute value in the data is used.
-    cmap : matplotlib colormap
-        Colormap.
-    sensors : bool | str
-        Add markers for sensor locations to the plot. Accepts matplotlib plot
-        format string (e.g., 'r+' for red plusses).
-    colorbar : bool
-        Plot a colorbar.
-    res : int
-        The resolution of the topomap image (n pixels along each side).
-    show : bool
-        Call pyplot.show() at the end.
-    """
-    import matplotlib.pyplot as plt
-
-    if np.isscalar(source_idx):
-        source_idx = [source_idx]
-
-    data = np.dot(ica.mixing_matrix_[:, source_idx].T,
-                  ica.pca_components_[:ica.n_components_])
-
-    if ica.info is None:
-        raise RuntimeError('The ICA\'s measurement info is missing. Please '
-                           'fit the ICA or add the corresponding info object.')
-
-    picks, pos, merge_grads = _prepare_topo_plot(ica, ch_type, layout)
-    data = np.atleast_2d(data)
-    data = data[:, picks]
-
-    # prepare data for iteration
-    fig, axes = _prepare_trellis(len(data), max_col=5)
-
-    if vmax is None:
-        vrange = np.array([f(data) for f in (np.min, np.max)])
-        vmax = max(abs(vrange))
-
-    if merge_grads:
-        from .layouts.layout import _merge_grad_data
-    for ii, data_, ax in zip(source_idx, data, axes):
-        data_ = _merge_grad_data(data_) if merge_grads else data_
-        plot_topomap(data_.flatten(), pos, vmax=vmax, res=res, axis=ax)
-        ax.set_title('IC #%03d' % ii, fontsize=12)
-        ax.set_yticks([])
-        ax.set_xticks([])
-        ax.set_frame_on(False)
-
-    tight_layout()
-    if colorbar:
-        vmax_ = normalize_colors(vmin=-vmax, vmax=vmax)
-        sm = plt.cm.ScalarMappable(cmap=cmap, norm=vmax_)
-        sm.set_array(np.linspace(-vmax, vmax))
-        fig.subplots_adjust(right=0.8)
-        cax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
-        fig.colorbar(sm, cax=cax)
-        cax.set_title('AU')
-
-    if show is True:
-        plt.show()
-
-    return fig
-
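-# A hedged usage sketch (assumes `ica` was fitted on data so that its
-# measurement info is set, e.g. via ica.decompose_raw(raw)):
-#
-#     >>> fig = plot_ica_topomap(ica, source_idx=range(5), ch_type='mag')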
-
-def _prepare_topo_plot(obj, ch_type, layout):
-    """"Aux Function"""
-    info = copy.deepcopy(obj.info)
-    if layout is None and ch_type != 'eeg':
-        from .layouts.layout import find_layout
-        layout = find_layout(info)
-    elif layout == 'auto':
-        layout = None
-
-    info['ch_names'] = _clean_names(info['ch_names'])
-    for ii, this_ch in enumerate(info['chs']):
-        this_ch['ch_name'] = info['ch_names'][ii]
-
-    # special case for merging grad channels
-    if (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
-            np.unique([ch['coil_type'] for ch in info['chs']])):
-        from .layouts.layout import _pair_grad_sensors
-        picks, pos = _pair_grad_sensors(info, layout)
-        merge_grads = True
-    else:
-        merge_grads = False
-        if ch_type == 'eeg':
-            picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
-                               exclude='bads')
-        else:
-            picks = pick_types(info, meg=ch_type, ref_meg=False,
-                               exclude='bads')
-
-        if len(picks) == 0:
-            raise ValueError("No channels of type %r" % ch_type)
-
-        if layout is None:
-            chs = [info['chs'][i] for i in picks]
-            from .layouts.layout import _find_topomap_coords
-            pos = _find_topomap_coords(chs, layout)
-        else:
-            pos = [layout.pos[layout.names.index(info['ch_names'][k])] for k in
-                   picks]
-
-    return picks, pos, merge_grads
-
-
-def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
-                      vmax=None, colorbar=True, order=None, show=True,
-                      units=None, scalings=None):
-    """Plot Event Related Potential / Fields image
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs
-    picks : int | array of int | None
-        The indices of the channels to consider. If None, all good
-        data channels are plotted.
-    sigma : float
-        The standard deviation of the Gaussian smoothing to apply along
-        the epoch axis of the image.
-    vmin : float
-        The min value in the image. The unit is uV for EEG channels,
-        fT for magnetometers and fT/cm for gradiometers
-    vmax : float
-        The max value in the image. The unit is uV for EEG channels,
-        fT for magnetometers and fT/cm for gradiometers
-    colorbar : bool
-        Whether to display a colorbar.
-    order : None | array of int | callable
-        If not None, order is used to reorder the epochs on the y-axis
-        of the image. If it's an array of int, it should have the same
-        length as the number of good epochs. If it's a callable, the
-        arguments passed are the times vector and the data as a 2d array
-        (data.shape[1] == len(times)).
-    show : bool
-        Whether to show the figure at the end.
-    units : dict | None
-        The units of the channel types used for axis labels. If None,
-        defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
-    scalings : dict | None
-        The scalings of the channel types to be applied for plotting.
-        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15)`
-
-    Returns
-    -------
-    figs : list of matplotlib.figure.Figure
-        One figure per channel displayed.
-    """
-    units, scalings = _mutable_defaults(('units', units),
-                                        ('scalings', scalings))
-
-    import matplotlib.pyplot as plt
-    if picks is None:
-        picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
-                           exclude='bads')
-
-    if units.keys() != scalings.keys():
-        raise ValueError('Scalings and units must have the same keys.')
-
-    picks = np.atleast_1d(picks)
-    evoked = epochs.average(picks)
-    data = epochs.get_data()[:, picks, :]
-    if vmin is None:
-        vmin = data.min()
-    if vmax is None:
-        vmax = data.max()
-
-    figs = list()
-    for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
-        this_fig = plt.figure()
-        figs.append(this_fig)
-
-        ch_type = channel_type(epochs.info, idx)
-        if ch_type not in scalings:
-            # We know it's not in either scalings or units since keys match
-            raise KeyError('%s type not in scalings and units' % ch_type)
-        this_data *= scalings[ch_type]
-
-        this_order = order
-        if callable(order):
-            this_order = order(epochs.times, this_data)
-
-        if this_order is not None:
-            this_data = this_data[this_order]
-
-        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
-
-        ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
-        im = plt.imshow(this_data,
-                        extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
-                                0, len(data)],
-                        aspect='auto', origin='lower',
-                        vmin=vmin, vmax=vmax)
-        ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
-        if colorbar:
-            ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
-        ax1.set_title(epochs.ch_names[idx])
-        ax1.set_ylabel('Epochs')
-        ax1.axis('auto')
-        ax1.axis('tight')
-        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
-        ax2.plot(1e3 * evoked.times, scalings[ch_type] * evoked.data[i])
-        ax2.set_xlabel('Time (ms)')
-        ax2.set_ylabel(units[ch_type])
-        ax2.set_ylim([vmin, vmax])
-        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
-        if colorbar:
-            plt.colorbar(im, cax=ax3)
-            tight_layout()
-
-    if show:
-        plt.show()
-
-    return figs
-
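-# A hedged usage sketch: plot single trials of one channel, reordering the
-# epochs with a callable (the mean-amplitude ordering below is illustrative):
-#
-#     >>> def order_func(times, data):
-#     ...     return np.argsort(data.mean(axis=1))
-#     >>> figs = plot_image_epochs(epochs, picks=[278], sigma=0.5,
-#     ...                          order=order_func)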
-
-def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
-    """Return a colormap similar to that used by mne_analyze
-
-    Parameters
-    ----------
-    limits : list (or array) of length 3
-        Bounds for the colormap.
-    format : str
-        Type of colormap to return. If 'matplotlib', will return a
-        matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
-        return an RGBA array of shape (256, 4).
-
-    Returns
-    -------
-    cmap : instance of matplotlib.colors.Colormap | array
-        A teal->blue->gray->red->yellow colormap.
-
-    Notes
-    -----
-    This function returns a colormap that will display correctly for data
-    that are scaled by the plotting function to span [-fmax, fmax].
-
-    Examples
-    --------
-    The following code will plot an STC using standard MNE limits:
-
-        colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
-        brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
-        brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
-
-    """
-    l = np.asarray(limits, dtype='float')
-    if len(l) != 3:
-        raise ValueError('limits must have 3 elements')
-    if any(l < 0):
-        raise ValueError('limits must all be positive')
-    if any(np.diff(l) <= 0):
-        raise ValueError('limits must be monotonically increasing')
-    if format == 'matplotlib':
-        from matplotlib import colors
-        l = (np.concatenate((-np.flipud(l), l)) + l[-1]) / (2 * l[-1])
-        cdict = {'red': ((l[0], 0.0, 0.0),
-                         (l[1], 0.0, 0.0),
-                         (l[2], 0.5, 0.5),
-                         (l[3], 0.5, 0.5),
-                         (l[4], 1.0, 1.0),
-                         (l[5], 1.0, 1.0)),
-                 'green': ((l[0], 1.0, 1.0),
-                           (l[1], 0.0, 0.0),
-                           (l[2], 0.5, 0.5),
-                           (l[3], 0.5, 0.5),
-                           (l[4], 0.0, 0.0),
-                           (l[5], 1.0, 1.0)),
-                 'blue': ((l[0], 1.0, 1.0),
-                          (l[1], 1.0, 1.0),
-                          (l[2], 0.5, 0.5),
-                          (l[3], 0.5, 0.5),
-                          (l[4], 0.0, 0.0),
-                          (l[5], 0.0, 0.0))}
-        return colors.LinearSegmentedColormap('mne_analyze', cdict)
-    elif format == 'mayavi':
-        l = np.concatenate((-np.flipud(l), [0], l)) / l[-1]
-        r = np.array([0, 0, 0, 0, 1, 1, 1])
-        g = np.array([1, 0, 0, 0, 0, 0, 1])
-        b = np.array([1, 1, 1, 0, 0, 0, 0])
-        a = np.array([1, 1, 0, 0, 0, 1, 1])
-        xp = (np.arange(256) - 128) / 128.0
-        colormap = np.r_[[np.interp(xp, l, 255 * c) for c in [r, g, b, a]]].T
-        return colormap
-    else:
-        raise ValueError('format must be either matplotlib or mayavi')
-
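-# The 'matplotlib' format can be fed to 2D plotting in the same spirit
-# (a sketch; `data` stands in for any array scaled to span [-15, 15]):
-#
-#     >>> import matplotlib.pyplot as plt
-#     >>> cmap = mne_analyze_colormap([5, 10, 15], format='matplotlib')
-#     >>> plt.imshow(data, cmap=cmap, vmin=-15, vmax=15)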
-
-def circular_layout(node_names, node_order, start_pos=90, start_between=True):
-    """Create layout arranging nodes on a circle.
-
-    Parameters
-    ----------
-    node_names : list of str
-        Node names.
-    node_order : list of str
-        List with node names defining the order in which the nodes are
-        arranged. Must have the same elements as node_names, but the order
-        can be different. The nodes are arranged clockwise starting at
-        "start_pos" degrees.
-    start_pos : float
-        Angle in degrees that defines where the first node is plotted.
-    start_between : bool
-        If True, the layout starts with the position between the nodes. This is
-        the same as adding "180. / len(node_names)" to start_pos.
-
-    Returns
-    -------
-    node_angles : array, shape=(len(node_names),)
-        Node angles in degrees.
-    """
-    n_nodes = len(node_names)
-
-    if len(node_order) != n_nodes:
-        raise ValueError('node_order has to be the same length as node_names')
-
-    # convert it to a list with indices
-    node_order = [node_order.index(name) for name in node_names]
-    node_order = np.array(node_order)
-    if len(np.unique(node_order)) != n_nodes:
-        raise ValueError('node_order has repeated entries')
-
-    if start_between:
-        start_pos += 180. / n_nodes
-
-    node_angles = start_pos + 360 * node_order / float(n_nodes)
-
-    return node_angles
-
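-# A hedged usage sketch (the label names are illustrative only):
-#
-#     >>> names = ['frontal', 'parietal', 'temporal', 'occipital']
-#     >>> angles = circular_layout(names, node_order=sorted(names),
-#     ...                          start_pos=90)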
-
-def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
-                             node_angles=None, node_width=None,
-                             node_colors=None, facecolor='black',
-                             textcolor='white', node_edgecolor='black',
-                             linewidth=1.5, colormap='hot', vmin=None,
-                             vmax=None, colorbar=True, title=None):
-    """Visualize connectivity as a circular graph.
-
-    Note: This code is based on the circle graph example by Nicolas P. Rougier
-    http://www.loria.fr/~rougier/coding/recipes.html
-
-    Parameters
-    ----------
-    con : array
-        Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
-        array is provided, "indices" has to be used to define the connection
-        indices.
-    node_names : list of str
-        Node names. The order corresponds to the order in con.
-    indices : tuple of arrays | None
-        Two arrays with indices of connections for which the connection
-        strengths are defined in con. Only needed if con is a 1D array.
-    n_lines : int | None
-        If not None, only the n_lines strongest connections (strength=abs(con))
-        are drawn.
-    node_angles : array, shape=(len(node_names),) | None
-        Array with node positions in degrees. If None, the nodes are equally
-        spaced on the circle. See mne.viz.circular_layout.
-    node_width : float | None
-        Width of each node in degrees. If None, "360. / len(node_names)" is
-        used.
-    node_colors : list of tuples | list of str
-        List with the color to use for each node. If fewer colors than nodes
-        are provided, the colors will be repeated. Any color supported by
-        matplotlib can be used, e.g., RGBA tuples, named colors.
-    facecolor : str
-        Color to use for background. See matplotlib.colors.
-    textcolor : str
-        Color to use for text. See matplotlib.colors.
-    node_edgecolor : str
-        Color to use for lines around nodes. See matplotlib.colors.
-    linewidth : float
-        Line width to use for connections.
-    colormap : str
-        Colormap to use for coloring the connections.
-    vmin : float | None
-        Minimum value for colormap. If None, it is determined automatically.
-    vmax : float | None
-        Maximum value for colormap. If None, it is determined automatically.
-    colorbar : bool
-        Display a colorbar or not.
-    title : str
-        The figure title.
-
-    Returns
-    -------
-    fig : instance of pyplot.Figure
-        The figure handle.
-    """
-    import matplotlib.pyplot as plt
-    import matplotlib.path as m_path
-    import matplotlib.patches as m_patches
-
-    n_nodes = len(node_names)
-
-    if node_angles is not None:
-        if len(node_angles) != n_nodes:
-            raise ValueError('node_angles has to be the same length '
-                             'as node_names')
-        # convert it to radians
-        node_angles = node_angles * np.pi / 180
-    else:
-        # uniform layout on unit circle
-        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
-
-    if node_width is None:
-        node_width = 2 * np.pi / n_nodes
-    else:
-        node_width = node_width * np.pi / 180
-
-    if node_colors is not None:
-        if len(node_colors) < n_nodes:
-            node_colors = cycle(node_colors)
-    else:
-        # assign colors using colormap
-        node_colors = [plt.cm.spectral(i / float(n_nodes))
-                       for i in range(n_nodes)]
-
-    # handle 1D and 2D connectivity information
-    if con.ndim == 1:
-        if indices is None:
-            raise ValueError('indices has to be provided if con.ndim == 1')
-    elif con.ndim == 2:
-        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
-            raise ValueError('con has to be 1D or a square matrix')
-        # we use the lower-triangular part
-        indices = tril_indices(n_nodes, -1)
-        con = con[indices]
-    else:
-        raise ValueError('con has to be 1D or a square matrix')
-
-    # get the colormap
-    if isinstance(colormap, basestring):
-        colormap = plt.get_cmap(colormap)
-
-    # Make figure background the same colors as axes
-    fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
-
-    # Use a polar axes
-    axes = plt.subplot(111, polar=True, axisbg=facecolor)
-
-    # No ticks, we'll put our own
-    plt.xticks([])
-    plt.yticks([])
-
-    # Set y axes limit
-    plt.ylim(0, 10)
-
-    # Draw lines between connected nodes, only draw the strongest connections
-    if n_lines is not None and len(con) > n_lines:
-        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
-    else:
-        con_thresh = 0.
-
-    # get the connections which we are drawing and sort by connection strength
-    # this will allow us to draw the strongest connections first
-    con_abs = np.abs(con)
-    con_draw_idx = np.where(con_abs >= con_thresh)[0]
-
-    con = con[con_draw_idx]
-    con_abs = con_abs[con_draw_idx]
-    indices = [ind[con_draw_idx] for ind in indices]
-
-    # now sort them
-    sort_idx = np.argsort(con_abs)
-    con_abs = con_abs[sort_idx]
-    con = con[sort_idx]
-    indices = [ind[sort_idx] for ind in indices]
-
-    # Get vmin vmax for color scaling
-    if vmin is None:
-        vmin = np.min(con[np.abs(con) >= con_thresh])
-    if vmax is None:
-        vmax = np.max(con)
-    vrange = vmax - vmin
-
-    # We want to add some "noise" to the start and end position of the
-    # edges: We modulate the noise with the number of connections of the
-    # node and the connection strength, such that the strongest connections
-    # are closer to the node center
-    nodes_n_con = np.zeros(n_nodes, dtype=np.int)
-    for i, j in zip(indices[0], indices[1]):
-        nodes_n_con[i] += 1
-        nodes_n_con[j] += 1
-
-    # initialize random number generator so plot is reproducible
-    rng = np.random.mtrand.RandomState(seed=0)
-
-    n_con = len(indices[0])
-    noise_max = 0.25 * node_width
-    start_noise = rng.uniform(-noise_max, noise_max, n_con)
-    end_noise = rng.uniform(-noise_max, noise_max, n_con)
-
-    nodes_n_con_seen = np.zeros_like(nodes_n_con)
-    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
-        nodes_n_con_seen[start] += 1
-        nodes_n_con_seen[end] += 1
-
-        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start])
-                           / float(nodes_n_con[start]))
-        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end])
-                         / float(nodes_n_con[end]))
-
-    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
-    con_val_scaled = (con - vmin) / vrange
-
-    # Finally, we draw the connections
-    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
-        # Start point
-        t0, r0 = node_angles[i], 10
-
-        # End point
-        t1, r1 = node_angles[j], 10
-
-        # Some noise in start and end point
-        t0 += start_noise[pos]
-        t1 += end_noise[pos]
-
-        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
-        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
-                 m_path.Path.LINETO]
-        path = m_path.Path(verts, codes)
-
-        color = colormap(con_val_scaled[pos])
-
-        # Actual line
-        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
-                                    linewidth=linewidth, alpha=1.)
-        axes.add_patch(patch)
-
-    # Draw ring with colored nodes
-    radii = np.ones(n_nodes) * 10
-    bars = axes.bar(node_angles, radii, width=node_width, bottom=9,
-                    edgecolor=node_edgecolor, lw=2, facecolor='.9',
-                    align='center')
-
-    for bar, color in zip(bars, node_colors):
-        bar.set_facecolor(color)
-
-    # Draw node labels
-    angles_deg = 180 * node_angles / np.pi
-    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
-        if angle_deg >= 270:
-            ha = 'left'
-        else:
-            # Flip the label, so text is always upright
-            angle_deg += 180
-            ha = 'right'
-
-        plt.text(angle_rad, 10.4, name, size=10, rotation=angle_deg,
-                 rotation_mode='anchor', horizontalalignment=ha,
-                 verticalalignment='center', color=textcolor)
-
-    if title is not None:
-        plt.subplots_adjust(left=0.2, bottom=0.2, right=0.8, top=0.75)
-        plt.figtext(0.03, 0.95, title, color=textcolor, fontsize=14)
-    else:
-        plt.subplots_adjust(left=0.2, bottom=0.2, right=0.8, top=0.8)
-
-    if colorbar:
-        norm = normalize_colors(vmin=vmin, vmax=vmax)
-        sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
-        sm.set_array(np.linspace(vmin, vmax))
-        ax = fig.add_axes([.92, 0.03, .015, .25])
-        cb = fig.colorbar(sm, cax=ax)
-        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
-        plt.setp(cb_yticks, color=textcolor)
-
-    return fig
-
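-# A hedged usage sketch, continuing the circular_layout example above with
-# a random square connectivity matrix (illustration only):
-#
-#     >>> import numpy as np
-#     >>> con = np.random.RandomState(0).rand(4, 4)
-#     >>> fig = plot_connectivity_circle(con, names, node_angles=angles,
-#     ...                                n_lines=3, title='demo')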
-
-def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
-                  color=(0.9, 0.9, 0.9), width=0.8):
-    """Show the channel stats based on a drop_log from Epochs
-
-    Parameters
-    ----------
-    drop_log : list of lists
-        Epoch drop log from Epochs.drop_log.
-    threshold : float
-        The percentage threshold to use to decide whether or not to
-        plot. Default is zero (always plot).
-    n_max_plot : int
-        Maximum number of channels to show stats for.
-    subject : str
-        The subject name to use in the title of the plot.
-    color : tuple | str
-        Color to use for the bars.
-    width : float
-        Width of the bars.
-
-    Returns
-    -------
-    perc : float
-        Total percentage of epochs dropped.
-    """
-    if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
-        raise ValueError('drop_log must be a list of lists')
-    import matplotlib.pyplot as plt
-    scores = Counter([ch for d in drop_log for ch in d])
-    ch_names = np.array(scores.keys())
-    perc = 100 * np.mean([len(d) > 0 for d in drop_log])
-    if perc < threshold or len(ch_names) == 0:
-        return perc
-    counts = 100 * np.array(scores.values(), dtype=float) / len(drop_log)
-    n_plot = min(n_max_plot, len(ch_names))
-    order = np.flipud(np.argsort(counts))
-    plt.figure()
-    plt.title('%s: %0.1f%%' % (subject, perc))
-    x = np.arange(n_plot)
-    plt.bar(x, counts[order[:n_plot]], color=color, width=width)
-    plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
-               horizontalalignment='right')
-    plt.tick_params(axis='x', which='major', labelsize=10)
-    plt.ylabel('% of epochs rejected')
-    plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
-    plt.grid(True, axis='y')
-    plt.show()
-    return perc
-
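-# A hedged usage sketch (assumes an Epochs instance `epochs` whose drop_log
-# has been populated; with threshold=10 the plot only appears when at least
-# 10% of epochs were dropped):
-#
-#     >>> perc = plot_drop_log(epochs.drop_log, threshold=10,
-#     ...                      subject='sample')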
-
-def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
-             bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
-             event_color='cyan', scalings=None, remove_dc=True, order='type',
-             show_options=False, title=None, show=True, block=False):
-    """Plot raw data
-
-    Parameters
-    ----------
-    raw : instance of Raw
-        The raw data to plot.
-    events : array | None
-        Events to show with vertical bars.
-    duration : float
-        Time window (in seconds) to plot at a time.
-    start : float
-        Initial time to show (can be changed dynamically once plotted).
-    n_channels : int
-        Number of channels to plot at once.
-    bgcolor : color object
-        Color of the background.
-    color : dict | color object | None
-        Color for the data traces. If None, defaults to:
-        `dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r', emg='k',
-             ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k')`
-    bad_color : color object
-        Color to make bad channels.
-    event_color : color object
-        Color to use for events.
-    scalings : dict | None
-        Scale factors for the traces. If None, defaults to:
-        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
-             ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
-    remove_dc : bool
-        If True remove DC component when plotting data.
-    order : 'type' | 'original' | array
-        Order in which to plot data. 'type' groups by channel type,
-        'original' plots in the order of ch_names, array gives the
-        indices to use in plotting.
-    show_options : bool
-        If True, a dialog for options related to projection is shown.
-    title : str | None
-        The title of the window. If None, either the filename of the raw
-        object or '<unknown>' will be displayed as the title.
-    show : bool
-        Show figure if True.
-    block : bool
-        Whether to halt program execution until the figure is closed.
-        Useful for setting bad channels on the fly by clicking on a line.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        Raw traces.
-
-    Notes
-    -----
-    The arrow keys (up/down/left/right) can typically be used to navigate
-    between channels and time ranges, but this depends on the backend
-    matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
-    To mark or un-mark a channel as bad, click on the rather flat segments
-    of a channel's time series. The changes will be reflected immediately
-    in the raw object's ``raw.info['bads']`` entry.
-    """
-    import matplotlib.pyplot as plt
-    import matplotlib as mpl
-    color, scalings = _mutable_defaults(('color', color),
-                                        ('scalings_plot_raw', scalings))
-
-    # make a copy of info, remove projection (for now)
-    info = copy.deepcopy(raw.info)
-    projs = info['projs']
-    info['projs'] = []
-    n_times = raw.n_times
-
-    # allow for raw objects without filename, e.g., ICA
-    if title is None:
-        title = raw.info.get('filenames', None)  # should return a list
-        if not title:  # empty list or absent key
-            title = '<unknown>'
-        else:
-            if len(title) > 1:
-                title = '<unknown>'
-            else:
-                title = title[0]
-    elif not isinstance(title, basestring):
-        raise TypeError('title must be None or a string')
-    if len(title) > 60:
-        title = '...' + title[-60:]
-    if len(raw.info['filenames']) > 1:
-        title += ' ... (+ %d more) ' % (len(raw.info['filenames']) - 1)
-    if events is not None:
-        events = events[:, 0].astype(float) - raw.first_samp
-        events /= info['sfreq']
-
-    # reorganize the data in plotting order
-    inds = list()
-    types = list()
-    for t in ['grad', 'mag']:
-        inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])]
-        types += [t] * len(inds[-1])
-    pick_args = dict(meg=False, exclude=[])
-    for t in ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp',
-              'misc', 'chpi', 'syst', 'ias', 'exci']:
-        pick_args[t] = True
-        inds += [pick_types(raw.info, **pick_args)]
-        types += [t] * len(inds[-1])
-        pick_args[t] = False
-    inds = np.concatenate(inds).astype(int)
-    if len(inds) != len(info['ch_names']):
-        raise RuntimeError('Some channels not classified, please report '
-                           'this problem')
-
-    # put them back to original or modified order for natural plotting
-    reord = np.argsort(inds)
-    types = [types[ri] for ri in reord]
-    if isinstance(order, str):
-        if order == 'original':
-            inds = inds[reord]
-        elif order != 'type':
-            raise ValueError('Unknown order type %s' % order)
-    elif isinstance(order, np.ndarray):
-        if not np.array_equal(np.sort(order),
-                              np.arange(len(info['ch_names']))):
-            raise ValueError('order, if array, must have integers from '
-                             '0 to n_channels - 1')
-        # put back to original order first, then use new order
-        inds = inds[reord][order]
-
-    # set up projection and data parameters
-    params = dict(raw=raw, ch_start=0, t_start=start, duration=duration,
-                  info=info, projs=projs, remove_dc=remove_dc,
-                  n_channels=n_channels, scalings=scalings, types=types,
-                  n_times=n_times, events=events)
-
-    # set up plotting
-    fig = figure_nobar(facecolor=bgcolor)
-    fig.canvas.set_window_title('mne_browse_raw')
-    size = get_config('MNE_BROWSE_RAW_SIZE')
-    if size is not None:
-        size = size.split(',')
-        size = tuple([float(s) for s in size])
-        # have to try/catch when there's no toolbar
-        try:
-            fig.set_size_inches(size, forward=True)
-        except Exception:
-            pass
-    ax = plt.subplot2grid((10, 10), (0, 0), colspan=9, rowspan=9)
-    ax.set_title(title, fontsize=12)
-    ax_hscroll = plt.subplot2grid((10, 10), (9, 0), colspan=9)
-    ax_hscroll.get_yaxis().set_visible(False)
-    ax_hscroll.set_xlabel('Time (s)')
-    ax_vscroll = plt.subplot2grid((10, 10), (0, 9), rowspan=9)
-    ax_vscroll.set_axis_off()
-    ax_button = plt.subplot2grid((10, 10), (9, 9))
-    # store these so they can be fixed on resize
-    params['fig'] = fig
-    params['ax'] = ax
-    params['ax_hscroll'] = ax_hscroll
-    params['ax_vscroll'] = ax_vscroll
-    params['ax_button'] = ax_button
-
-    # populate vertical and horizontal scrollbars
-    for ci in xrange(len(info['ch_names'])):
-        this_color = (bad_color if info['ch_names'][inds[ci]] in info['bads']
-                      else color)
-        if isinstance(this_color, dict):
-            this_color = this_color[types[inds[ci]]]
-        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
-                                                   facecolor=this_color,
-                                                   edgecolor=this_color))
-    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
-                                       facecolor='w', edgecolor='w')
-    ax_vscroll.add_patch(vsel_patch)
-    params['vsel_patch'] = vsel_patch
-    hsel_patch = mpl.patches.Rectangle((start, 0), duration, 1, color='k',
-                                       edgecolor=None, alpha=0.5)
-    ax_hscroll.add_patch(hsel_patch)
-    params['hsel_patch'] = hsel_patch
-    ax_hscroll.set_xlim(0, n_times / float(info['sfreq']))
-    n_ch = len(info['ch_names'])
-    ax_vscroll.set_ylim(n_ch, 0)
-    ax_vscroll.set_title('Ch.')
-
-    # make shells for plotting traces
-    offsets = np.arange(n_channels) * 2 + 1
-    ax.set_yticks(offsets)
-    ax.set_ylim([n_channels * 2 + 1, 0])
-    # plot event_line first so it's in the back
-    event_line = ax.plot([np.nan], color=event_color)[0]
-    lines = [ax.plot([np.nan])[0] for _ in xrange(n_ch)]
-    ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
-
-    params['plot_fun'] = partial(_plot_traces, params=params, inds=inds,
-                                 color=color, bad_color=bad_color, lines=lines,
-                                 event_line=event_line, offsets=offsets)
-
-    # set up callbacks
-    opt_button = mpl.widgets.Button(ax_button, 'Opt')
-    callback_option = partial(_toggle_options, params=params)
-    opt_button.on_clicked(callback_option)
-    callback_key = partial(_plot_raw_onkey, params=params)
-    fig.canvas.mpl_connect('key_press_event', callback_key)
-    callback_pick = partial(_mouse_click, params=params)
-    fig.canvas.mpl_connect('button_press_event', callback_pick)
-    callback_resize = partial(_helper_resize, params=params)
-    fig.canvas.mpl_connect('resize_event', callback_resize)
-
-    # As this code is shared with plot_evoked, a few extra steps are needed:
-    # first the actual plot update function
-    params['plot_update_proj_callback'] = _plot_update_raw_proj
-    # then the toggle handler
-    callback_proj = partial(_toggle_proj, params=params)
-    # store these for use by callbacks in the options figure
-    params['callback_proj'] = callback_proj
-    params['callback_key'] = callback_key
-    # have to store this, or it could get garbage-collected
-    params['opt_button'] = opt_button
-
-    # do initial plots
-    callback_proj('none')
-    _layout_raw(params)
-
-    # deal with projectors
-    params['fig_opts'] = None
-    if show_options is True:
-        _toggle_options(None, params)
-
-    if show:
-        plt.show(block=block)
-
-    return fig
-
-
-def _toggle_options(event, params):
-    """Toggle options (projectors) dialog"""
-    import matplotlib.pyplot as plt
-    if len(params['projs']) > 0:
-        if params['fig_opts'] is None:
-            _draw_proj_checkbox(event, params, draw_current_state=False)
-        else:
-            # turn off options dialog
-            plt.close(params['fig_opts'])
-            del params['proj_checks']
-            params['fig_opts'] = None
-
-
-def _toggle_proj(event, params):
-    """Operation to perform when proj boxes clicked"""
-    # read options if possible
-    if 'proj_checks' in params:
-        bools = [x[0].get_visible() for x in params['proj_checks'].lines]
-        for bi, (b, p) in enumerate(zip(bools, params['projs'])):
-            # see if they tried to deactivate an active one
-            if not b and p['active']:
-                bools[bi] = True
-    else:
-        bools = [True] * len(params['projs'])
-
-    compute_proj = False
-    if 'proj_bools' not in params:
-        compute_proj = True
-    elif not np.array_equal(bools, params['proj_bools']):
-        compute_proj = True
-
-    # if projectors changed, update plots
-    if compute_proj is True:
-        params['plot_update_proj_callback'](params, bools)
-
-
-def _plot_update_raw_proj(params, bools):
-    """Helper that only needs to be called when proj is changed"""
-    inds = np.where(bools)[0]
-    params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
-                               for ii in inds]
-    params['proj_bools'] = bools
-    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
-                                        verbose=False)
-    _update_raw_data(params)
-    params['plot_fun']()
-
-
-def _update_raw_data(params):
-    """Helper that only needs to be called when time or proj is changed"""
-    start = params['t_start']
-    stop = params['raw'].time_as_index(start + params['duration'])[0]
-    start = params['raw'].time_as_index(start)[0]
-    data, times = params['raw'][:, start:stop]
-    if params['projector'] is not None:
-        data = np.dot(params['projector'], data)
-    # remove DC
-    if params['remove_dc'] is True:
-        data -= np.mean(data, axis=1)[:, np.newaxis]
-    # scale
-    for di in xrange(data.shape[0]):
-        data[di] /= params['scalings'][params['types'][di]]
-        # stim channels should be hard limited
-        if params['types'][di] == 'stim':
-            data[di] = np.minimum(data[di], 1.0)
-    params['data'] = data
-    params['times'] = times
-
-
-def _layout_raw(params):
-    """Set raw figure layout"""
-    s = params['fig'].get_size_inches()
-    scroll_width = 0.33
-    hscroll_dist = 0.33
-    vscroll_dist = 0.1
-    l_border = 1.2
-    r_border = 0.1
-    t_border = 0.33
-    b_border = 0.5
-
-    # only bother trying to reset layout if it's reasonable to do so
-    if s[0] < 2 * scroll_width or s[1] < 2 * scroll_width + hscroll_dist:
-        return
-
-    # convert to relative units
-    scroll_width_x = scroll_width / s[0]
-    scroll_width_y = scroll_width / s[1]
-    vscroll_dist /= s[0]
-    hscroll_dist /= s[1]
-    l_border /= s[0]
-    r_border /= s[0]
-    t_border /= s[1]
-    b_border /= s[1]
-    # main axis (traces)
-    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
-    ax_y = hscroll_dist + scroll_width_y + b_border
-    ax_height = 1.0 - ax_y - t_border
-    params['ax'].set_position([l_border, ax_y, ax_width, ax_height])
-    # vscroll (channels)
-    pos = [ax_width + l_border + vscroll_dist, ax_y,
-           scroll_width_x, ax_height]
-    params['ax_vscroll'].set_position(pos)
-    # hscroll (time)
-    pos = [l_border, b_border, ax_width, scroll_width_y]
-    params['ax_hscroll'].set_position(pos)
-    # options button
-    pos = [l_border + ax_width + vscroll_dist, b_border,
-           scroll_width_x, scroll_width_y]
-    params['ax_button'].set_position(pos)
-    params['fig'].canvas.draw()
-
-
-def _helper_resize(event, params):
-    """Helper for resizing"""
-    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
-    set_config('MNE_BROWSE_RAW_SIZE', size)
-    _layout_raw(params)
-
-
-def _pick_bad_channels(event, params):
-    """Helper for selecting / dropping bad channels onpick"""
-    bads = params['raw'].info['bads']
-    # trade-off: avoid selecting more than one channel when drifts are
-    # present; however, for clean data click on flat segments, not peaks
-    f = lambda x, y: y(np.mean(x), x.std() * 2)
-    for l in event.inaxes.lines:
-        ydata = l.get_ydata()
-        if not isinstance(ydata, list) and not np.isnan(ydata).any():
-            ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
-            if ymin <= event.ydata <= ymax:
-                this_chan = vars(l)['ch_name']
-                if this_chan in params['raw'].ch_names:
-                    if this_chan not in bads:
-                        bads.append(this_chan)
-                        l.set_color(params['bad_color'])
-                    else:
-                        bads.pop(bads.index(this_chan))
-                        l.set_color(vars(l)['def-color'])
-                event.canvas.draw()
-                break
-    # update deep-copied info to persistently draw bads
-    params['info']['bads'] = bads
-
-
-def _mouse_click(event, params):
-    """Vertical select callback"""
-    if event.inaxes is None or event.button != 1:
-        return
-    plot_fun = params['plot_fun']
-    # vertical scrollbar changed
-    if event.inaxes == params['ax_vscroll']:
-        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
-        if params['ch_start'] != ch_start:
-            params['ch_start'] = ch_start
-            plot_fun()
-    # horizontal scrollbar changed
-    elif event.inaxes == params['ax_hscroll']:
-        _plot_raw_time(event.xdata - params['duration'] / 2, params)
-
-    elif event.inaxes == params['ax']:
-        _pick_bad_channels(event, params)
-
-
-def _plot_raw_time(value, params):
-    """Deal with changed time value"""
-    info = params['info']
-    max_times = params['n_times'] / float(info['sfreq']) - params['duration']
-    if value > max_times:
-        value = params['n_times'] / info['sfreq'] - params['duration']
-    if value < 0:
-        value = 0
-    if params['t_start'] != value:
-        params['t_start'] = value
-        params['hsel_patch'].set_x(value)
-        _update_raw_data(params)
-        params['plot_fun']()
-
-
-def _plot_raw_onkey(event, params):
-    """Interpret key presses"""
-    import matplotlib.pyplot as plt
-    # check for initial plot
-    plot_fun = params['plot_fun']
-    if event is None:
-        plot_fun()
-        return
-
-    # quit event
-    if event.key == 'escape':
-        plt.close(params['fig'])
-        return
-
-    # change plotting params
-    ch_changed = False
-    if event.key == 'down':
-        params['ch_start'] += params['n_channels']
-        ch_changed = True
-    elif event.key == 'up':
-        params['ch_start'] -= params['n_channels']
-        ch_changed = True
-    elif event.key == 'right':
-        _plot_raw_time(params['t_start'] + params['duration'], params)
-        return
-    elif event.key == 'left':
-        _plot_raw_time(params['t_start'] - params['duration'], params)
-        return
-    elif event.key in ['o', 'p']:
-        _toggle_options(None, params)
-        return
-
-    # deal with plotting changes
-    if ch_changed is True:
-        if params['ch_start'] >= len(params['info']['ch_names']):
-            params['ch_start'] = 0
-        elif params['ch_start'] < 0:
-            # wrap to end
-            rem = len(params['info']['ch_names']) % params['n_channels']
-            params['ch_start'] = len(params['info']['ch_names'])
-            params['ch_start'] -= rem if rem != 0 else params['n_channels']
-
-    if ch_changed:
-        plot_fun()
-
-
-def _plot_traces(params, inds, color, bad_color, lines, event_line, offsets):
-    """Helper for plotting raw"""
-
-    info = params['info']
-    n_channels = params['n_channels']
-    params['bad_color'] = bad_color
-    # do the plotting
-    tick_list = []
-    for ii in xrange(n_channels):
-        ch_ind = ii + params['ch_start']
-        # let's be generous here and allow users to pass
-        # n_channels per view >= the number of traces available
-        if ii >= len(lines):
-            break
-        elif ch_ind < len(info['ch_names']):
-            # scale to fit
-            ch_name = info['ch_names'][inds[ch_ind]]
-            tick_list += [ch_name]
-            offset = offsets[ii]
-
-            # do NOT operate in-place lest this get screwed up
-            this_data = params['data'][inds[ch_ind]]
-            this_color = bad_color if ch_name in info['bads'] else color
-            if isinstance(this_color, dict):
-                this_color = this_color[params['types'][inds[ch_ind]]]
-
-            # subtraction here gets correct orientation for flipped ylim
-            lines[ii].set_ydata(offset - this_data)
-            lines[ii].set_xdata(params['times'])
-            lines[ii].set_color(this_color)
-            vars(lines[ii])['ch_name'] = ch_name
-            vars(lines[ii])['def-color'] = color[params['types'][inds[ch_ind]]]
-        else:
-            # "remove" lines
-            lines[ii].set_xdata([])
-            lines[ii].set_ydata([])
-    # deal with event lines
-    if params['events'] is not None:
-        t = params['events']
-        t = t[np.where(np.logical_and(t >= params['times'][0],
-                       t <= params['times'][-1]))[0]]
-        if len(t) > 0:
-            xs = list()
-            ys = list()
-            for tt in t:
-                xs += [tt, tt, np.nan]
-                ys += [0, 2 * n_channels + 1, np.nan]
-            event_line.set_xdata(xs)
-            event_line.set_ydata(ys)
-        else:
-            event_line.set_xdata([])
-            event_line.set_ydata([])
-    # finalize plot
-    params['ax'].set_xlim(params['times'][0],
-                          params['times'][0] + params['duration'], False)
-    params['ax'].set_yticklabels(tick_list)
-    params['vsel_patch'].set_y(params['ch_start'])
-    params['fig'].canvas.draw()
-
-
-def figure_nobar(*args, **kwargs):
-    """Make matplotlib figure with no toolbar"""
-    import matplotlib.pyplot as plt
-    import matplotlib as mpl
-    old_val = mpl.rcParams['toolbar']
-    try:
-        mpl.rcParams['toolbar'] = 'none'
-        fig = plt.figure(*args, **kwargs)
-        # remove button press catchers (for toolbar)
-        for key in fig.canvas.callbacks.callbacks['key_press_event'].keys():
-            fig.canvas.callbacks.disconnect(key)
-    finally:
-        mpl.rcParams['toolbar'] = old_val
-    return fig
-
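With the toolbar's key-press handlers disconnected, only explicitly connected
callbacks receive keyboard events. A minimal sketch (figsize is an ordinary
pyplot keyword; the callback name is hypothetical):

    fig = figure_nobar(figsize=(3, 1.5))  # no toolbar, no default key bindings
    # only callbacks connected explicitly will see key presses, e.g.:
    # fig.canvas.mpl_connect('key_press_event', my_callback)  # hypothetical
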
-
- at verbose
-def plot_raw_psds(raw, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
-                  proj=False, n_fft=2048, picks=None, ax=None, color='black',
-                  area_mode='std', area_alpha=0.33, n_jobs=1, verbose=None):
-    """Plot the power spectral density across channels
-
-    Parameters
-    ----------
-    raw : instance of fiff.Raw
-        The raw instance to use.
-    tmin : float
-        Start time for calculations.
-    tmax : float
-        End time for calculations.
-    fmin : float
-        Start frequency to consider.
-    fmax : float
-        End frequency to consider.
-    proj : bool
-        Apply projection.
-    n_fft : int
-        Number of points to use in Welch FFT calculations.
-    picks : list | None
-        List of channels to use. Cannot be None if `ax` is supplied. If both
-        `picks` and `ax` are None, separate subplots will be created for
-        each standard channel type (`mag`, `grad`, and `eeg`).
-    ax : instance of matplotlib Axes | None
-        Axes to plot into. If None, axes will be created.
-    color : str | tuple
-        A matplotlib-compatible color to use.
-    area_mode : str | None
-        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
-        will be plotted. If 'range', the min and max (across channels) will be
-        plotted. Bad channels will be excluded from these calculations.
-        If None, no area will be plotted.
-    area_alpha : float
-        Alpha for the area.
-    n_jobs : int
-        Number of jobs to run in parallel.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-    """
-    import matplotlib.pyplot as plt
-    if area_mode not in [None, 'std', 'range']:
-        raise ValueError('"area_mode" must be "std", "range", or None')
-    if picks is None:
-        if ax is not None:
-            raise ValueError('If "ax" is supplied, then "picks" must also '
-                             'be supplied')
-        megs = ['mag', 'grad', False]
-        eegs = [False, False, True]
-        names = ['Magnetometers', 'Gradiometers', 'EEG']
-        picks_list = list()
-        titles_list = list()
-        for meg, eeg, name in zip(megs, eegs, names):
-            picks = pick_types(raw.info, meg=meg, eeg=eeg, ref_meg=False)
-            if len(picks) > 0:
-                picks_list.append(picks)
-                titles_list.append(name)
-        if len(picks_list) == 0:
-            raise RuntimeError('No MEG or EEG channels found')
-    else:
-        picks_list = [picks]
-        titles_list = ['Selected channels']
-        ax_list = [ax]
-
-    make_label = False
-    if ax is None:
-        plt.figure()
-        ax_list = list()
-        for ii in range(len(picks_list)):
-            # Make x-axes change together
-            if ii > 0:
-                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1,
-                                           sharex=ax_list[0]))
-            else:
-                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
-        make_label = True
-
-    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
-                                                ax_list)):
-        psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
-                                      fmin=fmin, fmax=fmax, NFFT=n_fft,
-                                      n_jobs=n_jobs, plot=False, proj=proj)
-
-        # Convert PSDs to dB
-        psds = 10 * np.log10(psds)
-        psd_mean = np.mean(psds, axis=0)
-        if area_mode == 'std':
-            psd_std = np.std(psds, axis=0)
-            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
-        elif area_mode == 'range':
-            hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
-        else:  # area_mode is None
-            hyp_limits = None
-
-        ax.plot(freqs, psd_mean, color=color)
-        if hyp_limits is not None:
-            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
-                            color=color, alpha=area_alpha)
-        if make_label:
-            if ii == len(picks_list) - 1:
-                ax.set_xlabel('Freq (Hz)')
-            if ii == len(picks_list) / 2:
-                ax.set_ylabel('Power Spectral Density (dB/Hz)')
-            ax.set_title(title)
-            ax.set_xlim(freqs[0], freqs[-1])
-    if make_label:
-        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1)
-    plt.show()
-
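For context, a minimal usage sketch of plot_raw_psds; the file name below is
hypothetical, and any Raw instance works:

    import mne
    from mne.viz import plot_raw_psds

    raw = mne.io.Raw('sample_raw.fif')  # hypothetical recording
    # one subplot per channel type, min/max range shaded across channels
    plot_raw_psds(raw, tmin=0., tmax=60., fmin=1., fmax=80., area_mode='range')
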
-
- at verbose
-def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
-                 read_limit=np.inf, max_str=30, verbose=None):
-    """Compare the contents of two fiff files using diff and show_fiff
-
-    Parameters
-    ----------
-    fname_1 : str
-        First file to compare.
-    fname_2 : str
-        Second file to compare.
-    fname_out : str | None
-        Filename to store the resulting diff. If None, a temporary
-        file will be created.
-    show : bool
-        If True, show the resulting diff in a new tab in a web browser.
-    indent : str
-        How to indent the lines.
-    read_limit : int
-        Max number of bytes of data to read from a tag. Can be np.inf
-        to always read all data (helps test read completion).
-    max_str : int
-        Max number of characters of string representation to print for
-        each tag's data.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    Returns
-    -------
-    fname_out : str
-        The filename used for storing the diff. Could be useful for
-        when a temporary file is used.
-    """
-    file_1 = show_fiff(fname_1, output=list, indent=indent,
-                       read_limit=read_limit, max_str=max_str)
-    file_2 = show_fiff(fname_2, output=list, indent=indent,
-                       read_limit=read_limit, max_str=max_str)
-    diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
-    if fname_out is not None:
-        f = open(fname_out, 'w')
-    else:
-        f = tempfile.NamedTemporaryFile('w', delete=False)
-        fname_out = f.name
-    with f as fid:
-        fid.write(diff)
-    if show is True:
-        webbrowser.open_new_tab(fname_out)
-    return fname_out
-
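A short sketch of calling compare_fiff (file names are hypothetical):

    from mne.viz import compare_fiff

    # writes an HTML diff of the two files and returns its path
    out_fname = compare_fiff('run1_raw.fif', 'run2_raw.fif', show=False)
    print(out_fname)
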
-
-def _prepare_trellis(n_cells, max_col):
-    """Aux function
-    """
-    import matplotlib.pyplot as plt
-    if n_cells == 1:
-        nrow = ncol = 1
-    elif n_cells <= max_col:
-        nrow, ncol = 1, n_cells
-    else:
-        nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
-
-    fig, axes = plt.subplots(nrow, ncol)
-    axes = [axes] if ncol == nrow == 1 else axes.flatten()
-    for ax in axes[n_cells:]:  # hide unused axes
-        ax.set_visible(False)
-    return fig, axes
-
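The grid arithmetic above can be sanity-checked with a tiny sketch: seven
cells with max_col=5 give a 2 x 5 grid whose last three axes are hidden.

    fig, axes = _prepare_trellis(7, max_col=5)
    assert sum(ax.get_visible() for ax in axes) == 7
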
-
-def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
-                      title_str, axes_handler):
-    """Aux function"""
-    this = axes_handler[0]
-    for ii, data_, ax in zip(epoch_idx, data, axes):
-        [l.set_data(times, d) for l, d in zip(ax.lines, data_[good_ch_idx])]
-        if bad_ch_idx is not None:
-            bad_lines = [ax.lines[k] for k in bad_ch_idx]
-            [l.set_data(times, d) for l, d in zip(bad_lines,
-                                                  data_[bad_ch_idx])]
-        if title_str is not None:
-            ax.set_title(title_str % ii, fontsize=12)
-        ax.set_ylim(data.min(), data.max())
-        ax.set_yticks([])
-        ax.set_xticks([])
-        if vars(ax)[this]['reject'] is True:
-            #  memorizing reject
-            [l.set_color((0.8, 0.8, 0.8)) for l in ax.lines]
-            ax.get_figure().canvas.draw()
-        else:
-            #  forgetting previous reject
-            for k in axes_handler:
-                if k == this:
-                    continue
-                if vars(ax).get(k, {}).get('reject', None) is True:
-                    [l.set_color('k') for l in ax.lines[:len(good_ch_idx)]]
-                    if bad_ch_idx is not None:
-                        [l.set_color('r') for l in ax.lines[-len(bad_ch_idx):]]
-                    ax.get_figure().canvas.draw()
-                    break
-
-
-def _epochs_navigation_onclick(event, params):
-    """Aux function"""
-    import matplotlib.pyplot as plt
-    p = params
-    here = None
-    if event.inaxes == p['back'].ax:
-        here = 1
-    elif event.inaxes == p['next'].ax:
-        here = -1
-    elif event.inaxes == p['reject-quit'].ax:
-        if p['reject_idx']:
-            p['epochs'].drop_epochs(p['reject_idx'])
-        plt.close(p['fig'])
-        plt.close(event.inaxes.get_figure())
-
-    if here is not None:
-        p['idx_handler'].rotate(here)
-        p['axes_handler'].rotate(here)
-        this_idx = p['idx_handler'][0]
-        _draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
-                          p['data'][this_idx],
-                          p['times'], p['axes'], p['title_str'],
-                          p['axes_handler'])
-        # XXX don't ask me why
-        p['axes'][0].get_figure().canvas.draw()
-
-
-def _epochs_axes_onclick(event, params):
-    """Aux function"""
-    reject_color = (0.8, 0.8, 0.8)
-    ax = event.inaxes
-    if event.inaxes is None:
-        return
-    p = params
-    here = vars(ax)[p['axes_handler'][0]]
-    if here.get('reject', None) is False:
-        idx = here['idx']
-        if idx not in p['reject_idx']:
-            p['reject_idx'].append(idx)
-            [l.set_color(reject_color) for l in ax.lines]
-            here['reject'] = True
-    elif here.get('reject', None) is True:
-        idx = here['idx']
-        if idx in p['reject_idx']:
-            p['reject_idx'].pop(p['reject_idx'].index(idx))
-            good_lines = [ax.lines[k] for k in p['good_ch_idx']]
-            [l.set_color('k') for l in good_lines]
-            if p['bad_ch_idx'] is not None:
-                bad_lines = ax.lines[-len(p['bad_ch_idx']):]
-                [l.set_color('r') for l in bad_lines]
-            here['reject'] = False
-    ax.get_figure().canvas.draw()
-
-
-def plot_epochs(epochs, epoch_idx=None, picks=None, scalings=None,
-                title_str='#%003i', show=True, block=False):
-    """Visualize single trials using a Trellis plot.
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs object
-    epoch_idx : array-like | int | None
-        The epochs to visualize. If None, the first 20 epochs are shown.
-        Defaults to None.
-    picks : array-like | None
-        Channels to be included. If None, only good data channels are used.
-        Defaults to None.
-    scalings : dict | None
-        Scale factors for the traces. If None, defaults to:
-        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
-             ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
-    title_str : None | str
-        The string formatting to use for axes titles. If None, no titles
-        will be shown. Defaults expand to ``#001, #002, ...``
-    show : bool
-        Whether to show the figure or not.
-    block : bool
-        Whether to halt program execution until the figure is closed.
-        Useful for rejecting bad trials on the fly by clicking on a
-        sub plot.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        The figure.
-    """
-    import matplotlib.pyplot as plt
-    import matplotlib as mpl
-    scalings = _mutable_defaults(('scalings_plot_raw', scalings))[0]
-    if np.isscalar(epoch_idx):
-        epoch_idx = [epoch_idx]
-    if epoch_idx is None:
-        n_events = len(epochs.events)
-        epoch_idx = range(n_events)
-    else:
-        n_events = len(epoch_idx)
-    epoch_idx = epoch_idx[:n_events]
-    idx_handler = deque(create_chunks(epoch_idx, 20))
-
-    if picks is None:
-        if any('ICA' in k for k in epochs.ch_names):
-            picks = pick_types(epochs.info, misc=True, ref_meg=False,
-                               exclude=[])
-        else:
-            picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
-                               exclude=[])
-    if len(picks) < 1:
-        raise RuntimeError('No appropriate channels found. Please'
-                           ' check your picks')
-    times = epochs.times * 1e3
-    n_channels = epochs.info['nchan']
-    types = [channel_type(epochs.info, idx) for idx in
-             picks]
-
-    # preallocation needed for min / max scaling
-    data = np.zeros((len(epochs.events), n_channels, len(times)))
-    for ii, epoch in enumerate(epochs.get_data()):
-        for jj, (this_type, this_channel) in enumerate(zip(types, epoch)):
-            data[ii, jj] = this_channel / scalings[this_type]
-
-    # handle bads
-    bad_ch_idx = None
-    ch_names = epochs.ch_names
-    bads = epochs.info['bads']
-    if any([ch_names[k] in bads for k in picks]):
-        ch_picked = [k for k in ch_names if ch_names.index(k) in picks]
-        bad_ch_idx = [ch_picked.index(k) for k in bads if k in ch_names]
-        good_ch_idx = [p for p in picks if p not in bad_ch_idx]
-    else:
-        good_ch_idx = np.arange(n_channels)
-
-    fig, axes = _prepare_trellis(len(data[idx_handler[0]]), max_col=5)
-    axes_handler = deque(range(len(idx_handler)))
-    for ii, data_, ax in zip(idx_handler[0], data[idx_handler[0]], axes):
-        ax.plot(times, data_[good_ch_idx].T, color='k')
-        if bad_ch_idx is not None:
-            ax.plot(times, data_[bad_ch_idx].T, color='r')
-        if title_str is not None:
-            ax.set_title(title_str % ii, fontsize=12)
-        ax.set_ylim(data.min(), data.max())
-        ax.set_yticks([])
-        ax.set_xticks([])
-        vars(ax)[axes_handler[0]] = {'idx': ii, 'reject': False}
-
-    # initialize memory
-    for this_view, this_inds in zip(axes_handler, idx_handler):
-        for ii, ax in zip(this_inds, axes):
-            vars(ax)[this_view] = {'idx': ii, 'reject': False}
-
-    tight_layout()
-    navigation = figure_nobar(figsize=(3, 1.5))
-    from matplotlib import gridspec
-    gs = gridspec.GridSpec(2, 2)
-    ax1 = plt.subplot(gs[0, 0])
-    ax2 = plt.subplot(gs[0, 1])
-    ax3 = plt.subplot(gs[1, :])
-
-    params = {
-        'fig': fig,
-        'idx_handler': idx_handler,
-        'epochs': epochs,
-        'picks': picks,
-        'times': times,
-        'scalings': scalings,
-        'good_ch_idx': good_ch_idx,
-        'bad_ch_idx': bad_ch_idx,
-        'axes': axes,
-        'back': mpl.widgets.Button(ax1, 'back'),
-        'next': mpl.widgets.Button(ax2, 'next'),
-        'reject-quit': mpl.widgets.Button(ax3, 'reject-quit'),
-        'title_str': title_str,
-        'reject_idx': [],
-        'axes_handler': axes_handler,
-        'data': data
-    }
-    fig.canvas.mpl_connect('button_press_event',
-                           partial(_epochs_axes_onclick, params=params))
-    navigation.canvas.mpl_connect('button_press_event',
-                                  partial(_epochs_navigation_onclick,
-                                          params=params))
-    if show is True:
-        plt.show(block=block)
-    return fig
-
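A hedged sketch of interactive rejection with plot_epochs; the raw and events
objects are assumed to exist already (e.g. from mne.find_events):

    import mne

    epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5)
    # block=True keeps the script alive; clicking a subplot toggles rejection,
    # and the reject-quit button drops the marked epochs before returning
    mne.viz.plot_epochs(epochs, epoch_idx=range(20), block=True)
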
-
-def plot_source_spectrogram(stcs, freq_bins, source_index=None, colorbar=False,
-                            show=True):
-    """Plot source power in a time-frequency grid
-
-    Parameters
-    ----------
-    stcs : list of SourceEstimate
-        Source power for consecutive time windows, one SourceEstimate object
-        should be provided for each frequency bin.
-    freq_bins : list of tuples of float
-        Start and end points of frequency bins of interest.
-    source_index : int | None
-        Index of source for which the spectrogram will be plotted. If None,
-        the source with the largest activation will be selected.
-    colorbar : bool
-        If true, a colorbar will be added to the plot.
-    show : bool
-        Show figure if True.
-    """
-    import matplotlib.pyplot as plt
-
-    # Gathering results for each time window
-    if len(stcs) == 0:
-        raise ValueError('cannot plot spectrogram if len(stcs) == 0')
-    source_power = np.array([stc.data for stc in stcs])
-
-    # Finding the source with maximum source power
-    if source_index is None:
-        source_index = np.unravel_index(source_power.argmax(),
-                                        source_power.shape)[1]
-
-    # Preparing time-frequency cell boundaries for plotting
-    stc = stcs[0]
-    time_bounds = np.append(stc.times, stc.times[-1] + stc.tstep)
-    freq_bounds = sorted(set(np.ravel(freq_bins)))
-    freq_ticks = deepcopy(freq_bounds)
-
-    # If there is a gap in the frequency bins, record its location so that
-    # it can be covered with a gray horizontal bar
-    gap_bounds = []
-    for i in range(len(freq_bins) - 1):
-        lower_bound = freq_bins[i][1]
-        upper_bound = freq_bins[i + 1][0]
-        if lower_bound != upper_bound:
-            freq_bounds.remove(lower_bound)
-            gap_bounds.append((lower_bound, upper_bound))
-
-    # Preparing time-frequency grid for plotting
-    time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
-
-    # Plotting the results
-    plt.figure(figsize=(9, 6))
-    plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
-               cmap=plt.cm.jet)
-    ax = plt.gca()
-
-    plt.title('Time-frequency source power')
-    plt.xlabel('Time (s)')
-    plt.ylabel('Frequency (Hz)')
-
-    time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
-    n_skip = 1 + len(time_bounds) // 10
-    for i in range(len(time_bounds)):
-        if i % n_skip != 0:
-            time_tick_labels[i] = ''
-
-    ax.set_xticks(time_bounds)
-    ax.set_xticklabels(time_tick_labels)
-    plt.xlim(time_bounds[0], time_bounds[-1])
-    plt.yscale('log')
-    ax.set_yticks(freq_ticks)
-    ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
-    plt.ylim(freq_bounds[0], freq_bounds[-1])
-
-    plt.grid(True, ls='-')
-    if colorbar:
-        plt.colorbar()
-    tight_layout()
-
-    # Covering frequency gaps with horizontal bars
-    for lower_bound, upper_bound in gap_bounds:
-        plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
-                 lower_bound, time_bounds[0], color='#666666')
-
-    if show:
-        plt.show()
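A sketch of the expected inputs, assuming one SourceEstimate per frequency bin
(e.g. from a time-frequency beamformer; all names are hypothetical):

    freq_bins = [(4., 12.), (30., 55.)]  # note the 12-30 Hz gap
    # stcs[i] holds source power over time for freq_bins[i]
    plot_source_spectrogram(stcs, freq_bins, colorbar=True)
    # the 12-30 Hz gap is covered by a gray horizontal bar
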
diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py
new file mode 100644
index 0000000..6e06884
--- /dev/null
+++ b/mne/viz/_3d.py
@@ -0,0 +1,651 @@
+"""Functions to make 3D plots with M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+from ..externals.six import string_types, advance_iterator
+
+from distutils.version import LooseVersion
+
+import os
+import inspect
+import warnings
+from itertools import cycle
+
+import numpy as np
+from scipy import linalg
+
+from ..io.pick import pick_types
+from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
+from ..transforms import read_trans, _find_trans, apply_trans
+from ..utils import get_subjects_dir, logger, _check_subject
+from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
+
+
+def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
+                      n_jobs=1):
+    """Plot MEG/EEG fields on head surface and helmet in 3D
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The evoked object.
+    surf_maps : list
+        The surface mapping information obtained with make_field_map.
+    time : float | None
+        The time point at which the field map shall be displayed. If None,
+        the average peak latency (across sensor types) is used.
+    time_label : str
+        How to print info about the time instant visualized.
+    n_jobs : int
+        Number of jobs to run in parallel.
+
+    Returns
+    -------
+    fig : instance of mlab.Figure
+        The mayavi figure.
+    """
+    types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
+
+    time_idx = None
+    if time is None:
+        time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
+
+    if not evoked.times[0] <= time <= evoked.times[-1]:
+        raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
+    time_idx = np.argmin(np.abs(evoked.times - time))
+
+    types = [sm['kind'] for sm in surf_maps]
+
+    # Plot them
+    from mayavi import mlab
+    alphas = [1.0, 0.5]
+    colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
+    colormap = mne_analyze_colormap(format='mayavi')
+    colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
+                                     np.tile([0., 0., 0., 255.], (2, 1)),
+                                     np.tile([255., 0., 0., 255.], (127, 1))])
+
+    fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
+
+    for ii, this_map in enumerate(surf_maps):
+        surf = this_map['surf']
+        map_data = this_map['data']
+        map_type = this_map['kind']
+        map_ch_names = this_map['ch_names']
+
+        if map_type == 'eeg':
+            pick = pick_types(evoked.info, meg=False, eeg=True)
+        else:
+            pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
+
+        ch_names = [evoked.ch_names[k] for k in pick]
+
+        set_ch_names = set(ch_names)
+        set_map_ch_names = set(map_ch_names)
+        if set_ch_names != set_map_ch_names:
+            message = ['Channels in map and data do not match.']
+            diff = set_map_ch_names - set_ch_names
+            if len(diff):
+                message += ['%s not in data file. ' % list(diff)]
+            diff = set_ch_names - set_map_ch_names
+            if len(diff):
+                message += ['%s not in map file.' % list(diff)]
+            raise RuntimeError(' '.join(message))
+
+        data = np.dot(map_data, evoked.data[pick, time_idx])
+
+        x, y, z = surf['rr'].T
+        nn = surf['nn']
+        # make absolutely sure these are normalized for Mayavi
+        nn = nn / np.sqrt(np.sum(nn * nn, axis=1))[:, np.newaxis]
+
+        # Make a solid surface
+        vlim = np.max(np.abs(data))
+        alpha = alphas[ii]
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
+
+        # Now show our field pattern
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
+                                                        scalars=data)
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        with warnings.catch_warnings(record=True):  # traits
+            fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
+        fsurf.module_manager.scalar_lut_manager.lut.table = colormap
+
+        # And the field lines on top
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
+                                                        scalars=data)
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        with warnings.catch_warnings(record=True):  # traits
+            cont = mlab.pipeline.contour_surface(mesh, contours=21,
+                                                 line_width=1.0,
+                                                 vmin=-vlim, vmax=vlim,
+                                                 opacity=alpha)
+        cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
+
+    if '%' in time_label:
+        time_label %= (1e3 * evoked.times[time_idx])
+    mlab.text(0.01, 0.01, time_label, width=0.4)
+    mlab.view(10, 60)
+    return fig
+
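A minimal sketch of the intended call sequence; the evoked data, the
-trans.fif path, and the make_field_map keyword names are assumptions:

    import mne

    maps = mne.make_field_map(evoked, trans_fname='sample-trans.fif',
                              subject='sample', subjects_dir=subjects_dir)
    fig = plot_evoked_field(evoked, maps, time=0.09)  # field map near 90 ms
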
+
+def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
+                       slices=None, show=True):
+    """Plot BEM contours on anatomical slices.
+
+    Parameters
+    ----------
+    mri_fname : str
+        The name of the file containing anatomical data.
+    surf_fnames : list of str
+        The filenames for the BEM surfaces in the format
+        ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
+    orientation : str
+        'coronal' or 'axial' or 'sagittal'
+    slices : list of int
+        Slice indices.
+    show : bool
+        Call pyplot.show() at the end.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        The figure.
+    """
+    import matplotlib.pyplot as plt
+    import nibabel as nib
+
+    if orientation not in ['coronal', 'axial', 'sagittal']:
+        raise ValueError("Orientation must be 'coronal', 'axial' or "
+                         "'sagittal'. Got %s." % orientation)
+
+    # Load the T1 data
+    nim = nib.load(mri_fname)
+    data = nim.get_data()
+    affine = nim.get_affine()
+
+    n_sag, n_axi, n_cor = data.shape
+    orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
+    orientation_axis = orientation_name2axis[orientation]
+
+    if slices is None:
+        n_slices = data.shape[orientation_axis]
+        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
+
+    # create of list of surfaces
+    surfs = list()
+
+    trans = linalg.inv(affine)
+    # XXX : next line is a hack, don't ask why
+    trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
+
+    for surf_fname in surf_fnames:
+        surf = dict()
+        surf['rr'], surf['tris'] = read_surface(surf_fname)
+        # move back surface to MRI coordinate system
+        surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
+        surfs.append(surf)
+
+    fig, axs = _prepare_trellis(len(slices), 4)
+
+    for ax, sl in zip(axs, slices):
+
+        # adjust the orientations for good view
+        if orientation == 'coronal':
+            dat = data[:, :, sl].transpose()
+        elif orientation == 'axial':
+            dat = data[:, sl, :]
+        elif orientation == 'sagittal':
+            dat = data[sl, :, :]
+
+        # First plot the anatomical data
+        ax.imshow(dat, cmap=plt.cm.gray)
+        ax.axis('off')
+
+        # and then plot the contours on top
+        for surf in surfs:
+            if orientation == 'coronal':
+                ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
+                              surf['tris'], surf['rr'][:, 2],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+            elif orientation == 'axial':
+                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
+                              surf['tris'], surf['rr'][:, 1],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+            elif orientation == 'sagittal':
+                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
+                              surf['tris'], surf['rr'][:, 0],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+
+    if show:
+        plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
+                            hspace=0.)
+        plt.show()
+
+    return fig
+
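This is a private helper (the public entry point, plot_bem, is re-exported in
__init__.py below); a direct call would look roughly like this, with
hypothetical FreeSurfer paths:

    import os.path as op

    bem_dir = op.join(subjects_dir, 'sample', 'bem')
    surfs = [op.join(bem_dir, name) for name in
             ('inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf')]
    fig = _plot_mri_contours(op.join(subjects_dir, 'sample', 'mri', 'T1.mgz'),
                             surfs, orientation='axial')
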
+
+def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
+               ch_type=None, source='bem'):
+    """Plot MEG/EEG head surface and helmet in 3D.
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    trans_fname : str | 'auto'
+        The full path to the `*-trans.fif` file produced during
+        coregistration.
+    subject : str | None
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT.
+    subjects_dir : str
+        The path to the freesurfer subjects reconstructions.
+        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
+    ch_type : None | 'eeg' | 'meg'
+        If None, both the MEG helmet and EEG electrodes will be shown.
+        If 'meg', only the MEG helmet will be shown. If 'eeg', only the
+        EEG electrodes will be shown.
+    source : str
+        Type to load. Common choices would be `'bem'` or `'head'`. We first
+        try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
+        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
+        to 'bem'. Note: for single-layer BEMs it is recommended to use 'head'.
+
+    Returns
+    -------
+    fig : instance of mlab.Figure
+        The mayavi figure.
+    """
+
+    if ch_type not in [None, 'eeg', 'meg']:
+        raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
+                         % ch_type)
+
+    if trans_fname == 'auto':
+        # let's try to do this in MRI coordinates so they're easy to plot
+        trans_fname = _find_trans(subject, subjects_dir)
+
+    trans = read_trans(trans_fname)
+
+    surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
+    if ch_type is None or ch_type == 'meg':
+        surfs.append(get_meg_helmet_surf(info, trans))
+
+    # Plot them
+    from mayavi import mlab
+    alphas = [1.0, 0.5]
+    colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
+
+    fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
+
+    for ii, surf in enumerate(surfs):
+
+        x, y, z = surf['rr'].T
+        nn = surf['nn']
+        # make absolutely sure these are normalized for Mayavi
+        nn = nn / np.sqrt(np.sum(nn * nn, axis=1))[:, np.newaxis]
+
+        # Make a solid surface
+        alpha = alphas[ii]
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
+
+    if ch_type is None or ch_type == 'eeg':
+        eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
+                    if l['eeg_loc'] is not None]
+
+        if len(eeg_locs) > 0:
+            eeg_loc = np.array(eeg_locs)
+
+            # Transform EEG electrodes to MRI coordinates
+            eeg_loc = apply_trans(trans['trans'], eeg_loc)
+
+            with warnings.catch_warnings(record=True):  # traits
+                mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
+                              color=(1.0, 0.0, 0.0), scale_factor=0.005)
+        else:
+            warnings.warn('EEG electrode locations not found. '
+                          'Cannot plot EEG electrodes.')
+
+    mlab.view(90, 90)
+    return fig
+
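A hedged usage sketch (file names hypothetical; 'head' is the recommended
source for single-layer BEMs, per the docstring above):

    import mne

    raw = mne.io.Raw('sample_raw.fif')  # hypothetical recording
    fig = plot_trans(raw.info, trans_fname='sample-trans.fif',
                     subject='sample', subjects_dir=subjects_dir,
                     source='head')
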
+
+def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
+                          colormap='hot', time_label='time=%0.2f ms',
+                          smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
+                          transparent=True, alpha=1.0, time_viewer=False,
+                          config_opts={}, subjects_dir=None, figure=None,
+                          views='lat', colorbar=True):
+    """Plot SourceEstimates with PySurfer
+
+    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
+    which will automatically be set by this function. Plotting multiple
+    SourceEstimates with different values for subjects_dir will cause
+    PySurfer to use the wrong FreeSurfer surfaces when using methods of
+    the returned Brain object. It is therefore recommended to set the
+    SUBJECTS_DIR environment variable or always use the same value for
+    subjects_dir (within the same Python session).
+
+    Parameters
+    ----------
+    stc : SourceEstimates
+        The source estimates to plot.
+    subject : str | None
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT. If None stc.subject will be used. If that
+        is None, the environment will be used.
+    surface : str
+        The type of surface (inflated, white etc.).
+    hemi : str, 'lh' | 'rh' | 'split' | 'both'
+        The hemisphere to display. Using 'both' or 'split' requires
+        PySurfer version 0.4 or above.
+    colormap : str
+        The type of colormap to use.
+    time_label : str
+        How to print info about the time instant visualized.
+    smoothing_steps : int
+        The amount of smoothing.
+    fmin : float
+        The minimum value to display.
+    fmid : float
+        The middle value on the colormap.
+    fmax : float
+        The maximum value for the colormap.
+    transparent : bool
+        If True, use a linear transparency between fmin and fmid.
+    alpha : float
+        Alpha value to apply globally to the overlay.
+    time_viewer : bool
+        Display time viewer GUI.
+    config_opts : dict
+        Keyword arguments for Brain initialization.
+        See pysurfer.viz.Brain.
+    subjects_dir : str
+        The path to the freesurfer subjects reconstructions.
+        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
+    figure : instance of mayavi.core.scene.Scene | list | int | None
+        If None, a new figure will be created. If multiple views or a
+        split view is requested, this must be a list of the appropriate
+        length. If an int is provided, it will be used to identify the
+        Mayavi figure by its id, or to create a new figure with the given id.
+    views : str | list
+        View to use. See surfer.Brain().
+    colorbar : bool
+        If True, display colorbar on scene.
+
+    Returns
+    -------
+    brain : Brain
+        An instance of surfer.viz.Brain from PySurfer.
+    """
+    import surfer
+    from surfer import Brain, TimeViewer
+
+    if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
+        raise NotImplementedError('hemi type "%s" not supported with your '
+                                  'version of pysurfer. Please upgrade to '
+                                  'version 0.4 or higher.' % hemi)
+
+    try:
+        import mayavi
+        from mayavi import mlab
+    except ImportError:
+        from enthought import mayavi
+        from enthought.mayavi import mlab
+
+    # import here to avoid circular import problem
+    from ..source_estimate import SourceEstimate
+
+    if not isinstance(stc, SourceEstimate):
+        raise ValueError('stc has to be a surface source estimate')
+
+    if hemi not in ['lh', 'rh', 'split', 'both']:
+        raise ValueError('hemi has to be either "lh", "rh", "split", '
+                         'or "both"')
+
+    n_split = 2 if hemi == 'split' else 1
+    n_views = 1 if isinstance(views, string_types) else len(views)
+    if figure is not None:
+        # use figure with specified id or create new figure
+        if isinstance(figure, int):
+            figure = mlab.figure(figure, size=(600, 600))
+        # make sure it is of the correct type
+        if not isinstance(figure, list):
+            figure = [figure]
+        if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
+            raise TypeError('figure must be a mayavi scene or list of scenes')
+        # make sure we have the right number of figures
+        n_fig = len(figure)
+        if n_fig != n_split * n_views:
+            raise RuntimeError('`figure` must be a list with the same '
+                               'number of elements as PySurfer plots that '
+                               'will be created (%s)' % (n_split * n_views))
+
+    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
+
+    subject = _check_subject(stc.subject, subject, False)
+    if subject is None:
+        if 'SUBJECT' in os.environ:
+            subject = os.environ['SUBJECT']
+        else:
+            raise ValueError('SUBJECT environment variable not set')
+
+    if hemi in ['both', 'split']:
+        hemis = ['lh', 'rh']
+    else:
+        hemis = [hemi]
+
+    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
+    args = inspect.getargspec(Brain.__init__)[0]
+    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
+                  subjects_dir=subjects_dir)
+    if 'views' in args:
+        kwargs['views'] = views
+    else:
+        logger.info('PySurfer does not support "views" argument, please '
+                    'consider updating to a newer version (0.4 or later)')
+    with warnings.catch_warnings(record=True):  # traits warnings
+        brain = Brain(subject, hemi, surface, **kwargs)
+    for hemi in hemis:
+        hemi_idx = 0 if hemi == 'lh' else 1
+        if hemi_idx == 0:
+            data = stc.data[:len(stc.vertno[0])]
+        else:
+            data = stc.data[len(stc.vertno[0]):]
+        vertices = stc.vertno[hemi_idx]
+        time = 1e3 * stc.times
+        with warnings.catch_warnings(record=True):  # traits warnings
+            brain.add_data(data, colormap=colormap, vertices=vertices,
+                           smoothing_steps=smoothing_steps, time=time,
+                           time_label=time_label, alpha=alpha, hemi=hemi,
+                           colorbar=colorbar)
+
+        # scale colormap and set time (index) to display
+        brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
+                                  transparent=transparent)
+
+    if time_viewer:
+        TimeViewer(brain)
+    return brain
+
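A sketch under stated assumptions: stc is a surface SourceEstimate and
PySurfer >= 0.4 is installed (required for the 'split' view used here):

    brain = plot_source_estimates(stc, subject='sample', hemi='split',
                                  views=['lat', 'med'], smoothing_steps=5,
                                  fmin=5., fmid=10., fmax=15.,
                                  time_viewer=True)
    # brain is a surfer.viz.Brain, so e.g. brain.save_image('stc.png') works
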
+
+def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
+                                 fontsize=18, bgcolor=(.05, 0, .1),
+                                 opacity=0.2, brain_color=(0.7,) * 3,
+                                 show=True, high_resolution=False,
+                                 fig_name=None, fig_number=None, labels=None,
+                                 modes=['cone', 'sphere'],
+                                 scale_factors=[1, 0.6],
+                                 verbose=None, **kwargs):
+    """Plot source estimates obtained with sparse solver
+
+    Active dipoles are represented in a "Glass" brain.
+    If the same source is active in multiple source estimates it is
+    displayed with a sphere otherwise with a cone in 3D.
+
+    Parameters
+    ----------
+    src : dict
+        The source space.
+    stcs : instance of SourceEstimate or list of instances of SourceEstimate
+        The source estimates (up to 3).
+    colors : list
+        List of colors.
+    linewidth : int
+        Line width in 2D plot.
+    fontsize : int
+        Font size.
+    bgcolor : tuple of length 3
+        Background color in 3D.
+    opacity : float in [0, 1]
+        Opacity of brain mesh.
+    brain_color : tuple of length 3
+        Brain color.
+    show : bool
+        Show figures if True.
+    fig_name : str | None
+        Mayavi figure name.
+    fig_number : int | None
+        Matplotlib figure number.
+    labels : ndarray or list of ndarrays
+        Labels to show sources in clusters. Sources with the same
+        label and the waveforms within each cluster are presented in
+        the same color. labels should be a list of ndarrays when
+        stcs is a list, i.e. one label array for each stc.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    kwargs : kwargs
+        Keyword arguments to pass to mlab.triangular_mesh.
+    """
+    if not isinstance(stcs, list):
+        stcs = [stcs]
+    if labels is not None and not isinstance(labels, list):
+        labels = [labels]
+
+    if colors is None:
+        colors = COLORS
+
+    linestyles = ['-', '--', ':']
+
+    # Show 3D
+    lh_points = src[0]['rr']
+    rh_points = src[1]['rr']
+    points = np.r_[lh_points, rh_points]
+
+    lh_normals = src[0]['nn']
+    rh_normals = src[1]['nn']
+    normals = np.r_[lh_normals, rh_normals]
+
+    if high_resolution:
+        use_lh_faces = src[0]['tris']
+        use_rh_faces = src[1]['tris']
+    else:
+        use_lh_faces = src[0]['use_tris']
+        use_rh_faces = src[1]['use_tris']
+
+    use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
+
+    points *= 170
+
+    vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
+               for stc in stcs]
+    unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
+
+    try:
+        from mayavi import mlab
+    except ImportError:
+        from enthought.mayavi import mlab
+
+    from matplotlib.colors import ColorConverter
+    color_converter = ColorConverter()
+
+    f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
+    mlab.clf()
+    if mlab.options.backend != 'test':
+        f.scene.disable_render = True
+    with warnings.catch_warnings(record=True):  # traits warnings
+        surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
+                                       points[:, 2], use_faces,
+                                       color=brain_color,
+                                       opacity=opacity, **kwargs)
+
+    import matplotlib.pyplot as plt
+    # Show time courses
+    plt.figure(fig_number)
+    plt.clf()
+
+    colors = cycle(colors)
+
+    logger.info("Total number of active sources: %d" % len(unique_vertnos))
+
+    if labels is not None:
+        colors = [advance_iterator(colors) for _ in
+                  range(np.unique(np.concatenate(labels).ravel()).size)]
+
+    for idx, v in enumerate(unique_vertnos):
+        # get indices of stcs it belongs to
+        ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
+        is_common = len(ind) > 1
+
+        if labels is None:
+            c = advance_iterator(colors)
+        else:
+            # if vertex is in different stcs than take label from first one
+            c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
+
+        mode = modes[1] if is_common else modes[0]
+        scale_factor = scale_factors[1] if is_common else scale_factors[0]
+
+        if (isinstance(scale_factor, (np.ndarray, list, tuple))
+                and len(unique_vertnos) == len(scale_factor)):
+            scale_factor = scale_factor[idx]
+
+        x, y, z = points[v]
+        nx, ny, nz = normals[v]
+        with warnings.catch_warnings(record=True):  # traits
+            mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
+                          mode=mode, scale_factor=scale_factor)
+
+        for k in ind:
+            vertno = vertnos[k]
+            mask = (vertno == v)
+            assert np.sum(mask) == 1
+            linestyle = linestyles[k]
+            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
+                     c=c, linewidth=linewidth, linestyle=linestyle)
+
+    plt.xlabel('Time (ms)', fontsize=18)
+    plt.ylabel('Source amplitude (nAm)', fontsize=18)
+
+    if fig_name is not None:
+        plt.title(fig_name)
+
+    if show:
+        plt.show()
+
+    surface.actor.property.backface_culling = True
+    surface.actor.property.shading = True
+
+    return surface
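A minimal sketch, assuming stcs come from a sparse solver such as
mne.inverse_sparse.mixed_norm and forward is a Forward solution:

    plot_sparse_source_estimates(forward['src'], stcs, opacity=0.1,
                                 fig_name='MxNE dipoles')
    # sources active in several stcs appear as spheres, unique ones as cones
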
diff --git a/mne/viz/__init__.py b/mne/viz/__init__.py
new file mode 100644
index 0000000..738ff9b
--- /dev/null
+++ b/mne/viz/__init__.py
@@ -0,0 +1,20 @@
+"""Visualization routines
+"""
+
+from .topomap import plot_evoked_topomap, plot_projs_topomap
+from .topomap import plot_ica_components, plot_ica_topomap
+from .topomap import plot_tfr_topomap, plot_topomap
+from .topo import (plot_topo, plot_topo_tfr, plot_topo_image_epochs,
+                   iter_topography)
+from .utils import tight_layout, mne_analyze_colormap, compare_fiff
+from ._3d import plot_sparse_source_estimates, plot_source_estimates
+from ._3d import plot_trans, plot_evoked_field
+from .misc import plot_cov, plot_bem, plot_events
+from .misc import plot_source_spectrogram
+from .utils import _mutable_defaults
+from .evoked import plot_evoked, plot_evoked_image
+from .circle import plot_connectivity_circle, circular_layout
+from .epochs import plot_image_epochs, plot_drop_log, plot_epochs
+from .epochs import _drop_log_stats
+from .raw import plot_raw, plot_raw_psds
+from .ica import plot_ica_scores, plot_ica_sources, plot_ica_overlay
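The new mne.viz package exposes all of these helpers in one flat namespace,
so downstream code can import them directly:

    from mne.viz import (plot_topomap, circular_layout,
                         plot_connectivity_circle, plot_drop_log)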
diff --git a/mne/viz/circle.py b/mne/viz/circle.py
new file mode 100644
index 0000000..b2f8dab
--- /dev/null
+++ b/mne/viz/circle.py
@@ -0,0 +1,408 @@
+"""Functions to plot on circle as for connectivity
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+
+from itertools import cycle
+from functools import partial
+
+import numpy as np
+
+from ..externals.six import string_types
+from ..fixes import tril_indices, normalize_colors
+
+
+def circular_layout(node_names, node_order, start_pos=90, start_between=True,
+                    group_boundaries=None, group_sep=10):
+    """Create layout arranging nodes on a circle.
+
+    Parameters
+    ----------
+    node_names : list of str
+        Node names.
+    node_order : list of str
+        List with node names defining the order in which the nodes are
+        arranged. Must contain the same elements as node_names, but the
+        order can be different. The nodes are arranged clockwise starting
+        at "start_pos" degrees.
+    start_pos : float
+        Angle in degrees that defines where the first node is plotted.
+    start_between : bool
+        If True, the layout starts with the position between the nodes. This is
+        the same as adding "180. / len(node_names)" to start_pos.
+    group_boundaries : None | array-like
+        List of boundaries between groups at which point a "group_sep" will
+        be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
+    group_sep : float
+        Group separation angle in degrees. See "group_boundaries".
+
+    Returns
+    -------
+    node_angles : array, shape=(len(node_names),)
+        Node angles in degrees.
+    """
+    n_nodes = len(node_names)
+
+    if len(node_order) != n_nodes:
+        raise ValueError('node_order has to be the same length as node_names')
+
+    if group_boundaries is not None:
+        boundaries = np.array(group_boundaries, dtype=np.int)
+        if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
+            raise ValueError('"group_boundaries" has to be between 0 and '
+                             'n_nodes - 1.')
+        if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
+            raise ValueError('"group_boundaries" must have non-decreasing '
+                             'values.')
+        n_group_sep = len(group_boundaries)
+    else:
+        n_group_sep = 0
+        boundaries = None
+
+    # convert it to a list with indices
+    node_order = [node_order.index(name) for name in node_names]
+    node_order = np.array(node_order)
+    if len(np.unique(node_order)) != n_nodes:
+        raise ValueError('node_order has repeated entries')
+
+    node_sep = (360. - n_group_sep * group_sep) / n_nodes
+
+    if start_between:
+        start_pos += node_sep / 2
+
+        if boundaries is not None and boundaries[0] == 0:
+            # special case when a group separator is at the start
+            start_pos += group_sep / 2
+            boundaries = boundaries[1:] if n_group_sep > 1 else None
+
+    node_angles = np.ones(n_nodes, dtype=np.float) * node_sep
+    node_angles[0] = start_pos
+    if boundaries is not None:
+        node_angles[boundaries] += group_sep
+
+    node_angles = np.cumsum(node_angles)[node_order]
+
+    return node_angles
+
+
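A minimal, self-contained sketch of circular_layout as documented above
(the label names are hypothetical), splitting four nodes into two groups:

    from mne.viz import circular_layout

    node_names = ['lh-A', 'lh-B', 'rh-B', 'rh-A']
    node_order = ['lh-A', 'lh-B', 'rh-B', 'rh-A']  # clockwise from start_pos
    node_angles = circular_layout(node_names, node_order, start_pos=90,
                                  group_boundaries=[0, 2])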
+def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
+                                     n_nodes=0, node_angles=None, ylim=[9, 10]):
+    """Isolates connections around a single node when user left clicks a node.
+
+    On right click, resets all connections."""
+    if event.inaxes != axes:
+        return
+
+    if event.button == 1:  # left click
+        # click must be near node radius
+        if not ylim[0] <= event.ydata <= ylim[1]:
+            return
+
+        # all angles in range [0, 2*pi]
+        node_angles = node_angles % (np.pi * 2)
+        node = np.argmin(np.abs(event.xdata - node_angles))
+
+        patches = event.inaxes.patches
+        for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
+            patches[ii].set_visible(node in [x, y])
+        fig.canvas.draw()
+    elif event.button == 3:  # right click
+        patches = event.inaxes.patches
+        for ii in range(np.size(indices, axis=1)):
+            patches[ii].set_visible(True)
+        fig.canvas.draw()
+
+
+def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
+                             node_angles=None, node_width=None,
+                             node_colors=None, facecolor='black',
+                             textcolor='white', node_edgecolor='black',
+                             linewidth=1.5, colormap='hot', vmin=None,
+                             vmax=None, colorbar=True, title=None,
+                             colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
+                             fontsize_title=12, fontsize_names=8,
+                             fontsize_colorbar=8, padding=6.,
+                             fig=None, subplot=111, interactive=True):
+    """Visualize connectivity as a circular graph.
+
+    Note: This code is based on the circle graph example by Nicolas P. Rougier
+    http://www.loria.fr/~rougier/coding/recipes.html
+
+    Parameters
+    ----------
+    con : array
+        Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
+        array is provided, "indices" has to be used to define the connection
+        indices.
+    node_names : list of str
+        Node names. The order corresponds to the order in con.
+    indices : tuple of arrays | None
+        Two arrays with indices of connections for which the connection
+        strengths are defined in con. Only needed if con is a 1D array.
+    n_lines : int | None
+        If not None, only the n_lines strongest connections (strength=abs(con))
+        are drawn.
+    node_angles : array, shape=(len(node_names),) | None
+        Array with node positions in degrees. If None, the nodes are equally
+        spaced on the circle. See mne.viz.circular_layout.
+    node_width : float | None
+        Width of each node in degrees. If None, the minimum angle between any
+        two nodes is used as the width.
+    node_colors : list of tuples | list of str
+        List with the color to use for each node. If fewer colors than nodes
+        are provided, the colors will be repeated. Any color supported by
+        matplotlib can be used, e.g., RGBA tuples, named colors.
+    facecolor : str
+        Color to use for background. See matplotlib.colors.
+    textcolor : str
+        Color to use for text. See matplotlib.colors.
+    node_edgecolor : str
+        Color to use for lines around nodes. See matplotlib.colors.
+    linewidth : float
+        Line width to use for connections.
+    colormap : str
+        Colormap to use for coloring the connections.
+    vmin : float | None
+        Minimum value for colormap. If None, it is determined automatically.
+    vmax : float | None
+        Maximum value for colormap. If None, it is determined automatically.
+    colorbar : bool
+        Whether to display a colorbar.
+    title : str
+        The figure title.
+    colorbar_size : float
+        Size of the colorbar.
+    colorbar_pos : 2-tuple
+        Position of the colorbar.
+    fontsize_title : int
+        Font size to use for title.
+    fontsize_names : int
+        Font size to use for node names.
+    fontsize_colorbar : int
+        Font size to use for colorbar.
+    padding : float
+        Space to add around figure to accommodate long labels.
+    fig : None | instance of matplotlib.pyplot.Figure
+        The figure to use. If None, a new figure with the specified background
+        color will be created.
+    subplot : int | 3-tuple
+        Location of the subplot when creating figures with multiple plots. E.g.
+        121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
+        matplotlib.pyplot.subplot.
+    interactive : bool
+        When enabled, left-click on a node to show only connections to that
+        node. Right-click shows all connections.
+
+    Returns
+    -------
+    fig : instance of matplotlib.pyplot.Figure
+        The figure handle.
+    axes : instance of matplotlib.axes.PolarAxesSubplot
+        The subplot handle.
+    """
+    import matplotlib.pyplot as plt
+    import matplotlib.path as m_path
+    import matplotlib.patches as m_patches
+
+    n_nodes = len(node_names)
+
+    if node_angles is not None:
+        if len(node_angles) != n_nodes:
+            raise ValueError('node_angles has to be the same length '
+                             'as node_names')
+        # convert it to radians
+        node_angles = node_angles * np.pi / 180
+    else:
+        # uniform layout on unit circle
+        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
+
+    if node_width is None:
+        # widths correspond to the minimum angle between two nodes
+        dist_mat = node_angles[None, :] - node_angles[:, None]
+        dist_mat[np.diag_indices(n_nodes)] = 1e9
+        node_width = np.min(np.abs(dist_mat))
+    else:
+        node_width = node_width * np.pi / 180
+
+    if node_colors is not None:
+        if len(node_colors) < n_nodes:
+            node_colors = cycle(node_colors)
+    else:
+        # assign colors using colormap
+        node_colors = [plt.cm.spectral(i / float(n_nodes))
+                       for i in range(n_nodes)]
+
+    # handle 1D and 2D connectivity information
+    if con.ndim == 1:
+        if indices is None:
+            raise ValueError('indices has to be provided if con.ndim == 1')
+    elif con.ndim == 2:
+        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
+            raise ValueError('con has to be 1D or a square matrix')
+        # we use the lower-triangular part
+        indices = tril_indices(n_nodes, -1)
+        con = con[indices]
+    else:
+        raise ValueError('con has to be 1D or a square matrix')
+
+    # get the colormap
+    if isinstance(colormap, string_types):
+        colormap = plt.get_cmap(colormap)
+
+    # Make figure background the same colors as axes
+    if fig is None:
+        fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
+
+    # Use a polar axes
+    if not isinstance(subplot, tuple):
+        subplot = (subplot,)
+    axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)
+
+    # No ticks, we'll put our own
+    plt.xticks([])
+    plt.yticks([])
+
+    # Set y axes limit, add additional space if requested
+    plt.ylim(0, 10 + padding)
+
+    # Remove the black axes border which may obscure the labels
+    axes.spines['polar'].set_visible(False)
+
+    # Draw lines between connected nodes, only draw the strongest connections
+    if n_lines is not None and len(con) > n_lines:
+        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
+    else:
+        con_thresh = 0.
+
+    # get the connections which we are drawing and sort by connection strength
+    # this will allow us to draw the strongest connections first
+    con_abs = np.abs(con)
+    con_draw_idx = np.where(con_abs >= con_thresh)[0]
+
+    con = con[con_draw_idx]
+    con_abs = con_abs[con_draw_idx]
+    indices = [ind[con_draw_idx] for ind in indices]
+
+    # now sort them
+    sort_idx = np.argsort(con_abs)
+    con_abs = con_abs[sort_idx]
+    con = con[sort_idx]
+    indices = [ind[sort_idx] for ind in indices]
+
+    # Get vmin vmax for color scaling
+    if vmin is None:
+        vmin = np.min(con[np.abs(con) >= con_thresh])
+    if vmax is None:
+        vmax = np.max(con)
+    vrange = vmax - vmin
+
+    # We want to add some "noise" to the start and end position of the
+    # edges: We modulate the noise with the number of connections of the
+    # node and the connection strength, such that the strongest connections
+    # are closer to the node center
+    nodes_n_con = np.zeros((n_nodes), dtype=np.int)
+    for i, j in zip(indices[0], indices[1]):
+        nodes_n_con[i] += 1
+        nodes_n_con[j] += 1
+
+    # initialize random number generator so plot is reproducible
+    rng = np.random.mtrand.RandomState(seed=0)
+
+    n_con = len(indices[0])
+    noise_max = 0.25 * node_width
+    start_noise = rng.uniform(-noise_max, noise_max, n_con)
+    end_noise = rng.uniform(-noise_max, noise_max, n_con)
+
+    nodes_n_con_seen = np.zeros_like(nodes_n_con)
+    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
+        nodes_n_con_seen[start] += 1
+        nodes_n_con_seen[end] += 1
+
+        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start])
+                           / float(nodes_n_con[start]))
+        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end])
+                         / float(nodes_n_con[end]))
+
+    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
+    con_val_scaled = (con - vmin) / vrange
+
+    # Finally, we draw the connections
+    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
+        # Start point
+        t0, r0 = node_angles[i], 10
+
+        # End point
+        t1, r1 = node_angles[j], 10
+
+        # Some noise in start and end point
+        t0 += start_noise[pos]
+        t1 += end_noise[pos]
+
+        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
+        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
+                 m_path.Path.LINETO]
+        path = m_path.Path(verts, codes)
+
+        color = colormap(con_val_scaled[pos])
+
+        # Actual line
+        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
+                                    linewidth=linewidth, alpha=1.)
+        axes.add_patch(patch)
+
+    # Draw ring with colored nodes
+    height = np.ones(n_nodes) * 1.0
+    bars = axes.bar(node_angles, height, width=node_width, bottom=9,
+                    edgecolor=node_edgecolor, lw=2, facecolor='.9',
+                    align='center')
+
+    for bar, color in zip(bars, node_colors):
+        bar.set_facecolor(color)
+
+    # Draw node labels
+    angles_deg = 180 * node_angles / np.pi
+    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
+        if angle_deg >= 270:
+            ha = 'left'
+        else:
+            # Flip the label, so text is always upright
+            angle_deg += 180
+            ha = 'right'
+
+        axes.text(angle_rad, 10.4, name, size=fontsize_names,
+                  rotation=angle_deg, rotation_mode='anchor',
+                  horizontalalignment=ha, verticalalignment='center',
+                  color=textcolor)
+
+    if title is not None:
+        plt.title(title, color=textcolor, fontsize=fontsize_title,
+                  axes=axes)
+
+    if colorbar:
+        norm = normalize_colors(vmin=vmin, vmax=vmax)
+        sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
+        sm.set_array(np.linspace(vmin, vmax))
+        cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
+                          shrink=colorbar_size,
+                          anchor=colorbar_pos)
+        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
+        cb.ax.tick_params(labelsize=fontsize_colorbar)
+        plt.setp(cb_yticks, color=textcolor)
+
+    # Add callback for interaction
+    if interactive:
+        callback = partial(_plot_connectivity_circle_onpick, fig=fig,
+                           axes=axes, indices=indices, n_nodes=n_nodes,
+                           node_angles=node_angles)
+
+        fig.canvas.mpl_connect('button_press_event', callback)
+
+    return fig, axes
+
+
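Usage sketch for plot_connectivity_circle with toy data; for a square
matrix only the lower triangle is drawn, as noted above:

    import numpy as np
    from mne.viz import plot_connectivity_circle, circular_layout

    rng = np.random.RandomState(42)
    names = ['A', 'B', 'C', 'D']
    con = np.abs(rng.randn(4, 4))  # toy square connectivity matrix
    angles = circular_layout(names, names, group_boundaries=[0, 2])
    fig, axes = plot_connectivity_circle(con, names, n_lines=3,
                                         node_angles=angles,
                                         title='Toy connectivity')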
diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py
new file mode 100644
index 0000000..11c660f
--- /dev/null
+++ b/mne/viz/epochs.py
@@ -0,0 +1,451 @@
+"""Functions to plot epochs data
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import warnings
+from collections import deque
+from functools import partial
+
+import numpy as np
+from scipy import ndimage
+
+from ..utils import create_chunks
+from ..io.pick import pick_types, channel_type
+from ..fixes import Counter
+from .utils import _mutable_defaults, tight_layout, _prepare_trellis
+from .utils import figure_nobar
+
+
+def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
+                      vmax=None, colorbar=True, order=None, show=True,
+                      units=None, scalings=None, cmap='RdBu_r'):
+    """Plot Event Related Potential / Fields image
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs
+    picks : int | array-like of int | None
+        The indices of the channels to consider. If None, all good
+        data channels are plotted.
+    sigma : float
+        The standard deviation of the Gaussian smoothing to apply along
+        the epoch axis of the image.
+    vmin : float
+        The min value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers
+    vmax : float
+        The max value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers
+    colorbar : bool
+        Whether to display a colorbar.
+    order : None | array of int | callable
+        If not None, order is used to reorder the epochs on the y-axis
+        of the image. If it's an array of int, it should have the same
+        length as the number of good epochs. If it's a callable, the
+        arguments passed are the times vector and the data as a 2D array
+        (data.shape[1] == len(times)).
+    show : bool
+        Whether to show the figure at the end.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting.
+        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15)`
+    cmap : matplotlib colormap
+        Colormap.
+
+    Returns
+    -------
+    figs : list of matplotlib.figure.Figure
+        One figure per displayed channel.
+    """
+    units, scalings = _mutable_defaults(('units', units),
+                                        ('scalings', scalings))
+
+    import matplotlib.pyplot as plt
+    if picks is None:
+        picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+
+    if list(units.keys()) != list(scalings.keys()):
+        raise ValueError('Scalings and units must have the same keys.')
+
+    picks = np.atleast_1d(picks)
+    evoked = epochs.average(picks)
+    data = epochs.get_data()[:, picks, :]
+    if vmin is None:
+        vmin = data.min()
+    if vmax is None:
+        vmax = data.max()
+
+    figs = list()
+    for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
+        this_fig = plt.figure()
+        figs.append(this_fig)
+
+        ch_type = channel_type(epochs.info, idx)
+        if ch_type not in scalings:
+            # We know it's not in either scalings or units since keys match
+            raise KeyError('%s type not in scalings and units' % ch_type)
+        this_data *= scalings[ch_type]
+
+        this_order = order
+        if callable(order):
+            this_order = order(epochs.times, this_data)
+
+        if this_order is not None:
+            this_data = this_data[this_order]
+
+        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
+
+        ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
+        im = plt.imshow(this_data,
+                        extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
+                                0, len(data)],
+                        aspect='auto', origin='lower',
+                        vmin=vmin, vmax=vmax, cmap=cmap)
+        ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
+        if colorbar:
+            ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
+        ax1.set_title(epochs.ch_names[idx])
+        ax1.set_ylabel('Epochs')
+        ax1.axis('auto')
+        ax1.axis('tight')
+        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
+        ax2.plot(1e3 * evoked.times, scalings[ch_type] * evoked.data[i])
+        ax2.set_xlabel('Time (ms)')
+        ax2.set_ylabel(units[ch_type])
+        ax2.set_ylim([vmin, vmax])
+        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
+        if colorbar:
+            plt.colorbar(im, cax=ax3)
+            tight_layout(fig=this_fig)
+
+    if show:
+        plt.show()
+
+    return figs
+
+
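Usage sketch (hedged: `epochs` is an assumed pre-existing mne.Epochs
instance); vmin/vmax are in the scaled units documented above:

    from mne.viz import plot_image_epochs

    # one ERP/ERF image figure per picked channel
    figs = plot_image_epochs(epochs, picks=[0], sigma=0.5,
                             vmin=-150, vmax=150)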
+def _drop_log_stats(drop_log, ignore=['IGNORED']):
+    """
+    Parameters
+    ----------
+    drop_log : list of lists
+        Epoch drop log from Epochs.drop_log.
+    ignore : list
+        The drop reasons to ignore.
+
+    Returns
+    -------
+    perc : float
+        Total percentage of epochs dropped.
+    """
+    # XXX: This function should be moved to epochs.py after
+    # removal of perc return parameter in plot_drop_log()
+
+    if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
+        raise ValueError('drop_log must be a list of lists')
+
+    perc = 100 * np.mean([len(d) > 0 for d in drop_log
+                          if not any([r in ignore for r in d])])
+
+    return perc
+
+
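Worked example of the statistic above: 'IGNORED' entries are excluded from
the denominator, so with one ignored, one clean and two rejected epochs the
result is 2/3, i.e. about 66.7 percent:

    drop_log = [['IGNORED'], [], ['MEG 2443'], ['EEG 053', 'MEG 2443']]
    perc = _drop_log_stats(drop_log)  # -> 66.66...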
+def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
+                  color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
+                  show=True, return_fig=False):
+    """Show the channel stats based on a drop_log from Epochs
+
+    Parameters
+    ----------
+    drop_log : list of lists
+        Epoch drop log from Epochs.drop_log.
+    threshold : float
+        The percentage threshold to use to decide whether or not to
+        plot. Default is zero (always plot).
+    n_max_plot : int
+        Maximum number of channels to show stats for.
+    subject : str
+        The subject name to use in the title of the plot.
+    color : tuple | str
+        Color to use for the bars.
+    width : float
+        Width of the bars.
+    ignore : list
+        The drop reasons to ignore.
+    show : bool
+        Show figure if True.
+    return_fig : bool
+        Return only figure handle if True. This argument will default
+        to True in v0.9 and then be removed.
+
+    Returns
+    -------
+    perc : float
+        Total percentage of epochs dropped.
+    fig : Instance of matplotlib.figure.Figure
+        The figure.
+    """
+    import matplotlib.pyplot as plt
+    perc = _drop_log_stats(drop_log, ignore)
+    scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
+    ch_names = np.array(list(scores.keys()))
+    if perc < threshold or len(ch_names) == 0:
+        return perc
+    counts = 100 * np.array(list(scores.values()), dtype=float) / len(drop_log)
+    n_plot = min(n_max_plot, len(ch_names))
+    order = np.flipud(np.argsort(counts))
+    fig = plt.figure()
+    plt.title('%s: %0.1f%%' % (subject, perc))
+    x = np.arange(n_plot)
+    plt.bar(x, counts[order[:n_plot]], color=color, width=width)
+    plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
+               horizontalalignment='right')
+    plt.tick_params(axis='x', which='major', labelsize=10)
+    plt.ylabel('% of epochs rejected')
+    plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
+    plt.grid(True, axis='y')
+
+    if show:
+        plt.show()
+
+    if return_fig:
+        return fig
+    else:
+        msg = ("'return_fig=False' will be deprecated in v0.9. "
+               "Use 'Epochs.drop_log_stats' to get percentages instead.")
+        warnings.warn(msg, DeprecationWarning)
+        return perc, fig
+
+
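Usage sketch (hedged: `epochs` is assumed to exist with a populated
drop_log); return_fig=True opts into the future return value:

    from mne.viz import plot_drop_log

    fig = plot_drop_log(epochs.drop_log, subject='sample', return_fig=True)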
+def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
+                      title_str, axes_handler):
+    """Aux functioin"""
+    this = axes_handler[0]
+    for ii, data_, ax in zip(epoch_idx, data, axes):
+        [l.set_data(times, d) for l, d in zip(ax.lines, data_[good_ch_idx])]
+        if bad_ch_idx is not None:
+            bad_lines = [ax.lines[k] for k in bad_ch_idx]
+            [l.set_data(times, d) for l, d in zip(bad_lines,
+                                                  data_[bad_ch_idx])]
+        if title_str is not None:
+            ax.set_title(title_str % ii, fontsize=12)
+        ax.set_ylim(data.min(), data.max())
+        ax.set_yticks([])
+        ax.set_xticks([])
+        if vars(ax)[this]['reject'] is True:
+            #  memorizing reject
+            [l.set_color((0.8, 0.8, 0.8)) for l in ax.lines]
+            ax.get_figure().canvas.draw()
+        else:
+            #  forgetting previous reject
+            for k in axes_handler:
+                if k == this:
+                    continue
+                if vars(ax).get(k, {}).get('reject', None) is True:
+                    [l.set_color('k') for l in ax.lines[:len(good_ch_idx)]]
+                    if bad_ch_idx is not None:
+                        [l.set_color('r') for l in ax.lines[-len(bad_ch_idx):]]
+                    ax.get_figure().canvas.draw()
+                    break
+
+
+def _epochs_navigation_onclick(event, params):
+    """Aux function"""
+    import matplotlib.pyplot as plt
+    p = params
+    here = None
+    if event.inaxes == p['back'].ax:
+        here = 1
+    elif event.inaxes == p['next'].ax:
+        here = -1
+    elif event.inaxes == p['reject-quit'].ax:
+        if p['reject_idx']:
+            p['epochs'].drop_epochs(p['reject_idx'])
+        plt.close(p['fig'])
+        plt.close(event.inaxes.get_figure())
+
+    if here is not None:
+        p['idx_handler'].rotate(here)
+        p['axes_handler'].rotate(here)
+        this_idx = p['idx_handler'][0]
+        _draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
+                          p['data'][this_idx],
+                          p['times'], p['axes'], p['title_str'],
+                          p['axes_handler'])
+        # XXX don't ask me why
+        p['axes'][0].get_figure().canvas.draw()
+
+
+def _epochs_axes_onclick(event, params):
+    """Aux function"""
+    reject_color = (0.8, 0.8, 0.8)
+    ax = event.inaxes
+    if event.inaxes is None:
+        return
+    p = params
+    here = vars(ax)[p['axes_handler'][0]]
+    if here.get('reject', None) is False:
+        idx = here['idx']
+        if idx not in p['reject_idx']:
+            p['reject_idx'].append(idx)
+            [l.set_color(reject_color) for l in ax.lines]
+            here['reject'] = True
+    elif here.get('reject', None) is True:
+        idx = here['idx']
+        if idx in p['reject_idx']:
+            p['reject_idx'].pop(p['reject_idx'].index(idx))
+            good_lines = [ax.lines[k] for k in p['good_ch_idx']]
+            [l.set_color('k') for l in good_lines]
+            if p['bad_ch_idx'] is not None:
+                bad_lines = ax.lines[-len(p['bad_ch_idx']):]
+                [l.set_color('r') for l in bad_lines]
+            here['reject'] = False
+    ax.get_figure().canvas.draw()
+
+
+def plot_epochs(epochs, epoch_idx=None, picks=None, scalings=None,
+                title_str='#%003i', show=True, block=False):
+    """ Visualize single trials using Trellis plot.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs object
+    epoch_idx : array-like | int | None
+        The epochs to visualize. If None, the first 20 epochs are shown.
+        Defaults to None.
+    picks : array-like of int | None
+        Channels to be included. If None only good data channels are used.
+        Defaults to None
+    scalings : dict | None
+        Scale factors for the traces. If None, defaults to:
+        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
+             ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
+    title_str : None | str
+        The string formatting to use for axes titles. If None, no titles
+        will be shown. Defaults expand to ``#001, #002, ...``
+    show : bool
+        Whether to show the figure or not.
+    block : bool
+        Whether to halt program execution until the figure is closed.
+        Useful for rejecting bad trials on the fly by clicking on a
+        sub plot.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        The figure.
+    """
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    scalings = _mutable_defaults(('scalings_plot_raw', None))[0]
+    if np.isscalar(epoch_idx):
+        epoch_idx = [epoch_idx]
+    if epoch_idx is None:
+        n_events = len(epochs.events)
+        epoch_idx = list(range(n_events))
+    else:
+        n_events = len(epoch_idx)
+    epoch_idx = epoch_idx[:n_events]
+    idx_handler = deque(create_chunks(epoch_idx, 20))
+
+    if picks is None:
+        if any('ICA' in k for k in epochs.ch_names):
+            picks = pick_types(epochs.info, misc=True, ref_meg=False,
+                               exclude=[])
+        else:
+            picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
+                               exclude=[])
+    if len(picks) < 1:
+        raise RuntimeError('No appropriate channels found. Please'
+                           ' check your picks')
+    times = epochs.times * 1e3
+    n_channels = epochs.info['nchan']
+    types = [channel_type(epochs.info, idx) for idx in
+             picks]
+
+    # preallocation needed for min / max scaling
+    data = np.zeros((len(epochs.events), n_channels, len(times)))
+    for ii, epoch in enumerate(epochs.get_data()):
+        for jj, (this_type, this_channel) in enumerate(zip(types, epoch)):
+            data[ii, jj] = this_channel / scalings[this_type]
+
+    n_events = len(epochs.events)
+    epoch_idx = epoch_idx[:n_events]
+    idx_handler = deque(create_chunks(epoch_idx, 20))
+    # handle bads
+    bad_ch_idx = None
+    ch_names = epochs.ch_names
+    bads = epochs.info['bads']
+    if any([ch_names[k] in bads for k in picks]):
+        ch_picked = [k for k in ch_names if ch_names.index(k) in picks]
+        bad_ch_idx = [ch_picked.index(k) for k in bads if k in ch_names]
+        good_ch_idx = [p for p in picks if p not in bad_ch_idx]
+    else:
+        good_ch_idx = np.arange(n_channels)
+
+    fig, axes = _prepare_trellis(len(data[idx_handler[0]]), max_col=5)
+    axes_handler = deque(list(range(len(idx_handler))))
+    for ii, data_, ax in zip(idx_handler[0], data[idx_handler[0]], axes):
+        ax.plot(times, data_[good_ch_idx].T, color='k')
+        if bad_ch_idx is not None:
+            ax.plot(times, data_[bad_ch_idx].T, color='r')
+        if title_str is not None:
+            ax.set_title(title_str % ii, fontsize=12)
+        ax.set_ylim(data.min(), data.max())
+        ax.set_yticks([])
+        ax.set_xticks([])
+        vars(ax)[axes_handler[0]] = {'idx': ii, 'reject': False}
+
+    # initialize memory
+    for this_view, this_inds in zip(axes_handler, idx_handler):
+        for ii, ax in zip(this_inds, axes):
+            vars(ax)[this_view] = {'idx': ii, 'reject': False}
+
+    tight_layout(fig=fig)
+    navigation = figure_nobar(figsize=(3, 1.5))
+    from matplotlib import gridspec
+    gs = gridspec.GridSpec(2, 2)
+    ax1 = plt.subplot(gs[0, 0])
+    ax2 = plt.subplot(gs[0, 1])
+    ax3 = plt.subplot(gs[1, :])
+
+    params = {
+        'fig': fig,
+        'idx_handler': idx_handler,
+        'epochs': epochs,
+        'picks': picks,
+        'times': times,
+        'scalings': scalings,
+        'good_ch_idx': good_ch_idx,
+        'bad_ch_idx': bad_ch_idx,
+        'axes': axes,
+        'back': mpl.widgets.Button(ax1, 'back'),
+        'next': mpl.widgets.Button(ax2, 'next'),
+        'reject-quit': mpl.widgets.Button(ax3, 'reject-quit'),
+        'title_str': title_str,
+        'reject_idx': [],
+        'axes_handler': axes_handler,
+        'data': data
+    }
+    fig.canvas.mpl_connect('button_press_event',
+                           partial(_epochs_axes_onclick, params=params))
+    navigation.canvas.mpl_connect('button_press_event',
+                                  partial(_epochs_navigation_onclick,
+                                          params=params))
+    if show is True:
+        plt.show(block=block)
+    return fig
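Usage sketch (hedged: `epochs` is an assumed mne.Epochs instance). With
block=True, execution halts so bad trials can be rejected by clicking
before the script continues:

    from mne.viz import plot_epochs

    fig = plot_epochs(epochs, epoch_idx=list(range(20)), block=True)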
diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py
new file mode 100644
index 0000000..22bc620
--- /dev/null
+++ b/mne/viz/evoked.py
@@ -0,0 +1,296 @@
+"""Functions to make simple plot on evoked M/EEG data (besides topographies)
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+from itertools import cycle
+
+import numpy as np
+
+from ..io.pick import channel_type
+from ..externals.six import string_types
+from .utils import _mutable_defaults, _check_delayed_ssp
+from .utils import _draw_proj_checkbox, tight_layout
+
+
+def _plot_evoked(evoked, picks, exclude, unit, show,
+                 ylim, proj, xlim, hline, units,
+                 scalings, titles, axes, plot_type,
+                 cmap=None):
+    """Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
+
+    Extra param is:
+
+    plot_type : str, value ('butterfly' | 'image')
+        The type of graph to plot: 'butterfly' plots each channel as a line
+        (x axis: time, y axis: amplitude). 'image' plots a 2D image where
+        color depicts the amplitude of each channel at a given time point
+        (x axis: time, y axis: channel). In 'image' mode, the plot is not
+        interactive.
+    """
+    import matplotlib.pyplot as plt
+    if axes is not None and proj == 'interactive':
+        raise RuntimeError('Currently only single axis figures are supported'
+                           ' for interactive SSP selection.')
+
+    scalings, titles, units = _mutable_defaults(('scalings', scalings),
+                                                ('titles', titles),
+                                                ('units', units))
+
+    channel_types = set(key for d in [scalings, titles, units] for key in d)
+    channel_types = sorted(channel_types)  # to guarantee consistent order
+
+    if picks is None:
+        picks = list(range(evoked.info['nchan']))
+
+    bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
+                  if ch in evoked.ch_names]
+    if len(exclude) > 0:
+        if isinstance(exclude, string_types) and exclude == 'bads':
+            exclude = bad_ch_idx
+        elif (isinstance(exclude, list)
+              and all([isinstance(ch, string_types) for ch in exclude])):
+            exclude = [evoked.ch_names.index(ch) for ch in exclude]
+        else:
+            raise ValueError('exclude has to be a list of channel names or '
+                             '"bads"')
+
+        picks = list(set(picks).difference(exclude))
+
+    types = [channel_type(evoked.info, idx) for idx in picks]
+    n_channel_types = 0
+    ch_types_used = []
+    for t in channel_types:
+        if t in types:
+            n_channel_types += 1
+            ch_types_used.append(t)
+
+    axes_init = axes  # remember if axes were given as input
+
+    fig = None
+    if axes is None:
+        fig, axes = plt.subplots(n_channel_types, 1)
+
+    if isinstance(axes, plt.Axes):
+        axes = [axes]
+    elif isinstance(axes, np.ndarray):
+        axes = list(axes)
+
+    if axes_init is not None:
+        fig = axes[0].get_figure()
+
+    if not len(axes) == n_channel_types:
+        raise ValueError('Number of axes (%g) must match number of channel '
+                         'types (%g)' % (len(axes), n_channel_types))
+
+    # instead of projecting during each iteration let's use the mixin here.
+    if proj is True and evoked.proj is not True:
+        evoked = evoked.copy()
+        evoked.apply_proj()
+
+    times = 1e3 * evoked.times  # time in milliseconds
+    for ax, t in zip(axes, ch_types_used):
+        ch_unit = units[t]
+        this_scaling = scalings[t]
+        if unit is False:
+            this_scaling = 1.0
+            ch_unit = 'NA'  # no unit
+        idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+        if len(idx) > 0:
+            # Parameters for butterfly interactive plots
+            if plot_type == 'butterfly':
+                if any([i in bad_ch_idx for i in idx]):
+                    colors = ['k'] * len(idx)
+                    for i in bad_ch_idx:
+                        if i in idx:
+                            colors[idx.index(i)] = 'r'
+
+                    ax._get_lines.color_cycle = iter(colors)
+                else:
+                    ax._get_lines.color_cycle = cycle(['k'])
+            # Set amplitude scaling
+            D = this_scaling * evoked.data[idx, :]
+            # plt.axes(ax)
+            if plot_type == 'butterfly':
+                ax.plot(times, D.T)
+            elif plot_type == 'image':
+                im = ax.imshow(D, interpolation='nearest', origin='lower',
+                               extent=[times[0], times[-1], 0, D.shape[0]],
+                               aspect='auto', cmap=cmap)
+                plt.colorbar(im, ax=ax)
+            if xlim is not None:
+                if xlim == 'tight':
+                    xlim = (times[0], times[-1])
+                ax.set_xlim(xlim)
+            if ylim is not None and t in ylim:
+                if plot_type == 'butterfly':
+                    ax.set_ylim(ylim[t])
+                elif plot_type == 'image':
+                    im.set_clim(ylim[t])
+            ax.set_title(titles[t] + ' (%d channel%s)' % (
+                         len(D), 's' if len(D) > 1 else ''))
+            ax.set_xlabel('time (ms)')
+            if plot_type == 'butterfly':
+                ax.set_ylabel('data (%s)' % ch_unit)
+            elif plot_type == 'image':
+                ax.set_ylabel('channels (%s)' % ch_unit)
+            else:
+                raise ValueError("plot_type has to be 'butterfly' or 'image'."
+                                 "Got %s." % plot_type)
+
+            if (plot_type == 'butterfly') and (hline is not None):
+                for h in hline:
+                    ax.axhline(h, color='r', linestyle='--', linewidth=2)
+
+    if axes_init is None:
+        plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
+
+    if proj == 'interactive':
+        _check_delayed_ssp(evoked)
+        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
+                      axes=axes, types=types, units=units, scalings=scalings,
+                      unit=unit, ch_types_used=ch_types_used, picks=picks,
+                      plot_update_proj_callback=_plot_update_evoked,
+                      plot_type=plot_type)
+        _draw_proj_checkbox(None, params)
+
+    if show and plt.get_backend() != 'agg':
+        plt.show()
+        fig.canvas.draw()  # for axes plots update axes.
+    tight_layout(fig=fig)
+
+    return fig
+
+
+def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
+                ylim=None, proj=False, xlim='tight', hline=None, units=None,
+                scalings=None, titles=None, axes=None, plot_type="butterfly"):
+    """Plot evoked data
+
+    Note: If bad channels are not excluded they are shown in red.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked data
+    picks : array-like of int | None
+        The indices of channels to plot. If None show all.
+    exclude : list of str | 'bads'
+        Channel names to exclude from being shown. If 'bads', the
+        bad channels are excluded.
+    unit : bool
+        Scale plot with channel (SI) unit.
+    show : bool
+        Whether to call pyplot.show() at the end.
+    ylim : dict | None
+        ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6]).
+        Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
+        for each channel equals the pyplot default.
+    xlim : 'tight' | tuple | None
+        xlim for plots.
+    proj : bool | 'interactive'
+        If true SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    hline : list of floats | None
+        The values at which to show a horizontal line.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If
+        None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    titles : dict | None
+        The titles associated with the channels. If None, defaults to
+        `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+    axes : instance of Axes | list | None
+        The axes to plot to. If list, the list must be a list of Axes of
+        the same length as the number of channel types. If instance of
+        Axes, there must be only one channel type plotted.
+    """
+    return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
+                        show=show, ylim=ylim, proj=proj, xlim=xlim,
+                        hline=hline, units=units, scalings=scalings,
+                        titles=titles, axes=axes, plot_type="butterfly")
+
+
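Usage sketch (hedged: `evoked` is an assumed mne.Evoked instance):

    from mne.viz import plot_evoked

    # butterfly plot with bads excluded and an interactive SSP check box
    fig = plot_evoked(evoked, exclude='bads', proj='interactive', hline=[0])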
+def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
+                      clim=None, proj=False, xlim='tight', units=None,
+                      scalings=None, titles=None, axes=None, cmap='RdBu_r'):
+    """Plot evoked data as images
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked data
+    picks : array-like of int | None
+        The indices of channels to plot. If None show all.
+    exclude : list of str | 'bads'
+        Channel names to exclude from being shown. If 'bads', the
+        bad channels are excluded.
+    unit : bool
+        Scale plot with channel (SI) unit.
+    show : bool
+        Whether to call pyplot.show() at the end.
+    clim : dict | None
+        clim for plots. e.g. clim = dict(eeg=[-200e-6, 200e-6]).
+        Valid keys are eeg, mag, grad, misc. If None, the clim parameter
+        for each channel equals the pyplot default.
+    xlim : 'tight' | tuple | None
+        xlim for plots.
+    proj : bool | 'interactive'
+        If true SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If
+        None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    titles : dict | None
+        The titles associated with the channels. If None, defaults to
+        `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+    axes : instance of Axes | list | None
+        The axes to plot to. If list, the list must be a list of Axes of
+        the same length as the number of channel types. If instance of
+        Axes, there must be only one channel type plotted.
+    cmap : matplotlib colormap
+        Colormap.
+    """
+    return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
+                        show=show, ylim=clim, proj=proj, xlim=xlim,
+                        hline=None, units=units, scalings=scalings,
+                        titles=titles, axes=axes, plot_type="image",
+                        cmap=cmap)
+
+
+def _plot_update_evoked(params, bools):
+    """ update the plot evoked lines
+    """
+    picks, evoked = [params[k] for k in ('picks', 'evoked')]
+    times = evoked.times * 1e3
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+    params['proj_bools'] = bools
+    new_evoked = evoked.copy()
+    new_evoked.info['projs'] = []
+    new_evoked.add_proj(projs)
+    new_evoked.apply_proj()
+    for ax, t in zip(params['axes'], params['ch_types_used']):
+        this_scaling = params['scalings'][t]
+        idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
+        D = this_scaling * new_evoked.data[idx, :]
+        if params['plot_type'] == 'butterfly':
+            [line.set_data(times, di) for line, di in zip(ax.lines, D)]
+        else:
+            ax.images[0].set_data(D)
+    params['fig'].canvas.draw()
diff --git a/mne/viz/ica.py b/mne/viz/ica.py
new file mode 100644
index 0000000..302072f
--- /dev/null
+++ b/mne/viz/ica.py
@@ -0,0 +1,484 @@
+"""Functions to plot ICA specific data (besides topographies)
+"""
+from __future__ import print_function
+
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: Simplified BSD
+
+from functools import partial
+
+import numpy as np
+
+from ..utils import deprecated
+from .utils import tight_layout, _prepare_trellis
+
+
+def _ica_plot_sources_onpick_(event, sources=None, ylims=None):
+    """Onpick callback for plot_ica_panel"""
+
+    # make sure that the swipe gesture in OS-X doesn't open many figures
+    if event.mouseevent.inaxes is None or event.mouseevent.button != 1:
+        return
+
+    artist = event.artist
+    try:
+        import matplotlib.pyplot as plt
+        plt.figure()
+        src_idx = artist._mne_src_idx
+        component = artist._mne_component
+        plt.plot(sources[src_idx], 'r' if artist._mne_is_bad else 'k')
+        plt.ylim(ylims)
+        plt.grid(linestyle='-', color='gray', linewidth=.25)
+        plt.title('ICA #%i' % component)
+    except Exception as err:
+        # matplotlib silently ignores exceptions in event handlers, so we print
+        # it here to know what went wrong
+        print(err)
+        raise err
+
+
+ at deprecated('`plot_ica_panel` is deprecated and will be removed in '
+            'MNE 1.0. Use `plot_ica_sources` instead')
+def plot_ica_panel(sources, start=None, stop=None,
+                   source_idx=None, ncol=3, verbose=None,
+                   title=None, show=True):
+    """Create panel plots of ICA sources
+
+    Clicking on the plot of an individual source opens a new figure showing
+    the source.
+
+    Parameters
+    ----------
+    sources : ndarray
+        Sources as drawn from ica.get_sources.
+    start : int
+        x-axis start index. If None from the beginning.
+    stop : int
+        x-axis stop index. If None to the end.
+    source_idx : array-like
+        Indices for subsetting the sources.
+    ncol : int
+        Number of panel-columns.
+    title : str
+        The figure title. If None a default is provided.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    show : bool
+        If True, plot will be shown, else just the figure is returned.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+    """
+
+    return _plot_ica_grid(sources=sources, start=start, stop=stop,
+                          source_idx=source_idx, ncol=ncol, verbose=verbose,
+                          title=title, show=show)
+
+
+def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
+                     stop=None, show=True, title=None):
+    """Plot estimated latent sources given the unmixing matrix.
+
+    Typical use cases:
+
+    1. plot evolution of latent sources over time (Raw input)
+    2. plot latent sources around event-related time windows (Epochs input)
+    3. plot time-locking in ICA space (Evoked input)
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA solution.
+    inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
+        The object to plot the sources from.
+    picks : ndarray | None
+        The components to be displayed. If None, plot will show the
+        sources in the order in which they were fitted.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
+    start : int
+        X-axis start index. If None from the beginning.
+    stop : int
+        X-axis stop index. If None to the end.
+    title : str | None
+        The figure title. If None a default is provided.
+    show : bool
+        If True, plot will be shown, else just the figure is returned.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+        The figure.
+    """
+
+    from ..io.base import _BaseRaw
+    from ..evoked import Evoked
+    from ..epochs import _BaseEpochs
+
+    if exclude is None:
+        exclude = ica.exclude
+
+    if isinstance(inst, (_BaseRaw, _BaseEpochs)):
+        if isinstance(inst, _BaseRaw):
+            sources = ica._transform_raw(inst, start, stop)
+        else:
+            if start is not None or stop is not None:
+                inst = inst.crop(start, stop, copy=True)
+            sources = ica._transform_epochs(inst, concatenate=True)
+        if picks is not None:
+            if np.isscalar(picks):
+                picks = [picks]
+            sources = np.atleast_2d(sources[picks])
+
+        fig = _plot_ica_grid(sources, start=start, stop=stop,
+                             ncol=len(sources) // 10 or 1,
+                             exclude=exclude,
+                             source_idx=picks,
+                             title=title, show=show)
+    elif isinstance(inst, Evoked):
+        sources = ica.get_sources(inst)
+        if start is not None or stop is not None:
+            inst = inst.crop(start, stop, copy=True)
+        fig = _plot_ica_sources_evoked(evoked=sources,
+                                       exclude=exclude,
+                                       title=title)
+    else:
+        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
+
+    return fig
+
+
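Usage sketch (hedged: `ica` is assumed fitted and `raw` is an assumed
mne.io.Raw instance):

    from mne.viz import plot_ica_sources

    # show the first five estimated sources
    fig = plot_ica_sources(ica, raw, picks=list(range(5)))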
+def _plot_ica_grid(sources, start, stop,
+                   source_idx, ncol, exclude,
+                   title, show):
+    """Create panel plots of ICA sources
+
+    Clicking on the plot of an individual source opens a new figure showing
+    the source.
+
+    Parameters
+    ----------
+    sources : ndarray
+        Sources as drawn from ica.get_sources.
+    start : int
+        x-axis start index. If None from the beginning.
+    stop : int
+        x-axis stop index. If None to the end.
+    exclude : array-like of int
+        The components marked for exclusion.
+    source_idx : array-like
+        Indices for subsetting the sources.
+    ncol : int
+        Number of panel-columns.
+    title : str
+        The figure title. If None a default is provided.
+    show : bool
+        If True, plot will be shown, else just the figure is returned.
+    """
+    import matplotlib.pyplot as plt
+
+    if source_idx is None:
+        source_idx = np.arange(len(sources))
+    elif isinstance(source_idx, list):
+        source_idx = np.array(source_idx)
+    if exclude is None:
+        exclude = []
+
+    n_components = len(sources)
+    ylims = sources.min(), sources.max()
+    xlims = np.arange(sources.shape[-1])[[0, -1]]
+    fig, axes = _prepare_trellis(n_components, ncol)
+    if title is None:
+        fig.suptitle('Reconstructed latent sources', size=16)
+    elif title:
+        fig.suptitle(title, size=16)
+
+    plt.subplots_adjust(wspace=0.05, hspace=0.05)
+    my_iter = enumerate(zip(source_idx, axes, sources))
+    for i_source, (i_selection, ax, source) in my_iter:
+        component = '[%i]' % i_selection
+        # plot + embed idx and component name to use in callback
+        color = 'r' if i_selection in exclude else 'k'
+        line = ax.plot(source, linewidth=0.5, color=color, picker=1e9)[0]
+        vars(line)['_mne_src_idx'] = i_source
+        vars(line)['_mne_component'] = i_selection
+        vars(line)['_mne_is_bad'] = i_selection in exclude
+        ax.set_xlim(xlims)
+        ax.set_ylim(ylims)
+        ax.text(0.05, .95, component, transform=ax.transAxes,
+                verticalalignment='top')
+        plt.setp(ax.get_xticklabels(), visible=False)
+        plt.setp(ax.get_yticklabels(), visible=False)
+    # register callback
+    callback = partial(_ica_plot_sources_onpick_, sources=sources, ylims=ylims)
+    fig.canvas.mpl_connect('pick_event', callback)
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_ica_sources_evoked(evoked, exclude, title):
+    """Plot average over epochs in ICA space
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The latent sources, averaged and time-locked.
+    exclude : array_like of int
+        The components marked for exclusion.
+    title : str
+        The figure title.
+    """
+    import matplotlib.pyplot as plt
+    if title is None:
+        title = 'Reconstructed latent sources, time-locked'
+
+    fig = plt.figure()
+    times = evoked.times * 1e3
+
+    # plot unclassified sources
+    plt.plot(times, evoked.data.T, 'k')
+    for ii in exclude:
+        # use indexing to expose event related sources
+        color, label = ('r', 'ICA %02d' % ii)
+        plt.plot(times, evoked.data[ii].T, color='r', label=label)
+
+    plt.title(title)
+    plt.xlim(times[[0, -1]])
+    plt.xlabel('Time (ms)')
+    plt.ylabel('(NA)')
+    plt.legend(loc='best')
+    tight_layout(fig=fig)
+    return fig
+
+
+def plot_ica_scores(ica, scores, exclude=None, axhline=None,
+                    title='ICA component scores',
+                    figsize=(12, 6)):
+    """Plot scores related to detected components.
+
+    Use this function to assess how well your score describes outlier
+    sources and how well you detected them.
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
+    scores : array_like of float, shape (n_components,) | list of arrays
+        Scores based on arbitrary metric to characterize ICA components.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
+    axhline : float
+        Draw horizontal line to e.g. visualize rejection threshold.
+    title : str
+        The figure title.
+    figsize : tuple of int
+        The figure size. Defaults to (12, 6)
+
+    Returns
+    -------
+    fig : instance of matplotlib.pyplot.Figure
+        The figure object
+    """
+    import matplotlib.pyplot as plt
+    my_range = np.arange(ica.n_components_)
+    if exclude is None:
+        exclude = ica.exclude
+    exclude = np.unique(exclude)
+    if not isinstance(scores[0], (list, np.ndarray)):
+        scores = [scores]
+    n_rows = len(scores)
+    figsize = (12, 6) if figsize is None else figsize
+    fig, axes = plt.subplots(n_rows, figsize=figsize, sharex=True, sharey=True)
+    if isinstance(axes, np.ndarray):
+        axes = axes.flatten()
+    else:
+        axes = [axes]
+    plt.suptitle(title)
+    for this_scores, ax in zip(scores, axes):
+        if len(my_range) != len(this_scores):
+            raise ValueError('The length of `scores` must equal the '
+                             'number of ICA components.')
+        ax.bar(my_range, this_scores, color='w')
+        for excl in exclude:
+            ax.bar(my_range[excl], this_scores[excl], color='r')
+        if axhline is not None:
+            if np.isscalar(axhline):
+                axhline = [axhline]
+            for axl in axhline:
+                ax.axhline(axl, color='r', linestyle='--')
+        ax.set_ylabel('score')
+        ax.set_xlabel('ICA components')
+        ax.set_xlim(0, len(this_scores))
+    tight_layout(fig=fig)
+    if len(axes) > 1:
+        plt.subplots_adjust(top=0.9)
+    plt.show()
+    return fig
+
+
+def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
+                     stop=None, title=None, show=True):
+    """Overlay of raw and cleaned signals given the unmixing matrix.
+
+    This function helps to visualize signal quality and artifact rejection.
+
+    Parameters
+    ----------
+    inst : instance of mne.io.Raw or mne.Evoked
+        The signals to be compared given the ICA solution. For Raw input,
+        the raw data are displayed before and after cleaning. In a second
+        panel, the cross-channel average is shown. Since dipolar sources
+        are canceled out, this display is sensitive to artifacts. For
+        Evoked input, butterfly plots of the raw and clean signals are
+        superimposed.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels
+        are used that were included on fitting).
+    start : int
+        X-axis start index. If None, plot from the beginning.
+    stop : int
+        X-axis stop index. If None, plot to the end.
+    title : str
+        The figure title.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+        The figure.
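+
+    Examples
+    --------
+    A minimal sketch, assuming a fitted ICA with components marked in
+    ``ica.exclude`` and a Raw object:
+
+    >>> fig = plot_ica_overlay(ica, raw, start=10., stop=13.)  # doctest: +SKIP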
+    """
+    # avoid circular imports
+    from ..io.base import _BaseRaw
+    from ..evoked import Evoked
+    from ..preprocessing.ica import _check_start_stop
+    import matplotlib.pyplot as plt
+
+    if not isinstance(inst, (_BaseRaw, Evoked)):
+        raise ValueError('Data input must be of Raw or Evoked type')
+    if title is None:
+        title = 'Signals before (red) and after (black) cleaning'
+    if picks is None:
+        picks = [inst.ch_names.index(k) for k in ica.ch_names]
+    if exclude is None:
+        exclude = ica.exclude
+    if isinstance(inst, _BaseRaw):
+        if start is None:
+            start = 0.0
+        if stop is None:
+            stop = 3.0
+        ch_types_used = [k for k in ['mag', 'grad', 'eeg'] if k in ica]
+        start_compare, stop_compare = _check_start_stop(inst, start, stop)
+        data, times = inst[picks, start_compare:stop_compare]
+
+        raw_cln = ica.apply(inst, exclude=exclude, start=start, stop=stop,
+                            copy=True)
+        data_cln, _ = raw_cln[picks, start_compare:stop_compare]
+        fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
+                                    times=times * 1e3, title=title,
+                                    ch_types_used=ch_types_used)
+    elif isinstance(inst, Evoked):
+        if start is not None and stop is not None:
+            inst = inst.crop(start, stop, copy=True)
+        if picks is not None:
+            inst.pick_channels([inst.ch_names[p] for p in picks])
+        evoked_cln = ica.apply(inst, exclude=exclude, copy=True)
+        fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
+                                       title=title)
+    if show is True:
+        plt.show()
+    return fig
+
+
+def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used):
+    """Plot evoked after and before ICA cleaning
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
+    epochs : instance of mne.Epochs
+        The Epochs to be regarded.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+    """
+    import matplotlib.pyplot as plt
+    # the cleaned data were restored to sensor space (all PCA components
+    # kept); let's now compare the data before and after cleaning,
+    # first the raw data
+    assert data.shape == data_cln.shape
+    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+    plt.suptitle(title)
+    ax1.plot(times, data.T, color='r')
+    ax1.plot(times, data_cln.T, color='k')
+    ax1.set_xlabel('time (ms)')
+    ax1.set_xlim(times[0], times[-1])
+    ax1.set_title('Raw data')
+
+    _ch_types = {'mag': 'Magnetometers',
+                 'grad': 'Gradiometers',
+                 'eeg': 'EEG'}
+    ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
+    ax2.set_title('Average across channels ({})'.format(ch_types))
+    ax2.plot(times, data.mean(0), color='r')
+    ax2.plot(times, data_cln.mean(0), color='k')
+    ax2.set_xlabel('time (ms)')
+    ax2.set_xlim(times[0], times[-1])
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.90)
+    fig.canvas.draw()
+
+    return fig
+
+
+def _plot_ica_overlay_evoked(evoked, evoked_cln, title):
+    """Plot evoked after and before ICA cleaning
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
+    epochs : instance of mne.Epochs
+        The Epochs to be regarded.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+    """
+    import matplotlib.pyplot as plt
+    ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
+    n_rows = len(ch_types_used)
+    ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
+                         c in evoked_cln]
+
+    if len(ch_types_used) != len(ch_types_used_cln):
+        raise ValueError('Raw and clean evokeds must match. '
+                         'Found different channels.')
+
+    fig, axes = plt.subplots(n_rows, 1)
+    fig.suptitle('Average signal before (red) and after (black) ICA')
+    axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
+
+    evoked.plot(axes=axes)
+    for ax in fig.axes:
+        for line in ax.get_lines():
+            line.set_color('r')
+    fig.canvas.draw()
+    evoked_cln.plot(axes=axes)
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.90)
+    fig.canvas.draw()
+    return fig
diff --git a/mne/viz/misc.py b/mne/viz/misc.py
new file mode 100644
index 0000000..71f1fad
--- /dev/null
+++ b/mne/viz/misc.py
@@ -0,0 +1,521 @@
+"""Functions to make simple plots with M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import copy
+import warnings
+from glob import glob
+import os.path as op
+from itertools import cycle
+
+import numpy as np
+from scipy import linalg
+
+from ..surface import read_surface
+from ..io.proj import make_projector
+from ..utils import logger, verbose, get_subjects_dir
+from ..io.pick import pick_types
+from .utils import tight_layout, COLORS, _prepare_trellis
+
+
+ at verbose
+def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
+             show=True, verbose=None):
+    """Plot Covariance data
+
+    Parameters
+    ----------
+    cov : instance of Covariance
+        The covariance matrix.
+    info : dict
+        Measurement info.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any channel.
+        If 'bads', exclude info['bads'].
+    colorbar : bool
+        Show colorbar or not.
+    proj : bool
+        Apply projections or not.
+    show : bool
+        Call pyplot.show() at the end or not.
+    show_svd : bool
+        Also plot the singular values of the noise covariance for each
+        sensor type. Square roots, i.e. standard deviations, are shown.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig_cov : instance of matplotlib.pyplot.Figure
+        The covariance plot.
+    fig_svd : instance of matplotlib.pyplot.Figure | None
+        The SVD spectra plot of the covariance.
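+
+    Examples
+    --------
+    A minimal sketch, assuming a saved covariance file and a matching
+    ``raw.info`` (the filename below is illustrative):
+
+    >>> import mne  # doctest: +SKIP
+    >>> cov = mne.read_cov('sample_audvis-cov.fif')  # doctest: +SKIP
+    >>> fig_cov, fig_svd = plot_cov(cov, raw.info, exclude='bads')  # doctest: +SKIP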
+    """
+    if exclude == 'bads':
+        exclude = info['bads']
+    ch_names = [n for n in cov.ch_names if n not in exclude]
+    ch_idx = [cov.ch_names.index(n) for n in ch_names]
+    info_ch_names = info['ch_names']
+    sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude=exclude)
+    sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
+                         exclude=exclude)
+    sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
+                          exclude=exclude)
+    idx_eeg = [ch_names.index(info_ch_names[c])
+               for c in sel_eeg if info_ch_names[c] in ch_names]
+    idx_mag = [ch_names.index(info_ch_names[c])
+               for c in sel_mag if info_ch_names[c] in ch_names]
+    idx_grad = [ch_names.index(info_ch_names[c])
+                for c in sel_grad if info_ch_names[c] in ch_names]
+
+    idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
+                 (idx_grad, 'Gradiometers', 'fT/cm', 1e13),
+                 (idx_mag, 'Magnetometers', 'fT', 1e15)]
+    idx_names = [(idx, name, unit, scaling)
+                 for idx, name, unit, scaling in idx_names if len(idx) > 0]
+
+    C = cov.data[ch_idx][:, ch_idx]
+
+    if proj:
+        projs = copy.deepcopy(info['projs'])
+
+        #   Activate the projection items
+        for p in projs:
+            p['active'] = True
+
+        P, ncomp, _ = make_projector(projs, ch_names)
+        if ncomp > 0:
+            logger.info('    Created an SSP operator (subspace dimension'
+                        ' = %d)' % ncomp)
+            C = np.dot(P, np.dot(C, P.T))
+        else:
+            logger.info('    The projection vectors do not apply to these '
+                        'channels.')
+
+    import matplotlib.pyplot as plt
+
+    fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
+    for k, (idx, name, _, _) in enumerate(idx_names):
+        plt.subplot(1, len(idx_names), k + 1)
+        plt.imshow(C[idx][:, idx], interpolation="nearest")
+        plt.title(name)
+    plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
+    tight_layout(fig=fig_cov)
+
+    fig_svd = None
+    if show_svd:
+        fig_svd = plt.figure()
+        for k, (idx, name, unit, scaling) in enumerate(idx_names):
+            s = linalg.svd(C[idx][:, idx], compute_uv=False)
+            plt.subplot(1, len(idx_names), k + 1)
+            plt.ylabel('Noise std (%s)' % unit)
+            plt.xlabel('Eigenvalue index')
+            plt.semilogy(np.sqrt(s) * scaling)
+            plt.title(name)
+            tight_layout(fig=fig_svd)
+
+    if show:
+        plt.show()
+
+    return fig_cov, fig_svd
+
+
+def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
+                            source_index=None, colorbar=False, show=True):
+    """Plot source power in time-freqency grid.
+
+    Parameters
+    ----------
+    stcs : list of SourceEstimate
+        Source power for consecutive time windows, one SourceEstimate object
+        should be provided for each frequency bin.
+    freq_bins : list of tuples of float
+        Start and end points of frequency bins of interest.
+    tmin : float
+        Minimum time instant to show.
+    tmax : float
+        Maximum time instant to show.
+    source_index : int | None
+        Index of source for which the spectrogram will be plotted. If None,
+        the source with the largest activation will be selected.
+    colorbar : bool
+        If true, a colorbar will be added to the plot.
+    show : bool
+        Show figure if True.
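+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The spectrogram figure.
+
+    Examples
+    --------
+    A minimal sketch, assuming ``stcs`` holds one SourceEstimate per
+    frequency bin, e.g. from a time-frequency beamformer:
+
+    >>> freq_bins = [(4., 12.), (30., 55.)]  # doctest: +SKIP
+    >>> fig = plot_source_spectrogram(stcs, freq_bins, colorbar=True)  # doctest: +SKIP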
+    """
+    import matplotlib.pyplot as plt
+
+    # Input checks
+    if len(stcs) == 0:
+        raise ValueError('cannot plot spectrogram if len(stcs) == 0')
+
+    stc = stcs[0]
+    if tmin is not None and tmin < stc.times[0]:
+        raise ValueError('tmin cannot be smaller than the first time point '
+                         'provided in stcs')
+    if tmax is not None and tmax > stc.times[-1] + stc.tstep:
+        raise ValueError('tmax cannot be larger than the sum of the last time '
+                         'point and the time step, which are provided in stcs')
+
+    # Preparing time-frequency cell boundaries for plotting
+    if tmin is None:
+        tmin = stc.times[0]
+    if tmax is None:
+        tmax = stc.times[-1] + stc.tstep
+    time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
+    freq_bounds = sorted(set(np.ravel(freq_bins)))
+    freq_ticks = copy.deepcopy(freq_bounds)
+
+    # Rejecting time points that will not be plotted
+    for stc in stcs:
+        # Using 1e-10 to improve numerical stability
+        stc.crop(tmin - 1e-10, tmax - stc.tstep + 1e-10)
+
+    # Gathering results for each time window
+    source_power = np.array([stc.data for stc in stcs])
+
+    # Finding the source with maximum source power
+    if source_index is None:
+        source_index = np.unravel_index(source_power.argmax(),
+                                        source_power.shape)[1]
+
+    # If there is a gap in the frequency bins record its locations so that it
+    # can be covered with a gray horizontal bar
+    gap_bounds = []
+    for i in range(len(freq_bins) - 1):
+        lower_bound = freq_bins[i][1]
+        upper_bound = freq_bins[i + 1][0]
+        if lower_bound != upper_bound:
+            freq_bounds.remove(lower_bound)
+            gap_bounds.append((lower_bound, upper_bound))
+
+    # Preparing time-frequency grid for plotting
+    time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
+
+    # Plotting the results
+    fig = plt.figure(figsize=(9, 6))
+    plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
+               cmap=plt.cm.jet)
+    ax = plt.gca()
+
+    plt.title('Time-frequency source power')
+    plt.xlabel('Time (s)')
+    plt.ylabel('Frequency (Hz)')
+
+    time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
+    n_skip = 1 + len(time_bounds) // 10
+    for i in range(len(time_bounds)):
+        if i % n_skip != 0:
+            time_tick_labels[i] = ''
+
+    ax.set_xticks(time_bounds)
+    ax.set_xticklabels(time_tick_labels)
+    plt.xlim(time_bounds[0], time_bounds[-1])
+    plt.yscale('log')
+    ax.set_yticks(freq_ticks)
+    ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
+    plt.ylim(freq_bounds[0], freq_bounds[-1])
+
+    plt.grid(True, ls='-')
+    if colorbar:
+        plt.colorbar()
+    tight_layout(fig=fig)
+
+    # Covering frequency gaps with horizontal bars
+    for lower_bound, upper_bound in gap_bounds:
+        plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
+                 lower_bound, time_bounds[0], color='#666666')
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
+                       slices=None, show=True):
+    """Plot BEM contours on anatomical slices.
+
+    Parameters
+    ----------
+    mri_fname : str
+        The name of the file containing anatomical data.
+    surf_fnames : list of str
+        The filenames for the BEM surfaces in the format
+        ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
+    orientation : str
+        'coronal' or 'axial' or 'sagittal'
+    slices : list of int
+        Slice indices.
+    show : bool
+        Call pyplot.show() at the end.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        The figure.
+    """
+    import matplotlib.pyplot as plt
+    import nibabel as nib
+
+    if orientation not in ['coronal', 'axial', 'sagittal']:
+        raise ValueError("Orientation must be 'coronal', 'axial' or "
+                         "'sagittal'. Got %s." % orientation)
+
+    # Load the T1 data
+    nim = nib.load(mri_fname)
+    data = nim.get_data()
+    affine = nim.get_affine()
+
+    n_sag, n_axi, n_cor = data.shape
+    orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
+    orientation_axis = orientation_name2axis[orientation]
+
+    if slices is None:
+        n_slices = data.shape[orientation_axis]
+        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
+
+    # create a list of surfaces
+    surfs = list()
+
+    trans = linalg.inv(affine)
+    # XXX : next line is a hack don't ask why
+    trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
+
+    for surf_fname in surf_fnames:
+        surf = dict()
+        surf['rr'], surf['tris'] = read_surface(surf_fname)
+        # move back surface to MRI coordinate system
+        surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
+        surfs.append(surf)
+
+    fig, axs = _prepare_trellis(len(slices), 4)
+
+    for ax, sl in zip(axs, slices):
+
+        # adjust the orientations for good view
+        if orientation == 'coronal':
+            dat = data[:, :, sl].transpose()
+        elif orientation == 'axial':
+            dat = data[:, sl, :]
+        elif orientation == 'sagittal':
+            dat = data[sl, :, :]
+
+        # First plot the anatomical data
+        ax.imshow(dat, cmap=plt.cm.gray)
+        ax.axis('off')
+
+        # and then plot the contours on top
+        for surf in surfs:
+            if orientation == 'coronal':
+                ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
+                              surf['tris'], surf['rr'][:, 2],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+            elif orientation == 'axial':
+                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
+                              surf['tris'], surf['rr'][:, 1],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+            elif orientation == 'sagittal':
+                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
+                              surf['tris'], surf['rr'][:, 0],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+
+    if show:
+        plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
+                            hspace=0.)
+        plt.show()
+
+    return fig
+
+
+def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
+             slices=None, show=True):
+    """Plot BEM contours on anatomical slices.
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    subjects_dir : str | None
+        Path to the SUBJECTS_DIR. If None, the path is obtained by using
+        the environment variable SUBJECTS_DIR.
+    orientation : str
+        'coronal' or 'axial' or 'sagittal'.
+    slices : list of int
+        Slice indices.
+    show : bool
+        Call pyplot.show() at the end.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        The figure.
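+
+    Examples
+    --------
+    A minimal sketch, assuming a FreeSurfer 'sample' subject with BEM
+    surfaces available under ``subjects_dir``:
+
+    >>> fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
+    ...                orientation='axial')  # doctest: +SKIP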
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    # Get the MRI filename
+    mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+    if not op.isfile(mri_fname):
+        raise IOError('MRI file "%s" does not exist' % mri_fname)
+
+    # Get the BEM surface filenames
+    bem_path = op.join(subjects_dir, subject, 'bem')
+
+    if not op.isdir(bem_path):
+        raise IOError('Subject bem directory "%s" does not exist' % bem_path)
+
+    surf_fnames = []
+    for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
+        surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
+        if len(surf_fname) > 0:
+            surf_fname = surf_fname[0]
+            logger.info("Using surface: %s" % surf_fname)
+        else:
+            raise IOError('No surface found for %s.' % surf_name)
+        if not op.isfile(surf_fname):
+            raise IOError('Surface file "%s" does not exist' % surf_fname)
+        surf_fnames.append(surf_fname)
+
+    # Plot the contours
+    return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation,
+                              slices=slices, show=show)
+
+
+def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
+                axes=None, equal_spacing=True, show=True):
+    """Plot events to get a visual display of the paradigm
+
+    Parameters
+    ----------
+    events : array, shape (n_events, 3)
+        The events.
+    sfreq : float | None
+        The sample frequency. If None, data will be displayed in samples (not
+        seconds).
+    first_samp : int
+        The index of the first sample. Typically the raw.first_samp
+        attribute. It is needed for recordings on a Neuromag
+        system as the events are defined relative to the system
+        start and not to the beginning of the recording.
+    color : dict | None
+        Dictionary of event_id value and its associated color. If None,
+        colors are drawn automatically from a default list (cycled through
+        if there are more unique events than default colors).
+    event_id : dict | None
+        Dictionary of event label (e.g. 'aud_l') and its associated
+        event_id value. Label used to plot a legend. If None, no legend is
+        drawn.
+    axes : instance of matplotlib.axes.AxesSubplot
+       The subplot handle.
+    equal_spacing : bool
+        Use equal spacing between events in y-axis.
+    show : bool
+        Call pyplot.show() at the end.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The figure object containing the plot.
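+
+    Examples
+    --------
+    A minimal sketch, assuming a Raw object with a stim channel (the
+    event_id mapping below is illustrative):
+
+    >>> events = mne.find_events(raw)  # doctest: +SKIP
+    >>> fig = plot_events(events, raw.info['sfreq'], raw.first_samp,
+    ...                   event_id={'aud_l': 1, 'vis_l': 3})  # doctest: +SKIP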
+    """
+
+    if sfreq is None:
+        sfreq = 1.0
+        xlabel = 'samples'
+    else:
+        xlabel = 'Time (s)'
+
+    events = np.asarray(events)
+    unique_events = np.unique(events[:, 2])
+
+    if event_id is not None:
+        # get labels and unique event ids from event_id dict,
+        # sorted by value
+        event_id_rev = dict((v, k) for k, v in event_id.items())
+        conditions, unique_events_id = zip(*sorted(event_id.items(),
+                                                   key=lambda x: x[1]))
+
+        for this_event in unique_events_id:
+            if this_event not in unique_events:
+                raise ValueError('%s from event_id is not present in events.'
+                                 % this_event)
+
+        for this_event in unique_events:
+            if this_event not in unique_events_id:
+                warnings.warn('event %s is missing from event_id and will '
+                              'be ignored.' % this_event)
+    else:
+        unique_events_id = unique_events
+
+    if color is None:
+        if len(unique_events) > len(COLORS):
+            warnings.warn('More events than colors available. '
+                          'You should pass a list of unique colors.')
+        colors = cycle(COLORS)
+        color = dict()
+        for this_event, this_color in zip(unique_events_id, colors):
+            color[this_event] = this_color
+    else:
+        for this_event in color:
+            if this_event not in unique_events_id:
+                raise ValueError('%s from color is not present in events '
+                                 'or event_id.' % this_event)
+
+        for this_event in unique_events_id:
+            if this_event not in color:
+                warnings.warn('Color is not available for event %d. Default '
+                              'colors will be used.' % this_event)
+
+    import matplotlib.pyplot as plt
+
+    fig = None
+    if axes is None:
+        fig = plt.figure()
+    ax = axes if axes else plt.gca()
+
+    unique_events_id = np.array(unique_events_id)
+    min_event = np.min(unique_events_id)
+    max_event = np.max(unique_events_id)
+
+    for idx, ev in enumerate(unique_events_id):
+        ev_mask = events[:, 2] == ev
+        kwargs = {}
+        if event_id is not None:
+            kwargs['label'] = event_id_rev[ev]
+        if ev in color:
+            kwargs['color'] = color[ev]
+        if equal_spacing:
+            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
+                    (idx + 1) * np.ones(ev_mask.sum()), '.', **kwargs)
+        else:
+            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
+                    events[ev_mask, 2], '.', **kwargs)
+
+    if equal_spacing:
+        ax.set_ylim(0, unique_events_id.size + 1)
+        ax.set_yticks(1 + np.arange(unique_events_id.size))
+        ax.set_yticklabels(unique_events_id)
+    else:
+        ax.set_ylim([min_event - 1, max_event + 1])
+
+    ax.set_xlabel(xlabel)
+    ax.set_ylabel('Events id')
+
+    ax.grid('on')
+
+    if event_id is not None:
+        ax.legend()
+
+    if show:
+        plt.show()
+
+    return fig if fig is not None else plt.gcf()
diff --git a/mne/viz/raw.py b/mne/viz/raw.py
new file mode 100644
index 0000000..aed53da
--- /dev/null
+++ b/mne/viz/raw.py
@@ -0,0 +1,610 @@
+"""Functions to plot raw M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import copy
+from functools import partial
+
+import numpy as np
+
+from ..externals.six import string_types
+from ..io.pick import pick_types
+from ..io.proj import setup_proj
+from ..utils import set_config, get_config, verbose
+from ..time_frequency import compute_raw_psd
+from .utils import figure_nobar, _toggle_options
+from .utils import _mutable_defaults, _toggle_proj, tight_layout
+
+
+def _plot_update_raw_proj(params, bools):
+    """Helper only needs to be called when proj is changed"""
+    inds = np.where(bools)[0]
+    params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
+                               for ii in inds]
+    params['proj_bools'] = bools
+    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
+                                        verbose=False)
+    _update_raw_data(params)
+    params['plot_fun']()
+
+
+def _update_raw_data(params):
+    """Helper only needs to be called when time or proj is changed"""
+    start = params['t_start']
+    stop = params['raw'].time_as_index(start + params['duration'])[0]
+    start = params['raw'].time_as_index(start)[0]
+    data, times = params['raw'][:, start:stop]
+    if params['projector'] is not None:
+        data = np.dot(params['projector'], data)
+    # remove DC
+    if params['remove_dc'] is True:
+        data -= np.mean(data, axis=1)[:, np.newaxis]
+    # scale
+    for di in range(data.shape[0]):
+        data[di] /= params['scalings'][params['types'][di]]
+        # stim channels should be hard limited
+        if params['types'][di] == 'stim':
+            data[di] = np.minimum(data[di], 1.0)
+    params['data'] = data
+    params['times'] = times
+
+
+def _layout_raw(params):
+    """Set raw figure layout"""
+    s = params['fig'].get_size_inches()
+    scroll_width = 0.33
+    hscroll_dist = 0.33
+    vscroll_dist = 0.1
+    l_border = 1.2
+    r_border = 0.1
+    t_border = 0.33
+    b_border = 0.5
+
+    # only bother trying to reset layout if it's reasonable to do so
+    if s[0] < 2 * scroll_width or s[1] < 2 * scroll_width + hscroll_dist:
+        return
+
+    # convert to relative units
+    scroll_width_x = scroll_width / s[0]
+    scroll_width_y = scroll_width / s[1]
+    vscroll_dist /= s[0]
+    hscroll_dist /= s[1]
+    l_border /= s[0]
+    r_border /= s[0]
+    t_border /= s[1]
+    b_border /= s[1]
+    # main axis (traces)
+    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
+    ax_y = hscroll_dist + scroll_width_y + b_border
+    ax_height = 1.0 - ax_y - t_border
+    params['ax'].set_position([l_border, ax_y, ax_width, ax_height])
+    # vscroll (channels)
+    pos = [ax_width + l_border + vscroll_dist, ax_y,
+           scroll_width_x, ax_height]
+    params['ax_vscroll'].set_position(pos)
+    # hscroll (time)
+    pos = [l_border, b_border, ax_width, scroll_width_y]
+    params['ax_hscroll'].set_position(pos)
+    # options button
+    pos = [l_border + ax_width + vscroll_dist, b_border,
+           scroll_width_x, scroll_width_y]
+    params['ax_button'].set_position(pos)
+    params['fig'].canvas.draw()
+
+
+def _helper_resize(event, params):
+    """Helper for resizing"""
+    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
+    set_config('MNE_BROWSE_RAW_SIZE', size)
+    _layout_raw(params)
+
+
+def _pick_bad_channels(event, params):
+    """Helper for selecting / dropping bad channels onpick"""
+    bads = params['raw'].info['bads']
+    # trade-off, avoid selecting more than one channel when drifts are present
+    # however for clean data don't click on peaks but on flat segments
+    f = lambda x, y: y(np.mean(x), x.std() * 2)
+    for l in event.inaxes.lines:
+        ydata = l.get_ydata()
+        if not isinstance(ydata, list) and not np.isnan(ydata).any():
+            ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
+            if ymin <= event.ydata <= ymax:
+                this_chan = vars(l)['ch_name']
+                if this_chan in params['raw'].ch_names:
+                    if this_chan not in bads:
+                        bads.append(this_chan)
+                        l.set_color(params['bad_color'])
+                    else:
+                        bads.pop(bads.index(this_chan))
+                        l.set_color(vars(l)['def-color'])
+                event.canvas.draw()
+                break
+    # update deep-copied info to persistently draw bads
+    params['info']['bads'] = bads
+
+
+def _mouse_click(event, params):
+    """Vertical select callback"""
+    if event.inaxes is None or event.button != 1:
+        return
+    plot_fun = params['plot_fun']
+    # vertical scrollbar changed
+    if event.inaxes == params['ax_vscroll']:
+        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
+        if params['ch_start'] != ch_start:
+            params['ch_start'] = ch_start
+            plot_fun()
+    # horizontal scrollbar changed
+    elif event.inaxes == params['ax_hscroll']:
+        _plot_raw_time(event.xdata - params['duration'] / 2, params)
+
+    elif event.inaxes == params['ax']:
+        _pick_bad_channels(event, params)
+
+
+def _plot_raw_time(value, params):
+    """Deal with changed time value"""
+    info = params['info']
+    max_times = params['n_times'] / float(info['sfreq']) - params['duration']
+    if value > max_times:
+        value = params['n_times'] / info['sfreq'] - params['duration']
+    if value < 0:
+        value = 0
+    if params['t_start'] != value:
+        params['t_start'] = value
+        params['hsel_patch'].set_x(value)
+        _update_raw_data(params)
+        params['plot_fun']()
+
+
+def _plot_raw_onkey(event, params):
+    """Interpret key presses"""
+    import matplotlib.pyplot as plt
+    # check for initial plot
+    plot_fun = params['plot_fun']
+    if event is None:
+        plot_fun()
+        return
+
+    # quit event
+    if event.key == 'escape':
+        plt.close(params['fig'])
+        return
+
+    # change plotting params
+    ch_changed = False
+    if event.key == 'down':
+        params['ch_start'] += params['n_channels']
+        ch_changed = True
+    elif event.key == 'up':
+        params['ch_start'] -= params['n_channels']
+        ch_changed = True
+    elif event.key == 'right':
+        _plot_raw_time(params['t_start'] + params['duration'], params)
+        return
+    elif event.key == 'left':
+        _plot_raw_time(params['t_start'] - params['duration'], params)
+        return
+    elif event.key in ['o', 'p']:
+        _toggle_options(None, params)
+        return
+
+    # deal with plotting changes
+    if ch_changed is True:
+        if params['ch_start'] >= len(params['info']['ch_names']):
+            params['ch_start'] = 0
+        elif params['ch_start'] < 0:
+            # wrap to end
+            rem = len(params['info']['ch_names']) % params['n_channels']
+            params['ch_start'] = len(params['info']['ch_names'])
+            params['ch_start'] -= rem if rem != 0 else params['n_channels']
+
+    if ch_changed:
+        plot_fun()
+
+
+def _plot_traces(params, inds, color, bad_color, lines, event_line, offsets):
+    """Helper for plotting raw"""
+
+    info = params['info']
+    n_channels = params['n_channels']
+    params['bad_color'] = bad_color
+    # do the plotting
+    tick_list = []
+    for ii in range(n_channels):
+        ch_ind = ii + params['ch_start']
+        # let's be generous here and allow users to pass
+        # n_channels per view >= the number of traces available
+        if ii >= len(lines):
+            break
+        elif ch_ind < len(info['ch_names']):
+            # scale to fit
+            ch_name = info['ch_names'][inds[ch_ind]]
+            tick_list += [ch_name]
+            offset = offsets[ii]
+
+            # do NOT operate in-place lest this get screwed up
+            this_data = params['data'][inds[ch_ind]]
+            this_color = bad_color if ch_name in info['bads'] else color
+            if isinstance(this_color, dict):
+                this_color = this_color[params['types'][inds[ch_ind]]]
+
+            # subtraction here gets correct orientation for flipped ylim
+            lines[ii].set_ydata(offset - this_data)
+            lines[ii].set_xdata(params['times'])
+            lines[ii].set_color(this_color)
+            vars(lines[ii])['ch_name'] = ch_name
+            vars(lines[ii])['def-color'] = color[params['types'][inds[ch_ind]]]
+        else:
+            # "remove" lines
+            lines[ii].set_xdata([])
+            lines[ii].set_ydata([])
+    # deal with event lines
+    if params['events'] is not None:
+        t = params['events']
+        t = t[np.where(np.logical_and(t >= params['times'][0],
+                       t <= params['times'][-1]))[0]]
+        if len(t) > 0:
+            xs = list()
+            ys = list()
+            for tt in t:
+                xs += [tt, tt, np.nan]
+                ys += [0, 2 * n_channels + 1, np.nan]
+            event_line.set_xdata(xs)
+            event_line.set_ydata(ys)
+        else:
+            event_line.set_xdata([])
+            event_line.set_ydata([])
+    # finalize plot
+    params['ax'].set_xlim(params['times'][0],
+                          params['times'][0] + params['duration'], False)
+    params['ax'].set_yticklabels(tick_list)
+    params['vsel_patch'].set_y(params['ch_start'])
+    params['fig'].canvas.draw()
+
+
+def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
+             bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
+             event_color='cyan', scalings=None, remove_dc=True, order='type',
+             show_options=False, title=None, show=True, block=False):
+    """Plot raw data
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data to plot.
+    events : array | None
+        Events to show with vertical bars.
+    duration : float
+        Time window (in seconds) to plot at a time.
+    start : float
+        Initial time to show (can be changed dynamically once plotted).
+    n_channels : int
+        Number of channels to plot at once.
+    bgcolor : color object
+        Color of the background.
+    color : dict | color object | None
+        Color for the data traces. If None, defaults to:
+        `dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r', emg='k',
+             ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k')`
+    bad_color : color object
+        Color to make bad channels.
+    event_color : color object
+        Color to use for events.
+    scalings : dict | None
+        Scale factors for the traces. If None, defaults to:
+        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
+             ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
+    remove_dc : bool
+        If True remove DC component when plotting data.
+    order : 'type' | 'original' | array
+        Order in which to plot data. 'type' groups by channel type,
+        'original' plots in the order of ch_names, array gives the
+        indices to use in plotting.
+    show_options : bool
+        If True, a dialog for options related to projection is shown.
+    title : str | None
+        The title of the window. If None, the filename of the raw object
+        (or '<unknown>') will be displayed as the title.
+    show : bool
+        Show figure if True.
+    block : bool
+        Whether to halt program execution until the figure is closed.
+        Useful for setting bad channels on the fly by clicking on a line.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Raw traces.
+
+    Notes
+    -----
+    The arrow keys (up/down/left/right) can typically be used to navigate
+    between channels and time ranges, but this depends on the backend
+    matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
+    To mark or un-mark a channel as bad, click on the rather flat segments
+    of a channel's time series. The changes will be reflected immediately
+    in the raw object's ``raw.info['bads']`` entry.
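+
+    Examples
+    --------
+    A minimal sketch, assuming a Raw object (blocking is useful when
+    marking bad channels interactively):
+
+    >>> fig = plot_raw(raw, duration=20.0, n_channels=30,
+    ...                block=True)  # doctest: +SKIP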
+    """
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    color, scalings = _mutable_defaults(('color', color),
+                                        ('scalings_plot_raw', scalings))
+
+    # make a copy of info, remove projection (for now)
+    info = copy.deepcopy(raw.info)
+    projs = info['projs']
+    info['projs'] = []
+    n_times = raw.n_times
+
+    # allow for raw objects without filename, e.g., ICA
+    if title is None:
+        title = raw._filenames
+        if len(title) == 0:  # empty list or absent key
+            title = '<unknown>'
+        elif len(title) == 1:
+            title = title[0]
+        else:  # if len(title) > 1:
+            title = '%s ... (+ %d more) ' % (title[0], len(title) - 1)
+            if len(title) > 60:
+                title = '...' + title[-60:]
+    elif not isinstance(title, string_types):
+        raise TypeError('title must be None or a string')
+    if events is not None:
+        events = events[:, 0].astype(float) - raw.first_samp
+        events /= info['sfreq']
+
+    # reorganize the data in plotting order
+    inds = list()
+    types = list()
+    for t in ['grad', 'mag']:
+        inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])]
+        types += [t] * len(inds[-1])
+    pick_kwargs = dict(meg=False, exclude=[])
+    for t in ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp',
+              'misc', 'chpi', 'syst', 'ias', 'exci']:
+        pick_kwargs[t] = True
+        inds += [pick_types(raw.info, **pick_kwargs)]
+        types += [t] * len(inds[-1])
+        pick_kwargs[t] = False
+    inds = np.concatenate(inds).astype(int)
+    if len(inds) != len(info['ch_names']):
+        raise RuntimeError('Some channels not classified, please report '
+                           'this problem')
+
+    # put them back to original or modified order for natural plotting
+    reord = np.argsort(inds)
+    types = [types[ri] for ri in reord]
+    if isinstance(order, str):
+        if order == 'original':
+            inds = inds[reord]
+        elif order != 'type':
+            raise ValueError('Unknown order type %s' % order)
+    elif isinstance(order, np.ndarray):
+        if not np.array_equal(np.sort(order),
+                              np.arange(len(info['ch_names']))):
+            raise ValueError('order, if array, must have integers from '
+                             '0 to n_channels - 1')
+        # put back to original order first, then use new order
+        inds = inds[reord][order]
+
+    # set up projection and data parameters
+    params = dict(raw=raw, ch_start=0, t_start=start, duration=duration,
+                  info=info, projs=projs, remove_dc=remove_dc,
+                  n_channels=n_channels, scalings=scalings, types=types,
+                  n_times=n_times, events=events)
+
+    # set up plotting
+    size = get_config('MNE_BROWSE_RAW_SIZE')
+    if size is not None:
+        size = size.split(',')
+        size = tuple([float(s) for s in size])
+        # have to try/catch when there's no toolbar
+    fig = figure_nobar(facecolor=bgcolor, figsize=size)
+    fig.canvas.set_window_title('mne_browse_raw')
+    ax = plt.subplot2grid((10, 10), (0, 0), colspan=9, rowspan=9)
+    ax.set_title(title, fontsize=12)
+    ax_hscroll = plt.subplot2grid((10, 10), (9, 0), colspan=9)
+    ax_hscroll.get_yaxis().set_visible(False)
+    ax_hscroll.set_xlabel('Time (s)')
+    ax_vscroll = plt.subplot2grid((10, 10), (0, 9), rowspan=9)
+    ax_vscroll.set_axis_off()
+    ax_button = plt.subplot2grid((10, 10), (9, 9))
+    # store these so they can be fixed on resize
+    params['fig'] = fig
+    params['ax'] = ax
+    params['ax_hscroll'] = ax_hscroll
+    params['ax_vscroll'] = ax_vscroll
+    params['ax_button'] = ax_button
+
+    # populate vertical and horizontal scrollbars
+    for ci in range(len(info['ch_names'])):
+        this_color = (bad_color if info['ch_names'][inds[ci]] in info['bads']
+                      else color)
+        if isinstance(this_color, dict):
+            this_color = this_color[types[inds[ci]]]
+        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
+                                                   facecolor=this_color,
+                                                   edgecolor=this_color))
+    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
+                                       facecolor='w', edgecolor='w')
+    ax_vscroll.add_patch(vsel_patch)
+    params['vsel_patch'] = vsel_patch
+    hsel_patch = mpl.patches.Rectangle((start, 0), duration, 1, color='k',
+                                       edgecolor=None, alpha=0.5)
+    ax_hscroll.add_patch(hsel_patch)
+    params['hsel_patch'] = hsel_patch
+    ax_hscroll.set_xlim(0, n_times / float(info['sfreq']))
+    n_ch = len(info['ch_names'])
+    ax_vscroll.set_ylim(n_ch, 0)
+    ax_vscroll.set_title('Ch.')
+
+    # make shells for plotting traces
+    offsets = np.arange(n_channels) * 2 + 1
+    ax.set_yticks(offsets)
+    ax.set_ylim([n_channels * 2 + 1, 0])
+    # plot event_line first so it's in the back
+    event_line = ax.plot([np.nan], color=event_color)[0]
+    lines = [ax.plot([np.nan])[0] for _ in range(n_ch)]
+    ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
+
+    params['plot_fun'] = partial(_plot_traces, params=params, inds=inds,
+                                 color=color, bad_color=bad_color, lines=lines,
+                                 event_line=event_line, offsets=offsets)
+
+    # set up callbacks
+    opt_button = mpl.widgets.Button(ax_button, 'Opt')
+    callback_option = partial(_toggle_options, params=params)
+    opt_button.on_clicked(callback_option)
+    callback_key = partial(_plot_raw_onkey, params=params)
+    fig.canvas.mpl_connect('key_press_event', callback_key)
+    callback_pick = partial(_mouse_click, params=params)
+    fig.canvas.mpl_connect('button_press_event', callback_pick)
+    callback_resize = partial(_helper_resize, params=params)
+    fig.canvas.mpl_connect('resize_event', callback_resize)
+
+    # As here code is shared with plot_evoked, some extra steps:
+    # first the actual plot update function
+    params['plot_update_proj_callback'] = _plot_update_raw_proj
+    # then the toggle handler
+    callback_proj = partial(_toggle_proj, params=params)
+    # store these for use by callbacks in the options figure
+    params['callback_proj'] = callback_proj
+    params['callback_key'] = callback_key
+    # have to store this, or it could get garbage-collected
+    params['opt_button'] = opt_button
+
+    # do initial plots
+    callback_proj('none')
+    _layout_raw(params)
+
+    # deal with projectors
+    params['fig_opts'] = None
+    if show_options is True:
+        _toggle_options(None, params)
+
+    if show:
+        plt.show(block=block)
+
+    return fig
+
+
+ at verbose
+def plot_raw_psds(raw, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
+                  proj=False, n_fft=2048, picks=None, ax=None, color='black',
+                  area_mode='std', area_alpha=0.33, n_jobs=1, verbose=None):
+    """Plot the power spectral density across channels
+
+    Parameters
+    ----------
+    raw : instance of io.Raw
+        The raw instance to use.
+    tmin : float
+        Start time for calculations.
+    tmax : float
+        End time for calculations.
+    fmin : float
+        Start frequency to consider.
+    fmax : float
+        End frequency to consider.
+    proj : bool
+        Apply projection.
+    n_fft : int
+        Number of points to use in Welch FFT calculations.
+    picks : array-like of int | None
+        List of channels to use. Cannot be None if `ax` is supplied. If both
+        `picks` and `ax` are None, separate subplots will be created for
+        each standard channel type (`mag`, `grad`, and `eeg`).
+    ax : instance of matplotlib Axes | None
+        Axes to plot into. If None, axes will be created.
+    color : str | tuple
+        A matplotlib-compatible color to use.
+    area_mode : str | None
+        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
+        will be plotted. If 'range', the min and max (across channels) will be
+        plotted. Bad channels will be excluded from these calculations.
+        If None, no area will be plotted.
+    area_alpha : float
+        Alpha for the area.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
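+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        Figure with the PSD plot(s).
+
+    Examples
+    --------
+    A minimal sketch, assuming a Raw object with MEG and/or EEG channels:
+
+    >>> fig = plot_raw_psds(raw, tmin=0., tmax=60., fmax=100.)  # doctest: +SKIP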
+    """
+    import matplotlib.pyplot as plt
+    if area_mode not in [None, 'std', 'range']:
+        raise ValueError('"area_mode" must be "std", "range", or None')
+    if picks is None:
+        if ax is not None:
+            raise ValueError('If "ax" is not supplied (None), then "picks" '
+                             'must also be supplied')
+        megs = ['mag', 'grad', False]
+        eegs = [False, False, True]
+        names = ['Magnetometers', 'Gradiometers', 'EEG']
+        picks_list = list()
+        titles_list = list()
+        for meg, eeg, name in zip(megs, eegs, names):
+            picks = pick_types(raw.info, meg=meg, eeg=eeg, ref_meg=False)
+            if len(picks) > 0:
+                picks_list.append(picks)
+                titles_list.append(name)
+        if len(picks_list) == 0:
+            raise RuntimeError('No MEG or EEG channels found')
+    else:
+        picks_list = [picks]
+        titles_list = ['Selected channels']
+        ax_list = [ax]
+
+    make_label = False
+    fig = None
+    if ax is None:
+        fig = plt.figure()
+        ax_list = list()
+        for ii in range(len(picks_list)):
+            # Make x-axes change together
+            if ii > 0:
+                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1,
+                                           sharex=ax_list[0]))
+            else:
+                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
+        make_label = True
+    else:
+        fig = ax_list[0].get_figure()
+
+    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
+                                                ax_list)):
+        psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                      fmin=fmin, fmax=fmax, n_fft=n_fft,
+                                      n_jobs=n_jobs, plot=False, proj=proj)
+
+        # Convert PSDs to dB
+        psds = 10 * np.log10(psds)
+        psd_mean = np.mean(psds, axis=0)
+        if area_mode == 'std':
+            psd_std = np.std(psds, axis=0)
+            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
+        elif area_mode == 'range':
+            hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
+        else:  # area_mode is None
+            hyp_limits = None
+
+        ax.plot(freqs, psd_mean, color=color)
+        if hyp_limits is not None:
+            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
+                            color=color, alpha=area_alpha)
+        if make_label:
+            if ii == len(picks_list) - 1:
+                ax.set_xlabel('Freq (Hz)')
+            if ii == len(picks_list) // 2:
+                ax.set_ylabel('Power Spectral Density (dB/Hz)')
+            ax.set_title(title)
+            ax.set_xlim(freqs[0], freqs[-1])
+    if make_label:
+        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
+    plt.show()
+    return fig
diff --git a/mne/fiff/bti/tests/__init__.py b/mne/viz/tests/__init__.py
similarity index 100%
copy from mne/fiff/bti/tests/__init__.py
copy to mne/viz/tests/__init__.py
diff --git a/mne/fiff/bti/tests/__init__.py b/mne/viz/tests/__init__py
similarity index 100%
rename from mne/fiff/bti/tests/__init__.py
rename to mne/viz/tests/__init__py
diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py
new file mode 100644
index 0000000..e39f410
--- /dev/null
+++ b/mne/viz/tests/test_3d.py
@@ -0,0 +1,115 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises
+
+from mne import SourceEstimate
+from mne import make_field_map, pick_channels_evoked, read_evokeds
+from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
+                     plot_trans, mne_analyze_colormap)
+from mne.datasets import sample
+from mne.source_space import read_source_spaces
+
+data_dir = sample.data_path(download=False)
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+subjects_dir = op.join(data_dir, 'subjects')
+
+lacks_mayavi = False
+try:
+    from mayavi import mlab
+except ImportError:
+    try:
+        from enthought.mayavi import mlab
+    except ImportError:
+        lacks_mayavi = True
+requires_mayavi = np.testing.dec.skipif(lacks_mayavi, 'Requires mayavi')
+
+if not lacks_mayavi:
+    mlab.options.backend = 'test'
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+
+ at sample.requires_sample_data
+ at requires_mayavi
+def test_plot_sparse_source_estimates():
+    """Test plotting of (sparse) source estimates
+    """
+    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
+                                            'bem', 'sample-oct-6-src.fif'))
+
+    # dense version
+    vertices = [s['vertno'] for s in sample_src]
+    n_time = 5
+    n_verts = sum(len(v) for v in vertices)
+    stc_data = np.zeros((n_verts * n_time))
+    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
+    stc_data.shape = (n_verts, n_time)
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    colormap = mne_analyze_colormap(format='matplotlib')
+    # don't really need to test matplotlib method since it's not used now...
+    colormap = mne_analyze_colormap()
+    plot_source_estimates(stc, 'sample', colormap=colormap,
+                          config_opts={'background': (1, 1, 0)},
+                          subjects_dir=subjects_dir, colorbar=True)
+    assert_raises(TypeError, plot_source_estimates, stc, 'sample',
+                  figure='foo', hemi='both')
+
+    # now do sparse version
+    vertices = sample_src[0]['vertno']
+    inds = [111, 333]
+    stc_data = np.zeros((len(inds), n_time))
+    stc_data[0, 1] = 1.
+    stc_data[1, 4] = 2.
+    vertices = [vertices[inds], np.empty(0, dtype=np.int)]
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
+                                 opacity=0.5, high_resolution=False)
+
+
+ at requires_mayavi
+ at sample.requires_sample_data
+def test_plot_evoked_field():
+    """Test plotting evoked field
+    """
+    trans_fname = op.join(data_dir, 'MEG', 'sample',
+                          'sample_audvis_raw-trans.fif')
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
+                          baseline=(-0.2, 0.0))
+    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
+    for t in ['meg', None]:
+        maps = make_field_map(evoked, trans_fname=trans_fname,
+                              subject='sample', subjects_dir=subjects_dir,
+                              n_jobs=1, ch_type=t)
+
+        evoked.plot_field(maps, time=0.1)
+
+
+ at requires_mayavi
+ at sample.requires_sample_data
+def test_plot_trans():
+    """Test plotting of -trans.fif files
+    """
+    trans_fname = op.join(data_dir, 'MEG', 'sample',
+                          'sample_audvis_raw-trans.fif')
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
+                          baseline=(-0.2, 0.0))
+    plot_trans(evoked.info, trans_fname=trans_fname, subject='sample',
+               subjects_dir=subjects_dir)
+    assert_raises(ValueError, plot_trans, evoked.info, trans_fname=trans_fname,
+                  subject='sample', subjects_dir=subjects_dir,
+                  ch_type='bad-chtype')
diff --git a/mne/viz/tests/test_circle.py b/mne/viz/tests/test_circle.py
new file mode 100644
index 0000000..3e6ded6
--- /dev/null
+++ b/mne/viz/tests/test_circle.py
@@ -0,0 +1,94 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+
+import numpy as np
+from numpy.testing import assert_raises
+
+from mne.viz import plot_connectivity_circle, circular_layout
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+
+def test_plot_connectivity_circle():
+    """Test plotting connectivity circle
+    """
+    node_order = ['frontalpole-lh', 'parsorbitalis-lh',
+                  'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
+                  'medialorbitofrontal-lh', 'parstriangularis-lh',
+                  'rostralanteriorcingulate-lh', 'temporalpole-lh',
+                  'parsopercularis-lh', 'caudalanteriorcingulate-lh',
+                  'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh',
+                  'caudalmiddlefrontal-lh', 'superiortemporal-lh',
+                  'parahippocampal-lh', 'middletemporal-lh',
+                  'inferiortemporal-lh', 'precentral-lh',
+                  'transversetemporal-lh', 'posteriorcingulate-lh',
+                  'fusiform-lh', 'postcentral-lh', 'bankssts-lh',
+                  'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh',
+                  'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh',
+                  'superiorparietal-lh', 'pericalcarine-lh',
+                  'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh',
+                  'lateraloccipital-rh', 'pericalcarine-rh',
+                  'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh',
+                  'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh',
+                  'supramarginal-rh', 'bankssts-rh', 'postcentral-rh',
+                  'fusiform-rh', 'posteriorcingulate-rh',
+                  'transversetemporal-rh', 'precentral-rh',
+                  'inferiortemporal-rh', 'middletemporal-rh',
+                  'parahippocampal-rh', 'superiortemporal-rh',
+                  'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh',
+                  'entorhinal-rh', 'caudalanteriorcingulate-rh',
+                  'parsopercularis-rh', 'temporalpole-rh',
+                  'rostralanteriorcingulate-rh', 'parstriangularis-rh',
+                  'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh',
+                  'lateralorbitofrontal-rh', 'parsorbitalis-rh',
+                  'frontalpole-rh']
+    label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh',
+                   'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh',
+                   'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh',
+                   'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh',
+                   'frontalpole-rh', 'fusiform-lh', 'fusiform-rh',
+                   'inferiorparietal-lh', 'inferiorparietal-rh',
+                   'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh',
+                   'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh',
+                   'lateraloccipital-lh', 'lateraloccipital-rh',
+                   'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh',
+                   'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh',
+                   'medialorbitofrontal-rh', 'middletemporal-lh',
+                   'middletemporal-rh', 'paracentral-lh', 'paracentral-rh',
+                   'parahippocampal-lh', 'parahippocampal-rh',
+                   'parsopercularis-lh', 'parsopercularis-rh',
+                   'parsorbitalis-lh', 'parsorbitalis-rh',
+                   'parstriangularis-lh', 'parstriangularis-rh',
+                   'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh',
+                   'postcentral-rh', 'posteriorcingulate-lh',
+                   'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh',
+                   'precuneus-lh', 'precuneus-rh',
+                   'rostralanteriorcingulate-lh',
+                   'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh',
+                   'rostralmiddlefrontal-rh', 'superiorfrontal-lh',
+                   'superiorfrontal-rh', 'superiorparietal-lh',
+                   'superiorparietal-rh', 'superiortemporal-lh',
+                   'superiortemporal-rh', 'supramarginal-lh',
+                   'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh',
+                   'transversetemporal-lh', 'transversetemporal-rh']
+
+    group_boundaries = [0, len(label_names) // 2]
+    node_angles = circular_layout(label_names, node_order, start_pos=90,
+                                  group_boundaries=group_boundaries)
+    con = np.random.randn(68, 68)
+    plot_connectivity_circle(con, label_names, n_lines=300,
+                             node_angles=node_angles, title='test')
+
+    plt.close('all')
+    assert_raises(ValueError, circular_layout, label_names, node_order,
+                  group_boundaries=[-1])
+    assert_raises(ValueError, circular_layout, label_names, node_order,
+                  group_boundaries=[20, 0])
diff --git a/mne/viz/tests/test_epochs.py b/mne/viz/tests/test_epochs.py
new file mode 100644
index 0000000..0f13843
--- /dev/null
+++ b/mne/viz/tests/test_epochs.py
@@ -0,0 +1,117 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+from mne import io, read_events, Epochs
+from mne import pick_types
+from mne.layouts import read_layout
+from mne.datasets import sample
+
+from mne.viz import plot_drop_log, plot_image_epochs
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+data_dir = sample.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.1, 0.1
+n_chan = 15
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return pick_types(raw.info, meg=True, eeg=False, stim=False,
+                      ecg=False, eog=False, exclude='bads')
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    # Use a subset of channels for plotting speed
+    picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    return epochs
+
+
+def _get_epochs_delayed_ssp():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    reject = dict(mag=4e-12)
+    epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                proj='delayed', reject=reject)
+    return epochs_delayed_ssp
+
+
+def test_plot_epochs():
+    """ Test plotting epochs
+    """
+    epochs = _get_epochs()
+    epochs.plot([0, 1], picks=[0, 2, 3], scalings=None, title_str='%s')
+    epochs[0].plot(picks=[0, 2, 3], scalings=None, title_str='%s')
+    # test clicking: should increase coverage on
+    # 3200-3226, 3235, 3237, 3239-3242, 3245-3255, 3260-3280
+    fig = plt.gcf()
+    fig.canvas.button_press_event(10, 10, 'left')
+    # now let's add a bad channel
+    epochs.info['bads'] = [epochs.ch_names[0]]  # include a bad one
+    epochs.plot([0, 1], picks=[0, 2, 3], scalings=None, title_str='%s')
+    epochs[0].plot(picks=[0, 2, 3], scalings=None, title_str='%s')
+    plt.close('all')
+
+
+def test_plot_image_epochs():
+    """Test plotting of epochs image
+    """
+    epochs = _get_epochs()
+    plot_image_epochs(epochs, picks=[1, 2])
+    plt.close('all')
+
+
+def test_plot_drop_log():
+    """Test plotting a drop log
+    """
+    epochs = _get_epochs()
+    epochs.drop_bad_epochs()
+
+    warnings.simplefilter('always', UserWarning)
+    with warnings.catch_warnings(record=True):
+        epochs.plot_drop_log()
+
+        plot_drop_log([['One'], [], []])
+        plot_drop_log([['One'], ['Two'], []])
+        plot_drop_log([['One'], ['One', 'Two'], []])
+    plt.close('all')
+
diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py
new file mode 100644
index 0000000..d4bb40f
--- /dev/null
+++ b/mne/viz/tests/test_evoked.py
@@ -0,0 +1,106 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+from mne import io, read_events, Epochs
+from mne import pick_types
+from mne.layouts import read_layout
+from mne.datasets import sample
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+data_dir = sample.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.1, 0.1
+n_chan = 6
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return pick_types(raw.info, meg=True, eeg=False, stim=False,
+                      ecg=False, eog=False, exclude='bads')
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    # Use a subset of channels for plotting speed
+    picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    return epochs
+
+
+def _get_epochs_delayed_ssp():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    reject = dict(mag=4e-12)
+    epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                proj='delayed', reject=reject)
+    return epochs_delayed_ssp
+
+
+def test_plot_evoked():
+    """Test plotting of evoked
+    """
+    evoked = _get_epochs().average()
+    with warnings.catch_warnings(record=True):
+        evoked.plot(proj=True, hline=[1])
+        # plot with bad channels excluded
+        evoked.plot(exclude='bads')
+        evoked.plot(exclude=evoked.info['bads'])  # does the same thing
+
+        # test selective updating of dict keys is working.
+        evoked.plot(hline=[1], units=dict(mag='femto foo'))
+        evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
+        evoked_delayed_ssp.plot(proj='interactive')
+        evoked_delayed_ssp.apply_proj()
+        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
+                      proj='interactive')
+        evoked_delayed_ssp.info['projs'] = []
+        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
+                      proj='interactive')
+        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
+                      proj='interactive', axes='foo')
+
+        evoked.plot_image(proj=True)
+        # plot with bad channels excluded
+        evoked.plot_image(exclude='bads')
+        evoked.plot_image(exclude=evoked.info['bads'])  # does the same thing
+        plt.close('all')
diff --git a/mne/viz/tests/test_ica.py b/mne/viz/tests/test_ica.py
new file mode 100644
index 0000000..8532902
--- /dev/null
+++ b/mne/viz/tests/test_ica.py
@@ -0,0 +1,140 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: Simplified BSD
+
+import os.path as op
+from functools import wraps
+import warnings
+
+from numpy.testing import assert_raises
+
+from mne import io, read_events, Epochs, read_cov
+from mne import pick_types
+from mne.datasets import sample
+from mne.utils import check_sklearn_version
+from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+
+data_dir = sample.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.1, 0.2
+
+
+def requires_sklearn(function):
+    """Decorator to skip test if scikit-learn >= 0.12 is not available"""
+    @wraps(function)
+    def dec(*args, **kwargs):
+        if not check_sklearn_version(min_version='0.12'):
+            from nose.plugins.skip import SkipTest
+            raise SkipTest('Test %s skipped, requires scikit-learn >= 0.12'
+                           % function.__name__)
+        ret = function(*args, **kwargs)
+        return ret
+    return dec
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return [0, 1, 2, 6, 7, 8, 12, 13, 14]  # take only a few channels
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    return epochs
+
+
+@requires_sklearn
+def test_plot_ica_components():
+    """Test plotting of ICA solutions
+    """
+    raw = _get_raw()
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica_picks = _get_picks(raw)
+    ica.fit(raw, picks=ica_picks)
+    warnings.simplefilter('always', UserWarning)
+    with warnings.catch_warnings(record=True):
+        for components in [0, [0], [0, 1], [0, 1] * 2, None]:
+            ica.plot_components(components, image_interp='bilinear', res=16)
+    ica.info = None
+    assert_raises(RuntimeError, ica.plot_components, 1)
+    plt.close('all')
+
+
+@requires_sklearn
+def test_plot_ica_sources():
+    """Test plotting of ICA panel
+    """
+    raw = io.Raw(raw_fname, preload=True)
+    picks = _get_picks(raw)
+    epochs = _get_epochs()
+    raw.pick_channels([raw.ch_names[k] for k in picks])
+    ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
+                           ecg=False, eog=False, exclude='bads')
+    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=ica_picks)
+    ica.plot_sources(raw)
+    ica.plot_sources(epochs)
+    with warnings.catch_warnings(record=True):  # no labeled objects mpl
+        ica.plot_sources(epochs.average())
+    assert_raises(ValueError, ica.plot_sources, 'meeow')
+    plt.close('all')
+
+
+@requires_sklearn
+def test_plot_ica_overlay():
+    """Test plotting of ICA cleaning
+    """
+    raw = _get_raw()
+    picks = _get_picks(raw)
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=picks)
+    # don't test raw, needs preload ...
+    ecg_epochs = create_ecg_epochs(raw, picks=picks)
+    ica.plot_overlay(ecg_epochs.average())
+    eog_epochs = create_eog_epochs(raw, picks=picks)
+    ica.plot_overlay(eog_epochs.average())
+    assert_raises(ValueError, ica.plot_overlay, raw[:2, :3][0])
+    plt.close('all')
+
+
+@requires_sklearn
+def test_plot_ica_scores():
+    """Test plotting of ICA scores
+    """
+    raw = _get_raw()
+    picks = _get_picks(raw)
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=picks)
+    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
+    assert_raises(ValueError, ica.plot_scores, [0.2])
+    plt.close('all')
diff --git a/mne/viz/tests/test_misc.py b/mne/viz/tests/test_misc.py
new file mode 100644
index 0000000..8b76ccf
--- /dev/null
+++ b/mne/viz/tests/test_misc.py
@@ -0,0 +1,114 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises
+
+from mne import io, read_events, read_cov, read_source_spaces
+from mne import SourceEstimate
+from mne.datasets import sample
+
+from mne.viz import plot_cov, plot_bem, plot_events
+from mne.viz import plot_source_spectrogram
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+
+data_dir = sample.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=True)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def test_plot_cov():
+    """Test plotting of covariances
+    """
+    raw = _get_raw()
+    cov = read_cov(cov_fname)
+    fig1, fig2 = plot_cov(cov, raw.info, proj=True, exclude=raw.ch_names[6:])
+    plt.close('all')
+
+
+@sample.requires_sample_data
+def test_plot_bem():
+    """Test plotting of BEM contours
+    """
+    assert_raises(IOError, plot_bem, subject='bad-subject',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, plot_bem, subject='sample',
+                  subjects_dir=subjects_dir, orientation='bad-ori')
+    plot_bem(subject='sample', subjects_dir=subjects_dir,
+             orientation='sagittal', slices=[50, 100])
+
+
+def test_plot_events():
+    """Test plotting events
+    """
+    event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
+    color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
+    raw = _get_raw()
+    events = _get_events()
+    plot_events(events, raw.info['sfreq'], raw.first_samp)
+    plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
+    # Test plotting events without sfreq
+    plot_events(events, first_samp=raw.first_samp)
+    warnings.simplefilter('always', UserWarning)
+    with warnings.catch_warnings(record=True):
+        plot_events(events, raw.info['sfreq'], raw.first_samp,
+                    event_id=event_labels)
+        plot_events(events, raw.info['sfreq'], raw.first_samp,
+                    color=color)
+        plot_events(events, raw.info['sfreq'], raw.first_samp,
+                    event_id=event_labels, color=color)
+        assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
+                      raw.first_samp, event_id={'aud_l': 1}, color=color)
+        assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
+                      raw.first_samp, event_id={'aud_l': 111}, color=color)
+
+
+@sample.requires_sample_data
+def test_plot_source_spectrogram():
+    """Test plotting of source spectrogram
+    """
+    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
+                                            'bem', 'sample-oct-6-src.fif'))
+
+    # dense version
+    vertices = [s['vertno'] for s in sample_src]
+    n_times = 5
+    n_verts = sum(len(v) for v in vertices)
+    stc_data = np.ones((n_verts, n_times))
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
+    assert_raises(ValueError, plot_source_spectrogram, [], [])
+    assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
+                  [[1, 2], [3, 4]], tmin=0)
+    assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
+                  [[1, 2], [3, 4]], tmax=7)
diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py
new file mode 100644
index 0000000..e76a7db
--- /dev/null
+++ b/mne/viz/tests/test_raw.py
@@ -0,0 +1,107 @@
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+from numpy.testing import assert_raises
+
+from mne import io, read_events, pick_types
+from mne.datasets import sample
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+
+data_dir = sample.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+
+
+def _fake_click(fig, ax, point, xform='ax'):
+    """Helper to fake a click at a relative point within axes"""
+    if xform == 'ax':
+        x, y = ax.transAxes.transform_point(point)
+    elif xform == 'data':
+        x, y = ax.transData.transform_point(point)
+    else:
+        raise ValueError('unknown transform')
+    try:
+        fig.canvas.button_press_event(x, y, 1, False, None)
+    except:  # for old MPL
+        fig.canvas.button_press_event(x, y, 1, False)
+
+
+def _get_raw():
+    raw = io.Raw(raw_fname, preload=True)
+    raw.pick_channels(raw.ch_names[:9])
+    return raw
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def test_plot_raw():
+    """Test plotting of raw data
+    """
+    raw = _get_raw()
+    events = _get_events()
+    plt.close('all')  # ensure all are closed
+    with warnings.catch_warnings(record=True):
+        fig = raw.plot(events=events, show_options=True)
+        # test mouse clicks
+        x = fig.get_axes()[0].lines[1].get_xdata().mean()
+        y = fig.get_axes()[0].lines[1].get_ydata().mean()
+        data_ax = fig.get_axes()[0]
+        _fake_click(fig, data_ax, [x, y], xform='data')  # mark a bad channel
+        _fake_click(fig, data_ax, [x, y], xform='data')  # unmark a bad channel
+        _fake_click(fig, data_ax, [0.5, 0.999])  # click elsewhere in 1st axes
+        _fake_click(fig, fig.get_axes()[1], [0.5, 0.5])  # change time
+        _fake_click(fig, fig.get_axes()[2], [0.5, 0.5])  # change channels
+        _fake_click(fig, fig.get_axes()[3], [0.5, 0.5])  # open SSP window
+        fig.canvas.button_press_event(1, 1, 1)  # outside any axes
+        # sadly these fail when no renderer is used (i.e., when using Agg):
+        #ssp_fig = set(plt.get_fignums()) - set([fig.number])
+        #assert_equal(len(ssp_fig), 1)
+        #ssp_fig = plt.figure(list(ssp_fig)[0])
+        #ax = ssp_fig.get_axes()[0]  # only one axis is used
+        #t = [c for c in ax.get_children() if isinstance(c,
+        #     matplotlib.text.Text)]
+        #pos = np.array(t[0].get_position()) + 0.01
+        #_fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # off
+        #_fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # on
+        # test keypresses
+        fig.canvas.key_press_event('escape')
+        fig.canvas.key_press_event('down')
+        fig.canvas.key_press_event('up')
+        fig.canvas.key_press_event('right')
+        fig.canvas.key_press_event('left')
+        fig.canvas.key_press_event('o')
+        fig.canvas.key_press_event('escape')
+        plt.close('all')
+
+
+def test_plot_raw_psds():
+    """Test plotting of raw psds
+    """
+    raw = _get_raw()
+    # normal mode
+    raw.plot_psds(tmax=2.0)
+    # specific mode
+    picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
+    raw.plot_psds(picks=picks, area_mode='range')
+    ax = plt.axes()
+    # if ax is supplied, picks must be, too:
+    assert_raises(ValueError, raw.plot_psds, ax=ax)
+    raw.plot_psds(picks=picks, ax=ax)
+    plt.close('all')
diff --git a/mne/viz/tests/test_topo.py b/mne/viz/tests/test_topo.py
new file mode 100644
index 0000000..a0f2b24
--- /dev/null
+++ b/mne/viz/tests/test_topo.py
@@ -0,0 +1,119 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+from mne import io, read_events, Epochs
+from mne import pick_channels_evoked
+from mne.layouts import read_layout
+from mne.datasets import sample
+from mne.time_frequency.tfr import AverageTFR
+
+from mne.viz import plot_topo, plot_topo_image_epochs
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+data_dir = sample.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.2, 0.2
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return [0, 1, 2, 6, 7, 8, 12, 13, 14]  # take only a few channels
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    return epochs
+
+
+def _get_epochs_delayed_ssp():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    reject = dict(mag=4e-12)
+    epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                proj='delayed', reject=reject)
+    return epochs_delayed_ssp
+
+
+def test_plot_topo():
+    """Test plotting of ERP topography
+    """
+    # Show topography
+    evoked = _get_epochs().average()
+    plot_topo(evoked, layout)
+    warnings.simplefilter('always', UserWarning)
+    picked_evoked = pick_channels_evoked(evoked, evoked.ch_names[:3])
+
+    # test scaling
+    with warnings.catch_warnings(record=True):
+        for ylim in [dict(mag=[-600, 600]), None]:
+            plot_topo([picked_evoked] * 2, layout, ylim=ylim)
+
+        for evo in [evoked, [evoked, picked_evoked]]:
+            assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
+
+        evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
+        ch_names = evoked_delayed_ssp.ch_names[:3]  # make it faster
+        picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
+                                                         ch_names)
+        plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
+
+
+def test_plot_topo_image_epochs():
+    """Test plotting of epochs image topography
+    """
+    title = 'ERF images - MNE sample data'
+    epochs = _get_epochs()
+    plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
+                           colorbar=True, title=title)
+    plt.close('all')
+
+
+def test_plot_tfr_topo():
+    """Test plotting of TFR data
+    """
+    epochs = _get_epochs()
+    n_freqs = 3
+    nave = 1
+    data = np.random.randn(len(epochs.ch_names), n_freqs, len(epochs.times))
+    tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
+    tfr.plot_topo(baseline=(None, 0), mode='ratio', title='Average power',
+                  vmin=0., vmax=14.)
+    tfr.plot([4], baseline=(None, 0), mode='ratio')
diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py
new file mode 100644
index 0000000..e58042a
--- /dev/null
+++ b/mne/viz/tests/test_topomap.py
@@ -0,0 +1,131 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises
+
+from nose.tools import assert_true, assert_equal
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+from mne import io
+from mne import read_evokeds, read_proj
+from mne.io.constants import FIFF
+from mne.layouts import read_layout
+from mne.datasets import sample
+from mne.time_frequency.tfr import AverageTFR
+
+from mne.viz import plot_evoked_topomap, plot_projs_topomap
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+data_dir = sample.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+@sample.requires_sample_data
+def test_plot_topomap():
+    """Test topomap plotting
+    """
+    # evoked
+    warnings.simplefilter('always', UserWarning)
+    res = 16
+    with warnings.catch_warnings(record=True):
+        evoked = read_evokeds(evoked_fname, 'Left Auditory',
+                              baseline=(None, 0))
+        evoked.plot_topomap(0.1, 'mag', layout=layout)
+        mask = np.zeros_like(evoked.data, dtype=bool)
+        mask[[1, 5], :] = True
+        evoked.plot_topomap(None, ch_type='mag', outlines=None)
+        times = [0.1]
+        evoked.plot_topomap(times, ch_type='eeg', res=res)
+        evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
+        evoked.plot_topomap(times, ch_type='planar1', res=res)
+        evoked.plot_topomap(times, ch_type='planar2', res=res)
+        evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
+                            show_names=True, mask_params={'marker': 'x'})
+
+        p = evoked.plot_topomap(times, ch_type='grad', res=res,
+                                show_names=lambda x: x.replace('MEG', ''),
+                                image_interp='bilinear')
+        subplot = [x for x in p.get_children() if
+                   isinstance(x, matplotlib.axes.Subplot)][0]
+        assert_true(all('MEG' not in x.get_text()
+                        for x in subplot.get_children()
+                        if isinstance(x, matplotlib.text.Text)))
+
+        # Test title
+        def get_texts(p):
+            return [x.get_text() for x in p.get_children() if
+                    isinstance(x, matplotlib.text.Text)]
+
+        p = evoked.plot_topomap(times, ch_type='eeg', res=res)
+        assert_equal(len(get_texts(p)), 0)
+        p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
+        texts = get_texts(p)
+        assert_equal(len(texts), 1)
+        assert_equal(texts[0], 'Custom')
+
+        # delaunay triangulation warning
+        with warnings.catch_warnings(record=True):
+            evoked.plot_topomap(times, ch_type='mag', layout='auto', res=res)
+        assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
+                      proj='interactive')  # projs have already been applied
+
+        # change to no-proj mode
+        evoked = read_evokeds(evoked_fname, 'Left Auditory',
+                              baseline=(None, 0), proj=False)
+        evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
+        assert_raises(RuntimeError, plot_evoked_topomap, evoked,
+                      np.repeat(.1, 50))
+        assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
+
+        projs = read_proj(ecg_fname)
+        projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
+        plot_projs_topomap(projs, res=res)
+        plt.close('all')
+        for ch in evoked.info['chs']:
+            if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
+                if ch['eeg_loc'] is not None:
+                    ch['eeg_loc'].fill(0)
+                ch['loc'].fill(0)
+        assert_raises(RuntimeError, plot_evoked_topomap, evoked,
+                      times, ch_type='eeg')
+
+
+def test_plot_tfr_topomap():
+    """Test plotting of TFR data
+    """
+    raw = _get_raw()
+    times = np.linspace(-0.1, 0.1, 200)
+    n_freqs = 3
+    nave = 1
+    rng = np.random.RandomState(42)
+    data = rng.randn(len(raw.ch_names), n_freqs, len(times))
+    tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
+    tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
+                     res=16)
diff --git a/mne/viz/tests/test_utils.py b/mne/viz/tests/test_utils.py
new file mode 100644
index 0000000..f87cc33
--- /dev/null
+++ b/mne/viz/tests/test_utils.py
@@ -0,0 +1,28 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+from mne.viz.utils import compare_fiff
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+
+
+def test_compare_fiff():
+    """Test comparing fiff files
+    """
+    compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
+    plt.close('all')
diff --git a/mne/viz/topo.py b/mne/viz/topo.py
new file mode 100644
index 0000000..b87a7c7
--- /dev/null
+++ b/mne/viz/topo.py
@@ -0,0 +1,725 @@
+"""Functions to plot M/EEG data on topo (one axes per channel)
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import warnings
+from itertools import cycle
+from functools import partial
+
+import numpy as np
+from scipy import ndimage
+
+# XXX : don't import pyplot here or you will break the doc
+
+from ..baseline import rescale
+from ..utils import deprecated
+from ..io.pick import channel_type, pick_types
+from ..fixes import normalize_colors
+from ..utils import _clean_names
+
+from .utils import _mutable_defaults, _check_delayed_ssp, COLORS
+from .utils import _draw_proj_checkbox
+
+
+def iter_topography(info, layout=None, on_pick=None, fig=None,
+                    fig_facecolor='k', axis_facecolor='k',
+                    axis_spinecolor='k', layout_scale=None,
+                    colorbar=False):
+    """ Create iterator over channel positions
+
+    This function returns a generator that unpacks into
+    a series of matplotlib axis objects and data / channel
+    indices, both corresponding to the sensor positions
+    of the layout passed in or inferred from the channel info.
+    `iter_topography` therefore makes it convenient to build
+    custom topography plots.
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    layout : instance of mne.layout.Layout | None
+        The layout to use. If None, layout will be guessed
+    on_pick : callable | None
+        The callback function to be invoked on clicking one
+        of the axes. It must have the signature
+        `function(axis, channel_index)`.
+    fig : matplotlib.figure.Figure | None
+        The figure object to be considered. If None, a new
+        figure will be created.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    axis_facecolor : str | obj
+        The axis face color. Defaults to black.
+    axis_spinecolor : str | obj
+        The axis spine color. Defaults to black. In other words,
+        the color of the axis' edge lines.
+    layout_scale : float | None
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas. If None, nothing will be scaled.
+
+    Returns
+    -------
+    A generator that can be unpacked into
+
+    ax : matplotlib.axis.Axis
+        The current axis of the topo plot.
+    ch_idx : int
+        The related channel index.
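+
+    Examples
+    --------
+    A minimal sketch of a custom topography plot (illustrative only;
+    it assumes ``evoked`` is an Evoked instance whose channels are
+    covered by the inferred layout)::
+
+        import matplotlib.pyplot as plt
+        for ax, ch_idx in iter_topography(evoked.info,
+                                          fig_facecolor='w',
+                                          axis_facecolor='w',
+                                          axis_spinecolor='w'):
+            ax.plot(evoked.times, evoked.data[ch_idx], color='r')
+        plt.show()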
+    """
+    import matplotlib.pyplot as plt
+
+    if fig is None:
+        fig = plt.figure()
+
+    fig.set_facecolor(fig_facecolor)
+    if layout is None:
+        from ..layouts import find_layout
+        layout = find_layout(info)
+
+    if on_pick is not None:
+        callback = partial(_plot_topo_onpick, show_func=on_pick)
+        fig.canvas.mpl_connect('button_press_event', callback)
+
+    pos = layout.pos.copy()
+    if layout_scale:
+        pos[:, :2] *= layout_scale
+
+    ch_names = _clean_names(info['ch_names'])
+    iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
+    for idx, name in iter_ch:
+        ax = plt.axes(pos[idx])
+        ax.patch.set_facecolor(axis_facecolor)
+        plt.setp(list(ax.spines.values()), color=axis_spinecolor)
+        ax.set_xticklabels([])
+        ax.set_yticklabels([])
+        plt.setp(ax.get_xticklines(), visible=False)
+        plt.setp(ax.get_yticklines(), visible=False)
+        ch_idx = ch_names.index(name)
+        vars(ax)['_mne_ch_name'] = name
+        vars(ax)['_mne_ch_idx'] = ch_idx
+        vars(ax)['_mne_ax_face_color'] = axis_facecolor
+        yield ax, ch_idx
+
+
+def _plot_topo(info=None, times=None, show_func=None, layout=None,
+               decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
+               border='none', cmap=None, layout_scale=None, title=None,
+               x_label=None, y_label=None, vline=None):
+    """Helper function to plot on sensor layout"""
+    import matplotlib.pyplot as plt
+
+    # prepare callbacks
+    tmin, tmax = times[[0, -1]]
+    on_pick = partial(show_func, tmin=tmin, tmax=tmax, vmin=vmin,
+                      vmax=vmax, ylim=ylim, x_label=x_label,
+                      y_label=y_label, colorbar=colorbar)
+
+    fig = plt.figure()
+    if colorbar:
+        norm = normalize_colors(vmin=vmin, vmax=vmax)
+        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
+        sm.set_array(np.linspace(vmin, vmax))
+        ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg='k')
+        cb = fig.colorbar(sm, ax=ax)
+        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
+        plt.setp(cb_yticks, color='w')
+
+    my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
+                                   fig=fig, layout_scale=layout_scale,
+                                   axis_spinecolor=border,
+                                   colorbar=colorbar)
+
+    for ax, ch_idx in my_topo_plot:
+        if layout.kind == 'Vectorview-all' and ylim is not None:
+            this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
+            ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
+        else:
+            ylim_ = ylim
+
+        show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
+                  vmax=vmax, ylim=ylim_)
+
+        if ylim_ and not any(v is None for v in ylim_):
+            plt.ylim(*ylim_)
+
+    if title is not None:
+        plt.figtext(0.03, 0.9, title, color='w', fontsize=19)
+
+    return fig
+
+
+def _plot_topo_onpick(event, show_func=None, colorbar=False):
+    """Onpick callback that shows a single channel in a new figure"""
+
+    # make sure that the swipe gesture in OS-X doesn't open many figures
+    orig_ax = event.inaxes
+    if event.inaxes is None:
+        return
+
+    import matplotlib.pyplot as plt
+    try:
+        ch_idx = orig_ax._mne_ch_idx
+        face_color = orig_ax._mne_ax_face_color
+        fig, ax = plt.subplots(1)
+
+        plt.title(orig_ax._mne_ch_name)
+        ax.set_axis_bgcolor(face_color)
+
+        # allow custom function to override parameters
+        show_func(plt, ch_idx)
+
+    except Exception as err:
+        # matplotlib silently ignores exceptions in event handlers, so
+        # we print it here to know what went wrong
+        print(err)
+        raise err
+
+
+def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, tfr=None,
+                freq=None, vline=None, x_label=None, y_label=None,
+                colorbar=False, picker=True, cmap=None):
+    """ Aux function to show time-freq map on topo """
+    import matplotlib.pyplot as plt
+    if cmap is None:
+        cmap = plt.cm.jet
+
+    extent = (tmin, tmax, freq[0], freq[-1])
+    ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
+              vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
+    if x_label is not None:
+        plt.xlabel(x_label)
+    if y_label is not None:
+        plt.ylabel(y_label)
+    if colorbar:
+        plt.colorbar()
+
+
+def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
+                     times, vline=None, x_label=None, y_label=None,
+                     colorbar=False):
+    """ Aux function to show time series on topo """
+    import matplotlib.pyplot as plt
+    picker_flag = False
+    for data_, color_ in zip(data, color):
+        if not picker_flag:
+            # use large tol for picker so we can click anywhere in the axes
+            ax.plot(times, data_[ch_idx], color_, picker=1e9)
+            picker_flag = True
+        else:
+            ax.plot(times, data_[ch_idx], color_)
+    if vline:
+        for x in vline:
+            plt.axvline(x, color='w', linewidth=0.5)
+    if x_label is not None:
+        plt.xlabel(x_label)
+    if y_label is not None:
+        plt.ylabel(y_label)
+    if colorbar:
+        plt.colorbar()
+
+
+def _check_vlim(vlim):
+    """AUX function"""
+    return not np.isscalar(vlim) and not vlim is None
+
+
+def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
+              border='none', ylim=None, scalings=None, title=None, proj=False,
+              vline=[0.0]):
+    """Plot 2D topography of evoked responses.
+
+    Clicking on the plot of an individual sensor opens a new figure showing
+    the evoked response for the selected sensor.
+
+    Parameters
+    ----------
+    evoked : list of Evoked | Evoked
+        The evoked response to plot.
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    color : list of color objects | color object | None
+        Everything matplotlib accepts to specify colors. If not list-like,
+        the color specified will be repeated. If None, colors are
+        automatically drawn.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If
+        None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    ylim : dict | None
+        ylim for plots. The value determines the upper and lower subplot
+        limits, e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+        mag, grad, misc. If None, the ylim parameter for each channel is
+        determined by the maximum absolute peak.
+    proj : bool | 'interactive'
+        If true SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    title : str
+        Title of the figure.
+    vline : list of floats | None
+        The values at which to show a vertical line.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of evoked responses at sensor locations
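+
+    Examples
+    --------
+    A minimal sketch (illustrative only; it assumes ``epochs`` is an
+    Epochs instance from a system whose layout can be inferred)::
+
+        evoked = epochs.average()
+        fig = plot_topo(evoked, title='Evoked responses')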
+    """
+
+    if type(evoked) not in (tuple, list):
+        evoked = [evoked]
+
+    if type(color) in (tuple, list):
+        if len(color) != len(evoked):
+            raise ValueError('Lists of evoked objects and colors'
+                             ' must have the same length')
+    elif color is None:
+        colors = ['w'] + COLORS
+        stop = (slice(len(evoked)) if len(evoked) < len(colors)
+                else slice(len(colors)))
+        color = cycle(colors[stop])
+        if len(evoked) > len(colors):
+            warnings.warn('More evoked objects than colors available. '
+                          'You should pass a list of unique colors.')
+    else:
+        color = cycle([color])
+
+    times = evoked[0].times
+    if not all([(e.times == times).all() for e in evoked]):
+        raise ValueError('All evoked.times must be the same')
+
+    info = evoked[0].info
+    ch_names = evoked[0].ch_names
+    if not all([e.ch_names == ch_names for e in evoked]):
+        raise ValueError('All evoked.picks must be the same')
+    ch_names = _clean_names(ch_names)
+
+    if layout is None:
+        from ..layouts.layout import find_layout
+        layout = find_layout(info)
+
+    # XXX. at the moment we are committed to 1- / 2-sensor-types layouts
+    chs_in_layout = set(layout.names) & set(ch_names)
+    types_used = set(channel_type(info, ch_names.index(ch))
+                     for ch in chs_in_layout)
+    # one check for all vendors
+    meg_types = (['mag'], ['grad'], ['mag', 'grad'])
+    is_meg = any(types_used == set(k) for k in meg_types)
+    if is_meg:
+        types_used = list(types_used)[::-1]  # -> restore kwarg order
+        picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
+                 for kk in types_used]
+    else:
+        types_used_kwargs = dict((t, True) for t in types_used)
+        picks = [pick_types(info, meg=False, **types_used_kwargs)]
+    assert isinstance(picks, list) and len(types_used) == len(picks)
+
+    scalings = _mutable_defaults(('scalings', scalings))[0]
+    evoked = [e.copy() for e in evoked]
+    for e in evoked:
+        for pick, t in zip(picks, types_used):
+            e.data[pick] = e.data[pick] * scalings[t]
+
+    if proj is True and all([e.proj is not True for e in evoked]):
+        evoked = [e.apply_proj() for e in evoked]
+    elif proj == 'interactive':  # let it fail early.
+        for e in evoked:
+            _check_delayed_ssp(e)
+
+    if ylim is None:
+        set_ylim = lambda x: np.abs(x).max()
+        ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
+        ymax = np.array(ylim_)
+        ylim_ = (-ymax, ymax)
+    elif isinstance(ylim, dict):
+        ylim_ = _mutable_defaults(('ylim', ylim))[0]
+        ylim_ = [ylim_[kk] for kk in types_used]
+        ylim_ = zip(*[np.array(yl) for yl in ylim_])
+    else:
+        raise ValueError('ylim must be None or a dict')
+
+    plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
+                       color=color, times=times, vline=vline)
+
+    fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
+                     decim=1, colorbar=False, ylim=ylim_, cmap=None,
+                     layout_scale=layout_scale, border=border, title=title,
+                     x_label='Time (s)', vline=vline)
+
+    if proj == 'interactive':
+        for e in evoked:
+            _check_delayed_ssp(e)
+        params = dict(evokeds=evoked, times=times,
+                      plot_update_proj_callback=_plot_update_evoked_topo,
+                      projs=evoked[0].info['projs'], fig=fig)
+        _draw_proj_checkbox(None, params)
+
+    return fig
+
+
+def _plot_update_evoked_topo(params, bools):
+    """Helper function to update topo sensor plots"""
+    evokeds, times, fig = [params[k] for k in ('evokeds', 'times', 'fig')]
+
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+
+    params['proj_bools'] = bools
+    evokeds = [e.copy() for e in evokeds]
+    for e in evokeds:
+        e.info['projs'] = []
+        e.add_proj(projs)
+        e.apply_proj()
+
+    # make sure to only modify the time courses, not the ticks
+    axes = fig.get_axes()
+    n_lines = len(axes[0].lines)
+    n_diff = len(evokeds) - n_lines
+    ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
+    for ax in axes:
+        lines = ax.lines[ax_slice]
+        for line, evoked in zip(lines, evokeds):
+            line.set_data(times, evoked.data[ax._mne_ch_idx])
+
+    fig.canvas.draw()
+
+
+@deprecated('`plot_topo_tfr` is deprecated and will be removed in '
+            'MNE 0.9. Use `plot_topo` method on TFR objects.')
+def plot_topo_tfr(epochs, tfr, freq, layout=None, colorbar=True, vmin=None,
+                  vmax=None, cmap='RdBu_r', layout_scale=0.945, title=None):
+    """Plot time-frequency data on sensor layout
+
+    Clicking on the time-frequency map of an individual sensor opens a
+    new figure showing the time-frequency map of the selected sensor.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs used to generate the power
+    tfr : 3D-array shape=(n_sensors, n_freqs, n_times)
+        The time-frequency data. Must have the same channels as Epochs.
+    freq : array-like
+        Frequencies of interest as passed to induced_power
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    colorbar : bool
+        If true, colorbar will be added to the plot
+    vmin : float
+        Minimum value mapped to lowermost color
+    vmax : float
+        Maximum value mapped to uppermost color
+    cmap : instance of matplotlib.pyplot.colormap | str
+        Colors to be mapped to the values. Default 'RdBu_r'.
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    title : str
+        Title of the figure.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of time-frequency data at sensor locations
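+
+    Examples
+    --------
+    Deprecated; a sketch of the suggested replacement (assuming ``tfr``
+    is an AverageTFR instance, e.g. from mne.time_frequency.tfr)::
+
+        tfr.plot_topo(baseline=(None, 0), mode='ratio',
+                      title='Average power')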
+    """
+
+    if vmin is None:
+        vmin = tfr.min()
+    if vmax is None:
+        vmax = tfr.max()
+
+    if layout is None:
+        from ..layouts.layout import find_layout
+        layout = find_layout(epochs.info)
+
+    tfr_imshow = partial(_imshow_tfr, tfr=tfr.copy(), freq=freq, cmap=cmap)
+
+    fig = _plot_topo(info=epochs.info, times=epochs.times,
+                     show_func=tfr_imshow, layout=layout, border='w',
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title,
+                     x_label='Time (s)', y_label='Frequency (Hz)')
+
+    return fig
+
+
+@deprecated('`plot_topo_power` is deprecated and will be removed in '
+            'MNE 0.9. Use `plot_topo` method on TFR objects.')
+def plot_topo_power(epochs, power, freq, layout=None, baseline=None,
+                    mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,
+                    cmap=None, layout_scale=0.945, dB=True, title=None):
+    """Plot induced power on sensor layout
+
+    Clicking on the induced power map of an individual sensor opens a
+    new figure showing the induced power map of the selected sensor.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs used to generate the power
+    power : 3D-array
+        First return value from mne.time_frequency.induced_power
+    freq : array-like
+        Frequencies of interest as passed to induced_power
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    baseline : tuple or list of length 2
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or z-score (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+        If None, no baseline correction will be performed.
+    decim : integer
+        Increment for selecting each nth time slice
+    colorbar : bool
+        If true, colorbar will be added to the plot
+    vmin : float
+        Minimum value mapped to lowermost color
+    vmax : float
+        Maximum value mapped to uppermost color
+    cmap : instance of matplotlib.pyplot.colormap
+        Colors to be mapped to the values
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    dB : bool
+        If True, log10 will be applied to the data.
+    title : str
+        Title of the figure.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of induced power at sensor locations
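+
+    Examples
+    --------
+    Deprecated; a sketch of the suggested migration, wrapping a
+    precomputed induced-power array of shape (n_channels, n_freqs,
+    n_times) in an AverageTFR object (``power`` and ``freq`` are
+    assumed to exist; nave is set to 1 for illustration)::
+
+        from mne.time_frequency.tfr import AverageTFR
+        tfr = AverageTFR(epochs.info, power, epochs.times, freq, 1)
+        tfr.plot_topo(baseline=(None, 0), mode='ratio')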
+    """
+    times = epochs.times[::decim].copy()
+    if mode is not None:
+        if baseline is None:
+            baseline = epochs.baseline
+        power = rescale(power.copy(), times, baseline, mode)
+    times *= 1e3
+    if dB:
+        power = 20 * np.log10(power)
+    if vmin is None:
+        vmin = power.min()
+    if vmax is None:
+        vmax = power.max()
+    if layout is None:
+        from ..layouts.layout import find_layout
+        layout = find_layout(epochs.info)
+
+    power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)
+
+    fig = _plot_topo(info=epochs.info, times=times,
+                     show_func=power_imshow, layout=layout, decim=decim,
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title, border='w',
+                     x_label='Time (s)', y_label='Frequency (Hz)')
+
+    return fig
+
+
+@deprecated('`plot_topo_phase_lock` is deprecated and will be removed in '
+            'MNE 0.9. Use `plot_topo` method on TFR objects.')
+def plot_topo_phase_lock(epochs, phase, freq, layout=None, baseline=None,
+                         mode='mean', decim=1, colorbar=True, vmin=None,
+                         vmax=None, cmap=None, layout_scale=0.945,
+                         title=None):
+    """Plot phase locking values (PLV) on sensor layout
+
+    Clicking on the PLV map of an individual sensor opens a new figure
+    showing the PLV map of the selected sensor.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs used to generate the phase locking value
+    phase : 3D-array
+        Phase locking value, second return value from
+        mne.time_frequency.induced_power.
+    freq : array-like
+        Frequencies of interest as passed to induced_power
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    baseline : tuple or list of length 2
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
+        Do baseline correction with ratio (phase is divided by mean
+        phase during baseline) or z-score (phase is divided by standard
+        deviation of phase during baseline after subtracting the mean,
+        phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
+        If None, no baseline correction will be performed.
+    decim : integer
+        Increment for selecting each nth time slice
+    colorbar : bool
+        If true, colorbar will be added to the plot
+    vmin : float
+        Minimum value mapped to lowermost color
+    vmax : float
+        Maximum value mapped to uppermost color
+    cmap : instance of matplotlib.pyplot.colormap
+        Colors to be mapped to the values
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    title : str
+        Title of the figure.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Phase lock images at sensor locations
+    """
+    times = epochs.times[::decim].copy()
+    if mode is not None:
+        if baseline is None:
+            baseline = epochs.baseline
+        phase = rescale(phase.copy(), times, baseline, mode)
+    times *= 1e3
+    if vmin is None:
+        vmin = phase.min()
+    if vmax is None:
+        vmax = phase.max()
+    if layout is None:
+        from ..layouts.layout import find_layout
+        layout = find_layout(epochs.info)
+
+    phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)
+
+    fig = _plot_topo(info=epochs.info, times=times,
+                     show_func=phase_imshow, layout=layout, decim=decim,
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title, border='w',
+                     x_label='Time (ms)', y_label='Frequency (Hz)')
+
+    return fig
+
+
+def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
+                     data=None, epochs=None, sigma=None,
+                     order=None, scalings=None, vline=None,
+                     x_label=None, y_label=None, colorbar=False):
+    """Aux function to plot erfimage on sensor topography"""
+
+    import matplotlib.pyplot as plt
+    this_data = data[:, ch_idx, :].copy()
+    ch_type = channel_type(epochs.info, ch_idx)
+    if ch_type not in scalings:
+        raise KeyError('%s channel type not in scalings' % ch_type)
+    this_data *= scalings[ch_type]
+
+    if callable(order):
+        order = order(epochs.times, this_data)
+
+    if order is not None:
+        this_data = this_data[order]
+
+    this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
+
+    ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
+              origin='lower', vmin=vmin, vmax=vmax, picker=True)
+
+    if x_label is not None:
+        plt.xlabel(x_label)
+    if y_label is not None:
+        plt.ylabel(y_label)
+    if colorbar:
+        plt.colorbar()
+
+
+def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
+                           vmax=None, colorbar=True, order=None, cmap=None,
+                           layout_scale=.95, title=None, scalings=None):
+    """Plot Event Related Potential / Fields image on topographies
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    layout : instance of Layout
+        System-specific sensor positions.
+    sigma : float
+        The standard deviation of the Gaussian smoothing applied along
+        the epoch axis of the image.
+    vmin : float
+        The min value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    vmax : float
+        The max value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    colorbar : bool
+        Whether to display a colorbar.
+    order : None | array of int | callable
+        If not None, order is used to reorder the epochs on the y-axis
+        of the image. If it is an array of int, its length should equal
+        the number of good epochs. If it is a callable, it is passed the
+        times vector and the data as a 2D array
+        (data.shape[1] == len(times)).
+    cmap : instance of matplotlib.pyplot.colormap
+        Colors to be mapped to the values.
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    title : str
+        Title of the figure.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If
+        None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure distributing one image per channel across sensor topography.
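+
+    Examples
+    --------
+    A minimal usage sketch; ``raw`` and ``events`` are assumed to exist
+    already, and the event id and time window are placeholders:
+
+        epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5)
+        fig = plot_topo_image_epochs(epochs, sigma=0.5, title='ERF images')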
+    """
+    scalings = _mutable_defaults(('scalings', scalings))[0]
+    data = epochs.get_data()
+    if vmin is None:
+        vmin = data.min()
+    if vmax is None:
+        vmax = data.max()
+    if layout is None:
+        from ..layouts.layout import find_layout
+        layout = find_layout(epochs.info)
+
+    erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
+                         data=data, epochs=epochs, sigma=sigma)
+
+    fig = _plot_topo(info=epochs.info, times=epochs.times,
+                     show_func=erf_imshow, layout=layout, decim=1,
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title,
+                     border='w', x_label='Time (s)', y_label='Epoch')
+
+    return fig
diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py
new file mode 100644
index 0000000..5e66284
--- /dev/null
+++ b/mne/viz/topomap.py
@@ -0,0 +1,1035 @@
+"""Functions to plot M/EEG data e.g. topographies
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import math
+import copy
+
+import numpy as np
+from scipy import linalg
+
+from ..baseline import rescale
+from ..io.constants import FIFF
+from ..io.pick import pick_types
+from ..utils import _clean_names, deprecated
+from .utils import tight_layout, _setup_vmin_vmax, DEFAULTS
+from .utils import _prepare_trellis, _check_delayed_ssp
+from .utils import _draw_proj_checkbox
+
+
+def _prepare_topo_plot(obj, ch_type, layout):
+    """"Aux Function"""
+    info = copy.deepcopy(obj.info)
+    if layout is None and ch_type != 'eeg':
+        from ..layouts.layout import find_layout
+        layout = find_layout(info)
+    elif layout == 'auto':
+        layout = None
+
+    info['ch_names'] = _clean_names(info['ch_names'])
+    for ii, this_ch in enumerate(info['chs']):
+        this_ch['ch_name'] = info['ch_names'][ii]
+
+    # special case for merging grad channels
+    if (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
+            np.unique([ch['coil_type'] for ch in info['chs']])):
+        from ..layouts.layout import _pair_grad_sensors
+        picks, pos = _pair_grad_sensors(info, layout)
+        merge_grads = True
+    else:
+        merge_grads = False
+        if ch_type == 'eeg':
+            picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                               exclude='bads')
+        else:
+            picks = pick_types(info, meg=ch_type, ref_meg=False,
+                               exclude='bads')
+
+        if len(picks) == 0:
+            raise ValueError("No channels of type %r" % ch_type)
+
+        if layout is None:
+            chs = [info['chs'][i] for i in picks]
+            from ..layouts.layout import _find_topomap_coords
+            pos = _find_topomap_coords(chs, layout)
+        else:
+            names = [n.upper() for n in layout.names]
+            pos = [layout.pos[names.index(info['ch_names'][k].upper())]
+                   for k in picks]
+
+    return picks, pos, merge_grads, info['ch_names']
+
+
+def _plot_update_evoked_topomap(params, bools):
+    """ Helper to update topomaps """
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+
+    params['proj_bools'] = bools
+    new_evoked = params['evoked'].copy()
+    new_evoked.info['projs'] = []
+    new_evoked.add_proj(projs)
+    new_evoked.apply_proj()
+
+    data = new_evoked.data[np.ix_(params['picks'],
+                                  params['time_idx'])] * params['scale']
+    if params['merge_grads']:
+        from ..layouts.layout import _merge_grad_data
+        data = _merge_grad_data(data)
+    image_mask = params['image_mask']
+
+    pos_x, pos_y = np.asarray(params['pos'])[:, :2].T
+
+    xi = np.linspace(pos_x.min(), pos_x.max(), params['res'])
+    yi = np.linspace(pos_y.min(), pos_y.max(), params['res'])
+    Xi, Yi = np.meshgrid(xi, yi)
+    for ii, im in enumerate(params['images']):
+        Zi = _griddata(pos_x, pos_y, data[:, ii], Xi, Yi)
+        Zi[~image_mask] = np.nan
+        im.set_data(Zi)
+    for cont in params['contours']:
+        cont.set_array(np.c_[Xi, Yi, Zi])
+
+    params['fig'].canvas.draw()
+
+
+def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
+                       colorbar=False, res=64, size=1, show=True,
+                       outlines='head', contours=6, image_interp='bilinear'):
+    """Plot topographic maps of SSP projections
+
+    Parameters
+    ----------
+    projs : list of Projection
+        The projections
+    layout : None | Layout | list of Layout
+        Layout instance specifying sensor positions (does not need to be
+        specified for Neuromag data). Or a list of Layout if projections
+        are from different sensor types.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses).
+    colorbar : bool
+        Plot a colorbar.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : scalar
+        Side length of the topomaps in inches (only applies when plotting
+        multiple topomaps at a time).
+    show : bool
+        Show figures if True
+    outlines : 'head' | dict | None
+        The outlines to be drawn. If 'head', a head scheme will be drawn. If
+        dict, each key refers to a tuple of x and y positions. The values in
+        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Defaults to 'head'.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure distributing one image per channel across sensor topography.
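+
+    Examples
+    --------
+    A minimal usage sketch; ``raw`` is assumed to be a Raw instance with
+    SSP projection vectors attached:
+
+        fig = plot_projs_topomap(raw.info['projs'], colorbar=True)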
+    """
+    import matplotlib.pyplot as plt
+
+    if layout is None:
+        from ..layouts import read_layout
+        layout = read_layout('Vectorview-all')
+
+    if not isinstance(layout, list):
+        layout = [layout]
+
+    n_projs = len(projs)
+    nrows = int(math.floor(math.sqrt(n_projs)))
+    ncols = int(math.ceil(n_projs / float(nrows)))
+
+    fig = plt.gcf()
+    fig.clear()
+    for k, proj in enumerate(projs):
+
+        ch_names = _clean_names(proj['data']['col_names'])
+        data = proj['data']['data'].ravel()
+
+        idx = []
+        for l in layout:
+            is_vv = l.kind.startswith('Vectorview')
+            if is_vv:
+                from ..layouts.layout import _pair_grad_sensors_from_ch_names
+                grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
+                if grad_pairs:
+                    ch_names = [ch_names[i] for i in grad_pairs]
+
+            idx = [l.names.index(c) for c in ch_names if c in l.names]
+            if len(idx) == 0:
+                continue
+
+            pos = l.pos[idx]
+            if is_vv and grad_pairs:
+                from ..layouts.layout import _merge_grad_data
+                shape = (len(idx) // 2, 2, -1)
+                pos = pos.reshape(shape).mean(axis=1)
+                data = _merge_grad_data(data[grad_pairs]).ravel()
+
+            break
+
+        ax = plt.subplot(nrows, ncols, k + 1)
+        ax.set_title(proj['desc'][:10] + '...')
+        if len(idx):
+            plot_topomap(data, pos, vmax=None, cmap=cmap,
+                         sensors=sensors, res=res, outlines=outlines,
+                         contours=contours, image_interp=image_interp)
+            if colorbar:
+                plt.colorbar()
+        else:
+            raise RuntimeError('Cannot find a proper layout for projection %s'
+                               % proj['desc'])
+    fig = ax.get_figure()
+    if show and plt.get_backend() != 'agg':
+        fig.show()
+    tight_layout(fig=fig)
+
+    return fig
+
+
+def _check_outlines(pos, outlines, head_scale=0.85):
+    """Check or create outlines for topoplot
+    """
+    pos = np.asarray(pos)
+    if outlines in ('head', None):
+        radius = 0.5
+        step = 2 * np.pi / 101
+        l = np.arange(0, 2 * np.pi + step, step)
+        head_x = np.cos(l) * radius
+        head_y = np.sin(l) * radius
+        nose_x = np.array([0.18, 0, -0.18]) * radius
+        nose_y = np.array([radius - .004, radius * 1.15, radius - .004])
+        ear_x = np.array([.497, .510, .518, .5299, .5419, .54, .547,
+                         .532, .510, .489])
+        ear_y = np.array([.0555, .0775, .0783, .0746, .0555, -.0055, -.0932,
+                          -.1313, -.1384, -.1199])
+        x, y = pos[:, :2].T
+        x_range = np.abs(x.max() - x.min())
+        y_range = np.abs(y.max() - y.min())
+
+        # shift and scale the electrode positions
+        pos[:, 0] = head_scale * ((pos[:, 0] - x.min()) / x_range - 0.5)
+        pos[:, 1] = head_scale * ((pos[:, 1] - y.min()) / y_range - 0.5)
+
+        # Define the outline of the head, ears and nose
+        if outlines is not None:
+            outlines = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
+                            ear_left=(ear_x,  ear_y),
+                            ear_right=(-ear_x,  ear_y))
+        else:
+            outlines = dict()
+
+        outlines['mask_pos'] = head_x, head_y
+    elif isinstance(outlines, dict):
+        if 'mask_pos' not in outlines:
+            raise ValueError('You must specify the coordinates of the '
+                             'image mask')
+    else:
+        raise ValueError('Invalid value for `outlines`')
+
+    return pos, outlines
+
+
+def _inside_contour(pos, contour):
+    """Aux function"""
+    npos, ncnt = len(pos), len(contour)
+    x, y = pos[:, :2].T
+
+    check_mask = np.ones((npos), dtype=bool)
+    check_mask[((x < np.min(x)) | (y < np.min(y)) |
+                (x > np.max(x)) | (y > np.max(y)))] = False
+
+    critval = 0.1
+    sel = np.where(check_mask)[0]
+    for this_sel in sel:
+        contourx = contour[:, 0] - pos[this_sel, 0]
+        contoury = contour[:, 1] - pos[this_sel, 1]
+        angle = np.arctan2(contoury, contourx)
+        angle = np.unwrap(angle)
+        total = np.sum(np.diff(angle))
+        check_mask[this_sel] = np.abs(total) > critval
+
+    return check_mask
+
+
+def _griddata(x, y, v, xi, yi):
+    """Aux function"""
+    xy = x.ravel() + y.ravel() * -1j
+    d = xy[None, :] * np.ones((len(xy), 1))
+    d = np.abs(d - d.T)
+    n = d.shape[0]
+    d.flat[::n + 1] = 1.
+
+    g = (d * d) * (np.log(d) - 1.)
+    g.flat[::n + 1] = 0.
+    weights = linalg.solve(g, v.ravel())
+
+    m, n = xi.shape
+    zi = np.zeros_like(xi)
+    xy = xy.T
+
+    g = np.empty(xy.shape)
+    for i in range(m):
+        for j in range(n):
+            d = np.abs(xi[i, j] + -1j * yi[i, j] - xy)
+            mask = np.where(d == 0)[0]
+            if len(mask):
+                d[mask] = 1.
+            np.log(d, out=g)
+            g -= 1.
+            g *= d * d
+            if len(mask):
+                g[mask] = 0.
+            zi[i, j] = g.dot(weights)
+    return zi
+
+
+def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
+                 res=64, axis=None, names=None, show_names=False, mask=None,
+                 mask_params=None, outlines='head', image_mask=None,
+                 contours=6, image_interp='bilinear'):
+    """Plot a topographic map as image
+
+    Parameters
+    ----------
+    data : array, length = n_points
+        The data values to plot.
+    pos : array, shape = (n_points, 2)
+        For each data point, the x and y coordinates.
+    vmin : float | callable
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data).
+        If callable, the output equals vmin(data).
+    vmax : float | callable
+        The value specifying the upper bound of the color range.
+        If None, and vmin is None, the maximum absolute value is used.
+        Else np.max(data). If callable, the output equals vmax(data).
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses).
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    axis : instance of Axis | None
+        The axis to plot to. If None, the current axis will be used.
+    names : list | None
+        List of channel names. If None, channel names are not plotted.
+    show_names : bool | callable
+        If True, show channel names on top of the map. If a callable is
+        passed, channel names will be formatted using the callable; e.g., to
+        delete the prefix 'MEG ' from all channel names, pass the function
+        lambda x: x.replace('MEG ', ''). If `mask` is not None, only
+        significant sensors will be shown.
+    mask : ndarray of bool, shape (n_channels, n_times) | None
+        The channels to be marked as significant at a given time point.
+        Indices set to `True` will be considered. Defaults to None.
+    mask_params : dict | None
+        Additional plotting parameters for plotting significant sensors.
+        Default (None) equals:
+        dict(marker='o', markerfacecolor='w', markeredgecolor='k', linewidth=0,
+             markersize=4)
+    outlines : 'head' | dict | None
+        The outlines to be drawn. If 'head', a head scheme will be drawn. If
+        dict, each key refers to a tuple of x and y positions. The values in
+        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Defaults to 'head'.
+    image_mask : ndarray of bool, shape (res, res) | None
+        The image mask to cover the interpolated surface. If None, it will be
+        computed from the outline.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
+
+    Returns
+    -------
+    im : matplotlib.image.AxesImage
+        The interpolated data.
+    cn : matplotlib.contour.ContourSet
+        The fieldlines.
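+
+    Examples
+    --------
+    A minimal sketch with synthetic data; the positions and values below
+    are placeholders, not a real montage:
+
+        rng = np.random.RandomState(42)
+        pos = rng.uniform(-0.5, 0.5, size=(60, 2))  # x/y sensor positions
+        data = rng.randn(60)  # one value per sensor
+        im, cn = plot_topomap(data, pos, sensors='k.', contours=4)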
+    """
+    import matplotlib.pyplot as plt
+
+    data = np.asarray(data)
+    if data.ndim > 1:
+        err = ("Data needs to be array of shape (n_sensors,); got shape "
+               "%s." % str(data.shape))
+        raise ValueError(err)
+    elif len(data) != len(pos):
+        err = ("Data and pos need to be of same length. Got data of shape "
+               "%s, pos of shape %s." % (str(data.shape), str(pos.shape)))
+        raise ValueError(err)
+
+    axes = plt.gca()
+    axes.set_frame_on(False)
+
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+
+    plt.xticks(())
+    plt.yticks(())
+    pos, outlines = _check_outlines(pos, outlines)
+    pos_x = pos[:, 0]
+    pos_y = pos[:, 1]
+
+    ax = axis if axis else plt.gca()
+    if any([not pos_y.any(), not pos_x.any()]):
+        raise RuntimeError('No position information found, cannot compute '
+                           'geometries for topomap.')
+    if outlines is None:
+        xmin, xmax = pos_x.min(), pos_x.max()
+        ymin, ymax = pos_y.min(), pos_y.max()
+    else:
+        xlim = np.inf, -np.inf,
+        ylim = np.inf, -np.inf,
+        mask_ = np.c_[outlines['mask_pos']]
+        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0] * 1.01]),
+                      np.max(np.r_[xlim[1], mask_[:, 0] * 1.01]))
+        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1] * 1.01]),
+                      np.max(np.r_[ylim[1], mask_[:, 1] * 1.01]))
+
+    # interpolate data
+    xi = np.linspace(xmin, xmax, res)
+    yi = np.linspace(ymin, ymax, res)
+    Xi, Yi = np.meshgrid(xi, yi)
+    Zi = _griddata(pos_x, pos_y, data, Xi, Yi)
+
+    if outlines is None:
+        _is_default_outlines = False
+    elif isinstance(outlines, dict):
+        _is_default_outlines = any([k.startswith('head') for k in outlines])
+
+    if _is_default_outlines and image_mask is None:
+        # prepare masking
+        image_mask, pos = _make_image_mask(outlines, pos, res)
+
+    if image_mask is not None and not _is_default_outlines:
+        Zi[~image_mask] = np.nan
+
+    if mask_params is None:
+        mask_params = DEFAULTS['mask_params'].copy()
+    elif isinstance(mask_params, dict):
+        params = dict((k, v) for k, v in DEFAULTS['mask_params'].items()
+                      if k not in mask_params)
+        mask_params.update(params)
+    else:
+        raise ValueError('`mask_params` must be of dict-type '
+                         'or None')
+
+    # plot map and contour
+    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
+                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
+                   interpolation=image_interp)
+    # plot outline
+    linewidth = mask_params['markeredgewidth']
+    if isinstance(outlines, dict):
+        for k, (x, y) in outlines.items():
+            if 'mask' in k:
+                continue
+            ax.plot(x, y, color='k', linewidth=linewidth)
+
+    # This tackles an incomprehensible matplotlib bug if no contours are
+    # drawn. To avoid rescalings, we will always draw contours.
+    # But if no contours are desired we only draw one and make it invisible.
+    no_contours = False
+    if contours in (False, None):
+        contours, no_contours = 1, True
+    cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
+                      linewidths=linewidth)
+    if no_contours is True:
+        for col in cont.collections:
+            col.set_visible(False)
+
+    if _is_default_outlines:
+        from matplotlib import patches
+        # remove nose offset and tweak
+        patch = patches.Circle((0.5, 0.4687), radius=.46,
+                               clip_on=True,
+                               transform=ax.transAxes)
+        im.set_clip_path(patch)
+        ax.set_clip_path(patch)
+        if cont is not None:
+            for col in cont.collections:
+                col.set_clip_path(patch)
+
+    if sensors is True:
+        sensors = 'k,'
+    if sensors and mask is None:
+        ax.plot(pos_x, pos_y, sensors)
+    elif sensors and mask is not None:
+        idx = np.where(mask)[0]
+        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
+        idx = np.where(~mask)[0]
+        ax.plot(pos_x[idx], pos_y[idx], sensors)
+
+    if show_names:
+        if show_names is True:
+            show_names = lambda x: x
+        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
+        for ii, (p, ch_id) in enumerate(zip(pos, names)):
+            if ii not in show_idx:
+                continue
+            ch_id = show_names(ch_id)
+            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
+                    verticalalignment='center', size='x-small')
+
+    plt.subplots_adjust(top=.95)
+
+    return im, cont
+
+
+def _make_image_mask(outlines, pos, res):
+    """Aux function
+    """
+
+    mask_ = np.c_[outlines['mask_pos']]
+    xmin, xmax = (np.min(np.r_[np.inf, mask_[:, 0]]),
+                  np.max(np.r_[-np.inf, mask_[:, 0]]))
+    ymin, ymax = (np.min(np.r_[np.inf, mask_[:, 1]]),
+                  np.max(np.r_[-np.inf, mask_[:, 1]]))
+
+    inside = _inside_contour(pos, mask_)
+    outside = np.invert(inside)
+    outlier_points = pos[outside]
+    while np.any(outlier_points):  # auto shrink
+        pos *= 0.99
+        inside = _inside_contour(pos, mask_)
+        outside = np.invert(inside)
+        outlier_points = pos[outside]
+    image_mask = np.zeros((res, res), dtype=bool)
+    xi_mask = np.linspace(xmin, xmax, res)
+    yi_mask = np.linspace(ymin, ymax, res)
+    Xi_mask, Yi_mask = np.meshgrid(xi_mask, yi_mask)
+
+    pos_ = np.c_[Xi_mask.flatten(), Yi_mask.flatten()]
+    inds = _inside_contour(pos_, mask_)
+    image_mask[inds.reshape(image_mask.shape)] = True
+
+    return image_mask, pos
+
+
+ at deprecated('`plot_ica_topomap` is deprecated and will be removed in '
+            'MNE 1.0. Use `plot_ica_components` instead')
+def plot_ica_topomap(ica, source_idx, ch_type='mag', res=64, layout=None,
+                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
+                     show=True):
+    """This functoin is deprecated
+
+    See ``plot_ica_components``.
+    """
+    return plot_ica_components(ica, picks=source_idx, ch_type=ch_type,
+                               res=res, layout=layout, vmax=vmax, cmap=cmap,
+                               sensors=sensors, colorbar=colorbar, show=show)
+
+
+def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
+                        layout=None, vmin=None, vmax=None, cmap='RdBu_r',
+                        sensors='k,', colorbar=False, title=None,
+                        show=True, outlines='head', contours=6,
+                        image_interp='bilinear'):
+    """Project unmixing matrix on interpolated sensor topogrpahy.
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA solution.
+    picks : int | array-like | None
+        The indices of the sources to be plotted.
+        If None all are plotted in batches of 20.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        The channel type to plot. For 'grad', the gradiometers are
+        collected in pairs and the RMS for each pair is plotted.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    vmin : float | callable
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data).
+        If callable, the output equals vmin(data).
+    vmax : float | callable
+        The value specifying the upper bound of the color range.
+        If None, and vmin is None, the maximum absolute value is used.
+        Else np.max(data). If callable, the output equals vmax(data).
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib
+        plot format string (e.g., 'r+' for red plusses).
+    colorbar : bool
+        Plot a colorbar.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    show : bool
+        Call pyplot.show() at the end.
+    outlines : 'head' | dict | None
+        The outlines to be drawn. If 'head', a head scheme will be drawn.
+        If dict, each key refers to a tuple of x and y positions. The
+        values in 'mask_pos' will serve as image mask. If None, nothing
+        will be drawn. Defaults to 'head'.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure or list
+        The figure object(s).
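+
+    Examples
+    --------
+    A minimal usage sketch; ``raw`` is assumed to be a preloaded Raw
+    instance, and the number of components is a placeholder:
+
+        ica = mne.preprocessing.ICA(n_components=25)
+        ica.fit(raw)
+        fig = plot_ica_components(ica, picks=range(10), ch_type='mag')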
+    """
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid import make_axes_locatable
+
+    if picks is None:  # plot components by sets of 20
+        n_components = ica.mixing_matrix_.shape[1]
+        p = 20
+        figs = []
+        for k in range(0, n_components, p):
+            picks = range(k, min(k + p, n_components))
+            fig = plot_ica_components(ica, picks=picks,
+                                      ch_type=ch_type, res=res, layout=layout,
+                                      vmin=vmin, vmax=vmax, cmap=cmap,
+                                      sensors=sensors, colorbar=colorbar,
+                                      title=title, show=show,
+                                      outlines=outlines, contours=contours,
+                                      image_interp=image_interp)
+            figs.append(fig)
+        return figs
+    elif np.isscalar(picks):
+        picks = [picks]
+
+    data = np.dot(ica.mixing_matrix_[:, picks].T,
+                  ica.pca_components_[:ica.n_components_])
+
+    if ica.info is None:
+        raise RuntimeError('The ICA\'s measurement info is missing. Please '
+                           'fit the ICA or add the corresponding info object.')
+
+    data_picks, pos, merge_grads, names = _prepare_topo_plot(ica, ch_type,
+                                                             layout)
+    pos, outlines = _check_outlines(pos, outlines)
+    if outlines not in (None, 'head'):
+        image_mask, pos = _make_image_mask(outlines, pos, res)
+    else:
+        image_mask = None
+
+    data = np.atleast_2d(data)
+    data = data[:, data_picks]
+
+    # prepare data for iteration
+    fig, axes = _prepare_trellis(len(data), max_col=5)
+    if title is None:
+        title = 'ICA components'
+    fig.suptitle(title)
+
+    if merge_grads:
+        from ..layouts.layout import _merge_grad_data
+    for ii, data_, ax in zip(picks, data, axes):
+        ax.set_title('IC #%03d' % ii, fontsize=12)
+        data_ = _merge_grad_data(data_) if merge_grads else data_
+        vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
+        im = plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
+                          res=res, axis=ax, cmap=cmap, outlines=outlines,
+                          image_mask=image_mask, contours=contours,
+                          image_interp=image_interp)[0]
+        if colorbar:
+            divider = make_axes_locatable(ax)
+            cax = divider.append_axes("right", size="5%", pad=0.05)
+            cbar = plt.colorbar(im, cax=cax, format='%3.2f', cmap=cmap)
+            cbar.ax.tick_params(labelsize=12)
+            cbar.set_ticks((vmin_, vmax_))
+            cbar.ax.set_title('AU', fontsize=10)
+        ax.set_yticks([])
+        ax.set_xticks([])
+        ax.set_frame_on(False)
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.95)
+    fig.canvas.draw()
+
+    if show is True:
+        plt.show()
+    return fig
+
+
+def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
+                     ch_type='mag', baseline=None, mode='mean', layout=None,
+                     vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
+                     colorbar=True, unit=None, res=64, size=2, format='%1.1e',
+                     show_names=False, title=None, axes=None, show=True):
+    """Plot topographic maps of specific time-frequency intervals of TFR data
+
+    Parameters
+    ----------
+    tfr : AverageTFR
+        The AverageTFR object.
+    tmin : None | float
+        The first time instant to display. If None the first time point
+        available is used.
+    tmax : None | float
+        The last time instant to display. If None the last time point
+        available is used.
+    fmin : None | float
+        The first frequency to display. If None the first frequency
+        available is used.
+    fmax : None | float
+        The last frequency to display. If None the last frequency
+        available is used.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        The channel type to plot. For 'grad', the gradiometers are
+        collected in pairs and the RMS for each pair is plotted.
+    baseline : tuple or list of length 2
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or z-score (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline))
+        If None, no baseline correction will be performed.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout
+        file is inferred from the data; if no appropriate layout file
+        was found, the layout is automatically generated from the sensor
+        locations.
+    vmin : float | callable
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data).
+        If callable, the output equals vmin(data).
+    vmax : float | callable
+        The value specifying the upper bound of the color range.
+        If None, and vmin is None, the maximum absolute value is used.
+        Else np.max(data). If callable, the output equals vmax(data).
+    cmap : matplotlib colormap
+        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+        'Reds'.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib
+        plot format string (e.g., 'r+' for red plusses).
+    colorbar : bool
+        Plot a colorbar.
+    unit : str | None
+        The unit of the channel type used for colorbar labels.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : float
+        Side length per topomap in inches.
+    format : str
+        String format for colorbar values.
+    show_names : bool | callable
+        If True, show channel names on top of the map. If a callable is
+        passed, channel names will be formatted using the callable; e.g., to
+        delete the prefix 'MEG ' from all channel names, pass the function
+        lambda x: x.replace('MEG ', ''). If `mask` is not None, only
+        significant sensors will be shown.
+    title : str | None
+        Title. If None (default), no title is displayed.
+    axes : instance of Axis | None
+        The axes to plot to. If None, the axes are defined automatically.
+    show : bool
+        Call pyplot.show() at the end.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The figure containing the topography.
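+
+    Examples
+    --------
+    A minimal usage sketch; ``tfr`` is assumed to be an AverageTFR
+    computed beforehand (e.g. with mne.time_frequency.tfr_morlet), and
+    the time/frequency window is a placeholder:
+
+        fig = plot_tfr_topomap(tfr, tmin=0.1, tmax=0.3, fmin=8., fmax=12.,
+                               ch_type='grad', baseline=(None, 0),
+                               mode='logratio', title='Alpha power')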
+    """
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+    picks, pos, merge_grads, names = _prepare_topo_plot(tfr, ch_type,
+                                                        layout)
+    if not show_names:
+        names = None
+
+    data = tfr.data
+
+    if mode is not None and baseline is not None:
+        data = rescale(data, tfr.times, baseline, mode, copy=True)
+
+    # crop time (itmax / ifmax are exclusive slice bounds)
+    itmin, itmax = None, None
+    if tmin is not None:
+        itmin = np.where(tfr.times >= tmin)[0][0]
+    if tmax is not None:
+        itmax = np.where(tfr.times <= tmax)[0][-1] + 1
+
+    # crop freqs
+    ifmin, ifmax = None, None
+    if fmin is not None:
+        ifmin = np.where(tfr.freqs >= fmin)[0][0]
+    if fmax is not None:
+        ifmax = np.where(tfr.freqs <= fmax)[0][-1] + 1
+
+    data = data[picks, ifmin:ifmax, itmin:itmax]
+    data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
+
+    if merge_grads:
+        from ..layouts.layout import _merge_grad_data
+        data = _merge_grad_data(data)
+
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+
+    if axes is None:
+        fig = plt.figure()
+        ax = fig.gca()
+    else:
+        fig = axes.figure
+        ax = axes
+
+    ax.set_yticks([])
+    ax.set_xticks([])
+    ax.set_frame_on(False)
+
+    if title is not None:
+        ax.set_title(title)
+
+    im, _ = plot_topomap(data[:, 0], pos, vmin=vmin, vmax=vmax,
+                         axis=ax, cmap=cmap, image_interp='bilinear',
+                         contours=False, names=names)
+
+    if colorbar:
+        divider = make_axes_locatable(ax)
+        cax = divider.append_axes("right", size="5%", pad=0.05)
+        cbar = plt.colorbar(im, cax=cax, format='%3.2f', cmap=cmap)
+        cbar.set_ticks((vmin, vmax))
+        cbar.ax.tick_params(labelsize=12)
+        cbar.ax.set_title('AU')
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
+                        vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
+                        colorbar=True, scale=None, scale_time=1e3, unit=None,
+                        res=64, size=1, format='%3.1f',
+                        time_format='%01d ms', proj=False, show=True,
+                        show_names=False, title=None, mask=None,
+                        mask_params=None, outlines='head', contours=6,
+                        image_interp='bilinear'):
+    """Plot topographic maps of specific time points of evoked data
+
+    Parameters
+    ----------
+    evoked : Evoked
+        The Evoked object.
+    times : float | array of floats | None.
+        The time point(s) to plot. If None, 10 topographies will be shown
+        with a regular time spacing between the first and last time instant.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        The channel type to plot. For 'grad', the gradiometers are collected in
+        pairs and the RMS for each pair is plotted.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout file
+        is inferred from the data; if no appropriate layout file was found, the
+        layout is automatically generated from the sensor locations.
+    vmin : float | callable
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data).
+        If callable, the output equals vmin(data).
+    vmax : float | callable
+        The value specifying the upper bound of the color range.
+        If None, and vmin is None, the maximum absolute value is used.
+        Else np.max(data). If callable, the output equals vmax(data).
+    cmap : matplotlib colormap
+        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+        'Reds'.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses).
+    colorbar : bool
+        Plot a colorbar.
+    scale : float | None
+        Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+        for grad and 1e15 for mag.
+    scale_time : float | None
+        Scale the time labels. Defaults to 1e3 (ms).
+    unit : str | None
+        The unit of the channel type used for colorbar label. If
+        scale is None the unit is automatically determined.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : float
+        Side length per topomap in inches.
+    format : str
+        String format for colorbar values.
+    time_format : str
+        String format for topomap values. Defaults to "%01d ms"
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If
+        'interactive', a check box for reversible selection of SSP
+        projection vectors will be shown.
+    show : bool
+        Call pyplot.show() at the end.
+    show_names : bool | callable
+        If True, show channel names on top of the map. If a callable is
+        passed, channel names will be formatted using the callable; e.g., to
+        delete the prefix 'MEG ' from all channel names, pass the function
+        lambda x: x.replace('MEG ', ''). If `mask` is not None, only
+        significant sensors will be shown.
+    title : str | None
+        Title. If None (default), no title is displayed.
+    mask : ndarray of bool, shape (n_channels, n_times) | None
+        The channels to be marked as significant at a given time point.
+        Indices set to `True` will be considered. Defaults to None.
+    mask_params : dict | None
+        Additional plotting parameters for plotting significant sensors.
+        Default (None) equals:
+        dict(marker='o', markerfacecolor='w', markeredgecolor='k', linewidth=0,
+             markersize=4)
+    outlines : 'head' | dict | None
+        The outlines to be drawn. If 'head', a head scheme will be drawn. If
+        dict, each key refers to a tuple of x and y positions. The values in
+        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Defaults to 'head'.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
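+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure containing the topographies.
+
+    Examples
+    --------
+    A minimal usage sketch; ``evoked`` is assumed to exist already, and
+    the time points are placeholders:
+
+        fig = plot_evoked_topomap(evoked, times=[0.1, 0.2], ch_type='mag',
+                                  proj=True)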
+    """
+    import matplotlib.pyplot as plt
+
+    if ch_type.startswith('planar'):
+        key = 'grad'
+    else:
+        key = ch_type
+
+    if scale is None:
+        scale = DEFAULTS['scalings'][key]
+        unit = DEFAULTS['units'][key]
+
+    if mask_params is None:
+        mask_params = DEFAULTS['mask_params'].copy()
+        mask_params['markersize'] *= size / 2.
+        mask_params['markeredgewidth'] *= size / 2.
+
+    if times is None:
+        times = np.linspace(evoked.times[0], evoked.times[-1], 10)
+    elif np.isscalar(times):
+        times = [times]
+    if len(times) > 20:
+        raise RuntimeError('Too many plots requested. Please pass fewer '
+                           'than 20 time instants.')
+    tmin, tmax = evoked.times[[0, -1]]
+    for t in times:
+        if not tmin <= t <= tmax:
+            raise ValueError('Times should be between %0.3f and %0.3f. (Got '
+                             '%0.3f).' % (tmin, tmax, t))
+
+    picks, pos, merge_grads, names = _prepare_topo_plot(evoked, ch_type,
+                                                        layout)
+    if not show_names:
+        names = None
+
+    n = len(times)
+    nax = n + bool(colorbar)
+    width = size * nax
+    height = size * 1. + max(0, 0.1 * (4 - size))
+    fig = plt.figure(figsize=(width, height))
+    w_frame = plt.rcParams['figure.subplot.wspace'] / (2 * nax)
+    top_frame = max((0.05 if title is None else 0.15), .2 / size)
+    fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0,
+                        top=1 - top_frame)
+    time_idx = [np.where(evoked.times >= t)[0][0] for t in times]
+
+    if proj is True and evoked.proj is not True:
+        data = evoked.copy().apply_proj().data
+    else:
+        data = evoked.data
+
+    data = data[np.ix_(picks, time_idx)] * scale
+    if merge_grads:
+        from ..layouts.layout import _merge_grad_data
+        data = _merge_grad_data(data)
+
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+
+    images, contours_ = [], []
+
+    if mask is not None:
+        _picks = picks[::2 if ch_type not in ['mag', 'eeg'] else 1]
+        mask_ = mask[np.ix_(_picks, time_idx)]
+
+    pos, outlines = _check_outlines(pos, outlines)
+    if outlines is not None:
+        image_mask, pos = _make_image_mask(outlines, pos, res)
+    else:
+        image_mask = None
+
+    for i, t in enumerate(times):
+        ax = plt.subplot(1, nax, i + 1)
+        tp, cn = plot_topomap(data[:, i], pos, vmin=vmin, vmax=vmax,
+                              sensors=sensors, res=res, names=names,
+                              show_names=show_names, cmap=cmap,
+                              mask=mask_[:, i] if mask is not None else None,
+                              mask_params=mask_params, axis=ax,
+                              outlines=outlines, image_mask=image_mask,
+                              contours=contours, image_interp=image_interp)
+        images.append(tp)
+        if cn is not None:
+            contours_.append(cn)
+        if time_format is not None:
+            plt.title(time_format % (t * scale_time))
+
+    if colorbar:
+        cax = plt.subplot(1, n + 1, n + 1)
+        plt.colorbar(images[-1], ax=cax, cax=cax, ticks=[vmin, 0, vmax],
+                     format=format)
+        # resize the colorbar (by default the color fills the whole axes)
+        cpos = cax.get_position()
+        if size <= 1:
+            cpos.x0 = 1 - (.7 + .1 / size) / nax
+        cpos.x1 = cpos.x0 + .1 / nax
+        cpos.y0 = .1
+        cpos.y1 = .7
+        cax.set_position(cpos)
+        if unit is not None:
+            cax.set_title(unit)
+
+    if proj == 'interactive':
+        _check_delayed_ssp(evoked)
+        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
+                      picks=picks, images=images, contours=contours_,
+                      time_idx=time_idx, scale=scale, merge_grads=merge_grads,
+                      res=res, pos=pos, image_mask=image_mask,
+                      plot_update_proj_callback=_plot_update_evoked_topomap)
+        _draw_proj_checkbox(None, params)
+
+    if title is not None:
+        plt.suptitle(title, verticalalignment='top', size='x-large')
+        tight_layout(pad=2 * size / 2.0, fig=fig)
+    if show:
+        plt.show()
+
+    return fig
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
new file mode 100644
index 0000000..88c5e5b
--- /dev/null
+++ b/mne/viz/utils.py
@@ -0,0 +1,364 @@
+"""Utility functions for plotting M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import math
+from copy import deepcopy
+from functools import partial
+import difflib
+import webbrowser
+from warnings import warn
+import tempfile
+
+import numpy as np
+
+from ..io import show_fiff
+from ..utils import verbose
+
+
+COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
+          '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
+
+DEFAULTS = dict(color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
+                           emg='k', ref_meg='steelblue', misc='k', stim='k',
+                           resp='k', chpi='k', exci='k', ias='k', syst='k'),
+                units=dict(eeg='uV', grad='fT/cm', mag='fT', misc='AU'),
+                scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0),
+                scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
+                                       eog=150e-6, ecg=5e-4, emg=1e-3,
+                                       ref_meg=1e-12, misc=1e-3,
+                                       stim=1, resp=1, chpi=1e-4, exci=1,
+                                       ias=1, syst=1),
+                ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
+                          eeg=(-200., 200.), misc=(-5., 5.)),
+                titles=dict(eeg='EEG', grad='Gradiometers',
+                            mag='Magnetometers', misc='misc'),
+                mask_params=dict(marker='o',
+                                 markerfacecolor='w',
+                                 markeredgecolor='k',
+                                 linewidth=0,
+                                 markeredgewidth=1,
+                                 markersize=4))
+
+
+def _mutable_defaults(*mappings):
+    """ To avoid dicts as default keyword arguments
+
+    Use this function instead to resolve default dict values.
+    Example usage:
+    scalings, units = _mutable_defaults(('scalings', scalings,
+                                         'units', units))
+    """
+    out = []
+    for k, v in mappings:
+        this_mapping = DEFAULTS[k]
+        if v is not None:
+            this_mapping = deepcopy(DEFAULTS[k])
+            this_mapping.update(v)
+        out += [this_mapping]
+    return out
+
+
+def _setup_vmin_vmax(data, vmin, vmax):
+    """Aux function to handle vmin and vamx parameters"""
+    if vmax is None and vmin is None:
+        vmax = np.abs(data).max()
+        vmin = -vmax
+    else:
+        if callable(vmin):
+            vmin = vmin(data)
+        elif vmin is None:
+            vmin = np.min(data)
+        if callable(vmax):
+            vmax = vmax(data)
+        elif vmax is None:
+            vmax = np.max(data)
+    return vmin, vmax
+
+
+def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
+    """ Adjust subplot parameters to give specified padding.
+
+    Note. For plotting please use this function instead of plt.tight_layout
+
+    Parameters
+    ----------
+    pad : float
+        padding between the figure edge and the edges of subplots, as a
+        fraction of the font-size.
+    h_pad, w_pad : float
+        Padding (height/width) between edges of adjacent subplots.
+        Defaults to `pad_inches`.
+    fig : instance of matplotlib.figure.Figure | None
+        The figure to adjust. If None, the current figure is used.
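+
+    Examples
+    --------
+    A minimal sketch:
+
+        import matplotlib.pyplot as plt
+        fig, axes = plt.subplots(2, 2)
+        tight_layout(pad=1.5, fig=fig)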
+    """
+    import matplotlib.pyplot as plt
+    if fig is None:
+        fig = plt.gcf()
+
+    try:  # see https://github.com/matplotlib/matplotlib/issues/2654
+        fig.canvas.draw()
+        fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
+    except Exception:
+        msg = ('Matplotlib function \'tight_layout\'%s.'
+               ' Skipping subplot adjustment.')
+        if not hasattr(plt, 'tight_layout'):
+            case = ' is not available'
+        else:
+            case = (' is not supported by your backend: `%s`'
+                    % plt.get_backend())
+        warn(msg % case)
+
+
+def _check_delayed_ssp(container):
+    """ Aux function to be used for interactive SSP selection
+    """
+    if container.proj is True or\
+       all([p['active'] for p in container.info['projs']]):
+        raise RuntimeError('Projs are already applied. Please initialize'
+                           ' the data with proj set to False.')
+    elif len(container.info['projs']) < 1:
+        raise RuntimeError('No projs found in evoked.')
+
+
+def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
+    """Return a colormap similar to that used by mne_analyze
+
+    Parameters
+    ----------
+    limits : list (or array) of length 3
+        Bounds for the colormap.
+    format : str
+        Type of colormap to return. If 'matplotlib', will return a
+        matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
+        return an RGBA array of shape (256, 4).
+
+    Returns
+    -------
+    cmap : instance of matplotlib.pyplot.colormap | array
+        A teal->blue->gray->red->yellow colormap.
+
+    Notes
+    -----
+    This will return a colormap that displays correctly for data
+    that are scaled by the plotting function to span [-fmax, fmax].
+
+    Examples
+    --------
+    The following code will plot a STC using standard MNE limits:
+
+        colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
+        brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
+        brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
+
+    """
+    l = np.asarray(limits, dtype='float')
+    if len(l) != 3:
+        raise ValueError('limits must have 3 elements')
+    if any(l < 0):
+        raise ValueError('limits must all be positive')
+    if any(np.diff(l) <= 0):
+        raise ValueError('limits must be monotonically increasing')
+    if format == 'matplotlib':
+        from matplotlib import colors
+        l = (np.concatenate((-np.flipud(l), l)) + l[-1]) / (2 * l[-1])
+        cdict = {'red': ((l[0], 0.0, 0.0),
+                         (l[1], 0.0, 0.0),
+                         (l[2], 0.5, 0.5),
+                         (l[3], 0.5, 0.5),
+                         (l[4], 1.0, 1.0),
+                         (l[5], 1.0, 1.0)),
+                 'green': ((l[0], 1.0, 1.0),
+                           (l[1], 0.0, 0.0),
+                           (l[2], 0.5, 0.5),
+                           (l[3], 0.5, 0.5),
+                           (l[4], 0.0, 0.0),
+                           (l[5], 1.0, 1.0)),
+                 'blue': ((l[0], 1.0, 1.0),
+                          (l[1], 1.0, 1.0),
+                          (l[2], 0.5, 0.5),
+                          (l[3], 0.5, 0.5),
+                          (l[4], 0.0, 0.0),
+                          (l[5], 0.0, 0.0))}
+        return colors.LinearSegmentedColormap('mne_analyze', cdict)
+    elif format == 'mayavi':
+        l = np.concatenate((-np.flipud(l), [0], l)) / l[-1]
+        r = np.array([0, 0, 0, 0, 1, 1, 1])
+        g = np.array([1, 0, 0, 0, 0, 0, 1])
+        b = np.array([1, 1, 1, 0, 0, 0, 0])
+        a = np.array([1, 1, 0, 0, 0, 1, 1])
+        xp = (np.arange(256) - 128) / 128.0
+        colormap = np.r_[[np.interp(xp, l, 255 * c) for c in [r, g, b, a]]].T
+        return colormap
+    else:
+        raise ValueError('format must be either matplotlib or mayavi')
+
+
+def _toggle_options(event, params):
+    """Toggle options (projectors) dialog"""
+    import matplotlib.pyplot as plt
+    if len(params['projs']) > 0:
+        if params['fig_opts'] is None:
+            _draw_proj_checkbox(event, params, draw_current_state=False)
+        else:
+            # turn off options dialog
+            plt.close(params['fig_opts'])
+            del params['proj_checks']
+            params['fig_opts'] = None
+
+
+def _toggle_proj(event, params):
+    """Operation to perform when proj boxes clicked"""
+    # read options if possible
+    if 'proj_checks' in params:
+        bools = [x[0].get_visible() for x in params['proj_checks'].lines]
+        for bi, (b, p) in enumerate(zip(bools, params['projs'])):
+            # see if they tried to deactivate an active one
+            if not b and p['active']:
+                bools[bi] = True
+    else:
+        bools = [True] * len(params['projs'])
+
+    compute_proj = False
+    if 'proj_bools' not in params:
+        compute_proj = True
+    elif not np.array_equal(bools, params['proj_bools']):
+        compute_proj = True
+
+    # if projectors changed, update plots
+    if compute_proj is True:
+        params['plot_update_proj_callback'](params, bools)
+
+
+def _prepare_trellis(n_cells, max_col):
+    """Aux function
+    """
+    import matplotlib.pyplot as plt
+    if n_cells == 1:
+        nrow = ncol = 1
+    elif n_cells <= max_col:
+        nrow, ncol = 1, n_cells
+    else:
+        nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
+
+    fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
+    axes = [axes] if ncol == nrow == 1 else axes.flatten()
+    for ax in axes[n_cells:]:  # hide unused axes
+        ax.set_visible(False)
+    return fig, axes
+
+
+def _draw_proj_checkbox(event, params, draw_current_state=True):
+    """Toggle options (projectors) dialog"""
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    projs = params['projs']
+    # turn on options dialog
+
+    labels = [p['desc'] for p in projs]
+    actives = ([p['active'] for p in projs] if draw_current_state else
+               [True] * len(params['projs']))
+
+    width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
+    height = len(projs) / 6.0 + 0.5
+    fig_proj = figure_nobar(figsize=(width, height))
+    fig_proj.canvas.set_window_title('SSP projection vectors')
+    ax_temp = plt.axes((0, 0, 1, 1))
+    ax_temp.get_yaxis().set_visible(False)
+    ax_temp.get_xaxis().set_visible(False)
+    fig_proj.add_axes(ax_temp)
+
+    proj_checks = mpl.widgets.CheckButtons(ax_temp, labels=labels,
+                                           actives=actives)
+    # change already-applied projectors to red
+    for ii, p in enumerate(projs):
+        if p['active'] is True:
+            for x in proj_checks.lines[ii]:
+                x.set_color('r')
+    # make minimal size
+    # pass key presses from option dialog over
+
+    proj_checks.on_clicked(partial(_toggle_proj, params=params))
+    params['proj_checks'] = proj_checks
+
+    # this should work for non-test cases
+    try:
+        fig_proj.canvas.draw()
+        fig_proj.show()
+    except Exception:
+        pass
+
+
+ at verbose
+def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
+                 read_limit=np.inf, max_str=30, verbose=None):
+    """Compare the contents of two fiff files using diff and show_fiff
+
+    Parameters
+    ----------
+    fname_1 : str
+        First file to compare.
+    fname_2 : str
+        Second file to compare.
+    fname_out : str | None
+        Filename to store the resulting diff. If None, a temporary
+        file will be created.
+    show : bool
+        If True, show the resulting diff in a new tab in a web browser.
+    indent : str
+        How to indent the lines.
+    read_limit : int
+        Max number of bytes of data to read from a tag. Can be np.inf
+        to always read all data (helps test read completion).
+    max_str : int
+        Max number of characters of string representation to print for
+        each tag's data.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fname_out : str
+        The filename used for storing the diff. Useful when a
+        temporary file is used.
+    """
+    file_1 = show_fiff(fname_1, output=list, indent=indent,
+                       read_limit=read_limit, max_str=max_str)
+    file_2 = show_fiff(fname_2, output=list, indent=indent,
+                       read_limit=read_limit, max_str=max_str)
+    diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
+    if fname_out is not None:
+        f = open(fname_out, 'w')
+    else:
+        f = tempfile.NamedTemporaryFile('w', delete=False)
+        fname_out = f.name
+    with f as fid:
+        fid.write(diff)
+    if show:
+        webbrowser.open_new_tab(fname_out)
+    return fname_out
+
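+# Minimal usage sketch (the file names below are hypothetical):
+#
+#     html_fname = compare_fiff('run1_raw.fif', 'run2_raw.fif', show=False)
+#     # `html_fname` is the HTML side-by-side diff written by difflib;
+#     # with show=True (the default) it also opens in a new browser tab.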
+
+def figure_nobar(*args, **kwargs):
+    """Make matplotlib figure with no toolbar"""
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    old_val = mpl.rcParams['toolbar']
+    try:
+        mpl.rcParams['toolbar'] = 'none'
+        fig = plt.figure(*args, **kwargs)
+        # remove button press catchers (for toolbar)
+        cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
+        for key in cbs:
+            fig.canvas.callbacks.disconnect(key)
+    finally:
+        mpl.rcParams['toolbar'] = old_val
+    return fig
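+# Usage sketch: figure_nobar forwards all arguments to plt.figure, so e.g.
+# figure_nobar(figsize=(2, 1.5)) returns a toolbar-free figure whose default
+# key-press handlers have been disconnected.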
diff --git a/setup.cfg b/setup.cfg
index 299c8be..0d2de22 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,11 +15,14 @@ release = egg_info -RDb ''
 doc-files = doc
 
 [nosetests]
-verbosity = 2
-detailed-errors = 1
 with-coverage = 1
+# cover-html = 1
+# cover-html-dir = coverage
 cover-package = mne
-#pdb = 1
-#pdb-failures = 1
+
+detailed-errors = 1
 with-doctest = 1
-doctest-extension=rst
+doctest-tests = 1
+doctest-extension = rst
+doctest-fixtures = _fixture
+#doctest-options = +ELLIPSIS,+NORMALIZE_WHITESPACE
diff --git a/setup.py b/setup.py
index ae05de2..b8c143d 100755
--- a/setup.py
+++ b/setup.py
@@ -1,10 +1,12 @@
 #! /usr/bin/env python
 #
-# Copyright (C) 2011-2013 Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+
+# Copyright (C) 2011-2014 Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 
 import os
+from os import path as op
 
-import setuptools  # we are using a setuptools namespace
+import setuptools  # noqa; analysis:ignore; we are using a setuptools namespace
 from numpy.distutils.core import setup
 
 # get the version (don't import mne here, so dependencies are not needed)
@@ -23,7 +25,7 @@ descr = """MNE python project for MEG and EEG data analysis."""
 DISTNAME = 'mne'
 DESCRIPTION = descr
 MAINTAINER = 'Alexandre Gramfort'
-MAINTAINER_EMAIL = 'gramfort at nmr.mgh.harvard.edu'
+MAINTAINER_EMAIL = 'alexandre.gramfort at telecom-paristech.fr'
 URL = 'http://martinos.org/mne'
 LICENSE = 'BSD (3-clause)'
 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'
@@ -64,12 +66,20 @@ if __name__ == "__main__":
                     'mne.datasets.sample',
                     'mne.datasets.megsim',
                     'mne.datasets.spm_face',
-                    'mne.fiff', 'mne.fiff.tests',
-                    'mne.fiff.bti', 'mne.fiff.bti.tests',
-                    'mne.fiff.kit', 'mne.fiff.kit.tests',
-                    'mne.fiff.edf', 'mne.fiff.edf.tests',
-                    'mne.fiff.brainvision', 'mne.fiff.brainvision.tests',
+                    'mne.datasets.eegbci',
+                    'mne.datasets.somato',
+                    'mne.externals',
+                    'mne.fiff',
+                    'mne.io', 'mne.io.tests',
+                    'mne.io.array', 'mne.io.array.tests',
+                    'mne.io.brainvision', 'mne.io.brainvision.tests',
+                    'mne.io.bti', 'mne.io.bti.tests',
+                    'mne.io.edf', 'mne.io.edf.tests',
+                    'mne.io.egi', 'mne.io.egi.tests',
+                    'mne.io.fiff', 'mne.io.fiff.tests',
+                    'mne.io.kit', 'mne.io.kit.tests',
                     'mne.forward', 'mne.forward.tests',
+                    'mne.viz', 'mne.viz.tests',
                     'mne.gui', 'mne.gui.tests',
                     'mne.layouts', 'mne.layouts.tests',
                     'mne.minimum_norm', 'mne.minimum_norm.tests',
@@ -82,10 +92,14 @@ if __name__ == "__main__":
                     'mne.time_frequency', 'mne.time_frequency.tests',
                     'mne.realtime', 'mne.realtime.tests',
                     'mne.decoding', 'mne.decoding.tests',
-                    'mne.commands'],
-          package_data={'mne': ['data/*.sel',
-                                'data/icos.fif.gz',
-                                'data/coil_def.dat',
-                                'layouts/*.lout',
-                                'layouts/*.lay']},
+                    'mne.commands', 'mne.externals',
+                    'mne.externals.tempita'],
+          package_data={'mne': [op.join('data', '*.sel'),
+                                op.join('data', 'icos.fif.gz'),
+                                op.join('data', 'coil_def.dat'),
+                                op.join('data', 'helmets', '*.fif.gz'),
+                                op.join('layouts', '*.lout'),
+                                op.join('layouts', '*.lay'),
+                                op.join('html', '*.js'),
+                                op.join('html', '*.css')]},
           scripts=['bin/mne'])

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-mne.git


