[med-svn] [mne-python] 01/02: Imported Upstream version 0.6

Andreas Tille tille at debian.org
Thu Nov 21 13:13:19 UTC 2013


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository mne-python.

commit 21bf9c0a9291e6f10a0820144bb14ff7ab62011a
Author: Andreas Tille <tille at debian.org>
Date:   Thu Nov 21 11:51:28 2013 +0100

    Imported Upstream version 0.6
---
 .gitignore                                         |   47 +
 .mailmap                                           |   23 +
 AUTHORS.rst                                        |   19 +
 LICENSE.txt                                        |   24 +
 MANIFEST.in                                        |   15 +
 Makefile                                           |   67 +
 README.rst                                         |  149 +
 bin/mne_browse_raw.py                              |   73 +
 bin/mne_bti2fiff.py                                |   85 +
 bin/mne_clean_eog_ecg.py                           |  139 +
 bin/mne_compute_proj_ecg.py                        |  184 +
 bin/mne_compute_proj_eog.py                        |  189 +
 bin/mne_flash_bem_model.py                         |  132 +
 bin/mne_kit2fiff.py                                |   62 +
 bin/mne_maxfilter.py                               |  143 +
 bin/mne_surf2bem.py                                |   34 +
 dictionary.txt                                     | 4264 ++++++++++++++++++++
 doc/Makefile                                       |   93 +
 doc/build_doc                                      |   16 +
 doc/source/_images/mne_helmet.png                  |  Bin 0 -> 27218 bytes
 .../_images/plot_read_and_write_raw_data.png       |  Bin 0 -> 61418 bytes
 doc/source/_images/plot_read_epochs.png            |  Bin 0 -> 186065 bytes
 doc/source/_images/plot_time_frequency.png         |  Bin 0 -> 151383 bytes
 doc/source/_static/branch_dropdown.png             |  Bin 0 -> 16311 bytes
 doc/source/_static/default.css                     |  515 +++
 doc/source/_static/favicon.ico                     |  Bin 0 -> 6687 bytes
 doc/source/_static/forking_button.png              |  Bin 0 -> 13092 bytes
 doc/source/_static/logo.png                        |  Bin 0 -> 74269 bytes
 doc/source/_static/navy.css                        |  515 +++
 doc/source/_static/pull_button.png                 |  Bin 0 -> 12893 bytes
 doc/source/_templates/class.rst                    |   12 +
 doc/source/_templates/function.rst                 |    8 +
 doc/source/_templates/layout.html                  |   59 +
 doc/source/command_line_tutorial.rst               |  100 +
 doc/source/conf.py                                 |  230 ++
 doc/source/contributing.rst                        |  649 +++
 doc/source/customizing_git.rst                     |  123 +
 doc/source/getting_started.rst                     |  122 +
 doc/source/git_links.inc                           |   64 +
 doc/source/index.rst                               |   48 +
 doc/source/known_projects.inc                      |   46 +
 doc/source/links.inc                               |    4 +
 doc/source/manual.rst                              |   28 +
 doc/source/manual/AppA.rst                         |  377 ++
 doc/source/manual/AppB.rst                         |  294 ++
 doc/source/manual/AppEULA.rst                      |  133 +
 doc/source/manual/AppInstall.rst                   |  174 +
 doc/source/manual/AppReleaseNotes.rst              |  879 ++++
 doc/source/manual/analyze.rst                      | 2746 +++++++++++++
 doc/source/manual/browse.rst                       | 2633 ++++++++++++
 doc/source/manual/convert.rst                      | 2312 +++++++++++
 doc/source/manual/cookbook.rst                     | 1066 +++++
 doc/source/manual/forward.rst                      | 1337 ++++++
 doc/source/manual/intro.rst                        |   45 +
 doc/source/manual/list.rst                         |  439 ++
 doc/source/manual/matlab.rst                       | 1210 ++++++
 doc/source/manual/mne.rst                          | 1323 ++++++
 doc/source/manual/mne_analyze/MNE_preferences.png  |  Bin 0 -> 23704 bytes
 doc/source/manual/mne_analyze/adjust_alignment.png |  Bin 0 -> 21447 bytes
 doc/source/manual/mne_analyze/adjust_lights.png    |  Bin 0 -> 15994 bytes
 doc/source/manual/mne_analyze/adjust_menu.png      |  Bin 0 -> 4747 bytes
 doc/source/manual/mne_analyze/cont_hpi_data.png    |  Bin 0 -> 109299 bytes
 doc/source/manual/mne_analyze/dipole_list.png      |  Bin 0 -> 18515 bytes
 .../manual/mne_analyze/dipole_parameters.png       |  Bin 0 -> 17173 bytes
 doc/source/manual/mne_analyze/dipoles_menu.png     |  Bin 0 -> 2327 bytes
 doc/source/manual/mne_analyze/epoch_selector.png   |  Bin 0 -> 6807 bytes
 .../manual/mne_analyze/field_mapping_pref.png      |  Bin 0 -> 20543 bytes
 doc/source/manual/mne_analyze/file_menu.png        |  Bin 0 -> 4932 bytes
 .../manual/mne_analyze/hardcopy_controls.png       |  Bin 0 -> 3273 bytes
 doc/source/manual/mne_analyze/help_menu.png        |  Bin 0 -> 8056 bytes
 doc/source/manual/mne_analyze/image_dialog.png     |  Bin 0 -> 10742 bytes
 doc/source/manual/mne_analyze/label_list.png       |  Bin 0 -> 8876 bytes
 doc/source/manual/mne_analyze/labels_menu.png      |  Bin 0 -> 2589 bytes
 doc/source/manual/mne_analyze/main_window.png      |  Bin 0 -> 39612 bytes
 doc/source/manual/mne_analyze/movie_dialog.png     |  Bin 0 -> 19162 bytes
 doc/source/manual/mne_analyze/mri_viewer.png       |  Bin 0 -> 12233 bytes
 doc/source/manual/mne_analyze/open_dialog.png      |  Bin 0 -> 51617 bytes
 .../manual/mne_analyze/overlay_management.png      |  Bin 0 -> 24078 bytes
 .../manual/mne_analyze/patch_selection_dialog.png  |  Bin 0 -> 10090 bytes
 .../manual/mne_analyze/save_label_timecourse.png   |  Bin 0 -> 5400 bytes
 doc/source/manual/mne_analyze/scales_dialog.png    |  Bin 0 -> 14773 bytes
 doc/source/manual/mne_analyze/surface_controls.png |  Bin 0 -> 2478 bytes
 .../mne_analyze/surface_selection_dialog.png       |  Bin 0 -> 12759 bytes
 .../manual/mne_analyze/timecourse_manager.png      |  Bin 0 -> 14007 bytes
 doc/source/manual/mne_analyze/view_menu.png        |  Bin 0 -> 2170 bytes
 doc/source/manual/mne_analyze/viewer.png           |  Bin 0 -> 130171 bytes
 doc/source/manual/mne_analyze/viewer_options.png   |  Bin 0 -> 59375 bytes
 doc/source/manual/mne_analyze/visualize_hpi.png    |  Bin 0 -> 53891 bytes
 doc/source/manual/mne_analyze/windows_menu.png     |  Bin 0 -> 4015 bytes
 doc/source/manual/mne_browse_raw/adjust_menu.png   |  Bin 0 -> 5320 bytes
 doc/source/manual/mne_browse_raw/adust_menu.png    |  Bin 0 -> 5307 bytes
 doc/source/manual/mne_browse_raw/average_pref.png  |  Bin 0 -> 69396 bytes
 .../manual/mne_browse_raw/channel_selection.png    |  Bin 0 -> 10141 bytes
 doc/source/manual/mne_browse_raw/file_menu.png     |  Bin 0 -> 5230 bytes
 doc/source/manual/mne_browse_raw/filter_dialog.png |  Bin 0 -> 44574 bytes
 doc/source/manual/mne_browse_raw/help_menu.png     |  Bin 0 -> 9857 bytes
 doc/source/manual/mne_browse_raw/main.png          |  Bin 0 -> 29524 bytes
 .../mne_browse_raw/manage_averages_dialog.png      |  Bin 0 -> 21766 bytes
 doc/source/manual/mne_browse_raw/new_selection.png |  Bin 0 -> 16464 bytes
 doc/source/manual/mne_browse_raw/new_ssp.png       |  Bin 0 -> 9308 bytes
 .../manual/mne_browse_raw/open_dialog copy.png     |  Bin 0 -> 21399 bytes
 doc/source/manual/mne_browse_raw/open_dialog.png   |  Bin 0 -> 19042 bytes
 doc/source/manual/mne_browse_raw/process_menu.png  |  Bin 0 -> 4522 bytes
 doc/source/manual/mne_browse_raw/process_menu2.png |  Bin 0 -> 11239 bytes
 doc/source/manual/mne_browse_raw/scales_dialog.png |  Bin 0 -> 20145 bytes
 .../manual/mne_browse_raw/scales_dialog2.png       |  Bin 0 -> 82263 bytes
 doc/source/manual/mne_browse_raw/toolbar.png       |  Bin 0 -> 3965 bytes
 .../manual/mne_browse_raw/windows_menu-0.png       |  Bin 0 -> 5320 bytes
 .../manual/mne_browse_raw/windows_menu-1.png       |  Bin 0 -> 5307 bytes
 .../manual/mne_browse_raw/windows_menu-10.png      |  Bin 0 -> 9308 bytes
 .../manual/mne_browse_raw/windows_menu-11.png      |  Bin 0 -> 21399 bytes
 .../manual/mne_browse_raw/windows_menu-12.png      |  Bin 0 -> 19042 bytes
 .../manual/mne_browse_raw/windows_menu-13.png      |  Bin 0 -> 4522 bytes
 .../manual/mne_browse_raw/windows_menu-14.png      |  Bin 0 -> 10589 bytes
 .../manual/mne_browse_raw/windows_menu-15.png      |  Bin 0 -> 20145 bytes
 .../manual/mne_browse_raw/windows_menu-16.png      |  Bin 0 -> 75239 bytes
 .../manual/mne_browse_raw/windows_menu-17.png      |  Bin 0 -> 3965 bytes
 .../manual/mne_browse_raw/windows_menu-2.png       |  Bin 0 -> 64663 bytes
 .../manual/mne_browse_raw/windows_menu-3.png       |  Bin 0 -> 10141 bytes
 .../manual/mne_browse_raw/windows_menu-4.png       |  Bin 0 -> 5266 bytes
 .../manual/mne_browse_raw/windows_menu-5.png       |  Bin 0 -> 39744 bytes
 .../manual/mne_browse_raw/windows_menu-6.png       |  Bin 0 -> 9893 bytes
 .../manual/mne_browse_raw/windows_menu-7.png       |  Bin 0 -> 21881 bytes
 .../manual/mne_browse_raw/windows_menu-8.png       |  Bin 0 -> 21766 bytes
 .../manual/mne_browse_raw/windows_menu-9.png       |  Bin 0 -> 16500 bytes
 doc/source/manual/mne_browse_raw/windows_menu.png  |  Bin 0 -> 6133 bytes
 doc/source/manual/morph.rst                        |  409 ++
 doc/source/manual/pics/Averaging-flowchart.png     |  Bin 0 -> 16254 bytes
 doc/source/manual/pics/CoordinateSystems.png       |  Bin 0 -> 25060 bytes
 doc/source/manual/pics/Digitizer-example.png       |  Bin 0 -> 7356 bytes
 doc/source/manual/pics/Flowchart.png               |  Bin 0 -> 43984 bytes
 doc/source/manual/pics/HeadCS.png                  |  Bin 0 -> 14787 bytes
 doc/source/manual/pics/cover.png                   |  Bin 0 -> 119904 bytes
 doc/source/manual/pics/flat.png                    |  Bin 0 -> 89003 bytes
 doc/source/manual/pics/morphed.png                 |  Bin 0 -> 101881 bytes
 doc/source/manual/pics/neuromag.png                |  Bin 0 -> 4015 bytes
 doc/source/manual/pics/orig.png                    |  Bin 0 -> 83718 bytes
 doc/source/manual/pics/proj-off-on.png             |  Bin 0 -> 147837 bytes
 doc/source/manual/pics/title_page.png              |  Bin 0 -> 110643 bytes
 doc/source/manual/reading.rst                      |  145 +
 doc/source/manual/sampledata.rst                   |  768 ++++
 doc/source/manual/utilities.rst                    | 1402 +++++++
 doc/source/mne-python.rst                          |   19 +
 doc/source/python_reference.rst                    |  547 +++
 doc/source/python_tutorial.rst                     |  417 ++
 doc/source/this_project.inc                        |    5 +
 doc/source/whats_new.rst                           |  444 ++
 doc/sphinxext/README.txt                           |   25 +
 doc/sphinxext/docscrape.py                         |  497 +++
 doc/sphinxext/docscrape_sphinx.py                  |  137 +
 doc/sphinxext/gen_rst.py                           |  907 +++++
 doc/sphinxext/ipython_console_highlighting.py      |  100 +
 doc/sphinxext/numpy_ext/docscrape.py               |  498 +++
 doc/sphinxext/numpy_ext/docscrape_sphinx.py        |  226 ++
 doc/sphinxext/numpy_ext/numpydoc.py                |  163 +
 doc/sphinxext/numpy_ext_old/docscrape.py           |  490 +++
 doc/sphinxext/numpy_ext_old/docscrape_sphinx.py    |  133 +
 doc/sphinxext/numpy_ext_old/numpydoc.py            |  111 +
 doc/sphinxext/only_directives.py                   |   65 +
 doc/upload_html.sh                                 |    4 +
 doc/utils/extract_config_doc.py                    |   73 +
 doc/utils/lut2sphinxtbl.py                         |   65 +
 doc/utils/make_clean_config.py                     |   30 +
 examples/README.txt                                |    6 +
 examples/connectivity/README.txt                   |    6 +
 .../connectivity/plot_cwt_sensor_connectivity.py   |   79 +
 .../plot_mne_inverse_coherence_epochs.py           |  118 +
 .../plot_mne_inverse_connectivity_spectrum.py      |   98 +
 .../plot_mne_inverse_label_connectivity.py         |  125 +
 .../connectivity/plot_mne_inverse_psi_visual.py    |  115 +
 examples/connectivity/plot_sensor_connectivity.py  |  118 +
 examples/decoding/README.txt                       |    5 +
 examples/decoding/plot_decoding_sensors.py         |   99 +
 .../plot_decoding_spatio_temporal_source.py        |  154 +
 examples/export/README.txt                         |    5 +
 examples/export/plot_epochs_as_data_frame.py       |  230 ++
 examples/export/plot_epochs_to_nitime.py           |   65 +
 examples/export/plot_evoked_to_nitime.py           |   34 +
 examples/export/plot_raw_to_nitime.py              |   83 +
 examples/extract_events_from_raw.py                |   31 +
 examples/inverse/README.txt                        |    7 +
 examples/inverse/plot_compute_mne_inverse.py       |   56 +
 .../plot_compute_mne_inverse_epochs_in_label.py    |   79 +
 .../plot_compute_mne_inverse_raw_in_label.py       |   54 +
 .../inverse/plot_compute_mne_inverse_volume.py     |   55 +
 examples/inverse/plot_dipole_fit_result.py         |   82 +
 examples/inverse/plot_gamma_map_inverse.py         |   66 +
 examples/inverse/plot_label_activation_from_stc.py |   62 +
 examples/inverse/plot_label_source_activations.py  |   64 +
 examples/inverse/plot_lcmv_beamformer.py           |   85 +
 examples/inverse/plot_lcmv_beamformer_volume.py    |   90 +
 examples/inverse/plot_make_inverse_operator.py     |   89 +
 examples/inverse/plot_mixed_norm_L21_inverse.py    |   69 +
 examples/inverse/plot_morph_data.py                |   57 +
 examples/inverse/plot_read_inverse.py              |   41 +
 examples/inverse/plot_read_source_space.py         |   36 +
 examples/inverse/plot_read_stc.py                  |   32 +
 .../plot_time_frequency_mixed_norm_inverse.py      |  123 +
 examples/plot_channel_epochs_image.py              |   72 +
 examples/plot_define_target_events.py              |   98 +
 .../plot_estimate_covariance_matrix_baseline.py    |   55 +
 examples/plot_estimate_covariance_matrix_raw.py    |   38 +
 examples/plot_evoked_delayed_ssp.py                |   95 +
 examples/plot_evoked_topomap.py                    |   35 +
 examples/plot_evoked_topomap_delayed_ssp.py        |   61 +
 examples/plot_evoked_whitening.py                  |   48 +
 examples/plot_from_raw_to_epochs_to_evoked.py      |   65 +
 .../plot_from_raw_to_multiple_epochs_to_evoked.py  |   71 +
 examples/plot_megsim_data.py                       |   53 +
 examples/plot_megsim_data_single_trial.py          |   35 +
 examples/plot_read_and_write_raw_data.py           |   44 +
 examples/plot_read_bem_surfaces.py                 |   40 +
 examples/plot_read_epochs.py                       |   47 +
 examples/plot_read_evoked.py                       |   28 +
 examples/plot_read_forward.py                      |   44 +
 examples/plot_read_noise_covariance_matrix.py      |   29 +
 examples/plot_shift_evoked.py                      |   43 +
 examples/plot_simulate_evoked_data.py              |   85 +
 examples/plot_ssp_projs_sensitivity_map.py         |   38 +
 examples/plot_ssp_projs_topomaps.py                |   31 +
 examples/plot_topo_channel_epochs_image.py         |   55 +
 examples/plot_topo_compare_conditions.py           |   71 +
 examples/plot_topography.py                        |   30 +
 examples/preprocessing/README.txt                  |    6 +
 examples/preprocessing/plot_find_ecg_artifacts.py  |   48 +
 examples/preprocessing/plot_find_eog_artifacts.py  |   46 +
 examples/preprocessing/plot_ica_from_epochs.py     |  136 +
 examples/preprocessing/plot_ica_from_raw.py        |  208 +
 examples/read_events.py                            |   31 +
 examples/stats/README.txt                          |    6 +
 .../plot_cluster_1samp_test_time_frequency.py      |  139 +
 examples/stats/plot_cluster_methods_tutorial.py    |  197 +
 examples/stats/plot_cluster_stats_evoked.py        |   88 +
 .../stats/plot_cluster_stats_spatio_temporal.py    |  179 +
 .../plot_cluster_stats_spatio_temporal_2samp.py    |  107 +
 ...tats_spatio_temporal_repeated_measures_anova.py |  266 ++
 .../stats/plot_cluster_stats_time_frequency.py     |  149 +
 ...stats_time_frequency_repeated_measures_anova.py |  224 +
 examples/stats/plot_fdr_stats_evoked.py            |   81 +
 examples/stats/plot_sensor_permutation_test.py     |   90 +
 examples/time_frequency/README.txt                 |    6 +
 .../plot_compute_raw_data_spectrum.py              |   86 +
 .../plot_compute_source_psd_epochs.py              |   89 +
 .../plot_source_label_time_frequency.py            |   86 +
 .../time_frequency/plot_source_power_spectrum.py   |   55 +
 .../plot_source_space_time_frequency.py            |   67 +
 examples/time_frequency/plot_temporal_whitening.py |   63 +
 examples/time_frequency/plot_tfr_topography.py     |   80 +
 examples/time_frequency/plot_time_frequency.py     |   95 +
 mne/__init__.py                                    |   68 +
 mne/baseline.py                                    |   87 +
 mne/beamformer/__init__.py                         |    4 +
 mne/beamformer/_lcmv.py                            |  398 ++
 mne/beamformer/tests/test_lcmv.py                  |  153 +
 mne/connectivity/__init__.py                       |    6 +
 mne/connectivity/effective.py                      |  164 +
 mne/connectivity/spectral.py                       | 1054 +++++
 mne/connectivity/tests/test_effective.py           |   43 +
 mne/connectivity/tests/test_spectral.py            |  195 +
 mne/connectivity/tests/test_utils.py               |   23 +
 mne/connectivity/utils.py                          |   45 +
 mne/cov.py                                         |  741 ++++
 mne/cuda.py                                        |  408 ++
 mne/data/icos.fif.gz                               |  Bin 0 -> 3732551 bytes
 mne/data/mne_analyze.sel                           |   13 +
 mne/datasets/__init__.py                           |    5 +
 mne/datasets/megsim/__init__.py                    |    4 +
 mne/datasets/megsim/megsim.py                      |  192 +
 mne/datasets/megsim/urls.py                        |  160 +
 mne/datasets/sample/__init__.py                    |    4 +
 mne/datasets/sample/sample.py                      |  135 +
 mne/dipole.py                                      |   48 +
 mne/epochs.py                                      | 1515 +++++++
 mne/event.py                                       |  683 ++++
 mne/fiff/__init__.py                               |   20 +
 mne/fiff/bti/__init__.py                           |    5 +
 mne/fiff/bti/constants.py                          |  107 +
 mne/fiff/bti/raw.py                                | 1220 ++++++
 mne/fiff/bti/read.py                               |  126 +
 mne/fiff/bti/tests/data/exported4D_linux.fif       |  Bin 0 -> 523296 bytes
 mne/fiff/bti/tests/data/exported4D_solaris.fif     |  Bin 0 -> 987548 bytes
 mne/fiff/bti/tests/data/test_config_linux          |  Bin 0 -> 194296 bytes
 mne/fiff/bti/tests/data/test_config_solaris        |  Bin 0 -> 193616 bytes
 mne/fiff/bti/tests/data/test_hs_linux              |  Bin 0 -> 85576 bytes
 mne/fiff/bti/tests/data/test_hs_solaris            |  Bin 0 -> 123760 bytes
 mne/fiff/bti/tests/data/test_pdf_linux             |  Bin 0 -> 201448 bytes
 mne/fiff/bti/tests/data/test_pdf_solaris           |  Bin 0 -> 781560 bytes
 mne/fiff/bti/tests/test_bti.py                     |   91 +
 mne/fiff/bti/transforms.py                         |  102 +
 mne/fiff/channels.py                               |   35 +
 mne/fiff/compensator.py                            |  156 +
 mne/fiff/constants.py                              |  713 ++++
 mne/fiff/cov.py                                    |  184 +
 mne/fiff/ctf.py                                    |  256 ++
 mne/fiff/diff.py                                   |   42 +
 mne/fiff/evoked.py                                 |  774 ++++
 mne/fiff/kit/__init__.py                           |   10 +
 mne/fiff/kit/constants.py                          |   98 +
 mne/fiff/kit/coreg.py                              |  276 ++
 mne/fiff/kit/kit.py                                |  466 +++
 mne/fiff/kit/tests/data/sns.txt                    |  195 +
 mne/fiff/kit/tests/data/test.sqd                   |  Bin 0 -> 828852 bytes
 mne/fiff/kit/tests/data/test_Ykgw.mat              |  Bin 0 -> 2212272 bytes
 mne/fiff/kit/tests/data/test_bin.fif               |  Bin 0 -> 1546456 bytes
 mne/fiff/kit/tests/data/test_elp.txt               |   11 +
 mne/fiff/kit/tests/data/test_hsp.txt               |  504 +++
 mne/fiff/kit/tests/data/test_mrk.sqd               |  Bin 0 -> 68824 bytes
 mne/fiff/kit/tests/test_kit.py                     |  109 +
 mne/fiff/matrix.py                                 |  133 +
 mne/fiff/meas_info.py                              |  391 ++
 mne/fiff/open.py                                   |  204 +
 mne/fiff/pick.py                                   |  494 +++
 mne/fiff/proj.py                                   |  645 +++
 mne/fiff/raw.py                                    | 1890 +++++++++
 mne/fiff/tag.py                                    |  472 +++
 mne/fiff/tests/data/process_raw.sh                 |   26 +
 mne/fiff/tests/data/test-ave-2.log                 |   28 +
 mne/fiff/tests/data/test-ave.fif                   |  Bin 0 -> 5546473 bytes
 mne/fiff/tests/data/test-ave.fif.gz                |  Bin 0 -> 2338877 bytes
 mne/fiff/tests/data/test-ave.log                   |   14 +
 mne/fiff/tests/data/test-cov.fif                   |  Bin 0 -> 547234 bytes
 mne/fiff/tests/data/test-cov.fif.gz                |  Bin 0 -> 524544 bytes
 mne/fiff/tests/data/test-eve-1.eve                 |    1 +
 mne/fiff/tests/data/test-eve-1.fif                 |  Bin 0 -> 160 bytes
 mne/fiff/tests/data/test-eve-old-style.eve         |   31 +
 mne/fiff/tests/data/test-eve.eve                   |   31 +
 mne/fiff/tests/data/test-eve.fif                   |  Bin 0 -> 543 bytes
 mne/fiff/tests/data/test-eve.fif.gz                |  Bin 0 -> 270 bytes
 mne/fiff/tests/data/test-km-cov.fif                |  Bin 0 -> 547234 bytes
 mne/fiff/tests/data/test-mpr-eve.eve               |   32 +
 mne/fiff/tests/data/test-nf-ave.fif                |  Bin 0 -> 5546473 bytes
 mne/fiff/tests/data/test-no-reject.ave             |   49 +
 mne/fiff/tests/data/test.ave                       |   49 +
 mne/fiff/tests/data/test.cov                       |   55 +
 mne/fiff/tests/data/test_bads.txt                  |    2 +
 mne/fiff/tests/data/test_ctf_comp_raw.fif          |  Bin 0 -> 442032 bytes
 mne/fiff/tests/data/test_ctf_raw.fif               |  Bin 0 -> 9017895 bytes
 mne/fiff/tests/data/test_empty_room.cov            |   44 +
 mne/fiff/tests/data/test_erm-cov.fif               |  Bin 0 -> 541025 bytes
 mne/fiff/tests/data/test_ica.lout                  |    3 +
 mne/fiff/tests/data/test_keepmean.cov              |   56 +
 mne/fiff/tests/data/test_proj.fif                  |  Bin 0 -> 4563 bytes
 mne/fiff/tests/data/test_proj.fif.gz               |  Bin 0 -> 2221 bytes
 mne/fiff/tests/data/test_raw-eve.fif               |  Bin 0 -> 927 bytes
 mne/fiff/tests/data/test_raw.fif                   |  Bin 0 -> 13852290 bytes
 mne/fiff/tests/data/test_raw.fif.gz                |  Bin 0 -> 7270038 bytes
 mne/fiff/tests/data/test_raw.lout                  |   61 +
 mne/fiff/tests/data/test_withbads_raw.fif          |  Bin 0 -> 4823007 bytes
 mne/fiff/tests/data/test_wrong_bads.txt            |    3 +
 mne/fiff/tests/test_compensator.py                 |   20 +
 mne/fiff/tests/test_evoked.py                      |  207 +
 mne/fiff/tests/test_pick.py                        |   11 +
 mne/fiff/tests/test_raw.py                         |  784 ++++
 mne/fiff/tree.py                                   |  154 +
 mne/fiff/write.py                                  |  354 ++
 mne/filter.py                                      | 1338 ++++++
 mne/fixes.py                                       |  521 +++
 mne/forward.py                                     | 1468 +++++++
 mne/inverse_sparse/__init__.py                     |    8 +
 mne/inverse_sparse/_gamma_map.py                   |  304 ++
 mne/inverse_sparse/mxne_debiasing.py               |  133 +
 mne/inverse_sparse/mxne_inverse.py                 |  435 ++
 mne/inverse_sparse/mxne_optim.py                   |  631 +++
 mne/inverse_sparse/tests/test_gamma_map.py         |   51 +
 mne/inverse_sparse/tests/test_mxne_debiasing.py    |   22 +
 mne/inverse_sparse/tests/test_mxne_inverse.py      |   95 +
 mne/inverse_sparse/tests/test_mxne_optim.py        |  121 +
 mne/label.py                                       |  976 +++++
 mne/layouts/CTF-275.lout                           |  276 ++
 mne/layouts/CTF151.lay                             |  153 +
 mne/layouts/CTF275.lay                             |  275 ++
 mne/layouts/Vectorview-all.lout                    |  307 ++
 mne/layouts/Vectorview-grad.lout                   |  205 +
 mne/layouts/Vectorview-mag.lout                    |  103 +
 mne/layouts/__init__.py                            |    1 +
 mne/layouts/layout.py                              |  471 +++
 mne/layouts/magnesWH3600.lout                      |  249 ++
 mne/layouts/tests/test_layout.py                   |  107 +
 mne/minimum_norm/__init__.py                       |    8 +
 mne/minimum_norm/inverse.py                        | 1342 ++++++
 mne/minimum_norm/tests/test_inverse.py             |  398 ++
 mne/minimum_norm/tests/test_time_frequency.py      |  157 +
 mne/minimum_norm/time_frequency.py                 |  667 +++
 mne/misc.py                                        |  100 +
 mne/mixed_norm/__init__.py                         |    7 +
 mne/parallel.py                                    |   96 +
 mne/preprocessing/__init__.py                      |   15 +
 mne/preprocessing/ecg.py                           |  168 +
 mne/preprocessing/eog.py                           |  123 +
 mne/preprocessing/ica.py                           | 1624 ++++++++
 mne/preprocessing/maxfilter.py                     |  281 ++
 mne/preprocessing/peak_finder.py                   |  172 +
 mne/preprocessing/ssp.py                           |  381 ++
 mne/preprocessing/stim.py                          |   66 +
 mne/preprocessing/tests/test_ecg.py                |   21 +
 mne/preprocessing/tests/test_eog.py                |   18 +
 mne/preprocessing/tests/test_ica.py                |  318 ++
 mne/preprocessing/tests/test_peak_finder.py        |   10 +
 mne/preprocessing/tests/test_ssp.py                |   94 +
 mne/preprocessing/tests/test_stim.py               |   36 +
 mne/proj.py                                        |  357 ++
 mne/selection.py                                   |  104 +
 mne/simulation/__init__.py                         |    6 +
 mne/simulation/evoked.py                           |  125 +
 mne/simulation/source.py                           |  196 +
 mne/simulation/tests/test_evoked.py                |   76 +
 mne/simulation/tests/test_source.py                |  205 +
 mne/source_estimate.py                             | 2521 ++++++++++++
 mne/source_space.py                                |  766 ++++
 mne/stats/__init__.py                              |   12 +
 mne/stats/cluster_level.py                         | 1414 +++++++
 mne/stats/multi_comp.py                            |  102 +
 mne/stats/parametric.py                            |  252 ++
 mne/stats/permutations.py                          |  152 +
 mne/stats/tests/test_cluster_level.py              |  359 ++
 mne/stats/tests/test_multi_comp.py                 |   44 +
 mne/stats/tests/test_parametric.py                 |   91 +
 mne/stats/tests/test_permutations.py               |   33 +
 mne/surface.py                                     |  394 ++
 mne/tests/test_cov.py                              |  201 +
 mne/tests/test_dipole.py                           |   20 +
 mne/tests/test_epochs.py                           |  756 ++++
 mne/tests/test_event.py                            |  234 ++
 mne/tests/test_filter.py                           |  201 +
 mne/tests/test_fixes.py                            |   73 +
 mne/tests/test_forward.py                          |  329 ++
 mne/tests/test_label.py                            |  287 ++
 mne/tests/test_misc.py                             |   14 +
 mne/tests/test_proj.py                             |  190 +
 mne/tests/test_selection.py                        |   27 +
 mne/tests/test_source_estimate.py                  |  443 ++
 mne/tests/test_source_space.py                     |  102 +
 mne/tests/test_surface.py                          |   40 +
 mne/tests/test_utils.py                            |  152 +
 mne/tests/test_viz.py                              |  307 ++
 mne/time_frequency/__init__.py                     |    8 +
 mne/time_frequency/ar.py                           |  152 +
 mne/time_frequency/multitaper.py                   |  529 +++
 mne/time_frequency/psd.py                          |   98 +
 mne/time_frequency/stft.py                         |  239 ++
 mne/time_frequency/tests/test_ar.py                |   44 +
 mne/time_frequency/tests/test_multitaper.py        |   47 +
 mne/time_frequency/tests/test_psd.py               |   40 +
 mne/time_frequency/tests/test_stft.py              |   40 +
 mne/time_frequency/tests/test_tfr.py               |   75 +
 mne/time_frequency/tfr.py                          |  407 ++
 mne/transforms/__init__.py                         |    2 +
 mne/transforms/coreg.py                            |   67 +
 mne/transforms/tests/test_coreg.py                 |   20 +
 mne/transforms/tests/test_transforms.py            |   31 +
 mne/transforms/transforms.py                       |  415 ++
 mne/utils.py                                       |  996 +++++
 mne/viz.py                                         | 2960 ++++++++++++++
 setup.cfg                                          |   25 +
 setup.py                                           |   76 +
 455 files changed, 86486 insertions(+)

diff --git a/.gitignore b/.gitignore
new file mode 100755
index 0000000..0c1303a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,47 @@
+*.pyc
+*.pyo
+*.so
+*.fif
+*.tar.gz
+*.log
+*.stc
+*~
+.#*
+*.swp
+*.lprof
+*.npy
+*.zip
+*.fif.gz
+*.nii.gz
+*.tar.*
+*.egg*
+*.tmproj
+*.png
+.DS_Store
+events.eve
+foo-lh.label
+foo.lout
+bar.lout
+foobar.lout
+epochs_data.mat
+memmap*.dat
+tmp-*.w
+tmtags
+auto_examples
+MNE-sample-data*
+build
+coverage
+
+dist/
+doc/_build/
+doc/build/
+doc/auto_examples/
+doc/modules/generated/
+doc/source/generated/
+pip-log.txt
+.coverage
+tags
+doc/coverages
+doc/samples
+
+*.orig
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..ef644f0
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,23 @@
+Alexandre Gramfort <alexandre.gramfort at inria.fr> Alexandre Gramfort <alexandre.gramfort at gmail.com>
+Alexandre Gramfort <alexandre.gramfort at inria.fr> Alexandre Gramfort <alexandre.gramfort at m4x.org>
+Alexandre Gramfort <alexandre.gramfort at inria.fr> Alexandre Gramfort <gramfort at localhost.(none)>
+Martin Luessi <mluessi at nmr.mgh.harvard.edu> mluessi at nmr.mgh.harvard.edu <mluessi at nmr.mgh.harvard.edu>
+Martin Luessi <mluessi at nmr.mgh.harvard.edu> martin <martin at think.hsd1.ma.comcast.net>
+Martin Luessi <mluessi at nmr.mgh.harvard.edu> martin <martin at think.(none)>
+Matti Hamalainen <msh at nmr.mgh.harvard.edu> Matti Hamalainen <msh at parsley.nmr.mgh.harvard.edu>
+Christian Brodbeck <christianmbrodbeck at gmail.com> christianmbrodbeck <christianmbrodbeck at gmail.com>
+Louis Thibault <louist87 at gmail.com> = <louist87 at gmail.com>
+Louis Thibault <louist87 at gmail.com> Louis Thibault <louist at ltpc.(none)>
+Eric Larson <larson.eric.d at gmail.com> Eric Larson <larson.eric.d at gmail.com>
+Eric Larson <larson.eric.d at gmail.com> Eric89GXL <larson.eric.d at gmail.com>
+Denis A. Engemann <denis.engemann at gmail.com> dengemann <denis.engemann at gmail.com>
+Denis A. Engemann <denis.engemann at gmail.com> denis <denis.engemann at gmail.com>
+Daniel Strohmeier <daniel.strohmeier at googlemail.com> joewalter <daniel.strohmeier at googlemail.com>
+Dan G. Wakeman <dgwakeman at gmail.com>
+Teon Brooks <teon.brooks at gmail.com>
+Romain Trachel <romain.trachel at inria.fr>
+Roman Goj <roman.goj at gmail.com>
+Andrew Dykstra <andrew.r.dykstra at gmail.com>
+Yousra BEKHTI <yousra.bekhti at gmail.com> Yoursa BEKHTI <ybekhti at is222485.intra.cea.fr>
+Yousra BEKHTI <yousra.bekhti at gmail.com> Yoursa BEKHTI <yousra.bekhti at gmail.com>
+Mainak Jas <mainakjas at gmail.com> Mainak <mainakjas at gmail.com>
diff --git a/AUTHORS.rst b/AUTHORS.rst
new file mode 100644
index 0000000..90a4e7e
--- /dev/null
+++ b/AUTHORS.rst
@@ -0,0 +1,19 @@
+.. -*- mode: rst -*-
+
+Authors
+=======
+
+  * Alexandre Gramfort 2011-2013
+  * Matti Hamalainen 2011-2013
+  * Emily Ruzich 2011
+  * Martin Luessi 2011-2013
+  * Christian Brodbeck 2012-2013
+  * Louis Thibault 2012
+  * Eric Larson 2012-2013
+  * Denis A. Engemann 2012-2013
+  * Daniel Strohmeier 2012
+  * Brad Buran 2013
+  * Simon Kornblith 2013
+  * Mainak Jas 2013
+  * Roman Goj 2013
+  * Teon Brooks 2013
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..a63dc6a
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,24 @@
+Copyright © 2011, authors of MNE-Python
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the names of MNE-Python authors nor the names of any
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..9593ea5
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,15 @@
+include *.rst
+include mne/__init__.py
+# recursive-include doc *
+recursive-include examples *.py
+recursive-include examples *.txt
+recursive-include mne *.py
+# recursive-include mne/fiff/tests/data *
+recursive-exclude mne/fiff/tests/data *
+recursive-exclude mne/fiff/bti/tests/data *
+recursive-include mne/data *.sel
+recursive-include mne/data *.fif.gz
+recursive-include mne/layouts *.lout
+recursive-include mne/layouts *.lay
+recursive-exclude examples/MNE-sample-data *
+recursive-exclude mne/fiff/kit/tests/data *
diff --git a/Makefile b/Makefile
new file mode 100755
index 0000000..48be1ec
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,67 @@
+# simple makefile to simplify repetitive build env management tasks under posix
+
+# caution: testing won't work on windows, see README
+
+PYTHON ?= python
+NOSETESTS ?= nosetests
+CTAGS ?= ctags
+
+all: clean inplace test test-doc
+
+clean-pyc:
+	find . -name "*.pyc" | xargs rm -f
+
+clean-so:
+	find . -name "*.so" | xargs rm -f
+	find . -name "*.pyd" | xargs rm -f
+
+clean-build:
+	rm -rf build
+
+clean-ctags:
+	rm -f tags
+
+clean: clean-build clean-pyc clean-so clean-ctags
+
+in: inplace # just a shortcut
+inplace:
+	$(PYTHON) setup.py build_ext -i
+
+sample_data: $(CURDIR)/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif
+	@echo "Target needs sample data"
+
+$(CURDIR)/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif:
+	wget ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE-sample-data-processed.tar.gz
+	tar xvzf MNE-sample-data-processed.tar.gz
+	mv MNE-sample-data examples/
+	ln -s ${PWD}/examples/MNE-sample-data ${PWD}/MNE-sample-data -f
+
+test: in sample_data
+	$(NOSETESTS) mne
+
+test-doc: sample_data
+	$(NOSETESTS) --with-doctest --doctest-tests --doctest-extension=rst doc/ doc/source/
+
+test-coverage: sample_data
+	rm -rf coverage .coverage
+	$(NOSETESTS) --with-coverage --cover-package=mne --cover-html --cover-html-dir=coverage
+
+test-profile: sample_data
+	$(NOSETESTS) --with-profile --profile-stats-file stats.pf mne
+	hotshot2dot stats.pf | dot -Tpng -o profile.png
+
+trailing-spaces:
+	find . -name "*.py" | xargs perl -pi -e 's/[ \t]*$$//'
+
+ctags:
+	# make tags for symbol based navigation in emacs and vim
+	# Install with: sudo apt-get install exuberant-ctags
+	$(CTAGS) -R *
+
+upload-pypi:
+	python setup.py sdist bdist_egg register upload
+
+codespell:
+	# The *.fif had to be there twice to be properly ignored (!)
+	codespell.py -w -i 3 -S="*.fif,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.coverage,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii" ./dictionary.txt -r .
+
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..9dad435
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,149 @@
+.. -*- mode: rst -*-
+
+`mne-python <http://martinos.org/mne/mne-python.html>`_
+=======================================================
+
+This package is designed for sensor- and source-space analysis of M-EEG
+data, including frequency-domain and time-frequency analyses and
+non-parametric statistics. The package is evolving quickly, and thanks
+to its open development environment, user contributions can be easily
+incorporated.
+
+Get more information
+^^^^^^^^^^^^^^^^^^^^
+
+This page only contains bare-bones instructions for installing mne-python.
+
+If you're familiar with MNE and you're looking for information on using
+mne-python specifically, jump right to the `mne-python homepage
+<http://martinos.org/mne/mne-python.html>`_. This website includes a
+`tutorial <http://martinos.org/mne/python_tutorial.html>`_,
+helpful `examples <http://martinos.org/mne/auto_examples/index.html>`_, and
+a handy `function reference <http://martinos.org/mne/python_reference.html>`_,
+among other things.
+
+If you're unfamiliar with MNE, you can visit the
+`MNE homepage <http://martinos.org/mne>`_ for full user documentation.
+
+Get the latest code
+^^^^^^^^^^^^^^^^^^^
+
+To get the latest code using git, simply type::
+
+    git clone git://github.com/mne-tools/mne-python.git
+
+If you don't have git installed, you can download a zip or tarball
+of the latest code: http://github.com/mne-tools/mne-python/archives/master
+
+Install mne-python
+^^^^^^^^^^^^^^^^^^
+
+As with any Python package, to install mne-python, go to the mne-python
+source code directory and run::
+
+    python setup.py install
+
+or, if you don't have admin access to your Python setup (permission denied
+when installing), use::
+
+    python setup.py install --user
+
+You can also install the latest release version with easy_install::
+
+    easy_install -U mne
+
+or with pip::
+
+    pip install mne --upgrade
+
+or, for the latest development version (the most up-to-date)::
+
+    pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev --user
+
+Dependencies
+^^^^^^^^^^^^
+
+The required dependencies to build the software are python >= 2.6,
+NumPy >= 1.4, SciPy >= 0.7.2 and matplotlib >= 0.98.4.
+
+Some isolated functions require pandas >= 0.7.3 and nitime (multitaper analysis).
+
+To run the tests you will also need nose >= 0.10 and the MNE sample
+dataset (it will be downloaded automatically when you run an example
+... but be patient).
+
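+A quick way to check which versions are installed (a plain Python
+one-liner, nothing mne-specific)::
+
+    python -c "import numpy, scipy, matplotlib; print numpy.__version__, scipy.__version__, matplotlib.__version__"
+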
+To use NVIDIA CUDA for FFT FIR filtering, you will also need to install
+the NVIDIA CUDA SDK, pycuda, and scikits.cuda. The difficulty of this varies
+by platform; consider reading the following site for help getting pycuda
+to work (typically the most difficult to configure):
+
+http://wiki.tiker.net/PyCuda/Installation/
+
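+As a minimal sketch of CUDA-accelerated filtering (assuming a working
+pycuda/scikits.cuda setup; passing ``n_jobs='cuda'`` is the convention
+the filtering routines use to select the CUDA backend)::
+
+    import mne
+    # preload is needed so the data are in memory for filtering
+    raw = mne.fiff.Raw('sample_audvis_raw.fif', preload=True)
+    raw.filter(l_freq=1.0, h_freq=40.0, n_jobs='cuda')
+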
+Contribute to mne-python
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Please see the documentation on the mne-python homepage:
+
+http://martinos.org/mne/contributing.html
+
+Mailing list
+^^^^^^^^^^^^
+
+http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis
+
+Running the test suite
+^^^^^^^^^^^^^^^^^^^^^^
+
+To run the test suite, you need the nose and coverage modules.
+Run the test suite using::
+
+    nosetests
+
+from the root of the project.
+
+Making a release and uploading it to PyPI
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This command is run only by the project manager to make a release and
+upload it to PyPI::
+
+    python setup.py sdist bdist_egg register upload
+
+
+Licensing
+^^^^^^^^^
+
+MNE-Python is **BSD-licensed** (3-clause):
+
+    This software is OSI Certified Open Source Software.
+    OSI Certified is a certification mark of the Open Source Initiative.
+
+    Copyright (c) 2011, authors of MNE-Python
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the names of MNE-Python authors nor the names of any
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+    **This software is provided by the copyright holders and contributors
+    "as is" and any express or implied warranties, including, but not
+    limited to, the implied warranties of merchantability and fitness for
+    a particular purpose are disclaimed. In no event shall the copyright
+    owner or contributors be liable for any direct, indirect, incidental,
+    special, exemplary, or consequential damages (including, but not
+    limited to, procurement of substitute goods or services; loss of use,
+    data, or profits; or business interruption) however caused and on any
+    theory of liability, whether in contract, strict liability, or tort
+    (including negligence or otherwise) arising in any way out of the use
+    of this software, even if advised of the possibility of such
+    damage.**
diff --git a/bin/mne_browse_raw.py b/bin/mne_browse_raw.py
new file mode 100755
index 0000000..be19708
--- /dev/null
+++ b/bin/mne_browse_raw.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+"""Browse raw data
+
+For example, you can run:
+
+$mne_browse_raw.py --raw sample_audvis_raw.fif --proj sample_audvis_ecg_proj.fif --eve sample_audvis_raw-eve.fif
+"""
+
+# Authors : Eric Larson, PhD
+
+import sys
+import mne
+
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+    import pylab as pl
+
+    parser = OptionParser()
+    parser.add_option("--raw", dest="raw_in",
+                      help="Input raw FIF file", metavar="FILE")
+    parser.add_option("--proj", dest="proj_in",
+                      help="Projector file", metavar="FILE",
+                      default='')
+    parser.add_option("--eve", dest="eve_in",
+                      help="Events file", metavar="FILE",
+                      default='')
+    parser.add_option("-d", "--duration", dest="duration", type="float",
+                      help="Time window for plotting (sec)",
+                      default=10.0)
+    parser.add_option("-t", "--start", dest="start", type="float",
+                      help="Initial start time for plotting",
+                      default=0.0)
+    parser.add_option("-n", "--n_channels", dest="n_channels", type="int",
+                      help="Number of channels to plot at a time",
+                      default=20)
+    parser.add_option("-o", "--order", dest="order",
+                      help="Order for plotting ('type' or 'original')",
+                      default='type')
+    parser.add_option("-p", "--preload", dest="preload",
+                    help="Preload raw data (for faster navigaton)",
+                    default=False)
+    parser.add_option("-s", "--show_options", dest="show_options",
+                    help="Show projection options dialog",
+                    default=False)
+    options, args = parser.parse_args()
+
+    raw_in = options.raw_in
+    duration = options.duration
+    start = options.start
+    n_channels = options.n_channels
+    order = options.order
+    preload = options.preload
+    show_options = options.show_options
+    proj_in = options.proj_in
+    eve_in = options.eve_in
+
+    if raw_in is None:
+        parser.print_help()
+        sys.exit(-1)
+
+    raw = mne.fiff.Raw(raw_in, preload=preload)
+    if len(proj_in) > 0:
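+        # note: the assignment below replaces any projectors
+        # already stored in raw.info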
+        projs = mne.read_proj(proj_in)
+        raw.info['projs'] = projs
+    if len(eve_in) > 0:
+        events = mne.read_events(eve_in)
+    else:
+        events = None
+    fig = raw.plot(duration=duration, start=start, n_channels=n_channels,
+                   order=order, show_options=show_options, events=events)
+    pl.show(block=True)
diff --git a/bin/mne_bti2fiff.py b/bin/mne_bti2fiff.py
new file mode 100755
index 0000000..25bb625
--- /dev/null
+++ b/bin/mne_bti2fiff.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Yuval Harpaz <yuvharpaz at gmail.com>
+#
+#          simplified bsd-3 license
+
+"""
+
+Import BTi / 4D MagnesWH3600 data to fif file.
+
+example usage: mne_bti2fiff.py --pdf C,rfDC -o my_raw.fif
+
+Notes:
+1) Direct inclusion of reference channel weights is currently not
+supported. Please use 'mne_create_comp_data' to include the weights, or
+use the low-level functions from this module to include them yourself.
+2) The informed guess for the 4D names is E31 for the ECG channel and
+E63, E64 for the EOG channels. Please check and adjust if those channels
+are present in your dataset but 'ECG 01', 'EOG 01', and 'EOG 02' don't
+appear in the channel names of the raw object.
+"""
+
+from mne.fiff.bti import read_raw_bti
+# from mne import verbose
+import sys
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    parser = OptionParser()
+    parser.add_option('-p', '--pdf', dest='pdf_fname',
+                    help='Input data file name', metavar='FILE')
+    parser.add_option('-c', '--config', dest='config_fname',
+                    help='Input config file name', metavar='FILE', default='config')
+    parser.add_option('--head_shape', dest='head_shape_fname',
+                    help='Headshape file name', metavar='FILE',
+                    default='hs_file')
+    parser.add_option('-o', '--out_fname', dest='out_fname',
+                      help='Name of the resulting fiff file',
+                      default='as_data_fname')
+    parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float',
+                    help='Compensatory rotation about Neuromag x axis, deg',
+                    default=2.0)
+    parser.add_option('-T', '--translation', dest='translation', type='str',
+                    help='Default translation, meter',
+                    default=(0.00, 0.02, 0.11))
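+    # note: the default above is a tuple, but a value given on the command
+    # line arrives as a single string (optparse type='str' does no parsing)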
+    parser.add_option('--ecg_ch', dest='ecg_ch', type='str',
+                    help='4D ECG channel name',
+                    default='E31')
+    parser.add_option('--eog_ch', dest='eog_ch', type='str',
+                    help='4D EOG channel names',
+                    default='E63,E64')
+
+    options, args = parser.parse_args()
+
+    pdf_fname = options.pdf_fname
+    if pdf_fname is None:
+        parser.print_help()
+        sys.exit(-1)
+
+    config_fname = options.config_fname
+    head_shape_fname = options.head_shape_fname
+    out_fname = options.out_fname
+    rotation_x = options.rotation_x
+    translation = options.translation
+    ecg_ch = options.ecg_ch
+    eog_ch = options.eog_ch.split(',')
+
+    if out_fname == 'as_data_fname':
+        out_fname = pdf_fname + '_raw.fif'
+
+    raw = read_raw_bti(pdf_fname=pdf_fname, config_fname=config_fname,
+                       head_shape_fname=head_shape_fname,
+                       rotation_x=rotation_x, translation=translation,
+                       ecg_ch=ecg_ch, eog_ch=eog_ch)
+
+    raw.save(out_fname)
+    raw.close()
+    sys.exit(0)
diff --git a/bin/mne_clean_eog_ecg.py b/bin/mne_clean_eog_ecg.py
new file mode 100755
index 0000000..624cc56
--- /dev/null
+++ b/bin/mne_clean_eog_ecg.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+"""Clean a raw file from EOG and ECG artifacts with PCA (ie SSP)
+"""
+
+# Authors : Dr Engr. Sheraz Khan,  P.Eng, Ph.D.
+#           Engr. Nandita Shetty,  MS.
+#           Alexandre Gramfort, Ph.D.
+
+
+import os
+import mne
+
+
+def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
+                  ecg_proj_fname=None, eog_proj_fname=None,
+                  ecg_event_fname=None, eog_event_fname=None, in_path='.'):
+    """Clean ECG from raw fif file
+
+    Parameters
+    ----------
+    in_fif_fname : string
+        Raw fif file to clean.
+    eog_event_fname : string
+        Name of the EOG event file to write.
+    eog : bool
+        Whether to reject EOG artifacts.
+    ecg : bool
+        Whether to reject ECG artifacts.
+    ecg_event_fname : string
+        Name of the ECG event file to write.
+    in_path : string
+        Path where all the files are.
+    """
+    if not eog and not ecg:
+        raise Exception("EOG and ECG cannot be both disabled")
+
+    # Reading fif File
+    raw_in = mne.fiff.Raw(in_fif_fname)
+
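+    # Build the output file name prefix: strip a trailing '_raw.fif' or
+    # '-raw.fif' (8 characters), otherwise just the '.fif' extension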
+    if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'):
+        prefix = in_fif_fname[:-8]
+    else:
+        prefix = in_fif_fname[:-4]
+
+    if out_fif_fname is None:
+        out_fif_fname = prefix + '_clean_ecg_eog_raw.fif'
+    if ecg_proj_fname is None:
+        ecg_proj_fname = prefix + '_ecg_proj.fif'
+    if eog_proj_fname is None:
+        eog_proj_fname = prefix + '_eog_proj.fif'
+    if ecg_event_fname is None:
+        ecg_event_fname = prefix + '_ecg-eve.fif'
+    if eog_event_fname is None:
+        eog_event_fname = prefix + '_eog-eve.fif'
+
+    print 'Implementing ECG and EOG artifact rejection on data'
+
+    if ecg:
+        ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw_in)
+        print "Writing ECG events in %s" % ecg_event_fname
+        mne.write_events(ecg_event_fname, ecg_events)
+
+        print 'Computing ECG projector'
+
+        command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
+                   '--projtmin -0.08 --projtmax 0.08 --saveprojtag _ecg_proj '
+                   '--projnmag 2 --projngrad 1 --projevent 999 --highpass 5 '
+                   '--lowpass 35 --projmagrej 4000  --projgradrej 3000'
+                   % (in_path, in_fif_fname, ecg_event_fname))
+        st = os.system(command)
+
+        if st != 0:
+            print "Error while running : %s" % command
+
+    if eog:
+        eog_events = mne.preprocessing.find_eog_events(raw_in)
+        print "Writing EOG events in %s" % eog_event_fname
+        mne.write_events(eog_event_fname, eog_events)
+
+        print 'Computing EOG projector'
+
+        command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
+                   '--projtmin -0.15 --projtmax 0.15 --saveprojtag _eog_proj '
+                   '--projnmag 2 --projngrad 2 --projevent 998 --lowpass 35 '
+                   '--projmagrej 4000  --projgradrej 3000' % (in_path,
+                   in_fif_fname, eog_event_fname))
+
+        print 'Running: %s' % command
+
+        st = os.system(command)
+        if st != 0:
+            raise ValueError('Problem while running : %s' % command)
+
+    if out_fif_fname is not None:
+        # Applying the ECG EOG projector
+        print 'Applying ECG EOG projector'
+
+        command = ('mne_process_raw --cd %s --raw %s '
+                   '--proj %s --projoff --save %s --filteroff'
+                   % (in_path, in_fif_fname, in_fif_fname, out_fif_fname))
+        command += ' --proj %s --proj %s' % (ecg_proj_fname, eog_proj_fname)
+
+        print 'Command executed: %s' % command
+
+        st = os.system(command)
+
+        if st != 0:
+            raise ValueError('Problem while running: %s' % command)
+
+        print 'Done removing artifacts.'
+        print "Cleaned raw data saved in: %s" % out_fif_fname
+        print 'IMPORTANT: Please eyeball the data!'
+    else:
+        print 'Projection not applied to raw data.'
+
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    parser = OptionParser()
+    parser.add_option("-i", "--in", dest="raw_in",
+                    help="Input raw FIF file", metavar="FILE")
+    parser.add_option("-o", "--out", dest="raw_out",
+                    help="Output raw FIF file", metavar="FILE",
+                    default=None)
+    parser.add_option("-e", "--no-eog", dest="eog", action="store_false",
+                    help="Remove EOG", default=True)
+    parser.add_option("-c", "--no-ecg", dest="ecg", action="store_false",
+                    help="Remove ECG", default=True)
+
+    (options, args) = parser.parse_args()
+
+    raw_in = options.raw_in
+    raw_out = options.raw_out
+    eog = options.eog
+    ecg = options.ecg
+
+    clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg)
diff --git a/bin/mne_compute_proj_ecg.py b/bin/mne_compute_proj_ecg.py
new file mode 100755
index 0000000..10cc1a8
--- /dev/null
+++ b/bin/mne_compute_proj_ecg.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+"""Compute SSP/PCA projections for ECG artifacts
+
+For example, you can run:
+
+$mne_compute_proj_ecg.py -i sample_audvis_raw.fif -c "MEG 1531" --l-freq 1 --h-freq 100 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
+"""
+
+# Authors : Alexandre Gramfort, Ph.D.
+#           Martin Luessi, Ph.D.
+
+import os
+import sys
+import mne
+
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    parser = OptionParser()
+    parser.add_option("-i", "--in", dest="raw_in",
+                    help="Input raw FIF file", metavar="FILE")
+    parser.add_option("--tmin", dest="tmin", type="float",
+                    help="Time before event in seconds",
+                    default=-0.2)
+    parser.add_option("--tmax", dest="tmax", type="float",
+                    help="Time after event in seconds",
+                    default=0.4)
+    parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
+                    help="Number of SSP vectors for gradiometers",
+                    default=2)
+    parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
+                    help="Number of SSP vectors for magnetometers",
+                    default=2)
+    parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
+                    help="Number of SSP vectors for EEG",
+                    default=2)
+    parser.add_option("--l-freq", dest="l_freq", type="float",
+                    help="Filter low cut-off frequency in Hz",
+                    default=1)
+    parser.add_option("--h-freq", dest="h_freq", type="float",
+                    help="Filter high cut-off frequency in Hz",
+                    default=100)
+    parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float",
+                    help="Filter low cut-off frequency in Hz used for ECG event detection",
+                    default=5)
+    parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float",
+                    help="Filter high cut-off frequency in Hz used for ECG event detection",
+                    default=35)
+    parser.add_option("-p", "--preload", dest="preload",
+                    help="Temporary file used during computation (to save memory)",
+                    default=True)
+    parser.add_option("-a", "--average", dest="average", action="store_true",
+                    help="Compute SSP after averaging",
+                    default=False)
+    parser.add_option("--proj", dest="proj",
+                    help="Use SSP projections from a fif file.",
+                    default=None)
+    parser.add_option("--filtersize", dest="filter_length", type="int",
+                    help="Number of taps to use for filtering",
+                    default=2048)
+    parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
+                    help="Number of jobs to run in parallel",
+                    default=1)
+    parser.add_option("-c", "--channel", dest="ch_name",
+                    help="Channel to use for ECG detection (Required if no ECG found)",
+                    default=None)
+    parser.add_option("--rej-grad", dest="rej_grad", type="float",
+                    help="Gradiometers rejection parameter in fT/cm (peak to peak amplitude)",
+                    default=2000)
+    parser.add_option("--rej-mag", dest="rej_mag", type="float",
+                    help="Magnetometers rejection parameter in fT (peak to peak amplitude)",
+                    default=3000)
+    parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
+                    help="EEG rejection parameter in uV (peak to peak amplitude)",
+                    default=50)
+    parser.add_option("--rej-eog", dest="rej_eog", type="float",
+                    help="EOG rejection parameter in uV (peak to peak amplitude)",
+                    default=250)
+    parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
+                    help="Add EEG average reference proj",
+                    default=False)
+    parser.add_option("--no-proj", dest="no_proj", action="store_true",
+                    help="Exclude the SSP projectors currently in the fiff file",
+                    default=False)
+    parser.add_option("--bad", dest="bad_fname",
+                    help="Text file containing bad channels list (one per line)",
+                    default=None)
+    parser.add_option("--event-id", dest="event_id", type="int",
+                    help="ID to use for events", default=999)
+    parser.add_option("--event-raw", dest="raw_event_fname",
+                    help="raw file to use for event detection", default=None)
+    parser.add_option("--tstart", dest="tstart", type="float",
+                    help="Start artifact detection after tstart seconds", default=0.)
+    parser.add_option("--qrsthr", dest="qrs_threshold", type="float",
+                    help="QRS detection threshold. Between 0 and 1.", default=0.6)
+
+    options, args = parser.parse_args()
+
+    raw_in = options.raw_in
+
+    if raw_in is None:
+        parser.print_help()
+        sys.exit(-1)
+
+    tmin = options.tmin
+    tmax = options.tmax
+    n_grad = options.n_grad
+    n_mag = options.n_mag
+    n_eeg = options.n_eeg
+    l_freq = options.l_freq
+    h_freq = options.h_freq
+    ecg_l_freq = options.ecg_l_freq
+    ecg_h_freq = options.ecg_h_freq
+    average = options.average
+    preload = options.preload
+    filter_length = options.filter_length
+    n_jobs = options.n_jobs
+    ch_name = options.ch_name
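+    # Rejection thresholds arrive in fT/cm, fT, and uV; convert to SI units (T/m, T, V)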
+    reject = dict(grad=1e-13 * float(options.rej_grad),
+                  mag=1e-15 * float(options.rej_mag),
+                  eeg=1e-6 * float(options.rej_eeg),
+                  eog=1e-6 * float(options.rej_eog))
+    avg_ref = options.avg_ref
+    no_proj = options.no_proj
+    bad_fname = options.bad_fname
+    event_id = options.event_id
+    proj_fname = options.proj
+    raw_event_fname = options.raw_event_fname
+    tstart = options.tstart
+    qrs_threshold = options.qrs_threshold
+
+    if bad_fname is not None:
+        bads = [w.rstrip().split()[0] for w in open(bad_fname).readlines()]
+        print 'Bad channels read: %s' % bads
+    else:
+        bads = []
+
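+    # Build the output filename prefix by stripping a trailing '_raw.fif' / '-raw.fif'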
+    if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
+        prefix = raw_in[:-8]
+    else:
+        prefix = raw_in[:-4]
+
+    ecg_event_fname = prefix + '_ecg-eve.fif'
+
+    if average:
+        ecg_proj_fname = prefix + '_ecg_avg_proj.fif'
+    else:
+        ecg_proj_fname = prefix + '_ecg_proj.fif'
+
+    raw = mne.fiff.Raw(raw_in, preload=preload)
+
+    if raw_event_fname is not None:
+        raw_event = mne.fiff.Raw(raw_event_fname)
+    else:
+        raw_event = raw
+
+    flat = None  # XXX : not exposed to the user
+    projs, events = mne.preprocessing.compute_proj_ecg(raw, raw_event,
+                            tmin, tmax, n_grad, n_mag, n_eeg,
+                            l_freq, h_freq, average, filter_length,
+                            n_jobs, ch_name, reject, flat,
+                            bads, avg_ref, no_proj, event_id,
+                            ecg_l_freq, ecg_h_freq, tstart, qrs_threshold)
+
+    raw.close()
+
+    if raw_event_fname is not None:
+        raw_event.close()
+
+    if proj_fname is not None:
+        print 'Including SSP projections from: %s' % proj_fname
+        # append the ecg projs, so they are last in the list
+        projs = mne.read_proj(proj_fname) + projs
+
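+    # When --preload was given as a filename, it is a temporary file used to save memory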
+    if isinstance(preload, basestring) and os.path.exists(preload):
+        os.remove(preload)
+
+    print "Writing ECG projections in %s" % ecg_proj_fname
+    mne.write_proj(ecg_proj_fname, projs)
+
+    print "Writing ECG events in %s" % ecg_event_fname
+    mne.write_events(ecg_event_fname, events)
diff --git a/bin/mne_compute_proj_eog.py b/bin/mne_compute_proj_eog.py
new file mode 100755
index 0000000..2a67081
--- /dev/null
+++ b/bin/mne_compute_proj_eog.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+"""Compute SSP/PCA projections for EOG artifacts
+
+For example, you can run:
+
+$mne_compute_proj_eog.py -i sample_audvis_raw.fif --l-freq 1 --h-freq 35 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
+
+or
+
+$mne_compute_proj_eog.py -i sample_audvis_raw.fif --l-freq 1 --h-freq 35 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 --proj sample_audvis_ecg_proj.fif
+
+to exclude ECG artifacts from projection computation.
+"""
+
+# Authors: Alexandre Gramfort, Ph.D.
+#          Martin Luessi, Ph.D.
+
+import os
+import sys
+import mne
+
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    parser = OptionParser()
+    parser.add_option("-i", "--in", dest="raw_in",
+                      help="Input raw FIF file", metavar="FILE")
+    parser.add_option("--tmin", dest="tmin", type="float",
+                      help="Time before event in seconds",
+                      default=-0.2)
+    parser.add_option("--tmax", dest="tmax", type="float",
+                      help="Time after event in seconds",
+                      default=0.2)
+    parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
+                      help="Number of SSP vectors for gradiometers",
+                      default=2)
+    parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
+                      help="Number of SSP vectors for magnetometers",
+                      default=2)
+    parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
+                      help="Number of SSP vectors for EEG",
+                      default=2)
+    parser.add_option("--l-freq", dest="l_freq", type="float",
+                      help="Filter low cut-off frequency in Hz",
+                      default=1)
+    parser.add_option("--h-freq", dest="h_freq", type="float",
+                      help="Filter high cut-off frequency in Hz",
+                      default=35)
+    parser.add_option("--eog-l-freq", dest="eog_l_freq", type="float",
+                      help="Filter low cut-off frequency in Hz used for EOG event detection",
+                      default=1)
+    parser.add_option("--eog-h-freq", dest="eog_h_freq", type="float",
+                      help="Filter high cut-off frequency in Hz used for EOG event detection",
+                      default=10)
+    parser.add_option("-p", "--preload", dest="preload",
+                      help="Temporary file used during computation (to save memory)",
+                      default=True)
+    parser.add_option("-a", "--average", dest="average", action="store_true",
+                      help="Compute SSP after averaging",
+                      default=False)
+    parser.add_option("--proj", dest="proj",
+                      help="Use SSP projections from a fif file.",
+                      default=None)
+    parser.add_option("--filtersize", dest="filter_length", type="int",
+                      help="Number of taps to use for filtering",
+                      default=2048)
+    parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
+                      help="Number of jobs to run in parallel",
+                      default=1)
+    parser.add_option("--rej-grad", dest="rej_grad", type="float",
+                      help="Gradiometers rejection parameter in fT/cm (peak to peak amplitude)",
+                      default=2000)
+    parser.add_option("--rej-mag", dest="rej_mag", type="float",
+                      help="Magnetometers rejection parameter in fT (peak to peak amplitude)",
+                      default=3000)
+    parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
+                      help="EEG rejection parameter in uV (peak to peak amplitude)",
+                      default=50)
+    parser.add_option("--rej-eog", dest="rej_eog", type="float",
+                      help="EOG rejection parameter in uV (peak to peak amplitude)",
+                      default=1e9)
+    parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
+                      help="Add EEG average reference proj",
+                      default=False)
+    parser.add_option("--no-proj", dest="no_proj", action="store_true",
+                      help="Exclude the SSP projectors currently in the fiff file",
+                      default=False)
+    parser.add_option("--bad", dest="bad_fname",
+                      help="Text file containing bad channels list (one per line)",
+                      default=None)
+    parser.add_option("--event-id", dest="event_id", type="int",
+                      help="ID to use for events", default=998)
+    parser.add_option("--event-raw", dest="raw_event_fname",
+                      help="raw file to use for event detection", default=None)
+    parser.add_option("--tstart", dest="tstart", type="float",
+                      help="Start artifact detection after tstart seconds", default=0.)
+    parser.add_option("-c","--channel", dest="ch_name", type="string",
+                      help="Custom EOG channel(s), comma separated",
+                      default=None)
+
+    options, args = parser.parse_args()
+
+    raw_in = options.raw_in
+
+    if raw_in is None:
+        parser.print_help()
+        sys.exit(-1)
+
+    tmin = options.tmin
+    tmax = options.tmax
+    n_grad = options.n_grad
+    n_mag = options.n_mag
+    n_eeg = options.n_eeg
+    l_freq = options.l_freq
+    h_freq = options.h_freq
+    eog_l_freq = options.eog_l_freq
+    eog_h_freq = options.eog_h_freq
+    average = options.average
+    preload = options.preload
+    filter_length = options.filter_length
+    n_jobs = options.n_jobs
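+    # As in the ECG script, convert the rejection thresholds to SI units (T/m, T, V)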
+    reject = dict(grad=1e-13 * float(options.rej_grad),
+                  mag=1e-15 * float(options.rej_mag),
+                  eeg=1e-6 * float(options.rej_eeg),
+                  eog=1e-6 * float(options.rej_eog))
+    avg_ref = options.avg_ref
+    no_proj = options.no_proj
+    bad_fname = options.bad_fname
+    event_id = options.event_id
+    proj_fname = options.proj
+    raw_event_fname = options.raw_event_fname
+    tstart = options.tstart
+    ch_name = options.ch_name
+
+    if bad_fname is not None:
+        bads = [w.rstrip().split()[0] for w in open(bad_fname).readlines()]
+        print 'Bad channels read: %s' % bads
+    else:
+        bads = []
+
+    if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
+        prefix = raw_in[:-8]
+    else:
+        prefix = raw_in[:-4]
+
+    eog_event_fname = prefix + '_eog-eve.fif'
+
+    if average:
+        eog_proj_fname = prefix + '_eog_avg_proj.fif'
+    else:
+        eog_proj_fname = prefix + '_eog_proj.fif'
+
+    raw = mne.fiff.Raw(raw_in, preload=preload)
+
+    if raw_event_fname is not None:
+        raw_event = mne.fiff.Raw(raw_event_fname)
+    else:
+        raw_event = raw
+
+    flat = None  # XXX : not exposed to the user
+    projs, events = mne.preprocessing.compute_proj_eog(raw=raw,
+                    raw_event=raw_event, tmin=tmin, tmax=tmax, n_grad=n_grad,
+                    n_mag=n_mag, n_eeg=n_eeg, l_freq=l_freq, h_freq=h_freq,
+                    average=average, filter_length=filter_length,
+                    n_jobs=n_jobs, reject=reject, flat=flat, bads=bads,
+                    avg_ref=avg_ref, no_proj=no_proj, event_id=event_id,
+                    eog_l_freq=eog_l_freq, eog_h_freq=eog_h_freq, 
+                    tstart=tstart, ch_name=ch_name)
+
+    raw.close()
+
+    if raw_event_fname is not None:
+        raw_event.close()
+
+    if proj_fname is not None:
+        print 'Including SSP projections from: %s' % proj_fname
+        # append the eog projs, so they are last in the list
+        projs = mne.read_proj(proj_fname) + projs
+
+    if isinstance(preload, basestring) and os.path.exists(preload):
+        os.remove(preload)
+
+    print "Writing EOG projections in %s" % eog_proj_fname
+    mne.write_proj(eog_proj_fname, projs)
+
+    print "Writing EOG events in %s" % eog_event_fname
+    mne.write_events(eog_event_fname, events)
\ No newline at end of file
diff --git a/bin/mne_flash_bem_model.py b/bin/mne_flash_bem_model.py
new file mode 100755
index 0000000..b8584be
--- /dev/null
+++ b/bin/mne_flash_bem_model.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+"""Create 3-Layers BEM model from Flash MRI images
+
+This function extracts the BEM surfaces (outer skull, inner skull, and
+outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30
+degrees. The multiecho FLASH data are inputted in NIFTI format.
+It was developed to work for Phillips MRI data, but could probably be
+used for data from other scanners that have been converted to NIFTI format
+(e.g., using MRIcron's dcm2nii). However,it has been tested only for
+data from the Achieva scanner). This function assumes that the Freesurfer
+segmentation of the subject has been completed. In particular, the T1.mgz
+and brain.mgz MRI volumes should be, as usual, in the subject's mri
+directory.
+
+"""
+
+# Authors:  Rey Rene Ramirez, Ph.D.   e-mail: rrramir at uw.edu
+#           Alexandre Gramfort, Ph.D.
+
+
+import math
+import os
+import mne
+
+
+def make_flash_bem(subject, subjects_dir, flash05, flash30, show=False):
+    """Create 3-Layers BEM model from Flash MRI images
+
+    Parameters
+    ----------
+    subject : string
+        Subject name
+    subjects_dir : string
+        Directory containing subjects data (Freesurfer SUBJECTS_DIR)
+    flash05 : string
+        Full path of the NIFTI file for the
+        FLASH sequence with a flip angle of 5 degrees
+    flash30 : string
+        Full path of the NIFTI file for the
+        FLASH sequence with a flip angle of 30 degrees
+    show : bool
+        Show surfaces in 3D to visually inspect all three BEM
+        surfaces (recommended)
+
+    Notes
+    -----
+    This program assumes that Freesurfer, FSL, and MNE, including
+    MNE's Matlab toolbox, are properly installed.
+    For reference please read the MNE manual and wiki, and Freesurfer's wiki:
+    http://www.nmr.mgh.harvard.edu/meg/manuals/
+    http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/sofMNE.php
+    http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/index.php
+    http://surfer.nmr.mgh.harvard.edu/
+    http://surfer.nmr.mgh.harvard.edu/fswiki
+
+    References:
+    B. Fischl, D. H. Salat, A. J. van der Kouwe, N. Makris, F. Segonne,
+    B. T. Quinn, and A. M. Dale, "Sequence-independent segmentation of magnetic
+    resonance images," Neuroimage, vol. 23 Suppl 1, pp. S69-84, 2004.
+    J. Jovicich, S. Czanner, D. Greve, E. Haley, A. van der Kouwe, R. Gollub,
+    D. Kennedy, F. Schmitt, G. Brown, J. Macfall, B. Fischl, and A. Dale,
+    "Reliability in multi-site structural MRI studies: effects of gradient
+    non-linearity correction on phantom and human data," Neuroimage,
+    vol. 30, pp. 436-43, 2006.
+    """
+    os.environ['SUBJECT'] = subject
+    os.chdir(os.path.join(subjects_dir, subject, "mri"))
+    if not os.path.exists('flash'):
+        os.mkdir("flash")
+    os.chdir("flash")
+    # flash_dir = os.getcwd()
+    if not os.path.exists('parameter_maps'):
+        os.mkdir("parameter_maps")
+    print "--- Converting Flash 5"
+    os.system('mri_convert -flip_angle %s -tr 25 %s mef05.mgz' %
+                                            (5 * math.pi / 180, flash05))
+    print "--- Converting Flash 30"
+    os.system('mri_convert -flip_angle %s -tr 25 %s mef30.mgz' %
+                                            (30 * math.pi / 180, flash30))
+    print "--- Running mne_flash_bem"
+    os.system('mne_flash_bem --noconvert')
+    os.chdir(os.path.join(subjects_dir, subject, 'bem'))
+    if not os.path.exists('flash'):
+        os.mkdir("flash")
+    os.chdir("flash")
+    print "[done]"
+
+    if show:
+        fnames = ['outer_skin.surf', 'outer_skull.surf', 'inner_skull.surf']
+        head_col = (0.95, 0.83, 0.83)  # light pink
+        skull_col = (0.91, 0.89, 0.67)
+        brain_col = (0.67, 0.89, 0.91)  # light blue
+        colors = [head_col, skull_col, brain_col]
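+        # Render the three surfaces as translucent meshes (opacity 0.3) so overlaps stay visible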
+        from enthought.mayavi import mlab
+        mlab.clf()
+        for fname, c in zip(fnames, colors):
+            points, faces = mne.read_surface(fname)
+            mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces,
+                                 color=c, opacity=0.3)
+        mlab.show()
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    subject = os.environ.get('SUBJECT')
+    subjects_dir = os.environ.get('SUBJECTS_DIR')
+
+    parser = OptionParser()
+    parser.add_option("-s", "--subject", dest="subject",
+                    help="Subject name", default=subject)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                    help="Subjects directory", default=subjects_dir)
+    parser.add_option("-5", "--flash05", dest="flash05",
+                    help=("Path to FLASH sequence with a spin angle of 5 "
+                          "degrees in Nifti format"), metavar="FILE")
+    parser.add_option("-3", "--flash30", dest="flash30",
+                    help=("Path to FLASH sequence with a spin angle of 30 "
+                          "degrees in Nifti format"), metavar="FILE")
+    parser.add_option("-v", "--view", dest="show", action="store_true",
+                      help="Show BEM model in 3D for visual inspection",
+                      default=False)
+
+    (options, args) = parser.parse_args()
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    flash05 = os.path.abspath(options.flash05)
+    flash30 = os.path.abspath(options.flash30)
+    show = options.show
+
+    make_flash_bem(subject, subjects_dir, flash05, flash30, show=show)
diff --git a/bin/mne_kit2fiff.py b/bin/mne_kit2fiff.py
new file mode 100755
index 0000000..eb64dcc
--- /dev/null
+++ b/bin/mne_kit2fiff.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# Authors: Teon Brooks  <teon at nyu.edu>
+
+""" Import KIT / NYU data to fif file.
+
+example usage: mne_kit2fiff.py --input input.sqd --output output.fif
+
+"""
+
+import sys
+from mne.fiff.kit import read_raw_kit
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    parser = OptionParser()
+    parser.add_option('--input', dest='input_fname',
+                    help='Input data file name', metavar='filename')
+    parser.add_option('--mrk', dest='mrk_fname',
+                    help='MEG Marker file name', metavar='filename')
+    parser.add_option('--elp', dest='elp_fname',
+                    help='Digitized fiducial and HPI points file name',
+                    metavar='filename')
+    parser.add_option('--hsp', dest='hsp_fname',
+                    help='Headshape file name', metavar='filename')
+    parser.add_option('--sns', dest='sns_fname',
+                    help='Sensor info file name', metavar='filename')
+    parser.add_option('--stim', dest='stim',
+                      help='Colon-separated stimulus trigger channels',
+                      metavar='chs')
+    parser.add_option('--stimthresh', dest='stimthresh', type='float',
+                      default=3.5, help='Threshold value for trigger channels',
+                      metavar='value')
+    parser.add_option('--output', dest='out_fname',
+                      help='Name of the resulting fiff file',
+                      metavar='filename')
+
+    options, args = parser.parse_args()
+
+    input_fname = options.input_fname
+    if input_fname is None:
+        parser.print_help()
+        sys.exit(-1)
+
+    sns_fname = options.sns_fname
+    hsp_fname = options.hsp_fname
+    elp_fname = options.elp_fname
+    mrk_fname = options.mrk_fname
+    stim = options.stim
+    stimthresh = options.stimthresh
+    out_fname = options.out_fname
+
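+    # --stim accepts colon-separated channel names; split them into a list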
+    if isinstance(stim, str):
+        stim = stim.split(':')
+
+    raw = read_raw_kit(input_fname=input_fname, mrk_fname=mrk_fname,
+                       elp_fname=elp_fname, hsp_fname=hsp_fname,
+                       sns_fname=sns_fname, stim=stim, stimthresh=stimthresh)
+
+    raw.save(out_fname)
+    raw.close()
+    sys.exit(0)
diff --git a/bin/mne_maxfilter.py b/bin/mne_maxfilter.py
new file mode 100755
index 0000000..ad89a07
--- /dev/null
+++ b/bin/mne_maxfilter.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+""" Apply MaxFilter
+
+Example usage:
+
+$mne_maxfilter.py -i sample_audvis_raw.fif --st
+
+This will apply MaxFilter with the MaxSt extension. The origin used
+by MaxFilter is computed by mne-python by fitting a sphere to the
+headshape points.
+"""
+
+# Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+
+import sys
+import os
+import mne
+
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    parser = OptionParser()
+    parser.add_option("-i", "--in", dest="in_fname",
+                    help="Input raw FIF file", metavar="FILE")
+    parser.add_option("-o", dest="out_fname",
+                    help="Output FIF file (if not set, suffix  '_sss' will be used)",
+                    metavar="FILE", default=None)
+    parser.add_option("--origin", dest="origin",
+                    help="Head origin in mm, or a filename to read the origin from. "
+                    "If not set it will be estimated from headshape points",
+                    default=None)
+    parser.add_option("--origin-out", dest="origin_out",
+                    help="Filename to use for computed origin", default=None)
+    parser.add_option("--frame", dest="frame", type="string",
+                    help="Coordinate frame for head center ('device' or 'head')",
+                    default="device")
+    parser.add_option("--bad", dest="bad", type="string",
+                    help="List of static bad channels",
+                    default=None)
+    parser.add_option("--autobad", dest="autobad", type="string",
+                    help="Set automated bad channel detection ('on', 'off', 'n')",
+                    default="off")
+    parser.add_option("--skip", dest="skip",
+                    help="Skips raw data sequences, time intervals pairs in sec, e.g.: 0 30 120 150",
+                    default=None)
+    parser.add_option("--force", dest="force", action="store_true",
+                    help="Ignore program warnings",
+                    default=False)
+    parser.add_option("--st", dest="st", action="store_true",
+                    help="Apply the time-domain MaxST extension",
+                    default=False)
+    parser.add_option("--buflen", dest="st_buflen", type="float",
+                    help="MaxSt buffer length in sec",
+                    default=16.0)
+    parser.add_option("--corr", dest="st_corr", type="float",
+                    help="MaxSt subspace correlation",
+                    default=0.96)
+    parser.add_option("--trans", dest="mv_trans",
+                    help="Transforms the data into the coil definitions of in_fname, or into the default frame",
+                    default=None)
+    parser.add_option("--movecomp", dest="mv_comp", action="store_true",
+                    help="Estimates and compensates head movements in continuous raw data",
+                    default=False)
+    parser.add_option("--headpos", dest="mv_headpos", action="store_true",
+                    help="Estimates and stores head position parameters, but does not compensate movements",
+                    default=False)
+    parser.add_option("--hp", dest="mv_hp", type="string",
+                    help="Stores head position data in an ascii file",
+                    default=None)
+    parser.add_option("--hpistep", dest="mv_hpistep", type="float",
+                    help="Sets head position update interval in ms",
+                    default=None)
+    parser.add_option("--hpisubt", dest="mv_hpisubt", type="string",
+                    help="Subtracts hpi signals: sine amplitudes, amp + baseline, or switch off",
+                    default=None)
+    parser.add_option("--nohpicons", dest="mv_hpicons", action="store_false",
+                    help="Do not check initial consistency isotrak vs hpifit",
+                    default=True)
+    parser.add_option("--linefreq", dest="linefreq", type="float",
+                    help="Sets the basic line interference frequency (50 or 60 Hz)",
+                    default=None)
+    parser.add_option("--nooverwrite", dest="overwrite", action="store_false",
+                    help="Do not overwrite output file if it already exists",
+                    default=True)
+    parser.add_option("--args", dest="mx_args", type="string",
+                    help="Additional command line arguments to pass to MaxFilter",
+                    default="")
+
+    options, args = parser.parse_args()
+
+    in_fname = options.in_fname
+
+    if in_fname is None:
+        parser.print_help()
+        sys.exit(-1)
+
+    out_fname = options.out_fname
+    origin = options.origin
+    origin_out = options.origin_out
+    frame = options.frame
+    bad = options.bad
+    autobad = options.autobad
+    skip = options.skip
+    force = options.force
+    st = options.st
+    st_buflen = options.st_buflen
+    st_corr = options.st_corr
+    mv_trans = options.mv_trans
+    mv_comp = options.mv_comp
+    mv_headpos = options.mv_headpos
+    mv_hp = options.mv_hp
+    mv_hpistep = options.mv_hpistep
+    mv_hpisubt = options.mv_hpisubt
+    mv_hpicons = options.mv_hpicons
+    linefreq = options.linefreq
+    overwrite = options.overwrite
+    mx_args = options.mx_args
+
+    if in_fname.endswith('_raw.fif') or in_fname.endswith('-raw.fif'):
+        prefix = in_fname[:-8]
+    else:
+        prefix = in_fname[:-4]
+
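+    # Default output name: '<prefix>_tsss.fif' when tSSS (--st) is used, '<prefix>_sss.fif' otherwise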
+    if out_fname is None:
+        if st:
+            out_fname = prefix + '_tsss.fif'
+        else:
+            out_fname = prefix + '_sss.fif'
+
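+    # --origin may also be a file; if it exists on disk, read the origin from its first line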
+    if origin is not None and os.path.exists(origin):
+        origin = open(origin, 'r').readlines()[0].strip()
+
+    origin = mne.preprocessing.apply_maxfilter(in_fname, out_fname, origin, frame,
+                    bad, autobad, skip, force, st, st_buflen, st_corr, mv_trans,
+                    mv_comp, mv_headpos, mv_hp, mv_hpistep, mv_hpisubt, mv_hpicons,
+                    linefreq, mx_args, overwrite)
+
+    if origin_out is not None:
+        fid = open(origin_out, 'w')
+        fid.write(origin + '\n')
+        fid.close()
diff --git a/bin/mne_surf2bem.py b/bin/mne_surf2bem.py
new file mode 100755
index 0000000..368b9f4
--- /dev/null
+++ b/bin/mne_surf2bem.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""Example usage
+
+mne_surf2bem.py --surf ${SUBJECTS_DIR}/${SUBJECT}/surf/lh.seghead --fif \
+    ${SUBJECTS_DIR}/${SUBJECT}/bem/${SUBJECT}-head.fif --id=4
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import mne
+
+if __name__ == '__main__':
+
+    from optparse import OptionParser
+
+    parser = OptionParser()
+    parser.add_option("-s", "--surf", dest="surf",
+                    help="Surface in Freesurfer format", metavar="FILE")
+    parser.add_option("-f", "--fif", dest="fif",
+                    help="FIF file produced", metavar="FILE")
+    parser.add_option("-i", "--id", dest="id", default=4,
+                    help=("Surface Id (e.g. 4 sur head surface)"))
+
+    (options, args) = parser.parse_args()
+
+    print "Converting %s to BEM FIF file." % options.surf
+
+    points, tris = mne.read_surface(options.surf)
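+    # Freesurfer surfaces are stored in mm; FIF files use meters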
+    points *= 1e-3
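+    # coord_frame=5 corresponds to FIFFV_COORD_MRI (MRI/surface RAS coordinates)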
+    surf = dict(coord_frame=5, id=int(options.id), nn=None, np=len(points),
+                ntri=len(tris), rr=points, sigma=1, tris=tris)
+    mne.write_bem_surface(options.fif, surf)
diff --git a/dictionary.txt b/dictionary.txt
new file mode 100644
index 0000000..25279c3
--- /dev/null
+++ b/dictionary.txt
@@ -0,0 +1,4264 @@
+abandonned->abandoned
+aberation->aberration
+abilties->abilities
+abilty->ability
+abondon->abandon
+abondoned->abandoned
+abondoning->abandoning
+abondons->abandons
+aborigene->aborigine
+abortificant->abortifacient
+abreviate->abbreviate
+abreviated->abbreviated
+abreviation->abbreviation
+abritrary->arbitrary
+absail->abseil
+absailing->abseiling
+absense->absence
+absolutly->absolutely
+absorbsion->absorption
+absorbtion->absorption
+abundacies->abundances
+abundancies->abundances
+abundunt->abundant
+abutts->abuts
+acadamy->academy
+acadmic->academic
+accademic->academic
+accademy->academy
+acccused->accused
+accelleration->acceleration
+accension->accession, ascension,
+acceptence->acceptance
+acceptible->acceptable
+accessable->accessible
+accidentaly->accidentally
+accidently->accidentally
+acclimitization->acclimatization
+accomadate->accommodate
+accomadated->accommodated
+accomadates->accommodates
+accomadating->accommodating
+accomadation->accommodation
+accomadations->accommodations
+accomdate->accommodate
+accomodate->accommodate
+accomodated->accommodated
+accomodates->accommodates
+accomodating->accommodating
+accomodation->accommodation
+accomodations->accommodations
+accompanyed->accompanied
+accordeon->accordion
+accordian->accordion
+accoring->according
+accoustic->acoustic
+accquainted->acquainted
+accrediation->accreditation
+accredidation->accreditation
+accross->across
+accussed->accused
+acedemic->academic
+acess->access
+acheive->achieve
+acheived->achieved
+acheivement->achievement
+acheivements->achievements
+acheives->achieves
+acheiving->achieving
+acheivment->achievement
+acheivments->achievements
+achievment->achievement
+achievments->achievements
+achive->achieve
+achived->achieved, archived,
+achivement->achievement
+achivements->achievements
+acknowldeged->acknowledged
+acknowledgeing->acknowledging
+ackward->awkward, backward,
+acommodate->accommodate
+acomplish->accomplish
+acomplished->accomplished
+acomplishment->accomplishment
+acomplishments->accomplishments
+acording->according
+acordingly->accordingly
+acquaintence->acquaintance
+acquaintences->acquaintances
+acquiantence->acquaintance
+acquiantences->acquaintances
+acquited->acquitted
+actived->activated
+activites->activities
+activly->actively
+actualy->actually
+acuracy->accuracy
+acused->accused
+acustom->accustom
+acustommed->accustomed
+adavanced->advanced
+adbandon->abandon
+additinally->additionally
+additionaly->additionally
+additon->addition
+additonal->additional
+additonally->additionally
+addmission->admission
+addopt->adopt
+addopted->adopted
+addoptive->adoptive
+addres->address
+addresable->addressable
+addresed->addressed
+addresing->addressing
+addressess->addresses
+addtion->addition
+addtional->additional
+adecuate->adequate
+adequit->adequate
+adhearing->adhering
+adherance->adherence
+admendment->amendment
+admininistrative->administrative
+adminstered->administered
+adminstrate->administrate
+adminstration->administration
+adminstrative->administrative
+adminstrator->administrator
+admissability->admissibility
+admissable->admissible
+admited->admitted
+admitedly->admittedly
+adn->and
+adolecent->adolescent
+adquire->acquire
+adquired->acquired
+adquires->acquires
+adquiring->acquiring
+adres->address
+adresable->addressable
+adresing->addressing
+adress->address
+adressable->addressable
+adressed->addressed
+adressing->addressing
+adventrous->adventurous
+advertisment->advertisement
+advertisments->advertisements
+advesary->adversary
+adviced->advised
+aeriel->aerial
+aeriels->aerials
+afair->affair
+afficianados->aficionados
+afficionado->aficionado
+afficionados->aficionados
+affilate->affiliate
+affilliate->affiliate
+affort->afford, effort,
+aforememtioned->aforementioned
+againnst->against
+agains->against
+agaisnt->against
+aganist->against
+aggaravates->aggravates
+aggreed->agreed
+aggreement->agreement
+aggregious->egregious
+aggresive->aggressive
+agian->again
+agianst->against
+agin->again
+agina->again, angina,
+aginst->against
+agravate->aggravate
+agre->agree
+agred->agreed
+agreeement->agreement
+agreemnt->agreement
+agregate->aggregate
+agregates->aggregates
+agreing->agreeing
+agression->aggression
+agressive->aggressive
+agressively->aggressively
+agressor->aggressor
+agricuture->agriculture
+agrieved->aggrieved
+ahev->have
+ahppen->happen
+ahve->have
+aicraft->aircraft
+aiport->airport
+airbourne->airborne
+aircaft->aircraft
+aircrafts->aircraft
+airporta->airports
+airrcraft->aircraft
+aisian->asian
+albiet->albeit
+alchohol->alcohol
+alchoholic->alcoholic
+alchol->alcohol
+alcholic->alcoholic
+alcohal->alcohol
+alcoholical->alcoholic
+aledge->allege
+aledged->alleged
+aledges->alleges
+alege->allege
+aleged->alleged
+alegience->allegiance
+algebraical->algebraic
+algorhitms->algorithms
+algoritm->algorithm
+algoritms->algorithms
+alientating->alienating
+alledge->allege
+alledged->alleged
+alledgedly->allegedly
+alledges->alleges
+allegedely->allegedly
+allegedy->allegedly
+allegely->allegedly
+allegence->allegiance
+allegience->allegiance
+allign->align
+alligned->aligned
+alliviate->alleviate
+allopone->allophone
+allopones->allophones
+allready->already
+allthough->although
+alltime->all-time
+alltogether->altogether
+almsot->almost
+alochol->alcohol
+alomst->almost
+alot->a lot
+alotted->allotted
+alowed->allowed
+alowing->allowing
+alreayd->already
+alse->else
+alsot->also
+alternitives->alternatives
+altho->although
+althought->although
+altough->although
+alusion->allusion, illusion,
+alwasy->always
+alwyas->always
+amalgomated->amalgamated
+amatuer->amateur
+amature->armature, amateur,
+amendmant->amendment
+Amercia->America
+amerliorate->ameliorate
+amke->make
+amking->making
+ammend->amend
+ammended->amended
+ammendment->amendment
+ammendments->amendments
+ammount->amount
+ammused->amused
+amoung->among
+amoungst->amongst
+amung->among
+amunition->ammunition
+analagous->analogous
+analitic->analytic
+analogeous->analogous
+anarchim->anarchism
+anarchistm->anarchism
+anbd->and
+ancestory->ancestry
+ancilliary->ancillary
+androgenous->androgynous
+androgeny->androgyny
+anihilation->annihilation
+aniversary->anniversary
+annoint->anoint
+annointed->anointed
+annointing->anointing
+annoints->anoints
+annother->another
+annouced->announced
+annualy->annually
+annuled->annulled
+anohter->another
+anomolies->anomalies
+anomolous->anomalous
+anomoly->anomaly
+anonimity->anonymity
+anounced->announced
+anouncement->announcement
+ansalisation->nasalisation
+ansalization->nasalization
+ansestors->ancestors
+antartic->antarctic
+anthromorphization->anthropomorphization
+anthropolgist->anthropologist
+anthropolgy->anthropology
+anual->annual
+anulled->annulled
+anwsered->answered
+anyhwere->anywhere
+anyother->any other
+anytying->anything
+aparent->apparent
+aparment->apartment
+apenines->apennines, Apennines,
+aplication->application
+aplied->applied
+apolegetics->apologetics
+apon->upon, apron,
+apparant->apparent
+apparantly->apparently
+appart->apart
+appartment->apartment
+appartments->apartments
+appealling->appealing, appalling,
+appeareance->appearance
+appearence->appearance
+appearences->appearances
+appenines->apennines, Apennines,
+apperance->appearance
+apperances->appearances
+appereance->appearance
+appereances->appearances
+applicaiton->application
+applicaitons->applications
+appologies->apologies
+appology->apology
+apprearance->appearance
+apprieciate->appreciate
+approachs->approaches
+appropiate->appropriate
+appropiately->appropriately
+appropraite->appropriate
+appropraitely->appropriately
+appropriatly->appropriately
+appropriatness->appropriateness
+appropropiate->appropriate
+appropropiately->appropriately
+approproximate->approximate
+approxamately->approximately
+approxiately->approximately
+approximitely->approximately
+aprehensive->apprehensive
+apropriate->appropriate
+apropriately->appropriately
+aproximate->approximate
+aproximately->approximately
+aquaduct->aqueduct
+aquaintance->acquaintance
+aquainted->acquainted
+aquiantance->acquaintance
+aquire->acquire
+aquired->acquired
+aquiring->acquiring
+aquisition->acquisition
+aquitted->acquitted
+aranged->arranged
+arangement->arrangement
+arbitarily->arbitrarily
+arbitary->arbitrary
+archaelogists->archaeologists
+archaelogy->archaeology
+archaoelogy->archeology, archaeology,
+archaology->archeology, archaeology,
+archeaologist->archeologist, archaeologist,
+archeaologists->archeologists, archaeologists,
+archetect->architect
+archetects->architects
+archetectural->architectural
+archetecturally->architecturally
+archetecture->architecture
+archiac->archaic
+archictect->architect
+archimedian->archimedean
+architecht->architect
+architechturally->architecturally
+architechture->architecture
+architechtures->architectures
+architectual->architectural
+archtype->archetype
+archtypes->archetypes
+aready->already
+areodynamics->aerodynamics
+argubly->arguably
+arguement->argument
+arguements->arguments
+arised->arose
+arival->arrival
+armamant->armament
+armistace->armistice
+arogant->arrogant
+arogent->arrogant
+aroud->around
+arrangment->arrangement
+arrangments->arrangements
+arround->around
+artical->article
+artice->article
+articel->article
+artifical->artificial
+artifically->artificially
+artillary->artillery
+arund->around
+asetic->ascetic
+asfar->as far
+asign->assign
+aslo->also
+asociated->associated
+asorbed->absorbed
+asphyxation->asphyxiation
+assasin->assassin
+assasinate->assassinate
+assasinated->assassinated
+assasinates->assassinates
+assasination->assassination
+assasinations->assassinations
+assasined->assassinated
+assasins->assassins
+assassintation->assassination
+assemple->assemble
+assertation->assertion
+asside->aside
+assisnate->assassinate
+assit->assist
+assitant->assistant
+assocation->association
+assoicate->associate
+assoicated->associated
+assoicates->associates
+assosication->assassination
+asssassans->assassins
+assualt->assault
+assualted->assaulted
+assymetric->asymmetric
+assymetrical->asymmetrical
+asteriod->asteroid
+asthetic->aesthetic
+asthetical->aesthetical
+asthetically->aesthetically
+asume->assume
+aswell->as well
+atain->attain
+atempting->attempting
+atheistical->atheistic
+athenean->athenian
+atheneans->athenians
+athiesm->atheism
+athiest->atheist
+atorney->attorney
+atribute->attribute
+atributed->attributed
+atributes->attributes
+attaindre->attainder, attained,
+attemp->attempt
+attemped->attempted
+attemt->attempt
+attemted->attempted
+attemting->attempting
+attemts->attempts
+attendence->attendance
+attendent->attendant
+attendents->attendants
+attened->attended
+attension->attention
+attitide->attitude
+attributred->attributed
+attrocities->atrocities
+audeince->audience
+auromated->automated
+austrailia->Australia
+austrailian->Australian
+auther->author
+authobiographic->autobiographic
+authobiography->autobiography
+authorative->authoritative
+authorites->authorities
+authorithy->authority
+authoritiers->authorities
+authoritive->authoritative
+authrorities->authorities
+autochtonous->autochthonous
+autoctonous->autochthonous
+autoincrememnt->autoincrement
+automaticly->automatically
+automibile->automobile
+automonomous->autonomous
+autor->author
+autority->authority
+auxilary->auxiliary
+auxillaries->auxiliaries
+auxillary->auxiliary
+auxilliaries->auxiliaries
+auxilliary->auxiliary
+availabe->available
+availablity->availability
+availaible->available
+availble->available
+availiable->available
+availible->available
+avalable->available
+avalance->avalanche
+avaliable->available
+avation->aviation
+avengence->a vengeance
+averageed->averaged
+avilable->available
+awared->awarded
+awya->away
+baceause->because
+backgorund->background
+backrounds->backgrounds
+bakc->back
+banannas->bananas
+bandwith->bandwidth
+bankrupcy->bankruptcy
+banruptcy->bankruptcy
+baout->about, bout,
+basicaly->basically
+basicly->basically
+bcak->back
+beachead->beachhead
+beacuse->because
+beastiality->bestiality
+beatiful->beautiful
+beaurocracy->bureaucracy
+beaurocratic->bureaucratic
+beautyfull->beautiful
+becamae->became
+becames->becomes, became,
+becasue->because
+beccause->because
+becomeing->becoming
+becomming->becoming
+becouse->because
+becuase->because
+bedore->before
+befoer->before
+beggin->begin, begging,
+begginer->beginner
+begginers->beginners
+beggining->beginning
+begginings->beginnings
+beggins->begins
+begining->beginning
+beginnig->beginning
+behavour->behavior, behaviour,
+beleagured->beleaguered
+beleif->belief
+beleive->believe
+beleived->believed
+beleives->believes
+beleiving->believing
+beligum->belgium
+belive->believe
+belived->believed, beloved,
+belives->believes, beliefs,
+belligerant->belligerent
+bellweather->bellwether
+bemusemnt->bemusement
+beneficary->beneficiary
+beng->being
+benificial->beneficial
+benifit->benefit
+benifits->benefits
+bergamont->bergamot
+Bernouilli->Bernoulli
+beseige->besiege
+beseiged->besieged
+beseiging->besieging
+betwen->between
+beween->between
+bewteen->between
+bilateraly->bilaterally
+billingualism->bilingualism
+bianry->binary
+binominal->binomial
+bizzare->bizarre
+blaim->blame
+blaimed->blamed
+blessure->blessing
+Blitzkreig->Blitzkrieg
+boaut->bout, boat, about,
+bodydbuilder->bodybuilder
+bombardement->bombardment
+bombarment->bombardment
+bondary->boundary
+Bonnano->Bonanno
+boradcast->broadcast
+borke->broke
+boundry->boundary
+bouyancy->buoyancy
+bouyant->buoyant
+boyant->buoyant
+Brasillian->Brazilian
+breakthough->breakthrough
+breakthroughts->breakthroughs
+breif->brief
+breifly->briefly
+brethen->brethren
+bretheren->brethren
+briliant->brilliant
+brillant->brilliant
+brimestone->brimstone
+Britian->Britain
+Brittish->British
+broacasted->broadcast
+broadacasting->broadcasting
+broady->broadly
+Buddah->Buddha
+Buddist->Buddhist
+buisness->business
+buisnessman->businessman
+buoancy->buoyancy
+buring->burying, burning, burin, during,
+burried->buried
+busineses->business, businesses,
+busness->business
+bussiness->business
+caculater->calculator
+cacuses->caucuses
+cahracters->characters
+calaber->caliber
+calander->calendar, calender, colander,
+calculater->calculator
+calculs->calculus
+calenders->calendars
+caligraphy->calligraphy
+caluclate->calculate
+caluclated->calculated
+caluculate->calculate
+caluculated->calculated
+calulate->calculate
+calulated->calculated
+calulater->calculator
+Cambrige->Cambridge
+camoflage->camouflage
+campain->campaign
+campains->campaigns
+candadate->candidate
+candiate->candidate
+candidiate->candidate
+cannister->canister
+cannisters->canisters
+cannnot->cannot
+cannonical->canonical
+cannotation->connotation
+cannotations->connotations
+cant'->can't
+cant->can't
+caost->coast
+caperbility->capability
+Capetown->Cape Town
+capible->capable
+captial->capital
+captued->captured
+capturd->captured
+carachter->character
+caracterized->characterized
+carcas->carcass, Caracas,
+carefull->careful
+careing->caring
+carismatic->charismatic
+Carmalite->Carmelite
+Carnagie->Carnegie
+Carnagie-Mellon->Carnegie-Mellon
+carnege->carnage, Carnegie,
+carnige->carnage, Carnegie,
+Carnigie->Carnegie
+Carnigie-Mellon->Carnegie-Mellon
+carniverous->carnivorous
+carreer->career
+carrers->careers
+Carribbean->Caribbean
+Carribean->Caribbean
+cartdridge->cartridge
+Carthagian->Carthaginian
+carthographer->cartographer
+cartilege->cartilage
+cartilidge->cartilage
+cartrige->cartridge
+casette->cassette
+casion->caisson
+cassawory->cassowary
+cassowarry->cassowary
+casulaties->casualties
+casulaty->casualty
+catagories->categories
+catagorized->categorized
+catagory->category
+Cataline->Catiline, Catalina,
+catapillar->caterpillar
+catapillars->caterpillars
+catapiller->caterpillar
+catapillers->caterpillars
+catepillar->caterpillar
+catepillars->caterpillars
+catergorize->categorize
+catergorized->categorized
+caterpilar->caterpillar
+caterpilars->caterpillars
+caterpiller->caterpillar
+caterpillers->caterpillars
+cathlic->catholic
+catholocism->catholicism
+catterpilar->caterpillar
+catterpilars->caterpillars
+catterpillar->caterpillar
+catterpillars->caterpillars
+cattleship->battleship
+causalities->casualties
+Ceasar->Caesar
+Celcius->Celsius
+cellpading->cellpadding
+cementary->cemetery
+cemetarey->cemetery
+cemetaries->cemeteries
+cemetary->cemetery
+cencus->census
+censur->censor, censure,
+cententenial->centennial
+centruies->centuries
+centruy->century
+ceratin->certain
+cerimonial->ceremonial
+cerimonies->ceremonies
+cerimonious->ceremonious
+cerimony->ceremony
+ceromony->ceremony
+certainity->certainty
+certian->certain
+cervial->cervical, servile, serval,
+chalenging->challenging
+challange->challenge
+challanged->challenged
+challege->challenge
+Champange->Champagne
+changable->changeable
+charachter->character
+charachters->characters
+charactersistic->characteristic
+charactor->character
+charactors->characters
+charasmatic->charismatic
+charaterized->characterized
+chariman->chairman
+charistics->characteristics
+chasr->chaser, chase,
+cheif->chief
+cheifs->chiefs
+chek->check
+chemcial->chemical
+chemcially->chemically
+chemestry->chemistry
+chemicaly->chemically
+childbird->childbirth
+childen->children
+choosed->chose, chosen,
+choosen->chosen
+chracter->character
+chuch->church
+churchs->churches
+Cincinatti->Cincinnati
+Cincinnatti->Cincinnati
+circulaton->circulation
+circumsicion->circumcision
+circut->circuit
+ciricuit->circuit
+ciriculum->curriculum
+civillian->civilian
+claer->clear
+claerer->clearer
+claerly->clearly
+claimes->claims
+clas->class, disabled because of name clash in c++
+clasic->classic
+clasical->classical
+clasically->classically
+cleareance->clearance
+clera->clear, sclera,
+clincial->clinical
+clinicaly->clinically
+cmo->com, disabled due to lots of false positives
+cmoputer->computer
+co-incided->coincided
+coctail->cocktail
+coform->conform
+cognizent->cognizant
+coincedentally->coincidentally
+colaborations->collaborations
+colateral->collateral
+colelctive->collective
+collaberative->collaborative
+collecton->collection
+collegue->colleague
+collegues->colleagues
+collonade->colonnade
+collonies->colonies
+collony->colony
+collosal->colossal
+colonizators->colonizers
+colum->column
+comander->commander, commandeer,
+comando->commando
+comandos->commandos
+comany->company
+comapany->company
+comback->comeback
+combanations->combinations
+combinatins->combinations
+combusion->combustion
+comdemnation->condemnation
+comemmorates->commemorates
+comemoretion->commemoration
+comision->commission
+comisioned->commissioned
+comisioner->commissioner
+comisioning->commissioning
+comisions->commissions
+comission->commission
+comissioned->commissioned
+comissioner->commissioner
+comissioning->commissioning
+comissions->commissions
+comited->committed
+comiting->committing
+comitted->committed
+comittee->committee
+comitting->committing
+commandoes->commandos
+commedic->comedic
+commemerative->commemorative
+commemmorate->commemorate
+commemmorating->commemorating
+commerical->commercial
+commerically->commercially
+commericial->commercial
+commericially->commercially
+commerorative->commemorative
+comming->coming
+comminication->communication
+commision->commission
+commisioned->commissioned
+commisioner->commissioner
+commisioning->commissioning
+commisions->commissions
+commited->committed
+commitee->committee
+commiting->committing
+committe->committee
+committment->commitment
+committments->commitments
+commmemorated->commemorated
+commongly->commonly
+commonweath->commonwealth
+commuications->communications
+commuinications->communications
+communciation->communication
+communiation->communication
+communites->communities
+compability->compatibility
+comparision->comparison
+comparisions->comparisons
+comparitive->comparative
+comparitively->comparatively
+compatabilities->compatibilities
+compatability->compatibility
+compatable->compatible
+compatablities->compatibilities
+compatablity->compatibility
+compatiable->compatible
+compatibilty->compatibility
+compatiblities->compatibilities
+compatiblity->compatibility
+compeitions->competitions
+compensantion->compensation
+competance->competence
+competant->competent
+competative->competitive
+competion->competition, completion,
+competitiion->competition
+competive->competitive
+competiveness->competitiveness
+comphrehensive->comprehensive
+compitent->competent
+completedthe->completed the
+completelyl->completely
+completetion->completion
+complier->compiler
+componant->component
+comprable->comparable
+comprimise->compromise
+compulsary->compulsory
+compulsery->compulsory
+computarized->computerized
+concensus->consensus
+concider->consider
+concidered->considered
+concidering->considering
+conciders->considers
+concieted->conceited
+concieved->conceived
+concious->conscious
+conciously->consciously
+conciousness->consciousness
+condamned->condemned
+condemmed->condemned
+condidtion->condition
+condidtions->conditions
+conditionsof->conditions of
+conected->connected
+conection->connection
+conectix->connectix
+conesencus->consensus
+confidental->confidential
+confidentally->confidentially
+confids->confides
+configureable->configurable
+confortable->comfortable
+congradulations->congratulations
+congresional->congressional
+conived->connived
+conjecutre->conjecture
+conjuction->conjunction
+Conneticut->Connecticut
+conotations->connotations
+conquerd->conquered
+conquerer->conqueror
+conquerers->conquerors
+conqured->conquered
+conscent->consent
+consciouness->consciousness
+consdider->consider
+consdidered->considered
+consdiered->considered
+consectutive->consecutive
+consenquently->consequently
+consentrate->concentrate
+consentrated->concentrated
+consentrates->concentrates
+consept->concept
+consequentually->consequently
+consequeseces->consequences
+consern->concern
+conserned->concerned
+conserning->concerning
+conservitive->conservative
+consiciousness->consciousness
+consicousness->consciousness
+considerd->considered
+consideres->considers
+consious->conscious
+consistant->consistent
+consistantly->consistently
+consituencies->constituencies
+consituency->constituency
+consituted->constituted
+consitution->constitution
+consitutional->constitutional
+consolodate->consolidate
+consolodated->consolidated
+consonent->consonant
+consonents->consonants
+consorcium->consortium
+conspiracys->conspiracies
+conspiriator->conspirator
+constaints->constraints
+constanly->constantly
+constarnation->consternation
+constatn->constant
+constinually->continually
+constituant->constituent
+constituants->constituents
+constituion->constitution
+constituional->constitutional
+constructes->constructs
+consttruction->construction
+constuction->construction
+consulant->consultant
+consumate->consummate
+consumated->consummated
+contaiminate->contaminate
+containes->contains
+contamporaries->contemporaries
+contamporary->contemporary
+contempoary->contemporary
+contemporaneus->contemporaneous
+contempory->contemporary
+contendor->contender
+contibute->contribute
+contibuted->contributed
+contibutes->contributes
+contigent->contingent
+contigious->contiguous
+contined->continued
+continous->continuous
+continously->continuously
+continueing->continuing
+contravercial->controversial
+contraversy->controversy
+contributer->contributor
+contributers->contributors
+contritutions->contributions
+controled->controlled
+controler->controller
+controling->controlling
+controll->control
+controlls->controls
+controvercial->controversial
+controvercy->controversy
+controveries->controversies
+controversal->controversial
+controversey->controversy
+controvertial->controversial
+controvery->controversy
+contruction->construction
+conveinent->convenient
+convenant->covenant
+convential->conventional
+convertables->convertibles
+convertion->conversion
+convery->convert
+conveyer->conveyor
+conviced->convinced
+convienient->convenient
+coordiantion->coordination
+coorperation->cooperation, corporation,
+coorperations->corporations
+copmetitors->competitors
+coputer->computer
+copywrite->copyright
+coridal->cordial
+cornmitted->committed
+corosion->corrosion
+corparate->corporate
+corperations->corporations
+correcters->correctors
+correponding->corresponding
+correposding->corresponding
+correspondant->correspondent
+correspondants->correspondents
+corresponsing->corresponding
+corridoors->corridors
+corrispond->correspond
+corrispondant->correspondent
+corrispondants->correspondents
+corrisponded->corresponded
+corrisponding->corresponding
+corrisponds->corresponds
+costitution->constitution
+cotrol->control
+coucil->council
+coudl->could, cloud,
+councellor->councillor, counselor, councilor,
+councellors->councillors, counselors, councilors,
+counries->countries
+countains->contains
+countires->countries
+countrie's->countries, countries', country's,
+coururier->courier, couturier,
+coverted->converted, covered, coveted,
+cpoy->coy, copy,
+creaeted->created
+creche->crèche
+creedence->credence
+critereon->criterion
+criterias->criteria
+criticists->critics
+critising->criticising, criticizing,
+critisising->criticising
+critisism->criticism
+critisisms->criticisms
+critisize->criticise, criticize,
+critisized->criticised, criticized,
+critisizes->criticises, criticizes,
+critisizing->criticising, criticizing,
+critized->criticized
+critizing->criticizing
+crockodiles->crocodiles
+crowm->crown
+crtical->critical
+crticised->criticised
+crucifiction->crucifixion
+crusies->cruises
+crystalisation->crystallisation
+culiminating->culminating
+cumulatative->cumulative
+curch->church
+curcuit->circuit
+currenly->currently
+curriculem->curriculum
+cxan->cyan
+cyclinder->cylinder
+dacquiri->daiquiri
+dael->deal, dial, dahl,
+dalmation->dalmatian
+damenor->demeanor
+dammage->damage
+Dardenelles->Dardanelles
+daugher->daughter
+debateable->debatable
+decendant->descendant
+decendants->descendants
+decendent->descendant
+decendents->descendants
+decideable->decidable
+decidely->decidedly
+decieved->deceived
+decison->decision
+decomissioned->decommissioned
+decomposit->decompose
+decomposited->decomposed
+decompositing->decomposing
+decomposits->decomposes
+decress->decrees
+decribe->describe
+decribed->described
+decribes->describes
+decribing->describing
+decriptor->descriptor
+dectect->detect
+defendent->defendant
+defendents->defendants
+deffensively->defensively
+deffine->define
+deffined->defined
+definance->defiance
+definate->definite
+definately->definitely
+definatly->definitely
+definetly->definitely
+definining->defining
+definit->definite
+definitly->definitely
+definiton->definition
+defintion->definition
+degrate->degrade
+delagates->delegates
+delapidated->dilapidated
+delerious->delirious
+delevopment->development
+deliberatly->deliberately
+delusionally->delusively
+demenor->demeanor
+demographical->demographic
+demolision->demolition
+demorcracy->democracy
+demostration->demonstration
+denegrating->denigrating
+densly->densely
+deparment->department
+deparmental->departmental
+deparments->departments
+dependance->dependence
+dependancy->dependency
+dependant->dependent
+deram->dram, dream,
+deriviated->derived
+derivitive->derivative
+derogitory->derogatory
+descendands->descendants
+descibed->described
+descision->decision
+descisions->decisions
+descriibes->describes
+descripters->descriptors
+descripton->description
+desctruction->destruction
+descuss->discuss
+desgined->designed
+deside->decide
+desigining->designing
+desinations->destinations
+desintegrated->disintegrated
+desintegration->disintegration
+desireable->desirable
+desitned->destined
+desktiop->desktop
+desorder->disorder
+desoriented->disoriented
+desparate->desperate, disparate,
+despict->depict
+despiration->desperation
+dessicated->desiccated
+dessigned->designed
+destablized->destabilized
+destory->destroy
+detailled->detailed
+detatched->detached
+deteoriated->deteriorated
+deteriate->deteriorate
+deterioriating->deteriorating
+determinining->determining
+detremental->detrimental
+devasted->devastated
+develope->develop
+developement->development
+developped->developed
+develpment->development
+devels->delves
+devestated->devastated
+devestating->devastating
+deviatio->deviation
+devide->divide
+devided->divided
+devistating->devastating
+devolopement->development
+diablical->diabolical
+diamons->diamonds
+diaster->disaster
+dichtomy->dichotomy
+diconnects->disconnects
+dicover->discover
+dicovered->discovered
+dicovering->discovering
+dicovers->discovers
+dicovery->discovery
+dicussed->discussed
+didnt'->didn't, didn\'t,
+didnt->didn't, didn\'t,
+diea->idea, die,
+dieing->dying, dyeing,
+dieties->deities
+diety->deity
+diferent->different
+diferrent->different
+differentiatiations->differentiations
+differnt->different
+difficulity->difficulty
+diffrent->different
+dificulties->difficulties
+dificulty->difficulty
+dimenions->dimensions
+dimention->dimension
+dimentional->dimensional
+dimentions->dimensions
+dimesnional->dimensional
+diminuitive->diminutive
+dimunitive->diminutive
+diosese->diocese
+diphtong->diphthong
+diphtongs->diphthongs
+diplomancy->diplomacy
+dipthong->diphthong
+dipthongs->diphthongs
+dirived->derived
+disagreeed->disagreed
+disapeared->disappeared
+disapointing->disappointing
+disappearred->disappeared
+disaproval->disapproval
+disasterous->disastrous
+disatisfaction->dissatisfaction
+disatisfied->dissatisfied
+disatrous->disastrous
+discontentment->discontent
+discribe->describe
+discribed->described
+discribes->describes
+discribing->describing
+disctinction->distinction
+disctinctive->distinctive
+disemination->dissemination
+disenchanged->disenchanted
+disiplined->disciplined
+disobediance->disobedience
+disobediant->disobedient
+disolved->dissolved
+disover->discover
+dispair->despair
+disparingly->disparagingly
+dispence->dispense
+dispenced->dispensed
+dispencing->dispensing
+dispicable->despicable
+dispite->despite
+dispostion->disposition
+disproportiate->disproportionate
+disputandem->disputandum
+disricts->districts
+dissagreement->disagreement
+dissapear->disappear
+dissapearance->disappearance
+dissapeared->disappeared
+dissapearing->disappearing
+dissapears->disappears
+dissappear->disappear
+dissappears->disappears
+dissappointed->disappointed
+dissarray->disarray
+dissobediance->disobedience
+dissobediant->disobedient
+dissobedience->disobedience
+dissobedient->disobedient
+distiction->distinction
+distingish->distinguish
+distingished->distinguished
+distingishes->distinguishes
+distingishing->distinguishing
+distingquished->distinguished
+distrubution->distribution
+distruction->destruction
+distructive->destructive
+ditributed->distributed
+diversed->diverse, diverged,
+divice->device
+divison->division
+divisons->divisions
+doccument->document
+doccumented->documented
+doccuments->documents
+docrines->doctrines
+doctines->doctrines
+documenatry->documentary
+doens->does
+doesnt'->doesn't
+doesnt->doesn't
+doign->doing
+dominaton->domination
+dominent->dominant
+dominiant->dominant
+donig->doing
+dosen't->doesn't
+dosent'->doesn't
+doub->doubt, daub,
+doulbe->double
+dowloads->downloads
+dramtic->dramatic
+draughtman->draughtsman
+Dravadian->Dravidian
+dreasm->dreams
+driectly->directly
+drnik->drink
+druming->drumming
+drummless->drumless
+dstination->destination
+dupicate->duplicate
+durig->during
+durring->during
+duting->during
+dyas->dryas
+eahc->each
+ealier->earlier
+earlies->earliest
+earnt->earned
+ecclectic->eclectic
+eceonomy->economy
+ecidious->deciduous
+eclispe->eclipse
+ecomonic->economic
+ect->etc
+eearly->early
+efect->effect
+efel->evil
+effeciency->efficiency
+effecient->efficient
+effeciently->efficiently
+efficency->efficiency
+efficent->efficient
+efficently->efficiently
+efford->effort, afford,
+effords->efforts, affords,
+effulence->effluence
+eigth->eighth, eight,
+eiter->either
+elction->election
+electic->eclectic, electric,
+electon->election, electron,
+electrial->electrical
+electricly->electrically
+electricty->electricity
+elementay->elementary
+eleminated->eliminated
+eleminating->eliminating
+eles->eels
+eletricity->electricity
+elicided->elicited
+eligable->eligible
+elimentary->elementary
+ellected->elected
+elphant->elephant
+embarass->embarrass
+embarassed->embarrassed
+embarassing->embarrassing
+embarassment->embarrassment
+embargos->embargoes
+embarras->embarrass
+embarrased->embarrassed
+embarrasing->embarrassing
+embarrasment->embarrassment
+embeded->embedded
+embezelled->embezzled
+emblamatic->emblematic
+eminate->emanate
+eminated->emanated
+emision->emission
+emited->emitted
+emiting->emitting
+emition->emission, emotion,
+emmediately->immediately
+emmigrated->emigrated, immigrated,
+emminent->eminent, imminent,
+emminently->eminently
+emmisaries->emissaries
+emmisarries->emissaries
+emmisarry->emissary
+emmisary->emissary
+emmision->emission
+emmisions->emissions
+emmited->emitted
+emmiting->emitting
+emmitted->emitted
+emmitting->emitting
+emnity->enmity
+emperical->empirical
+emphaised->emphasised
+emphsis->emphasis
+emphysyma->emphysema
+empirial->empirical, imperial,
+emprisoned->imprisoned
+enameld->enameled
+enchancement->enhancement
+encouraing->encouraging
+encryptiion->encryption
+encylopedia->encyclopedia
+endevors->endeavors
+endevour->endeavour
+endianess->endianness
+endig->ending
+endolithes->endoliths
+enduce->induce
+ened->need
+enflamed->inflamed
+enforceing->enforcing
+engagment->engagement
+engeneer->engineer
+engeneering->engineering
+engieneer->engineer
+engieneers->engineers
+enlargment->enlargement
+enlargments->enlargements
+Enlish->English, enlist,
+enourmous->enormous
+enourmously->enormously
+ensconsed->ensconced
+entaglements->entanglements
+enteratinment->entertainment
+enthousiasm->enthusiasm
+enthusiatic->enthusiastic
+entitity->entity
+entitlied->entitled
+entrepeneur->entrepreneur
+entrepeneurs->entrepreneurs
+enviorment->environment
+enviormental->environmental
+enviormentally->environmentally
+enviorments->environments
+enviornment->environment
+enviornmental->environmental
+enviornmentalist->environmentalist
+enviornmentally->environmentally
+enviornments->environments
+enviroment->environment
+enviromental->environmental
+enviromentalist->environmentalist
+enviromentally->environmentally
+enviroments->environments
+envolutionary->evolutionary
+envrionments->environments
+enxt->next
+epidsodes->episodes
+epsiode->episode
+equialent->equivalent
+equilibium->equilibrium
+equilibrum->equilibrium
+equiped->equipped
+equippment->equipment
+equitorial->equatorial
+equivelant->equivalent
+equivelent->equivalent
+equivilant->equivalent
+equivilent->equivalent
+equivlalent->equivalent
+erally->orally, really,
+eratic->erratic
+eratically->erratically
+eraticly->erratically
+erested->arrested, erected,
+erronous->erroneous
+errupted->erupted
+esential->essential
+esitmated->estimated
+esle->else
+especialy->especially
+essencial->essential
+essense->essence
+essentail->essential
+essentialy->essentially
+essentual->essential
+essesital->essential
+estabishes->establishes
+establising->establishing
+ethnocentricm->ethnocentrism
+ethose->those, ethos,
+Europian->European
+Europians->Europeans
+Eurpean->European
+Eurpoean->European
+evenhtually->eventually
+eventally->eventually
+eventhough->even though
+eventially->eventually
+eventualy->eventually
+everthing->everything
+everytime->every time
+everyting->everything
+eveyr->every
+evidentally->evidently
+exagerate->exaggerate
+exagerated->exaggerated
+exagerates->exaggerates
+exagerating->exaggerating
+exagerrate->exaggerate
+exagerrated->exaggerated
+exagerrates->exaggerates
+exagerrating->exaggerating
+examinated->examined
+exampt->exempt
+exapansion->expansion
+excact->exact
+excange->exchange
+excecute->execute
+excecuted->executed
+excecutes->executes
+excecuting->executing
+excecution->execution
+excedded->exceeded
+excelent->excellent
+excell->excel
+excellance->excellence
+excellant->excellent
+excells->excels
+excercise->exercise
+excerciser->exerciser
+exchanching->exchanging
+excisted->existed
+exculsivly->exclusively
+execising->exercising
+exection->execution
+exectued->executed
+exeedingly->exceedingly
+exelent->excellent
+exellent->excellent
+exemple->example
+exept->except
+exeptional->exceptional
+exerbate->exacerbate
+exerbated->exacerbated
+exerciese->exercises
+exerpt->excerpt
+exerpts->excerpts
+exersize->exercise
+exerternal->external
+exhalted->exalted
+exhibtion->exhibition
+exibition->exhibition
+exibitions->exhibitions
+exicting->exciting
+exinct->extinct
+existance->existence
+existant->existent
+existince->existence
+exliled->exiled
+exludes->excludes
+exmaple->example
+exonorate->exonerate
+exoskelaton->exoskeleton
+expalin->explain
+expatriot->expatriate
+expeced->expected
+expecially->especially
+expeditonary->expeditionary
+expeiments->experiments
+expell->expel
+expells->expels
+experiance->experience
+experianced->experienced
+expiditions->expeditions
+expierence->experience
+explaination->explanation
+explaning->explaining
+explictly->explicitly
+exploititive->exploitative
+explotation->exploitation
+expropiated->expropriated
+expropiation->expropriation
+exressed->expressed
+extemely->extremely
+extention->extension
+extentions->extensions
+extered->exerted
+extermist->extremist
+extint->extinct, extant,
+extradiction->extradition
+extraterrestial->extraterrestrial
+extraterrestials->extraterrestrials
+extravagent->extravagant
+extrememly->extremely
+extremeophile->extremophile
+extremly->extremely
+extrordinarily->extraordinarily
+extrordinary->extraordinary
+eyar->year, eyas,
+eyars->years, eyas,
+eyasr->years, eyas,
+faciliate->facilitate
+faciliated->facilitated
+faciliates->facilitates
+facilites->facilities
+facillitate->facilitate
+facinated->fascinated
+facist->fascist
+familes->families
+familliar->familiar
+famoust->famous
+fanatism->fanaticism
+Farenheit->Fahrenheit
+fatc->fact
+faught->fought
+favoutrable->favourable
+feasable->feasible
+Febuary->February
+Feburary->February
+fedreally->federally
+feromone->pheromone
+fertily->fertility
+fianite->finite
+fianlly->finally
+ficticious->fictitious
+fictious->fictitious
+fidn->find
+fiel->feel, field, file, phial,
+fiels->feels, fields, files, phials,
+fiercly->fiercely
+fightings->fighting
+filiament->filament
+fimilies->families
+finacial->financial
+finaly->finally
+financialy->financially
+firends->friends
+firts->flirts, first,
+fisionable->fissionable
+flamable->flammable
+flawess->flawless
+fleed->fled, freed,
+Flemmish->Flemish
+florescent->fluorescent
+flourescent->fluorescent
+flourine->fluorine
+fluorish->flourish
+follwoing->following
+folowing->following
+fomed->formed
+fomr->from, form,
+fonetic->phonetic
+fontrier->frontier
+foootball->football
+forbad->forbade
+forbiden->forbidden
+foreward->foreword
+forfiet->forfeit
+forhead->forehead
+foriegn->foreign
+Formalhaut->Fomalhaut
+formallize->formalize
+formallized->formalized
+formaly->formally, formerly,
+formelly->formerly
+formidible->formidable
+formost->foremost
+forsaw->foresaw
+forseeable->foreseeable
+fortelling->foretelling
+forunner->forerunner
+foucs->focus
+foudn->found
+fougth->fought
+foundaries->foundries
+foundary->foundry
+Foundland->Newfoundland
+fourties->forties
+fourty->forty
+fouth->fourth
+foward->forward
+Fransiscan->Franciscan
+Fransiscans->Franciscans
+freind->friend
+freindly->friendly
+frequentily->frequently
+frome->from
+fromed->formed
+froniter->frontier
+fucntion->function
+fucntioning->functioning
+fufill->fulfill
+fufilled->fulfilled
+fulfiled->fulfilled
+fullfill->fulfill
+fullfilled->fulfilled
+fundametal->fundamental
+fundametals->fundamentals
+funguses->fungi
+funtion->function
+furuther->further
+futher->further
+futhermore->furthermore
+futhroc->futhark, futhorc,
+gae->game, Gael, gale,
+galatic->galactic
+Galations->Galatians
+gallaxies->galaxies
+galvinized->galvanized
+Gameboy->Game Boy
+ganerate->generate
+ganes->games
+ganster->gangster
+garantee->guarantee
+garanteed->guaranteed
+garantees->guarantees
+gardai->gardaí
+garnison->garrison
+gauarana->guaraná
+gaurantee->guarantee
+gauranteed->guaranteed
+gaurantees->guarantees
+gaurd->guard, gourd,
+gaurentee->guarantee
+gaurenteed->guaranteed
+gaurentees->guarantees
+geneological->genealogical
+geneologies->genealogies
+geneology->genealogy
+generaly->generally
+generatting->generating
+genialia->genitalia
+geographicial->geographical
+geometrician->geometer
+geometricians->geometers
+gerat->great
+Ghandi->Gandhi
+glight->flight
+gnawwed->gnawed
+godess->goddess
+godesses->goddesses
+Godounov->Godunov
+gogin->going, Gauguin,
+goign->going
+gonig->going
+Gothenberg->Gothenburg
+Gottleib->Gottlieb
+gouvener->governor
+govement->government
+govenment->government
+govenrment->government
+goverance->governance
+goverment->government
+govermental->governmental
+governer->governor
+governmnet->government
+govorment->government
+govormental->governmental
+govornment->government
+gracefull->graceful
+graet->great
+grafitti->graffiti
+gramatically->grammatically
+grammaticaly->grammatically
+grammer->grammar
+grat->great
+gratuitious->gratuitous
+greatful->grateful
+greatfully->gratefully
+greif->grief
+gridles->griddles
+gropu->group
+grwo->grow
+Guaduloupe->Guadalupe, Guadeloupe,
+Guadulupe->Guadalupe, Guadeloupe,
+guage->gauge
+guarentee->guarantee
+guarenteed->guaranteed
+guarentees->guarantees
+guarrenteed->guaranteed
+Guatamala->Guatemala
+Guatamalan->Guatemalan
+guerilla->guerrilla
+guerillas->guerrillas
+guerrila->guerrilla
+guerrilas->guerrillas
+gueswork->guesswork
+guidence->guidance
+Guilia->Giulia
+Guilio->Giulio
+Guiness->Guinness
+Guiseppe->Giuseppe
+gunanine->guanine
+gurantee->guarantee
+guranteed->guaranteed
+gurantees->guarantees
+guttaral->guttural
+gutteral->guttural
+habaeus->habeas
+habeus->habeas
+Habsbourg->Habsburg
+haemorrage->haemorrhage
+haev->have, heave,
+Hallowean->Hallowe'en, Halloween,
+halp->help
+hapen->happen
+hapened->happened
+hapening->happening
+happend->happened
+happended->happened
+happenned->happened
+harased->harassed
+harases->harasses
+harasment->harassment
+harasments->harassments
+harassement->harassment
+harras->harass
+harrased->harassed
+harrases->harasses
+harrasing->harassing
+harrasment->harassment
+harrasments->harassments
+harrassed->harassed
+harrasses->harasses
+harrassing->harassing
+harrassment->harassment
+harrassments->harassments
+hasnt'->hasn't
+hasnt->hasn't
+Hatian->Haitian
+haviest->heaviest
+headquarer->headquarter
+headquater->headquarter
+headquatered->headquartered
+headquaters->headquarters
+healthercare->healthcare
+heared->heard
+heathy->healthy
+Heidelburg->Heidelberg
+heigher->higher
+heirachies->hierarchies
+heirarchy->hierarchy
+heiroglyphics->hieroglyphics
+helment->helmet
+helpfull->helpful
+helpped->helped
+hemmorhage->hemorrhage
+herad->heard, Hera,
+heridity->heredity
+heroe->hero
+heros->heroes
+hertiage->heritage
+hertzs->hertz
+hesistant->hesitant
+heterogenous->heterogeneous
+hieght->height
+hierachical->hierarchical
+hierachies->hierarchies
+hierachy->hierarchy
+hierarcical->hierarchical
+hierarcy->hierarchy
+hieroglph->hieroglyph
+hieroglphs->hieroglyphs
+higer->higher
+higest->highest
+higway->highway
+hillarious->hilarious
+himselv->himself
+hinderance->hindrance
+hinderence->hindrance
+hindrence->hindrance
+hipopotamus->hippopotamus
+hismelf->himself
+histocompatability->histocompatibility
+historicians->historians
+hitsingles->hit singles
+holliday->holiday
+homestate->home state
+homogeneize->homogenize
+homogeneized->homogenized
+honory->honorary
+horrifing->horrifying
+hosited->hoisted
+hospitible->hospitable
+hounour->honour
+housr->hours, house,
+howver->however
+hsitorians->historians
+hstory->history
+hten->then, hen, the,
+htere->there, here,
+htey->they
+htikn->think
+hting->thing
+htink->think
+htis->this
+humer->humor, humour,
+humerous->humorous, humerus,
+huminoid->humanoid
+humoural->humoral
+humurous->humorous
+husban->husband
+hvae->have
+hvaing->having
+hvea->have, heave,
+hwihc->which
+hwile->while
+hwole->whole
+hydogen->hydrogen
+hydropile->hydrophile
+hydropilic->hydrophilic
+hydropobe->hydrophobe
+hydropobic->hydrophobic
+hygeine->hygiene
+hypocracy->hypocrisy
+hypocrasy->hypocrisy
+hypocricy->hypocrisy
+hypocrit->hypocrite
+hypocrits->hypocrites
+iconclastic->iconoclastic
+idaeidae->idea
+idaes->ideas
+idealogies->ideologies
+idealogy->ideology
+identicial->identical
+identifers->identifiers
+ideosyncratic->idiosyncratic
+idesa->ideas, ides,
+idiosyncracy->idiosyncrasy
+Ihaca->Ithaca
+illegimacy->illegitimacy
+illegitmate->illegitimate
+illess->illness
+illiegal->illegal
+illution->illusion
+ilness->illness
+ilogical->illogical
+imagenary->imaginary
+imagin->imagine
+imaginery->imaginary, imagery,
+imanent->eminent, imminent,
+imcomplete->incomplete
+imediately->immediately
+imense->immense
+imigrant->emigrant, immigrant,
+imigrated->emigrated, immigrated,
+imigration->emigration, immigration,
+iminent->eminent, imminent, immanent,
+immediatelly->immediately
+immediatley->immediately
+immediatly->immediately
+immidately->immediately
+immidiately->immediately
+immitate->imitate
+immitated->imitated
+immitating->imitating
+immitator->imitator
+immunosupressant->immunosuppressant
+impecabbly->impeccably
+impedence->impedance
+implamenting->implementing
+impliment->implement
+implimented->implemented
+imploys->employs
+importamt->important
+imprioned->imprisoned
+imprisonned->imprisoned
+improvision->improvisation
+improvments->improvements
+inablility->inability
+inaccesible->inaccessible
+inaccessable->inaccessible
+inadiquate->inadequate
+inadquate->inadequate
+inadvertant->inadvertent
+inadvertantly->inadvertently
+inagurated->inaugurated
+inaguration->inauguration
+inappropiate->inappropriate
+inaugures->inaugurates
+inbalance->imbalance
+inbalanced->imbalanced
+inbetween->between
+incarcirated->incarcerated
+incidentially->incidentally
+incidently->incidentally
+inclreased->increased
+includ->include
+includng->including
+incompatabilities->incompatibilities
+incompatability->incompatibility
+incompatable->incompatible
+incompatablities->incompatibilities
+incompatablity->incompatibility
+incompatiblities->incompatibilities
+incompatiblity->incompatibility
+incompetance->incompetence
+incompetant->incompetent
+incomptable->incompatible
+incomptetent->incompetent
+inconsistant->inconsistent
+incoroporated->incorporated
+incorperation->incorporation
+incorportaed->incorporated
+incorprates->incorporates
+incorruptable->incorruptible
+incramentally->incrementally
+increadible->incredible
+incredable->incredible
+inctroduce->introduce
+inctroduced->introduced
+incuding->including
+incunabla->incunabula
+indefinately->indefinitely
+indefineable->undefinable
+indefinitly->indefinitely
+indentical->identical
+indentifier->identifier
+indepedantly->independently
+indepedence->independence
+independance->independence
+independant->independent
+independantly->independently
+independece->independence
+independendet->independent
+indespensable->indispensable
+indespensible->indispensable
+indicies->indices
+indictement->indictment
+indigineous->indigenous
+indipendence->independence
+indipendent->independent
+indipendently->independently
+indispensible->indispensable
+indisputible->indisputable
+indisputibly->indisputably
+indite->indict
+individualy->individually
+indpendent->independent
+indpendently->independently
+indulgue->indulge
+indutrial->industrial
+indviduals->individuals
+inefficienty->inefficiently
+inevatible->inevitable
+inevitible->inevitable
+inevititably->inevitably
+infalability->infallibility
+infallable->infallible
+infectuous->infectious
+infered->inferred
+infilitrate->infiltrate
+infilitrated->infiltrated
+infilitration->infiltration
+infinit->infinite
+inflamation->inflammation
+influencial->influential
+influented->influenced
+infomation->information
+informtion->information
+infrantryman->infantryman
+infrigement->infringement
+ingenius->ingenious
+ingreediants->ingredients
+inhabitans->inhabitants
+inherantly->inherently
+inheritage->heritage, inheritance,
+inheritence->inheritance
+inital->initial
+initally->initially
+initation->initiation
+initiaitive->initiative
+initilize->initialize
+inlcuding->including
+inmigrant->immigrant
+inmigrants->immigrants
+innoculated->inoculated
+inocence->innocence
+inofficial->unofficial
+inot->into
+inpeach->impeach
+inpolite->impolite
+inprisonment->imprisonment
+inproving->improving
+insectiverous->insectivorous
+insensative->insensitive
+inseperable->inseparable
+insistance->insistence
+insitution->institution
+insitutions->institutions
+inspite->in spite, inspire,
+instade->instead
+instanciation->instantiation
+instatance->instance
+institue->institute
+instuction->instruction
+instuments->instruments
+instutionalized->institutionalized
+instutions->institutions
+insurence->insurance
+intelectual->intellectual
+inteligence->intelligence
+inteligent->intelligent
+intenational->international
+intented->intended, indented,
+intepretation->interpretation
+intepretator->interpreter
+interational->international
+interbread->interbreed, interbred,
+interchangable->interchangeable
+interchangably->interchangeably
+intercontinetal->intercontinental
+intered->interred, interned,
+interelated->interrelated
+interferance->interference
+interfereing->interfering
+interger->integer
+intergrated->integrated
+intergration->integration
+interm->interim
+internation->international
+interpet->interpret
+interrim->interim
+interrput->interrupt
+interrugum->interregnum
+intertaining->entertaining
+interupt->interrupt
+intervines->intervenes
+intevene->intervene
+intial->initial
+intially->initially
+intrduced->introduced
+intrest->interest
+introdued->introduced
+intruction->instruction
+intruduced->introduced
+intrument->instrument
+intrumental->instrumental
+intruments->instruments
+intrusted->entrusted
+intutive->intuitive
+intutively->intuitively
+inudstry->industry
+inumerable->enumerable, innumerable,
+inventer->inventor
+invertibrates->invertebrates
+investingate->investigate
+involvment->involvement
+irelevent->irrelevant
+iresistable->irresistible
+iresistably->irresistibly
+iresistible->irresistible
+iresistibly->irresistibly
+iritable->irritable
+iritated->irritated
+ironicly->ironically
+irregardless->regardless
+irrelevent->irrelevant
+irreplacable->irreplaceable
+irresistable->irresistible
+irresistably->irresistibly
+isnt'->isn't
+isnt->isn't
+Israelies->Israelis
+issueing->issuing
+itnroduced->introduced
+iunior->junior
+iwll->will
+iwth->with
+Janurary->January
+Januray->January
+Japanes->Japanese
+jaques->jacques
+jeapardy->jeopardy
+jewllery->jewellery
+Johanine->Johannine
+Jospeh->Joseph
+jouney->journey
+journied->journeyed
+journies->journeys
+jstu->just
+jsut->just
+Juadaism->Judaism
+Juadism->Judaism
+judical->judicial
+judisuary->judiciary
+juducial->judicial
+juristiction->jurisdiction
+juristictions->jurisdictions
+kenrel->kernel
+kindergarden->kindergarten
+klenex->kleenex
+knifes->knives
+knive->knife
+knowlege->knowledge
+knowlegeable->knowledgeable
+knwo->know
+knwos->knows
+konw->know
+konws->knows
+kwno->know
+labatory->lavatory, laboratory,
+labled->labelled, labeled,
+labratory->laboratory
+laguage->language
+laguages->languages
+larg->large
+largst->largest
+larrry->larry
+lastr->last
+lattitude->latitude
+launchs->launch, launches,
+launhed->launched
+lavae->larvae
+layed->laid
+lazyness->laziness
+leage->league
+leanr->lean, learn, leaner,
+leathal->lethal
+lefted->left
+legitamate->legitimate
+legitmate->legitimate
+leibnitz->leibniz
+lenght->length
+leran->learn
+lerans->learns
+leutenant->lieutenant
+levetate->levitate
+levetated->levitated
+levetates->levitates
+levetating->levitating
+levle->level
+liasion->liaison
+liason->liaison
+liasons->liaisons
+libary->library
+libell->libel
+libguistic->linguistic
+libguistics->linguistics
+libitarianisn->libertarianism
+lible->libel, liable,
+licenced->licensed
+lieing->lying
+liek->like
+liekd->liked
+liesure->leisure
+lieuenant->lieutenant
+lieved->lived
+liftime->lifetime
+lightyear->light year
+lightyears->light years
+likelyhood->likelihood
+linnaena->linnaean
+lippizaner->lipizzaner
+liquify->liquefy
+liscense->license, licence,
+lisence->license, licence,
+lisense->license, licence,
+listners->listeners
+litature->literature
+literaly->literally
+literture->literature
+littel->little
+litterally->literally
+liuke->like
+livley->lively
+lmits->limits
+loev->love
+lonelyness->loneliness
+longitudonal->longitudinal
+longuer->longer
+lonley->lonely
+lonly->lonely, only,
+loosing->losing
+lotharingen->lothringen
+lsat->last
+lukid->likud
+lveo->love
+lvoe->love
+Lybia->Libya
+maching->machine, marching, matching,
+mackeral->mackerel
+magasine->magazine
+magincian->magician
+magnificient->magnificent
+magolia->magnolia
+mailny->mainly
+maintainance->maintenance
+maintainence->maintenance
+maintance->maintenance
+maintenence->maintenance
+maintinaing->maintaining
+maintioned->mentioned
+majoroty->majority
+maked->marked, made,
+makse->makes
+Malcom->Malcolm
+maltesian->Maltese
+mamal->mammal
+mamalian->mammalian
+managable->manageable, manageably,
+managment->management
+maneouvre->manoeuvre
+maneouvred->manoeuvred
+maneouvres->manoeuvres
+maneouvring->manoeuvring
+manisfestations->manifestations
+mannor->manner
+manoeuverability->maneuverability
+manouver->maneuver, manoeuvre,
+manouverability->maneuverability, manoeuvrability,
+manouverable->maneuverable, manoeuvrable,
+manouvers->maneuvers, manoeuvres,
+mantained->maintained
+manuever->maneuver, manoeuvre,
+manuevers->maneuvers, manoeuvres,
+manufacturedd->manufactured
+manufature->manufacture
+manufatured->manufactured
+manufaturing->manufacturing
+manuver->maneuver
+mapp->map
+mariage->marriage
+marjority->majority
+markes->marks
+marketting->marketing
+marmelade->marmalade
+marrage->marriage
+marraige->marriage
+marrtyred->martyred
+marryied->married
+Massachussets->Massachusetts
+Massachussetts->Massachusetts
+massmedia->mass media
+masterbation->masturbation
+mataphysical->metaphysical
+materalists->materialist
+mathamatics->mathematics
+mathematican->mathematician
+mathematicas->mathematics
+matheticians->mathematicians
+mathmatically->mathematically
+mathmatician->mathematician
+mathmaticians->mathematicians
+mccarthyst->mccarthyist
+mchanics->mechanics
+meaninng->meaning
+mear->wear, mere, mare,
+mechandise->merchandise
+medacine->medicine
+medeival->medieval
+medevial->medieval
+mediciney->mediciny
+medievel->medieval
+mediterainnean->mediterranean
+Mediteranean->Mediterranean
+meerkrat->meerkat
+melieux->milieux
+membranaphone->membranophone
+memeber->member
+menally->mentally
+meranda->veranda, Miranda,
+mercentile->mercantile
+messagin->messaging
+messanger->messenger
+messenging->messaging
+metalic->metallic
+metalurgic->metallurgic
+metalurgical->metallurgical
+metalurgy->metallurgy
+metamorphysis->metamorphosis
+metaphoricial->metaphorical
+meterologist->meteorologist
+meterology->meteorology
+methaphor->metaphor
+methaphors->metaphors
+Michagan->Michigan
+micoscopy->microscopy
+midwifes->midwives
+mileau->milieu
+milennia->millennia
+milennium->millennium
+mileu->milieu
+miliary->military
+milion->million
+miliraty->military
+millenia->millennia
+millenial->millennial
+millenialism->millennialism
+millenium->millennium
+millepede->millipede
+millioniare->millionaire
+millitary->military
+millon->million
+miltary->military
+minature->miniature
+minerial->mineral
+MingGW->MinGW
+ministery->ministry
+minstries->ministries
+minstry->ministry
+minumum->minimum
+mirrorred->mirrored
+miscelaneous->miscellaneous
+miscellanious->miscellaneous
+miscellanous->miscellaneous
+mischeivous->mischievous
+mischevious->mischievous
+mischievious->mischievous
+misdameanor->misdemeanor
+misdameanors->misdemeanors
+misdemenor->misdemeanor
+misdemenors->misdemeanors
+misfourtunes->misfortunes
+misile->missile
+Misouri->Missouri
+mispell->misspell
+mispelled->misspelled
+mispelling->misspelling
+missen->mizzen
+Missisipi->Mississippi
+Missisippi->Mississippi
+missle->missile
+missonary->missionary
+misterious->mysterious
+mistery->mystery
+misteryous->mysterious
+mkae->make
+mkaes->makes
+mkaing->making
+mkea->make
+modeled->modelled
+moderm->modem
+modle->model
+moduel->module
+moduels->modules
+moent->moment
+moeny->money
+mohammedans->muslims
+moil->mohel, soil,
+moleclues->molecules
+momento->memento
+monestaries->monasteries
+monestary->monastery, monetary,
+monickers->monikers
+monolite->monolithic
+Monserrat->Montserrat
+montains->mountains
+montanous->mountainous
+Montnana->Montana
+monts->months
+montypic->monotypic
+moreso->more, more so,
+morgage->mortgage
+Morisette->Morissette
+Morrisette->Morissette
+morroccan->moroccan
+morrocco->morocco
+morroco->morocco
+mortage->mortgage
+mosture->moisture
+motiviated->motivated
+mounth->month
+movei->movie, disabled due to assembly code
+movment->movement
+mroe->more
+mucuous->mucous
+muder->murder
+mudering->murdering
+muhammadan->muslim
+multicultralism->multiculturalism
+multifuction->multifunction
+multipled->multiplied
+multiplers->multipliers
+multy-thread->multithread
+munbers->numbers
+muncipalities->municipalities
+muncipality->municipality
+munnicipality->municipality
+muscels->mussels, muscles,
+muscial->musical
+muscician->musician
+muscicians->musicians
+mutiliated->mutilated
+myraid->myriad
+mysef->myself
+mysogynist->misogynist
+mysogyny->misogyny
+mysterous->mysterious
+Mythraic->Mithraic
+naieve->naive
+Naploeon->Napoleon
+Napolean->Napoleon
+Napoleonian->Napoleonic
+naturaly->naturally
+naturely->naturally
+naturual->natural
+naturually->naturally
+Nazereth->Nazareth
+neccesarily->necessarily
+neccesary->necessary
+neccessarily->necessarily
+neccessary->necessary
+neccessities->necessities
+necesarily->necessarily
+necesary->necessary
+necessiate->necessitate
+negitive->negative
+neglible->negligible
+negligable->negligible
+negociate->negotiate
+negociation->negotiation
+negociations->negotiations
+negotation->negotiation
+neice->niece, nice,
+neigborhood->neighborhood
+neigbour->neighbour, neighbor,
+neigbourhood->neighbourhood
+neigbouring->neighbouring, neighboring,
+neigbours->neighbours, neighbors,
+neolitic->neolithic
+nescessary->necessary
+nessasarily->necessarily
+nessecary->necessary
+nestin->nesting
+neverthless->nevertheless
+newletters->newsletters
+Newyorker->New Yorker
+nickle->nickel
+nightfa;;->nightfall
+nightime->nighttime
+nineth->ninth
+ninteenth->nineteenth
+ninties->1990s
+ninty->ninety
+nkow->know
+nkwo->know
+nmae->name
+noncombatents->noncombatants
+nonsence->nonsense
+nontheless->nonetheless
+noone->no one
+norhern->northern
+northen->northern
+northereastern->northeastern
+notabley->notably
+noteable->notable
+noteably->notably
+noteriety->notoriety
+noth->north
+nothern->northern
+noticable->noticeable
+noticably->noticeably
+noticeing->noticing
+noticible->noticeable
+notwhithstanding->notwithstanding
+noveau->nouveau
+Novermber->November
+nowdays->nowadays
+nowe->now
+nto->not, disabled due to \n
+nucular->nuclear
+nuculear->nuclear
+nuisanse->nuisance
+Nullabour->Nullarbor
+numberous->numerous
+Nuremburg->Nuremberg
+nusance->nuisance
+nutritent->nutrient
+nutritents->nutrients
+nuturing->nurturing
+obediance->obedience
+obediant->obedient
+obession->obsession
+obssessed->obsessed
+obstacal->obstacle
+obstancles->obstacles
+obstruced->obstructed
+ocasion->occasion
+ocasional->occasional
+ocasionally->occasionally
+ocasionaly->occasionally
+ocasioned->occasioned
+ocasions->occasions
+ocassion->occasion
+ocassional->occasional
+ocassionally->occasionally
+ocassionaly->occasionally
+ocassioned->occasioned
+ocassions->occasions
+occaison->occasion
+occassion->occasion
+occassional->occasional
+occassionally->occasionally
+occassionaly->occasionally
+occassioned->occasioned
+occassions->occasions
+occationally->occasionally
+occour->occur
+occurance->occurrence
+occurances->occurrences
+occure->occur
+occured->occurred
+occurence->occurrence
+occurences->occurrences
+occuring->occurring
+occurr->occur
+occurrance->occurrence
+occurrances->occurrences
+octohedra->octahedra
+octohedral->octahedral
+octohedron->octahedron
+ocuntries->countries
+ocuntry->country
+ocurr->occur
+ocurrance->occurrence
+ocurred->occurred
+ocurrence->occurrence
+offcers->officers
+offcially->officially
+offereings->offerings
+offical->official
+offically->officially
+officals->officials
+officaly->officially
+officialy->officially
+offred->offered
+oftenly->often
+oging->going, ogling,
+omision->omission
+omited->omitted
+omiting->omitting
+omlette->omelette
+ommision->omission
+ommited->omitted
+ommiting->omitting
+ommitted->omitted
+ommitting->omitting
+omniverous->omnivorous
+omniverously->omnivorously
+omre->more
+onot->note, not,
+onyl->only
+openess->openness
+openin->opening
+oponent->opponent
+oportunity->opportunity
+opose->oppose
+oposite->opposite
+oposition->opposition
+oppenly->openly
+oppinion->opinion
+opponant->opponent
+oppononent->opponent
+oppositition->opposition
+oppossed->opposed
+opprotunity->opportunity
+opression->oppression
+opressive->oppressive
+opthalmic->ophthalmic
+opthalmologist->ophthalmologist
+opthalmology->ophthalmology
+opthamologist->ophthalmologist
+optmizations->optimizations
+optomism->optimism
+orded->ordered
+organim->organism
+organistion->organisation
+organiztion->organization
+orgin->origin, organ,
+orginal->original
+orginally->originally
+orginize->organise
+oridinarily->ordinarily
+origanaly->originally
+originall->original, originally,
+originaly->originally
+originially->originally
+originnally->originally
+origional->original
+orignally->originally
+orignially->originally
+otehr->other
+oublisher->publisher
+ouevre->oeuvre
+oustanding->outstanding
+oveerun->overrun
+overshaddowed->overshadowed
+overthere->over there
+overwelming->overwhelming
+overwheliming->overwhelming
+owrk->work
+owudl->would
+oxigen->oxygen
+oximoron->oxymoron
+p0enis->penis
+paide->paid
+paitience->patience
+palce->place, palace,
+paleolitic->paleolithic
+paliamentarian->parliamentarian
+Palistian->Palestinian
+Palistinian->Palestinian
+Palistinians->Palestinians
+pallete->palette
+pamflet->pamphlet
+pamplet->pamphlet
+pantomine->pantomime
+Papanicalou->Papanicolaou
+paralel->parallel
+paralell->parallel
+paralelly->parallelly
+paralely->parallelly
+parallely->parallelly
+paramter->parameter
+paramters->parameters
+paranthesis->parenthesis
+paraphenalia->paraphernalia
+parellels->parallels
+parituclar->particular
+parliment->parliament
+parrakeets->parakeets
+parralel->parallel
+parrallel->parallel
+parrallell->parallel
+parrallelly->parallelly
+parrallely->parallelly
+partialy->partially
+particually->particularly
+particualr->particular
+particuarly->particularly
+particularily->particularly
+particulary->particularly
+pary->party
+pased->passed
+pasengers->passengers
+passerbys->passersby
+pasttime->pastime
+pastural->pastoral
+pathes->paths
+paticular->particular
+pattented->patented
+pavillion->pavilion
+payed->paid
+pblisher->publisher
+pbulisher->publisher
+peacefuland->peaceful and
+peageant->pageant
+peculure->peculiar
+pedestrain->pedestrian
+peformed->performed
+peice->piece
+Peloponnes->Peloponnesus
+penatly->penalty
+penerator->penetrator
+penisula->peninsula
+penisular->peninsular
+penninsula->peninsula
+penninsular->peninsular
+pennisula->peninsula
+Pennyslvania->Pennsylvania
+pensinula->peninsula
+peom->poem
+peoms->poems
+peopel->people
+peotry->poetry
+perade->parade
+percepted->perceived
+percieve->perceive
+percieved->perceived
+perenially->perennially
+perfomance->performance
+perfomers->performers
+performence->performance
+performes->performed, performs,
+perhasp->perhaps
+perheaps->perhaps
+perhpas->perhaps
+peripathetic->peripatetic
+peristent->persistent
+perjery->perjury
+perjorative->pejorative
+permanant->permanent
+permenant->permanent
+permenantly->permanently
+permissable->permissible
+perogative->prerogative
+peronal->personal
+perosnality->personality
+perphas->perhaps
+perpindicular->perpendicular
+perseverence->perseverance
+persistance->persistence
+persistant->persistent
+personel->personnel, personal,
+personell->personnel
+personnell->personnel
+persuded->persuaded
+persue->pursue
+persued->pursued
+persuing->pursuing
+persuit->pursuit
+persuits->pursuits
+pertubation->perturbation
+pertubations->perturbations
+pessiary->pessary
+petetion->petition
+Pharoah->Pharaoh
+phenomenom->phenomenon
+phenomenonal->phenomenal
+phenomenonly->phenomenally
+phenomonenon->phenomenon
+phenomonon->phenomenon
+phenonmena->phenomena
+Philipines->Philippines
+philisopher->philosopher
+philisophical->philosophical
+philisophy->philosophy
+Phillipine->Philippine
+Phillipines->Philippines
+Phillippines->Philippines
+phillosophically->philosophically
+philospher->philosopher
+philosphies->philosophies
+philosphy->philosophy
+Phonecian->Phoenician
+phongraph->phonograph
+phylosophical->philosophical
+physicaly->physically
+piblisher->publisher
+pich->pitch
+pilgrimmage->pilgrimage
+pilgrimmages->pilgrimages
+pinapple->pineapple
+pinnaple->pineapple
+pinoneered->pioneered
+plagarism->plagiarism
+planation->plantation
+planed->planned
+plantiff->plaintiff
+plateu->plateau
+platfrom->platform
+plathome->platform
+plausable->plausible
+playright->playwright
+playwrite->playwright
+playwrites->playwrights
+pleasent->pleasant
+plebicite->plebiscite
+plesant->pleasant
+poenis->penis
+poeoples->peoples
+poety->poetry
+poisin->poison
+polical->political
+polinator->pollinator
+polinators->pollinators
+politican->politician
+politicans->politicians
+poltical->political
+polute->pollute
+poluted->polluted
+polutes->pollutes
+poluting->polluting
+polution->pollution
+polyphonyic->polyphonic
+polysaccaride->polysaccharide
+polysaccharid->polysaccharide
+pomegranite->pomegranate
+pomotion->promotion
+poportional->proportional
+popoulation->population
+popularaty->popularity
+populare->popular
+populer->popular
+portait->portrait
+portayed->portrayed
+portraing->portraying
+Portugese->Portuguese
+portuguease->portuguese
+portugues->Portuguese
+posess->possess
+posessed->possessed
+posesses->possesses
+posessing->possessing
+posession->possession
+posessions->possessions
+posion->poison
+positon->position
+possable->possible
+possably->possibly
+posseses->possesses
+possesing->possessing
+possesion->possession
+possessess->possesses
+possibile->possible
+possibilty->possibility
+possiblility->possibility
+possiblilty->possibility
+possiblities->possibilities
+possiblity->possibility
+possition->position
+Postdam->Potsdam
+posthomous->posthumous
+postion->position
+postive->positive
+potatos->potatoes
+potrait->portrait
+potrayed->portrayed
+poulations->populations
+poverful->powerful
+poweful->powerful
+powerfull->powerful
+ppublisher->publisher
+practial->practical
+practially->practically
+practicaly->practically
+practicioner->practitioner
+practicioners->practitioners
+practicly->practically
+practioner->practitioner
+practioners->practitioners
+prairy->prairie
+prarie->prairie
+praries->prairies
+pratice->practice
+preample->preamble
+precedessor->predecessor
+preceed->precede
+preceeded->preceded
+preceeding->preceding
+preceeds->precedes
+precentage->percentage
+precice->precise
+precisly->precisely
+precurser->precursor
+predecesors->predecessors
+predicatble->predictable
+predicitons->predictions
+predomiantly->predominately
+prefered->preferred
+prefering->preferring
+preferrably->preferably
+pregancies->pregnancies
+preiod->period
+preliferation->proliferation
+premeire->premiere
+premeired->premiered
+premillenial->premillennial
+preminence->preeminence
+premission->permission
+Premonasterians->Premonstratensians
+preocupation->preoccupation
+prepair->prepare
+prepartion->preparation
+prepatory->preparatory
+preperation->preparation
+preperations->preparations
+prepresent->represent
+preriod->period
+presedential->presidential
+presense->presence
+presidenital->presidential
+presidental->presidential
+presitgious->prestigious
+prespective->perspective
+prestigeous->prestigious
+prestigous->prestigious
+presumabely->presumably
+presumibly->presumably
+pretection->protection
+prevelant->prevalent
+preverse->perverse
+previvous->previous
+pricipal->principal
+priciple->principle
+priestood->priesthood
+primarly->primarily
+primative->primitive
+primatively->primitively
+primatives->primitives
+primordal->primordial
+privalege->privilege
+privaleges->privileges
+priveledges->privileges
+privelege->privilege
+priveleged->privileged
+priveleges->privileges
+privelige->privilege
+priveliged->privileged
+priveliges->privileges
+privelleges->privileges
+privilage->privilege
+priviledge->privilege
+priviledges->privileges
+privledge->privilege
+privte->private
+probabilaty->probability
+probablistic->probabilistic
+probablly->probably
+probalibity->probability
+probaly->probably
+probelm->problem
+proccess->process
+proccessing->processing
+procede->proceed, precede,
+proceded->proceeded, preceded,
+procedes->proceeds, precedes,
+procedger->procedure
+proceding->proceeding, preceding,
+procedings->proceedings
+proceedure->procedure
+proces->process
+processer->processor
+proclaimation->proclamation
+proclamed->proclaimed
+proclaming->proclaiming
+proclomation->proclamation
+profesion->profusion, profession,
+profesor->professor
+professer->professor
+proffesed->professed
+proffesion->profession
+proffesional->professional
+proffesor->professor
+profilic->prolific
+progessed->progressed
+programable->programmable
+progrom->pogrom, program,
+progroms->pogroms, programs,
+prohabition->prohibition
+prologomena->prolegomena
+prominance->prominence
+prominant->prominent
+prominantly->prominently
+prominately->prominently, predominately,
+promiscous->promiscuous
+promotted->promoted
+pronomial->pronominal
+pronouced->pronounced
+pronounched->pronounced
+pronounciation->pronunciation
+proove->prove
+prooved->proved
+prophacy->prophecy
+propietary->proprietary
+propmted->prompted
+propoganda->propaganda
+propogate->propagate
+propogates->propagates
+propogation->propagation
+propostion->proposition
+propotions->proportions
+propper->proper
+propperly->properly
+proprietory->proprietary
+proseletyzing->proselytizing
+protaganist->protagonist
+protaganists->protagonists
+protocal->protocol
+protoganist->protagonist
+protrayed->portrayed
+protruberance->protuberance
+protruberances->protuberances
+prouncements->pronouncements
+provacative->provocative
+provded->provided
+provicial->provincial
+provinicial->provincial
+provisiosn->provisions
+provisonal->provisional
+proximty->proximity
+pseudononymous->pseudonymous
+pseudonyn->pseudonym
+psuedo->pseudo
+psycology->psychology
+psyhic->psychic
+pubilsher->publisher
+pubisher->publisher
+publiaher->publisher
+publically->publicly
+publicaly->publicly
+publicher->publisher
+publihser->publisher
+publisehr->publisher
+publiser->publisher
+publisger->publisher
+publisheed->published
+publisherr->publisher
+publishher->publisher
+publishor->publisher
+publishre->publisher
+publissher->publisher
+publlisher->publisher
+publsiher->publisher
+publusher->publisher
+puchasing->purchasing
+Pucini->Puccini
+Puertorrican->Puerto Rican
+Puertorricans->Puerto Ricans
+pulisher->publisher
+pumkin->pumpkin
+puplisher->publisher
+puritannical->puritanical
+purposedly->purposely
+purpotedly->purportedly
+pursuade->persuade
+pursuaded->persuaded
+pursuades->persuades
+pususading->persuading
+puting->putting
+pwoer->power
+pyscic->psychic
+qtuie->quite, quiet,
+quantaty->quantity
+quantitiy->quantity
+quarantaine->quarantine
+Queenland->Queensland
+questonable->questionable
+quicklyu->quickly
+quinessential->quintessential
+quitted->quit
+quizes->quizzes
+qutie->quite, quiet,
+rabinnical->rabbinical
+racaus->raucous
+radiactive->radioactive
+radify->ratify
+raelly->really
+rarified->rarefied
+reaccurring->recurring
+reacing->reaching
+reacll->recall
+readmition->readmission
+realitvely->relatively
+realsitic->realistic
+realtions->relations
+realy->really
+realyl->really
+reasearch->research
+rebiulding->rebuilding
+rebllions->rebellions
+rebounce->rebound
+reccomend->recommend
+reccomendations->recommendations
+reccomended->recommended
+reccomending->recommending
+reccommend->recommend
+reccommended->recommended
+reccommending->recommending
+reccuring->recurring
+receeded->receded
+receeding->receding
+receivedfrom->received from
+recepient->recipient
+recepients->recipients
+receving->receiving
+rechargable->rechargeable
+reched->reached
+recide->reside
+recided->resided
+recident->resident
+recidents->residents
+reciding->residing
+reciepents->recipients
+reciept->receipt
+recieve->receive
+recieved->received
+reciever->receiver
+recievers->receivers
+recieves->receives
+recieving->receiving
+recipiant->recipient
+recipiants->recipients
+recived->received
+recivership->receivership
+recogise->recognise
+recogize->recognize
+recomend->recommend
+recomended->recommended
+recomending->recommending
+recomends->recommends
+recommedations->recommendations
+reconaissance->reconnaissance
+reconcilation->reconciliation
+reconized->recognized
+reconnaisance->reconnaissance
+reconnaissence->reconnaissance
+recontructed->reconstructed
+recordproducer->record producer
+recquired->required
+recrational->recreational
+recrod->record
+recuiting->recruiting
+recuring->recurring
+recurrance->recurrence
+rediculous->ridiculous
+reedeming->redeeming
+reenforced->reinforced
+refect->reflect
+refedendum->referendum
+referal->referral
+referece->reference
+refereces->references
+refered->referred
+referemce->reference
+referemces->references
+referencs->references
+referenece->reference
+refereneced->referenced
+refereneces->references
+referiang->referring
+refering->referring
+refernce->reference, references,
+refernces->references
+referrence->reference
+referrences->references
+referrs->refers
+reffered->referred
+refference->reference
+reffering->referring
+refrence->reference
+refrences->references
+refrers->refers
+refridgeration->refrigeration
+refridgerator->refrigerator
+refromist->reformist
+refusla->refusal
+regardes->regards
+registrs->registers
+regluar->regular
+reguarly->regularly
+regulaion->regulation
+regulaotrs->regulators
+regularily->regularly
+rehersal->rehearsal
+reicarnation->reincarnation
+reigining->reigning
+reknown->renown
+reknowned->renowned
+rela->real, disabled due to lots of false positives
+relaly->really
+relatiopnship->relationship
+relativly->relatively
+relected->reelected
+releive->relieve
+releived->relieved
+releiver->reliever
+releses->releases
+relevence->relevance
+relevent->relevant
+reliablity->reliability
+relient->reliant
+religeous->religious
+religous->religious
+religously->religiously
+relinqushment->relinquishment
+relitavely->relatively
+relized->realised, realized,
+relpacement->replacement
+remaing->remaining
+remeber->remember
+rememberable->memorable
+rememberance->remembrance
+remembrence->remembrance
+remenant->remnant
+remenicent->reminiscent
+reminent->remnant
+reminescent->reminiscent
+reminscent->reminiscent
+reminsicent->reminiscent
+rendevous->rendezvous
+rendezous->rendezvous
+renedered->rendered
+renewl->renewal
+rennovate->renovate
+rennovated->renovated
+rennovating->renovating
+rennovation->renovation
+rentors->renters
+reoccurrence->recurrence
+reorganision->reorganisation
+repatition->repetition, repartition,
+repectively->respectively
+repeition->repetition
+repentence->repentance
+repentent->repentant
+repeteadly->repeatedly
+repetion->repetition
+repid->rapid
+reponse->response
+reponses->responses
+reponsible->responsible
+reportadly->reportedly
+represantative->representative
+representive->representative
+representives->representatives
+reproducable->reproducible
+reprtoire->repertoire
+repsectively->respectively
+reptition->repetition
+reqest->request
+requirment->requirement
+requred->required
+resaurant->restaurant
+resembelance->resemblance
+resembes->resembles
+resemblence->resemblance
+resevoir->reservoir
+residental->residential
+resignement->resignment
+resistable->resistible
+resistence->resistance
+resistent->resistant
+respectivly->respectively
+responce->response
+responibilities->responsibilities
+responisble->responsible
+responnsibilty->responsibility
+responsability->responsibility
+responsibile->responsible
+responsibilites->responsibilities
+responsiblities->responsibilities
+responsiblity->responsibility
+ressemblance->resemblance
+ressemble->resemble
+ressembled->resembled
+ressemblence->resemblance
+ressembling->resembling
+resssurecting->resurrecting
+ressurect->resurrect
+ressurected->resurrected
+ressurection->resurrection
+ressurrection->resurrection
+restarant->restaurant
+restarants->restaurants
+restaraunt->restaurant
+restaraunteur->restaurateur
+restaraunteurs->restaurateurs
+restaraunts->restaurants
+restauranteurs->restaurateurs
+restauration->restoration
+restauraunt->restaurant
+resteraunt->restaurant
+resteraunts->restaurants
+resticted->restricted
+restraunt->restraint, restaurant,
+resturant->restaurant
+resturants->restaurants
+resturaunt->restaurant
+resturaunts->restaurants
+resurecting->resurrecting
+retalitated->retaliated
+retalitation->retaliation
+retreive->retrieve
+returnd->returned
+revaluated->reevaluated
+reveiw->review
+reveral->reversal
+reversable->reversible
+revolutionar->revolutionary
+rewitten->rewritten
+rewriet->rewrite
+rewuired->required
+rference->reference
+rferences->references
+rhymme->rhyme
+rhythem->rhythm
+rhythim->rhythm
+rhytmic->rhythmic
+rigeur->rigueur, rigour, rigor,
+rigourous->rigorous
+rininging->ringing
+rised->raised, rose,
+Rockerfeller->Rockefeller
+rococco->rococo
+rocord->record
+roomate->roommate
+rougly->roughly
+rucuperate->recuperate
+rudimentatry->rudimentary
+rulle->rule
+runing->running
+runnning->running
+runnung->running
+russina->Russian
+Russion->Russian
+rwite->write
+rythem->rhythm
+rythim->rhythm
+rythm->rhythm
+rythmic->rhythmic
+rythyms->rhythms
+sacrafice->sacrifice
+sacreligious->sacrilegious
+sacrifical->sacrificial
+saftey->safety
+safty->safety
+salery->salary
+sanctionning->sanctioning
+sandwhich->sandwich
+Sanhedrim->Sanhedrin
+santioned->sanctioned
+sargant->sergeant
+sargeant->sergeant
+sasy->says, sassy,
+satelite->satellite
+satelites->satellites
+Saterday->Saturday
+Saterdays->Saturdays
+satisfactority->satisfactorily
+satric->satiric
+satrical->satirical
+satrically->satirically
+sattelite->satellite
+sattelites->satellites
+saught->sought
+saveing->saving
+saxaphone->saxophone
+scaleable->scalable
+scandanavia->Scandinavia
+scaricity->scarcity
+scavanged->scavenged
+schedual->schedule
+scholarhip->scholarship
+scholarstic->scholastic, scholarly,
+scientfic->scientific
+scientifc->scientific
+scientis->scientist
+scince->science
+scinece->science
+scirpt->script
+scoll->scroll
+screenwrighter->screenwriter
+scrutinity->scrutiny
+scuptures->sculptures
+seach->search
+seached->searched
+seaches->searches
+secceeded->seceded, succeeded,
+seceed->succeed, secede,
+seceeded->succeeded, seceded,
+secratary->secretary
+secretery->secretary
+sedereal->sidereal
+seeked->sought
+segementation->segmentation
+seguoys->segues
+seige->siege
+seing->seeing
+seinor->senior
+seldomly->seldom
+senarios->scenarios
+sence->sense, since,
+senstive->sensitive
+sensure->censure
+seperate->separate
+seperated->separated
+seperately->separately
+seperates->separates
+seperating->separating
+seperation->separation
+seperatism->separatism
+seperatist->separatist
+sepina->subpoena
+sepulchure->sepulchre, sepulcher,
+sepulcre->sepulchre, sepulcher,
+sergent->sergeant
+settelement->settlement
+settlment->settlement
+severeal->several
+severley->severely
+severly->severely
+sevice->service
+shadasloo->shadaloo
+shaddow->shadow
+shadoloo->shadaloo
+shamen->shaman, shamans,
+sheat->sheath, sheet, cheat,
+sheild->shield
+sherif->sheriff
+shineing->shining
+shiped->shipped
+shiping->shipping
+shopkeeepers->shopkeepers
+shorly->shortly
+shortwhile->short while
+shoudl->should
+shoudln->should, shouldn't,
+shouldnt'->shouldn't
+shouldnt->shouldn't
+shreak->shriek
+shrinked->shrunk
+sicne->since
+sideral->sidereal
+sieze->seize, size,
+siezed->seized, sized,
+siezing->seizing, sizing,
+siezure->seizure
+siezures->seizures
+siginificant->significant
+signficant->significant
+signficiant->significant
+signfies->signifies
+signifantly->significantly
+significently->significantly
+signifigant->significant
+signifigantly->significantly
+signitories->signatories
+signitory->signatory
+similarily->similarly
+similiar->similar
+similiarity->similarity
+similiarly->similarly
+simmilar->similar
+simpley->simply
+simplier->simpler
+simultanous->simultaneous
+simultanously->simultaneously
+sincerley->sincerely
+singsog->singsong
+sinse->sines, since,
+Sionist->Zionist
+Sionists->Zionists
+Sixtin->Sistine
+Skagerak->Skagerrak
+skateing->skating
+slaugterhouses->slaughterhouses
+slighly->slightly
+slowy->slowly
+smae->same
+smealting->smelting
+smoe->some
+sneeks->sneaks
+snese->sneeze
+socalism->socialism
+socities->societies
+soem->some
+sofware->software
+sohw->show
+soilders->soldiers
+solatary->solitary
+soley->solely
+soliders->soldiers
+soliliquy->soliloquy
+soluable->soluble
+somene->someone
+somtimes->sometimes
+somwhere->somewhere
+sophicated->sophisticated
+sophmore->sophomore
+sorceror->sorcerer
+sorrounding->surrounding
+sotry->story
+sotyr->satyr, story,
+soudn->sound
+soudns->sounds
+sould->could, should, sold,
+sountrack->soundtrack
+sourth->south
+sourthern->southern
+souvenier->souvenir
+souveniers->souvenirs
+soveits->soviets
+sovereignity->sovereignty
+soverign->sovereign
+soverignity->sovereignty
+soverignty->sovereignty
+spagetti->spaghetti
+spainish->Spanish
+speach->speech
+specfic->specific
+speciallized->specialised, specialized,
+specif->specific, specify,
+specifiying->specifying
+speciman->specimen
+spectauclar->spectacular
+spectaulars->spectaculars
+spects->aspects, expects,
+spectum->spectrum
+speices->species
+spendour->splendour
+spermatozoan->spermatozoon
+spoace->space
+sponser->sponsor
+sponsered->sponsored
+spontanous->spontaneous
+sponzored->sponsored
+spoonfulls->spoonfuls
+sppeches->speeches
+spreaded->spread
+sprech->speech
+spred->spread
+spriritual->spiritual
+spritual->spiritual
+sqaure->square
+stablility->stability
+stainlees->stainless
+staion->station
+standars->standards
+stange->strange
+startegic->strategic
+startegies->strategies
+startegy->strategy
+stateman->statesman
+statememts->statements
+statment->statement
+steriods->steroids
+sterotypes->stereotypes
+stilus->stylus
+stingent->stringent
+stiring->stirring
+stirrs->stirs
+stlye->style
+stomache->stomach
+stong->strong
+stopry->story
+storeis->stories
+storise->stories
+stornegst->strongest
+stoyr->story
+stpo->stop
+stradegies->strategies
+stradegy->strategy
+strat->start, strata,
+stratagically->strategically
+streemlining->streamlining
+stregth->strength
+strenghen->strengthen
+strenghened->strengthened
+strenghening->strengthening
+strenght->strength
+strenghten->strengthen
+strenghtened->strengthened
+strenghtening->strengthening
+strengtened->strengthened
+strenous->strenuous
+strictist->strictest
+strikely->strikingly
+strnad->strand
+stroy->story, destroy,
+structual->structural
+stubborness->stubbornness
+stucture->structure
+stuctured->structured
+studdy->study
+studing->studying
+stuggling->struggling
+sturcture->structure
+subcatagories->subcategories
+subcatagory->subcategory
+subconsiously->subconsciously
+subjudgation->subjugation
+submachne->submachine
+subpecies->subspecies
+subsidary->subsidiary
+subsiduary->subsidiary
+subsituting->substituting
+subsquent->subsequent
+subsquently->subsequently
+substace->substance
+substancial->substantial
+substatial->substantial
+substituded->substituted
+substract->subtract
+substracted->subtracted
+substracting->subtracting
+substraction->subtraction
+substracts->subtracts
+subtances->substances
+subterranian->subterranean
+suburburban->suburban
+succceeded->succeeded
+succcesses->successes
+succedded->succeeded
+succeded->succeeded
+succeds->succeeds
+succesful->successful
+succesfully->successfully
+succesfuly->successfully
+succesion->succession
+succesive->successive
+successfull->successful
+successully->successfully
+succsess->success
+succsessfull->successful
+suceed->succeed
+suceeded->succeeded
+suceeding->succeeding
+suceeds->succeeds
+sucesful->successful
+sucesfully->successfully
+sucesfuly->successfully
+sucesion->succession
+sucess->success
+sucesses->successes
+sucessful->successful
+sucessfull->successful
+sucessfully->successfully
+sucessfuly->successfully
+sucession->succession
+sucessive->successive
+sucessor->successor
+sucessot->successor
+sucide->suicide
+sucidial->suicidal
+sufferage->suffrage
+sufferred->suffered
+sufferring->suffering
+sufficent->sufficient
+sufficently->sufficiently
+sumary->summary
+sunglases->sunglasses
+suop->soup
+superceeded->superseded
+superintendant->superintendent
+suphisticated->sophisticated
+suplimented->supplemented
+supose->suppose
+suposed->supposed
+suposedly->supposedly
+suposes->supposes
+suposing->supposing
+supplamented->supplemented
+suppliementing->supplementing
+suppoed->supposed
+supposingly->supposedly
+suppy->supply
+supress->suppress
+supressed->suppressed
+supresses->suppresses
+supressing->suppressing
+suprise->surprise
+suprised->surprised
+suprising->surprising
+suprisingly->surprisingly
+suprize->surprise
+suprized->surprised
+suprizing->surprising
+suprizingly->surprisingly
+surfce->surface
+surley->surly, surely,
+suround->surround
+surounded->surrounded
+surounding->surrounding
+suroundings->surroundings
+surounds->surrounds
+surplanted->supplanted
+surpress->suppress
+surpressed->suppressed
+surprize->surprise
+surprized->surprised
+surprizing->surprising
+surprizingly->surprisingly
+surrended->surrounded, surrendered,
+surrepetitious->surreptitious
+surrepetitiously->surreptitiously
+surreptious->surreptitious
+surreptiously->surreptitiously
+surronded->surrounded
+surrouded->surrounded
+surrouding->surrounding
+surrundering->surrendering
+surveilence->surveillance
+surveill->surveil
+surveyer->surveyor
+surviver->survivor
+survivers->survivors
+survivied->survived
+suseptable->susceptible
+suseptible->susceptible
+suspention->suspension
+swaer->swear
+swaers->swears
+swepth->swept
+swiming->swimming
+syas->says
+symetrical->symmetrical
+symetrically->symmetrically
+symetry->symmetry
+symettric->symmetric
+symmetral->symmetric
+symmetricaly->symmetrically
+synagouge->synagogue
+syncronization->synchronization
+synonomous->synonymous
+synonymns->synonyms
+synphony->symphony
+syphyllis->syphilis
+sypmtoms->symptoms
+syrap->syrup
+sysmatically->systematically
+sytem->system
+sytle->style
+tabacco->tobacco
+tahn->than
+taht->that
+talekd->talked
+targetted->targeted
+targetting->targeting
+tast->taste
+tath->that
+tattooes->tattoos
+taxanomic->taxonomic
+taxanomy->taxonomy
+teached->taught
+techician->technician
+techicians->technicians
+techiniques->techniques
+technitian->technician
+technnology->technology
+technolgy->technology
+teh->the
+tehy->they
+telelevision->television
+televsion->television
+telphony->telephony
+temerature->temperature
+tempalte->template
+tempaltes->templates
+temparate->temperate
+temperarily->temporarily
+temperment->temperament
+tempertaure->temperature
+temperture->temperature
+temprary->temporary
+tenacle->tentacle
+tenacles->tentacles
+tendacy->tendency
+tendancies->tendencies
+tendancy->tendency
+tennisplayer->tennis player
+tepmorarily->temporarily
+terrestial->terrestrial
+terriories->territories
+terriory->territory
+territorist->terrorist
+territoy->territory
+terroist->terrorist
+testiclular->testicular
+tghe->the
+thast->that, that's,
+theather->theater
+theese->these
+theif->thief
+theives->thieves
+themselfs->themselves
+themslves->themselves
+ther->there, their, the,
+therafter->thereafter
+therby->thereby
+theri->their
+theyre->they're,
+thgat->that
+thge->the
+thier->their
+thign->thing
+thigns->things
+thigsn->things
+thikn->think
+thikning->thinking, thickening,
+thikns->thinks
+thiunk->think
+thn->then
+thna->than
+thne->then
+thnig->thing
+thnigs->things
+thoughout->throughout
+threatend->threatened
+threatning->threatening
+threee->three
+threshhold->threshold
+thrid->third
+throrough->thorough
+throughly->thoroughly
+throught->thought, through, throughout,
+througout->throughout
+ths->this
+thsi->this
+thsoe->those
+thta->that
+thyat->that
+tiem->time, item,
+tihkn->think
+tihs->this
+timne->time
+tiome->time, tome,
+tje->the
+tjhe->the
+tjpanishad->upanishad
+tkae->take
+tkaes->takes
+tkaing->taking
+tlaking->talking
+tobbaco->tobacco
+todays->today's, disable because of var names
+todya->today
+toghether->together
+toke->took
+tolerence->tolerance
+Tolkein->Tolkien
+tomatos->tomatoes
+tommorow->tomorrow
+tommorrow->tomorrow
+tongiht->tonight
+toriodal->toroidal
+tormenters->tormentors
+tornadoe->tornado
+torpeados->torpedoes
+torpedos->torpedoes
+tothe->to the
+toubles->troubles
+tounge->tongue
+tourch->torch, touch,
+towords->towards
+towrad->toward
+tradionally->traditionally
+traditionaly->traditionally
+traditionnal->traditional
+traditition->tradition
+tradtionally->traditionally
+trafficed->trafficked
+trafficing->trafficking
+trafic->traffic
+trancendent->transcendent
+trancending->transcending
+tranfer->transfer
+tranform->transform
+tranformed->transformed
+transcendance->transcendence
+transcendant->transcendent
+transcendentational->transcendental
+transcripting->transcribing, transcription,
+transending->transcending
+transesxuals->transsexuals
+transfered->transferred
+transfering->transferring
+transformaton->transformation
+transistion->transition
+translater->translator
+translaters->translators
+transmissable->transmissible
+transporation->transportation
+tremelo->tremolo
+tremelos->tremolos
+triguered->triggered
+triology->trilogy
+troling->trolling
+troup->troupe
+troups->troupes, troops,
+truely->truly
+trustworthyness->trustworthiness
+turnk->turnkey, trunk,
+Tuscon->Tucson
+tust->trust
+twelth->twelfth
+twon->town
+twpo->two
+tyhat->that
+tyhe->they
+typcial->typical
+typicaly->typically
+tyranies->tyrannies
+tyrany->tyranny
+tyrranies->tyrannies
+tyrrany->tyranny
+ubiquitious->ubiquitous
+ublisher->publisher
+uise->use
+Ukranian->Ukrainian
+ultimely->ultimately
+unacompanied->unaccompanied
+unahppy->unhappy
+unanymous->unanimous
+unathorised->unauthorised
+unavailible->unavailable
+unballance->unbalance
+unbeknowst->unbeknownst
+unbeleivable->unbelievable
+uncertainity->uncertainty
+unchallengable->unchallengeable
+unchangable->unchangeable
+uncompetive->uncompetitive
+unconcious->unconscious
+unconciousness->unconsciousness
+unconfortability->discomfort
+uncontitutional->unconstitutional
+unconvential->unconventional
+undecideable->undecidable
+understoon->understood
+undesireable->undesirable
+undetecable->undetectable
+undoubtely->undoubtedly
+undreground->underground
+uneccesary->unnecessary
+unecessary->unnecessary
+unequalities->inequalities
+unforetunately->unfortunately
+unforgetable->unforgettable
+unforgiveable->unforgivable
+unfortunatley->unfortunately
+unfortunatly->unfortunately
+unfourtunately->unfortunately
+unihabited->uninhabited
+unilateraly->unilaterally
+unilatreal->unilateral
+unilatreally->unilaterally
+uninterruped->uninterrupted
+uninterupted->uninterrupted
+UnitesStates->UnitedStates
+univeral->universal
+univeristies->universities
+univeristy->university
+univerity->university
+universtiy->university
+univesities->universities
+univesity->university
+unkonwn->unknown
+unkown->unknown
+unlikey->unlikely
+unmanouverable->unmaneuverable, unmanoeuvrable,
+unmistakeably->unmistakably
+unneccesarily->unnecessarily
+unneccesary->unnecessary
+unneccessarily->unnecessarily
+unneccessary->unnecessary
+unnecesarily->unnecessarily
+unnecesary->unnecessary
+unoffical->unofficial
+unoperational->nonoperational
+unoticeable->unnoticeable
+unplease->displease
+unplesant->unpleasant
+unprecendented->unprecedented
+unprecidented->unprecedented
+unregnized->unrecognized
+unrepentent->unrepentant
+unrepetant->unrepentant
+unrepetent->unrepentant
+unsed->unused
+unsubstanciated->unsubstantiated
+unsuccesful->unsuccessful
+unsuccesfully->unsuccessfully
+unsuccessfull->unsuccessful
+unsucesful->unsuccessful
+unsucesfuly->unsuccessfully
+unsucessful->unsuccessful
+unsucessfull->unsuccessful
+unsucessfully->unsuccessfully
+unsuprised->unsurprised
+unsuprising->unsurprising
+unsuprisingly->unsurprisingly
+unsuprized->unsurprised
+unsuprizing->unsurprising
+unsuprizingly->unsurprisingly
+unsurprized->unsurprised
+unsurprizing->unsurprising
+unsurprizingly->unsurprisingly
+untill->until
+untranslateable->untranslatable
+unuseable->unusable
+unusuable->unusable
+unviersity->university
+unwarrented->unwarranted
+unweildly->unwieldy
+unwieldly->unwieldy
+upcomming->upcoming
+upgradded->upgraded
+upto->up to
+usally->usually
+useage->usage
+usefull->useful
+usefuly->usefully
+useing->using
+usualy->usually
+ususally->usually
+vaccum->vacuum
+vaccume->vacuum
+vacinity->vicinity
+vaguaries->vagaries
+vaieties->varieties
+vailidty->validity
+valetta->valletta
+valuble->valuable
+valueable->valuable
+varables->variables
+varations->variations
+varient->variant
+variey->variety
+varing->varying
+varities->varieties
+varity->variety
+vasall->vassal
+vasalls->vassals
+vegatarian->vegetarian
+vegitable->vegetable
+vegitables->vegetables
+vegtable->vegetable
+vehicule->vehicle
+vell->well
+venemous->venomous
+vengance->vengeance
+vengence->vengeance
+verfication->verification
+verison->version
+verisons->versions
+vermillion->vermilion
+versitilaty->versatility
+versitlity->versatility
+vetween->between
+veyr->very
+vigeur->vigueur, vigour, vigor,
+vigilence->vigilance
+vigourous->vigorous
+vill->will
+villian->villain
+villification->vilification
+villify->vilify
+villin->villi, villain, villein,
+vincinity->vicinity
+violentce->violence
+virtualy->virtually
+virutal->virtual
+virutally->virtually
+visable->visible
+visably->visibly
+visting->visiting
+vistors->visitors
+vitories->victories
+volcanoe->volcano
+voleyball->volleyball
+volontary->voluntary
+volonteer->volunteer
+volonteered->volunteered
+volonteering->volunteering
+volonteers->volunteers
+volounteer->volunteer
+volounteered->volunteered
+volounteering->volunteering
+volounteers->volunteers
+volumne->volume
+vreity->variety
+vrey->very
+vriety->variety
+vulnerablility->vulnerability
+vyer->very
+vyre->very
+waht->what
+wanna->want to, disabled because one might want to allow informal pronunciation
+warantee->warranty
+wardobe->wardrobe
+warrent->warrant
+warrriors->warriors
+wasnt'->wasn't
+wasnt->wasn't
+wass->was
+watn->want
+wayword->wayward
+weaponary->weaponry
+weas->was
+wehn->when
+weild->wield, wild,
+weilded->wielded
+wendsay->Wednesday
+wensday->Wednesday
+wereabouts->whereabouts
+whant->want
+whants->wants
+whcih->which
+whenver->whenever
+wheras->whereas
+wherease->whereas
+whereever->wherever
+whic->which
+whihc->which
+whith->with
+whlch->which
+whn->when
+wholey->wholly
+wholy->wholly, holy,
+whta->what
+whther->whether
+wich->which
+widesread->widespread
+wief->wife
+wierd->weird
+wiew->view
+wih->with
+wiht->with
+wille->will
+willingless->willingness
+wirting->writing
+withdrawl->withdrawal, withdraw,
+witheld->withheld
+withh->with
+withing->within
+withold->withhold
+witht->with
+witn->with
+wiull->will
+wnat->want
+wnated->wanted
+wnats->wants
+wohle->whole
+wokr->work
+wokring->working
+wonderfull->wonderful
+wordlwide->worldwide
+workststion->workstation
+worls->world
+worstened->worsened
+woudl->would
+wresters->wrestlers
+wriet->write
+writen->written
+wroet->wrote
+wrok->work
+wroking->working
+wtih->with
+wupport->support
+xenophoby->xenophobia
+yaching->yachting
+yaer->year
+yaerly->yearly
+yaers->years
+yatch->yacht
+yearm->year
+yeasr->years
+yeild->yield
+yeilding->yielding
+Yementite->Yemenite, Yemeni,
+yera->year
+yeras->years
+yersa->years
+yotube->youtube
+youseff->yousef
+youself->yourself
+ytou->you
+yuo->you
+zeebra->zebra
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..c3d4404
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,93 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and an HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf build/*
+	-rm -rf source/auto_examples
+	-rm -rf source/generated
+	-rm -rf *.stc
+	-rm -rf *.fif
+	-rm -rf *.nii.gz
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+	@echo
+	@echo "Build finished. The HTML pages are in build/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) build/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in build/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in build/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in build/qthelp, like this:"
+	@echo "# qcollectiongenerator build/qthelp/MNE.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile build/qthelp/MNE.qhc"
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
+	@echo
+	@echo "The overview file is in build/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in build/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in build/doctest/output.txt."
diff --git a/doc/build_doc b/doc/build_doc
new file mode 100755
index 0000000..0c75cb2
--- /dev/null
+++ b/doc/build_doc
@@ -0,0 +1,16 @@
+#! /usr/bin/env python
+
+import os
+import sys
+
+args = sys.argv
+
+if '-h' in args:
+    print "build_doc [--nocfg] [--nosphinx] [--noweb]"
+    sys.exit(0)
+
+if "--nosphinx" not in args:
+    os.system("make html")
+
+# if "--noweb" not in args:
+#     os.system("scp -r build/html/* user at ftp.XXX.edu:pyfiff")
diff --git a/doc/source/_images/mne_helmet.png b/doc/source/_images/mne_helmet.png
new file mode 100644
index 0000000..bb21246
Binary files /dev/null and b/doc/source/_images/mne_helmet.png differ
diff --git a/doc/source/_images/plot_read_and_write_raw_data.png b/doc/source/_images/plot_read_and_write_raw_data.png
new file mode 100644
index 0000000..4b63526
Binary files /dev/null and b/doc/source/_images/plot_read_and_write_raw_data.png differ
diff --git a/doc/source/_images/plot_read_epochs.png b/doc/source/_images/plot_read_epochs.png
new file mode 100644
index 0000000..6484722
Binary files /dev/null and b/doc/source/_images/plot_read_epochs.png differ
diff --git a/doc/source/_images/plot_time_frequency.png b/doc/source/_images/plot_time_frequency.png
new file mode 100644
index 0000000..da7456b
Binary files /dev/null and b/doc/source/_images/plot_time_frequency.png differ
diff --git a/doc/source/_static/branch_dropdown.png b/doc/source/_static/branch_dropdown.png
new file mode 100644
index 0000000..1bb7a57
Binary files /dev/null and b/doc/source/_static/branch_dropdown.png differ
diff --git a/doc/source/_static/default.css b/doc/source/_static/default.css
new file mode 100755
index 0000000..4e9adfd
--- /dev/null
+++ b/doc/source/_static/default.css
@@ -0,0 +1,515 @@
+/**
+ * Alternate Sphinx design
+ * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
+ */
+
+body {
+    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+    font-size: 14px;
+    letter-spacing: -0.01em;
+    line-height: 150%;
+    text-align: center;
+    /*background-color: #AFC1C4; */
+    /*background-color: -moz-linear-gradient(linear, left top, left bottom, from(#660000), to(#000000));*/
+    background-color: #151515;
+    color: black;
+    padding: 0;
+    border: 1px solid #aaa;
+
+    margin: 0px 80px 0px 80px;
+    min-width: 740px;
+}
+
+a {
+    color: #330033;
+    text-decoration: none;
+}
+
+a:hover {
+    color: #99CCFF;
+}
+
+pre {
+    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.015em;
+    padding: 0.5em;
+    border: 1px solid #ccc;
+    background-color: #f8f8f8;
+}
+
+td.linenos pre {
+    padding: 0.5em 0;
+    border: 0;
+    background-color: #000000;
+    color: #aaa;
+}
+
+table.highlighttable {
+    margin-left: 0.5em;
+}
+
+table.highlighttable td {
+    padding: 0 0.5em 0 0.5em;
+}
+
+cite, code, tt {
+    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.01em;
+}
+
+hr {
+    border: 1px solid #abc;
+    margin: 2em;
+}
+
+tt {
+    background-color: #f2f2f2;
+    border-bottom: 1px solid #ddd;
+    color: #333;
+}
+
+tt.descname {
+    background-color: transparent;
+    font-weight: bold;
+    font-size: 1.2em;
+    border: 0;
+}
+
+tt.descclassname {
+    background-color: transparent;
+    border: 0;
+}
+
+tt.xref {
+    background-color: transparent;
+    font-weight: bold;
+    border: 0;
+}
+
+a tt {
+    background-color: transparent;
+    font-weight: bold;
+    border: 0;
+    color: #CA7900;
+}
+
+a tt:hover {
+    color: #2491CF;
+}
+
+dl {
+    margin-bottom: 15px;
+}
+
+dd p {
+    margin-top: 0px;
+}
+
+dd ul, dd table {
+    margin-bottom: 10px;
+}
+
+dd {
+    margin-top: 3px;
+    margin-bottom: 10px;
+    margin-left: 30px;
+}
+
+.refcount {
+    color: #060;
+}
+
+dt:target,
+.highlight {
+    background-color: #fbe54e;
+}
+
+dl.class, dl.function {
+    border-top: 2px solid #888;
+}
+
+dl.method, dl.attribute {
+    border-top: 1px solid #aaa;
+}
+
+dl.glossary dt {
+    font-weight: bold;
+    font-size: 1.1em;
+}
+
+pre {
+    line-height: 120%;
+}
+
+pre a {
+    color: inherit;
+    text-decoration: underline;
+}
+
+.first {
+    margin-top: 0 !important;
+}
+
+div.document {
+    background-color: white;
+    text-align: left;
+    background-image: url(contents.png);
+    background-repeat: repeat-x;
+}
+
+/*
+div.documentwrapper {
+    width: 100%;
+}
+*/
+
+div.clearer {
+    clear: both;
+}
+
+div.related h3 {
+    display: none;
+}
+
+div.related ul {
+    background-image: url(navigation.png);
+    height: 2em;
+    list-style: none;
+    border-top: 1px solid #ddd;
+    border-bottom: 1px solid #ddd;
+    margin: 0;
+    padding-left: 10px;
+}
+
+div.related ul li {
+    margin: 0;
+    padding: 0;
+    height: 2em;
+    float: left;
+}
+
+div.related ul li.right {
+    float: right;
+    margin-right: 5px;
+}
+
+div.related ul li a {
+    margin: 0;
+    padding: 0 5px 0 5px;
+    line-height: 1.75em;
+    color: #FFFFFF;
+}
+
+div.related ul li a:hover {
+    color: #C0C0C0;
+}
+
+div.body {
+    margin: 0;
+    padding: 0.5em 20px 20px 20px;
+}
+
+div.bodywrapper {
+    margin: 0 240px 0 0;
+    border-right: 1px solid #ccc;
+}
+
+div.body a {
+    text-decoration: underline;
+}
+
+div.sphinxsidebar {
+    margin: 0;
+    padding: 0.5em 15px 15px 0;
+    width: 210px;
+    float: right;
+    text-align: left;
+/*    margin-left: -100%; */
+}
+
+div.sphinxsidebar h4, div.sphinxsidebar h3 {
+    margin: 1em 0 0.5em 0;
+    font-size: 0.9em;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: white;
+    border: 1px solid #86989B;
+    background-color: #C0C0C0;
+}
+
+div.sphinxsidebar ul {
+    padding-left: 1.5em;
+    margin-top: 7px;
+    list-style: none;
+    padding: 0;
+    line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+    list-style: square;
+    margin-left: 20px;
+}
+
+p {
+    margin: 0.8em 0 0.5em 0;
+}
+
+p.rubric {
+    font-weight: bold;
+}
+
+h1 {
+    margin: 0;
+    padding: 0.7em 0 0.3em 0;
+    font-size: 1.5em;
+    color: #11557C;
+}
+
+h2 {
+    margin: 1.3em 0 0.2em 0;
+    font-size: 1.35em;
+    padding: 0;
+}
+
+h3 {
+    margin: 1em 0 -0.3em 0;
+    font-size: 1.2em;
+}
+
+h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
+    color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+    display: none;
+    margin: 0 0 0 0.3em;
+    padding: 0 0.2em 0 0.2em;
+    color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+    display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+    color: #777;
+    background-color: #eee;
+}
+
+table {
+    border-collapse: collapse;
+    margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+    padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.footer {
+    background-color: #C0C0C0;
+    color: #000000;
+    padding: 3px 8px 3px 0;
+    clear: both;
+    font-size: 0.8em;
+    text-align: right;
+}
+
+div.footer a {
+    color: #000000;
+    text-decoration: underline;
+}
+
+div.pagination {
+    margin-top: 2em;
+    padding-top: 0.5em;
+    border-top: 1px solid black;
+    text-align: center;
+}
+
+div.sphinxsidebar ul.toc {
+    margin: 1em 0 1em 0;
+    padding: 0 0 0 0.5em;
+    list-style: none;
+}
+
+div.sphinxsidebar ul.toc li {
+    margin: 0.5em 0 0.5em 0;
+    font-size: 0.9em;
+    line-height: 130%;
+}
+
+div.sphinxsidebar ul.toc li p {
+    margin: 0;
+    padding: 0;
+}
+
+div.sphinxsidebar ul.toc ul {
+    margin: 0.2em 0 0.2em 0;
+    padding: 0 0 0 1.8em;
+}
+
+div.sphinxsidebar ul.toc ul li {
+    padding: 0;
+}
+
+div.admonition, div.warning {
+    font-size: 0.9em;
+    margin: 1em 0 0 0;
+    border: 1px solid #86989B;
+    background-color: #f7f7f7;
+}
+
+div.admonition p, div.warning p {
+    margin: 0.5em 1em 0.5em 1em;
+    padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+    margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+    margin: 0;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: white;
+    border-bottom: 1px solid #86989B;
+    font-weight: bold;
+    background-color: #AFC1C4;
+}
+
+div.warning {
+    border: 1px solid #000000;
+}
+
+div.warning p.admonition-title {
+    background-color: #000000;
+    border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+    margin: 0.1em 0.5em 0.5em 3em;
+    padding: 0;
+}
+
+div.versioninfo {
+    margin: 1em 0 0 0;
+    border: 1px solid #ccc;
+    background-color: #DDEAF0;
+    padding: 8px;
+    line-height: 1.3em;
+    font-size: 0.9em;
+}
+
+
+a.headerlink {
+    color: #c60f0f!important;
+    font-size: 1em;
+    margin-left: 6px;
+    padding: 0 4px 0 4px;
+    text-decoration: none!important;
+    visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+    visibility: visible;
+}
+
+a.headerlink:hover {
+    background-color: #ccc;
+    color: white!important;
+}
+
+table.indextable td {
+    text-align: left;
+    vertical-align: top;
+}
+
+table.indextable dl, table.indextable dd {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+
+table.indextable tr.pcap {
+    height: 10px;
+}
+
+table.indextable tr.cap {
+    margin-top: 10px;
+    background-color: #f2f2f2;
+}
+
+img.toggler {
+    margin-right: 3px;
+    margin-top: 3px;
+    cursor: pointer;
+}
+
+img.inheritance {
+    border: 0px
+}
+
+form.pfform {
+    margin: 10px 0 20px 0;
+}
+
+table.contentstable {
+    width: 90%;
+}
+
+table.contentstable p.biglink {
+    line-height: 150%;
+}
+
+a.biglink {
+    font-size: 1.3em;
+}
+
+span.linkdescr {
+    font-style: italic;
+    padding-top: 5px;
+    font-size: 90%;
+}
+
+ul.search {
+    margin: 10px 0 0 20px;
+    padding: 0;
+}
+
+ul.search li {
+    padding: 5px 0 5px 20px;
+    background-image: url(file.png);
+    background-repeat: no-repeat;
+    background-position: 0 7px;
+}
+
+ul.search li a {
+    font-weight: bold;
+}
+
+ul.search li div.context {
+    color: #888;
+    margin: 2px 0 0 30px;
+    text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+    font-weight: bold;
+}
+
+div.social-button {
+    float: left;
+    width: 120px;
+    height: 28px;
+}
+
diff --git a/doc/source/_static/favicon.ico b/doc/source/_static/favicon.ico
new file mode 100755
index 0000000..40dc94f
Binary files /dev/null and b/doc/source/_static/favicon.ico differ
diff --git a/doc/source/_static/forking_button.png b/doc/source/_static/forking_button.png
new file mode 100644
index 0000000..d0e0413
Binary files /dev/null and b/doc/source/_static/forking_button.png differ
diff --git a/doc/source/_static/logo.png b/doc/source/_static/logo.png
new file mode 100644
index 0000000..4b86571
Binary files /dev/null and b/doc/source/_static/logo.png differ
diff --git a/doc/source/_static/navy.css b/doc/source/_static/navy.css
new file mode 100755
index 0000000..04912f9
--- /dev/null
+++ b/doc/source/_static/navy.css
@@ -0,0 +1,515 @@
+/**
+ * Alternate Sphinx design
+ * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
+ */
+
+body {
+    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+    font-size: 14px;
+    letter-spacing: -0.01em;
+    line-height: 150%;
+    text-align: center;
+    /*background-color: #AFC1C4; */
+    /*background-color: -moz-linear-gradient(linear, left top, left bottom, from(#660000), to(#000000));*/
+/*    background-color: #151515;*/
+    background-color: #11557C;
+    color: black;
+    padding: 0;
+    border: 1px solid #aaa;
+
+    margin: 0px 80px 0px 80px;
+    min-width: 740px;
+}
+
+a {
+    color: #330033;
+    text-decoration: none;
+}
+
+a:hover {
+    color: #99CCFF;
+}
+
+pre {
+    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.015em;
+    padding: 0.5em;
+    border: 1px solid #ccc;
+    background-color: #f8f8f8;
+}
+
+td.linenos pre {
+    padding: 0.5em 0;
+    border: 0;
+    background-color: #000000;
+    color: #aaa;
+}
+
+table.highlighttable {
+    margin-left: 0.5em;
+}
+
+table.highlighttable td {
+    padding: 0 0.5em 0 0.5em;
+}
+
+cite, code, tt {
+    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.01em;
+}
+
+hr {
+    border: 1px solid #abc;
+    margin: 2em;
+}
+
+tt {
+    background-color: #f2f2f2;
+    border-bottom: 1px solid #ddd;
+    color: #333;
+}
+
+tt.descname {
+    background-color: transparent;
+    font-weight: bold;
+    font-size: 1.2em;
+    border: 0;
+}
+
+tt.descclassname {
+    background-color: transparent;
+    border: 0;
+}
+
+tt.xref {
+    background-color: transparent;
+    font-weight: bold;
+    border: 0;
+}
+
+a tt {
+    background-color: transparent;
+    font-weight: bold;
+    border: 0;
+    color: #CA7900;
+}
+
+a tt:hover {
+    color: #2491CF;
+}
+
+dl {
+    margin-bottom: 15px;
+}
+
+dd p {
+    margin-top: 0px;
+}
+
+dd ul, dd table {
+    margin-bottom: 10px;
+}
+
+dd {
+    margin-top: 3px;
+    margin-bottom: 10px;
+    margin-left: 30px;
+}
+
+.refcount {
+    color: #060;
+}
+
+dt:target,
+.highlight {
+    background-color: #fbe54e;
+}
+
+dl.class, dl.function {
+    border-top: 2px solid #888;
+}
+
+dl.method, dl.attribute {
+    border-top: 1px solid #aaa;
+}
+
+dl.glossary dt {
+    font-weight: bold;
+    font-size: 1.1em;
+}
+
+pre {
+    line-height: 120%;
+}
+
+pre a {
+    color: inherit;
+    text-decoration: underline;
+}
+
+.first {
+    margin-top: 0 !important;
+}
+
+div.document {
+    background-color: white;
+    text-align: left;
+    background-image: url(contents.png);
+    background-repeat: repeat-x;
+}
+
+/*
+div.documentwrapper {
+    width: 100%;
+}
+*/
+
+div.clearer {
+    clear: both;
+}
+
+div.related h3 {
+    display: none;
+}
+
+div.related ul {
+    background-image: url(navigation.png);
+    height: 2em;
+    list-style: none;
+    border-top: 1px solid #ddd;
+    border-bottom: 1px solid #ddd;
+    margin: 0;
+    padding-left: 10px;
+}
+
+div.related ul li {
+    margin: 0;
+    padding: 0;
+    height: 2em;
+    float: left;
+}
+
+div.related ul li.right {
+    float: right;
+    margin-right: 5px;
+}
+
+div.related ul li a {
+    margin: 0;
+    padding: 0 5px 0 5px;
+    line-height: 1.75em;
+    color: #330033;
+}
+
+div.related ul li a:hover {
+    color: #C0C0C0;
+}
+
+div.body {
+    margin: 0;
+    padding: 0.5em 20px 20px 20px;
+}
+
+div.bodywrapper {
+    margin: 0 240px 0 0;
+    border-right: 1px solid #ccc;
+}
+
+div.body a {
+    text-decoration: underline;
+}
+
+div.sphinxsidebar {
+    margin: 0;
+    padding: 0.5em 15px 15px 0;
+    width: 210px;
+    float: right;
+    text-align: left;
+/*    margin-left: -100%; */
+}
+
+div.sphinxsidebar h4, div.sphinxsidebar h3 {
+    margin: 1em 0 0.5em 0;
+    font-size: 0.9em;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: white;
+    border: 1px solid #86989B;
+    background-color: #C0C0C0;
+}
+
+div.sphinxsidebar ul {
+    padding-left: 1.5em;
+    margin-top: 7px;
+    list-style: none;
+    padding: 0;
+    line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+    list-style: square;
+    margin-left: 20px;
+}
+
+p {
+    margin: 0.8em 0 0.5em 0;
+}
+
+p.rubric {
+    font-weight: bold;
+}
+
+h1 {
+    margin: 0;
+    padding: 0.7em 0 0.3em 0;
+    font-size: 1.5em;
+    color: #11557C;
+}
+
+h2 {
+    margin: 1.3em 0 0.2em 0;
+    font-size: 1.35em;
+    padding: 0;
+}
+
+h3 {
+    margin: 1em 0 -0.3em 0;
+    font-size: 1.2em;
+}
+
+h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
+    color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+    display: none;
+    margin: 0 0 0 0.3em;
+    padding: 0 0.2em 0 0.2em;
+    color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+    display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+    color: #777;
+    background-color: #eee;
+}
+
+table {
+    border-collapse: collapse;
+    margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+    padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.footer {
+    background-color: #C0C0C0;
+    color: #000000;
+    padding: 3px 8px 3px 0;
+    clear: both;
+    font-size: 0.8em;
+    text-align: right;
+}
+
+div.footer a {
+    color: #000000;
+    text-decoration: underline;
+}
+
+div.pagination {
+    margin-top: 2em;
+    padding-top: 0.5em;
+    border-top: 1px solid black;
+    text-align: center;
+}
+
+div.sphinxsidebar ul.toc {
+    margin: 1em 0 1em 0;
+    padding: 0 0 0 0.5em;
+    list-style: none;
+}
+
+div.sphinxsidebar ul.toc li {
+    margin: 0.5em 0 0.5em 0;
+    font-size: 0.9em;
+    line-height: 130%;
+}
+
+div.sphinxsidebar ul.toc li p {
+    margin: 0;
+    padding: 0;
+}
+
+div.sphinxsidebar ul.toc ul {
+    margin: 0.2em 0 0.2em 0;
+    padding: 0 0 0 1.8em;
+}
+
+div.sphinxsidebar ul.toc ul li {
+    padding: 0;
+}
+
+div.admonition, div.warning {
+    font-size: 0.9em;
+    margin: 1em 0 0 0;
+    border: 1px solid #86989B;
+    background-color: #f7f7f7;
+}
+
+div.admonition p, div.warning p {
+    margin: 0.5em 1em 0.5em 1em;
+    padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+    margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+    margin: 0;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: white;
+    border-bottom: 1px solid #86989B;
+    font-weight: bold;
+    background-color: #05804A;
+}
+
+div.warning {
+    border: 1px solid #000000;
+}
+
+div.warning p.admonition-title {
+    background-color: #940000;
+    border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+    margin: 0.1em 0.5em 0.5em 3em;
+    padding: 0;
+}
+
+div.versioninfo {
+    margin: 1em 0 0 0;
+    border: 1px solid #ccc;
+    background-color: #DDEAF0;
+    padding: 8px;
+    line-height: 1.3em;
+    font-size: 0.9em;
+}
+
+
+a.headerlink {
+    color: #c60f0f!important;
+    font-size: 1em;
+    margin-left: 6px;
+    padding: 0 4px 0 4px;
+    text-decoration: none!important;
+    visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+    visibility: visible;
+}
+
+a.headerlink:hover {
+    background-color: #ccc;
+    color: white!important;
+}
+
+table.indextable td {
+    text-align: left;
+    vertical-align: top;
+}
+
+table.indextable dl, table.indextable dd {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+
+table.indextable tr.pcap {
+    height: 10px;
+}
+
+table.indextable tr.cap {
+    margin-top: 10px;
+    background-color: #f2f2f2;
+}
+
+img.toggler {
+    margin-right: 3px;
+    margin-top: 3px;
+    cursor: pointer;
+}
+
+img.inheritance {
+    border: 0px
+}
+
+form.pfform {
+    margin: 10px 0 20px 0;
+}
+
+table.contentstable {
+    width: 90%;
+}
+
+table.contentstable p.biglink {
+    line-height: 150%;
+}
+
+a.biglink {
+    font-size: 1.3em;
+}
+
+span.linkdescr {
+    font-style: italic;
+    padding-top: 5px;
+    font-size: 90%;
+}
+
+ul.search {
+    margin: 10px 0 0 20px;
+    padding: 0;
+}
+
+ul.search li {
+    padding: 5px 0 5px 20px;
+    background-image: url(file.png);
+    background-repeat: no-repeat;
+    background-position: 0 7px;
+}
+
+ul.search li a {
+    font-weight: bold;
+}
+
+ul.search li div.context {
+    color: #888;
+    margin: 2px 0 0 30px;
+    text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+    font-weight: bold;
+}
+
+div.social-button {
+    float: left;
+    width: 120px;
+    height: 28px;
+}
diff --git a/doc/source/_static/pull_button.png b/doc/source/_static/pull_button.png
new file mode 100644
index 0000000..e503168
Binary files /dev/null and b/doc/source/_static/pull_button.png differ
diff --git a/doc/source/_templates/class.rst b/doc/source/_templates/class.rst
new file mode 100644
index 0000000..6e17cfa
--- /dev/null
+++ b/doc/source/_templates/class.rst
@@ -0,0 +1,12 @@
+{{ fullname }}
+{{ underline }}
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+
+   {% block methods %}
+   .. automethod:: __init__
+   {% endblock %}
+
+
diff --git a/doc/source/_templates/function.rst b/doc/source/_templates/function.rst
new file mode 100644
index 0000000..317222f
--- /dev/null
+++ b/doc/source/_templates/function.rst
@@ -0,0 +1,8 @@
+{{ fullname }}
+{{ underline }}
+
+.. currentmodule:: {{ module }}
+
+.. autofunction:: {{ objname }}
+
+
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
new file mode 100755
index 0000000..8bbdd51
--- /dev/null
+++ b/doc/source/_templates/layout.html
@@ -0,0 +1,59 @@
+{% extends "!layout.html" %}
+
+{% block extrahead %}
+{% if use_google_analytics|tobool %}
+    <script type="text/javascript">
+    var _gaq = _gaq || [];
+    _gaq.push(['_setAccount', 'UA-37225609-1']);
+    _gaq.push(['_trackPageview']);
+
+    (function() {
+        var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
+        ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
+        var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
+    })();
+    </script>
+{% endif %}
+
+{% if (use_twitter|tobool) or (use_media_buttons|tobool) %}
+    <script type="text/javascript">
+    !function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s);
+    js.id=id;js.src="http://platform.twitter.com/widgets.js";
+    fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");
+    </script>
+{% endif %}
+
+{% if use_media_buttons|tobool %}
+    <script type="text/javascript">
+    (function() {
+    var po = document.createElement('script'); po.type = 'text/javascript'; po.async = true;
+    po.src = 'https://apis.google.com/js/plusone.js';
+    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
+    })();
+    </script>
+{% endif %}
+{% endblock %}
+
+
+{% block relbar2 %}{% endblock %}
+
+{% block rootrellink %}
+        <li><a href="{{ pathto('index') }}">Home</a> | </li>
+        <li><a href="{{ pathto('manual') }}">Manual</a> | </li>
+        <li><a href="{{ pathto('mne-python') }}">Python</a> | </li>
+        <!-- <li><a href="{{ pathto('search') }}">Search</a></li> -->
+{% endblock %}
+
+
+{% block relbar1 %}
+<div style="background-color: white; text-align: left; padding: 10px 10px 15px 15px">
+<a href="{{ pathto('index') }}"><img src="{{
+pathto("_static/logo.png", 1) }}" border="0" alt="py4sci"/></a>
+</div>
+{{ super() }}
+{% endblock %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
diff --git a/doc/source/command_line_tutorial.rst b/doc/source/command_line_tutorial.rst
new file mode 100644
index 0000000..b66b2a2
--- /dev/null
+++ b/doc/source/command_line_tutorial.rst
@@ -0,0 +1,100 @@
+.. _command_line_tutorial:
+
+=====================================
+Getting started with MNE command line
+=====================================
+
+This quick start guide shows how to run a standard processing pipeline on the
+sample data set provided with MNE. XXX add link to data set download
+
+First define your subject::
+
+    export SUBJECT=sample
+
+Build your source space::
+
+    # MRI (this is not really needed for anything)
+    mne_setup_mri --overwrite
+
+    # Source space
+    mne_setup_source_space --ico -6 --overwrite
+
+Prepare for forward computation::
+
+    # For homogeneous volume conductor
+    mne_setup_forward_model --homog --surf --ico 4
+
+    # or for XXX
+    mne_setup_forward_model --surf --ico 4
+
+List your bad channels in a file. For example, a file named
+``sample_bads.bad`` could contain::
+
+    MEG 2443
+    EEG 053
+
+Mark bad channels::
+
+    mne_mark_bad_channels --bad sample_bads.bad sample_audvis_raw.fif
+
+Average the data::
+
+    mne_process_raw --raw sample_audvis_raw.fif --lowpass 40 --projoff \
+            --saveavetag -ave --ave audvis.ave
+
+Compute the noise covariance matrix::
+
+    mne_process_raw --raw sample_audvis_raw.fif --lowpass 40 --projoff \
+            --savecovtag -cov --cov audvis.cov
+
+Compute the forward solution, a.k.a. the lead field::
+
+    # for MEG only
+    mne_do_forward_solution --mindist 5 --spacing oct-6 \
+        --meas sample_audvis_raw.fif --bem sample-5120 --megonly --overwrite \
+        --fwd sample_audvis-meg-oct-6-fwd.fif
+
+    # for EEG only
+    mne_do_forward_solution --mindist 5 --spacing oct-6 \
+        --meas sample_audvis_raw.fif --bem sample-5120-5120-5120 --eegonly \
+        --fwd sample_audvis-eeg-oct-6-fwd.fif
+
+    # for both EEG and MEG
+    mne_do_forward_solution --mindist 5 --spacing oct-6 \
+        --meas sample_audvis_raw.fif --bem sample-5120-5120-5120 \
+        --fwd sample_audvis-meg-eeg-oct-6-fwd.fif
+
+Compute MNE inverse operators::
+
+    # Note: The MEG/EEG forward solution could be used for all
+    mne_do_inverse_operator --fwd sample_audvis-meg-oct-6-fwd.fif \
+            --depth --loose 0.2 --meg
+
+    mne_do_inverse_operator --fwd sample_audvis-eeg-oct-6-fwd.fif \
+            --depth --loose 0.2 --eeg
+
+    mne_do_inverse_operator --fwd sample_audvis-meg-eeg-oct-6-fwd.fif \
+            --depth --loose 0.2 --eeg --meg
+
+Produce stc files (activation files)::
+
+    # for MEG
+    mne_make_movie --inv sample_audvis-meg-oct-6-${mod}-inv.fif \
+        --meas sample_audvis-ave.fif \
+        --tmin 0 --tmax 250 --tstep 10 --spm \
+        --smooth 5 --bmin -100 --bmax 0 --stc sample_audvis-meg
+
+    # for EEG
+    mne_make_movie --inv sample_audvis-eeg-oct-6-${mod}-inv.fif \
+        --meas sample_audvis-ave.fif \
+        --tmin 0 --tmax 250 --tstep 10 --spm \
+        --smooth 5 --bmin -100 --bmax 0 --stc sample_audvis-eeg
+
+    # for MEG and EEG combined
+    mne_make_movie --inv sample_audvis-meg-eeg-oct-6-${mod}-inv.fif \
+        --meas sample_audvis-ave.fif \
+        --tmin 0 --tmax 250 --tstep 10 --spm \
+        --smooth 5 --bmin -100 --bmax 0 --stc sample_audvis-meg-eeg
+
+And, we're done!
+
+You can now get started with the Python :ref:`examples-index`.
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..b5c406b
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+#
+# MNE documentation build configuration file, created by
+# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.append(os.path.abspath('../../mne'))
+sys.path.append(os.path.abspath('../sphinxext'))
+
+import mne
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
+              'sphinx.ext.pngmath',
+              'gen_rst']
+
+try:
+    import numpy_ext.numpydoc
+    extensions.append('numpy_ext.numpydoc')
+    # With older versions of sphinx, this causes a crash
+    autosummary_generate = True
+except:
+    # Older version of sphinx
+    extensions.append('numpy_ext_old.numpydoc')
+
+autodoc_default_flags = ['inherited-members']
+
+# extensions = ['sphinx.ext.autodoc',
+#               'sphinx.ext.doctest',
+#               'sphinx.ext.todo',
+#               'sphinx.ext.pngmath',
+#               'sphinx.ext.inheritance_diagram',
+#               'numpydoc',
+#               'ipython_console_highlighting',
+#               'only_directives']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# Generate the plots for the gallery
+plot_gallery = True
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'MNE'
+copyright = u'2012-2013, MNE Developers'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = mne.__version__
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+unused_docs = ['config_doc.rst']
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+modindex_common_prefix = ['mne.']
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'sphinxdoc'
+html_style = 'navy.css'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = ['themes']
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = "favicon.ico"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static', '_images']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+html_use_modindex = False
+
+# If false, no index is generated.
+html_use_index = False
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# variables to pass to HTML templating engine
+html_context = {'use_google_analytics':True, 'use_twitter':True,
+                'use_media_buttons':True}
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'mne-doc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'MNE.tex', u'MNE Manual',
+   u'MNE Contributors', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+latex_logo = "_static/logo.png"
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+latex_use_parts = True
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+latex_use_modindex = True
+
+
+trim_doctests_flags = True
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
new file mode 100644
index 0000000..8e9b86c
--- /dev/null
+++ b/doc/source/contributing.rst
@@ -0,0 +1,649 @@
+.. _using-git:
+
+
+Contributing to *mne-python* source code
+========================================
+
+We are open to all types of contributions, from bugfixes to functionality
+enhancements. mne-python_ is meant to be maintained by a community of labs,
+and as such, we seek enhancements that are likely to benefit a large
+proportion of the package's users.
+
+*Before starting new code*, we highly recommend opening an issue on
+`mne-python GitHub`_ to discuss potential changes. Getting on the same
+page as the maintainers about changes or enhancements before too much
+coding is done saves everyone time and effort!
+
+General code guidelines
+-----------------------
+
+* We highly recommend using a code editor that checks your code with both
+  `pep8`_ and `pyflakes`_, such as `spyder`_. Standard Python style guidelines
+  are followed, with very few exceptions.
+
+  You can also check for pyflakes and pep8 warnings manually::
+
+       pip install pyflakes
+       pip install pep8
+       pyflakes path/to/module.py
+       pep8 path/to/module.py
+
+  autopep8 can then help you fix some of the simpler, repetitive errors::
+
+       pip install autopep8
+       autopep8 path/to/pep8.py
+
+* mne-python adheres to the same docstring formatting as seen on
+  `numpy style`_.
+  New public functions should have all parameters and return values
+  documented in the docstring.
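+
+  A minimal numpy-style docstring might look like this (the function and
+  parameter names here are purely illustrative)::
+
+     def band_pass(data, l_freq, h_freq):
+         """Band-pass filter the data.
+
+         Parameters
+         ----------
+         data : array, shape (n_channels, n_times)
+             The data to filter.
+         l_freq : float
+             Low cutoff frequency in Hz.
+         h_freq : float
+             High cutoff frequency in Hz.
+
+         Returns
+         -------
+         data_filtered : array, shape (n_channels, n_times)
+             The filtered data.
+         """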
+
+* New functionality should be covered by appropriate tests, e.g. a method in
+  ``mne/fiff/raw.py`` should have a corresponding test in
+  ``mne/fiff/tests/test_raw.py``. You can use the ``coverage`` module in
+  conjunction with ``nosetests`` (nose can automatically determine the code
+  coverage if ``coverage`` is installed) to see how well new code is covered.
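+
+  For example, to check the coverage of the raw I/O tests (assuming ``nose``
+  and ``coverage`` are installed; exact options can vary across versions)::
+
+     nosetests --with-coverage --cover-package=mne mne/fiff/tests/test_raw.py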
+
+* After changes have been made, **ensure all tests pass**. This can be done
+  by running the following from the ``mne-python`` root directory::
+
+     make
+
+  To run individual tests, you can also run any of the following::
+
+     make clean
+     make inplace
+     make test-doc
+     nosetests
+
+  Note that the first time this is run, the `mne-python sample dataset`_
+  (~1.2 GB) will be downloaded to the root directory and extracted. This is
+  necessary for running some of the tests and nearly all of the examples.
+
+  You can also run ``nosetests -x`` to have nose stop as soon as a failed
+  test is found, or run e.g., ``nosetests mne/fiff/tests/test_raw.py`` to run
+  a specific test.
+
+Configuring git
+---------------
+
+Any contributions to the core mne-python package, whether bug fixes,
+improvements to the documentation, or new functionality, can be done via
+*pull requests* on GitHub. The workflow for this is described here.
+[Many thanks to Astropy_ for providing clear instructions that we have
+adapted for our use here!]
+
+The only absolutely necessary configuration step is identifying yourself and
+your contact info::
+
+     git config --global user.name "Your Name"
+     git config --global user.email you@yourdomain.example.com
+
+If you are going to :ref:`setup-github` eventually, this email address should
+be the same as the one used to sign up for a GitHub account. For more
+information about configuring your git installation, see
+:ref:`customizing-git`.
+
+The following sections cover the installation of the git software, the basic
+configuration, and links to resources to learn more about using git.
+However, you can also directly go to the `GitHub help pages
+<http://help.github.com/>`_ which offer a great introduction to git and
+GitHub.
+
+In the present document, we refer to the mne-python ``master`` branch as the
+*trunk*.
+
+.. _forking:
+
+Creating a fork
+^^^^^^^^^^^^^^^
+
+You need to do this only once for each package you want to contribute to. The
+instructions here are very similar to the instructions at
+http://help.github.com/fork-a-repo/ |emdash| please see that page for more
+details. We're repeating some of it here just to give the specifics for the
+mne-python_ project, and to suggest some default names.
+
+.. _setup-github:
+
+Set up and configure a GitHub account
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you don't have a GitHub account, go to the GitHub page, and make one.
+
+You then need to configure your account to allow write access |emdash| see
+the *Generating SSH keys* help on `GitHub Help`_.
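+
+A typical key-generation command is shown below (just a sketch |emdash| see
+the GitHub help for the full procedure, including adding the public key to
+your account)::
+
+   ssh-keygen -t rsa -C "you@yourdomain.example.com"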
+
+Create your own fork of a repository
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Now you should fork the core ``mne-python`` repository (although you could
+in principle also fork a different one, such as ``mne-matlab``):
+
+#. Log into your GitHub account.
+
+#. Go to the `mne-python GitHub`_ home.
+
+#. Click on the *fork* button:
+
+   .. image:: _static/forking_button.png
+
+   Now, after a short pause and some 'Hardcore forking action', you should
+   find yourself at the home page for your own forked copy of mne-python_.
+
+Setting up the fork to work on
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Briefly, this is done using::
+
+    git clone git@github.com:your-user-name/mne-python.git
+    cd mne-python
+    git remote add upstream git://github.com/mne-tools/mne-python.git
+
+These steps can be broken out to be more explicit as:
+
+#. Clone your fork to the local computer::
+
+    git clone git@github.com:your-user-name/mne-python.git
+
+#. Change directory to your new repo::
+
+    cd mne-python
+
+   Then type::
+
+    git branch -a
+
+   to show you all branches.  You'll get something like::
+
+    * master
+    remotes/origin/master
+
+   This tells you that you are currently on the ``master`` branch, and
+   that you also have a ``remote`` connection to ``origin/master``.
+   What remote repository is ``remotes/origin``? Try ``git remote -v`` to
+   see the URLs for the remote.  They will point to your GitHub fork.
+
+   Now you want to connect to the mne-python repository, so you can
+   merge in changes from the trunk::
+
+    cd mne-python
+    git remote add upstream git://github.com/mne-tools/mne-python.git
+
+   ``upstream`` here is just the arbitrary name we're using to refer to the
+   main mne-python_ repository.
+
+   Note that we've used ``git://`` for the URL rather than ``git@``. The
+   ``git://`` URL is read-only. This means that we can't accidentally (or
+   deliberately) write to the upstream repo, and we are only going to use it
+   to merge into our own code.
+
+   Just for your own satisfaction, show yourself that you now have a new
+   'remote', with ``git remote -v show``, giving you something like::
+
+    upstream   git://github.com/mne-tools/mne-python.git (fetch)
+    upstream   git://github.com/mne-tools/mne-python.git (push)
+    origin     git@github.com:your-user-name/mne-python.git (fetch)
+    origin     git@github.com:your-user-name/mne-python.git (push)
+
+   Your fork is now set up correctly, and you are ready to hack away.
+
+Workflow summary
+----------------
+
+This section gives a summary of the workflow once you have successfully forked
+the repository, and details are given for each of these steps in the following
+sections.
+
+* Don't use your ``master`` branch for anything.  Consider deleting it.
+
+* When you are starting a new set of changes, fetch any changes from the
+  trunk, and start a new *feature branch* from that.
+
+* Make a new branch for each separable set of changes |emdash| "one task, one
+  branch" (`ipython git workflow`_).
+
+* Name your branch for the purpose of the changes - e.g.
+  ``bugfix-for-issue-14`` or ``refactor-database-code``.
+
+* If you can possibly avoid it, avoid merging trunk or any other branches into
+  your feature branch while you are working.
+
+* If you do find yourself merging from the trunk, consider :ref:`rebase-on-trunk`.
+
+* **Ensure all tests still pass**
+
+* Ask for code review!
+
+This way of working helps to keep work well organized, with readable history.
+This in turn makes it easier for project maintainers (that might be you) to
+see what you've done, and why you did it.
+
+See `linux git workflow`_ and `ipython git workflow`_ for some explanation.
+
+Deleting your master branch
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It may sound strange, but deleting your own ``master`` branch can help reduce
+confusion about which branch you are on.  See `deleting master on github`_ for
+details.
+
+.. _update-mirror-trunk:
+
+Updating the mirror of trunk
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+From time to time you should fetch the upstream (trunk) changes from GitHub::
+
+   git fetch upstream
+
+This will pull down any commits you don't have, and set the remote branches to
+point to the right commit. For example, 'trunk' is the branch referred to by
+(remote/branchname) ``upstream/master`` - and if there have been commits since
+you last checked, ``upstream/master`` will change after you do the fetch.
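+
+If you want to see what the fetch brought in, one way (using only
+standard git commands) is to compare your local ``master`` with the
+updated ``upstream/master``::
+
+   git log --oneline master..upstream/master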
+
+.. _make-feature-branch:
+
+Making a new feature branch
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When you are ready to make some changes to the code, you should start a new
+branch. Branches that are for a collection of related edits are often called
+'feature branches'.
+
+Making a new branch for each set of related changes will make it easier for
+someone reviewing your branch to see what you are doing.
+
+Choose an informative name for the branch to remind yourself and the rest of
+us what the changes in the branch are for. For example ``add-ability-to-fly``,
+or ``bugfix-for-issue-42``.
+
+::
+
+    # Update the mirror of trunk
+    git fetch upstream
+
+    # Make new feature branch starting at current trunk
+    git branch my-new-feature upstream/master
+    git checkout my-new-feature
+
+Generally, you will want to keep your feature branches on your public GitHub_
+fork. To do this, you `git push`_ this new branch up to your
+github repo. Generally (if you followed the instructions in these pages, and
+by default), git will have a link to your GitHub repo, called ``origin``. You
+push up to your own repo on GitHub with::
+
+   git push origin my-new-feature
+
+In git > 1.7 you can ensure that the link is correctly set by using the
+``--set-upstream`` option::
+
+   git push --set-upstream origin my-new-feature
+
+From now on git will know that ``my-new-feature`` is related to the
+``my-new-feature`` branch in the GitHub repo.
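+
+You can verify the association at any time with, for example::
+
+   git remote show origin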
+
+.. _edit-flow:
+
+The editing workflow
+--------------------
+
+Overview
+^^^^^^^^
+
+::
+
+   git add my_new_file
+   git commit -am 'FIX: some message'
+   git push
+
+In more detail
+^^^^^^^^^^^^^^
+
+#. Make some changes
+
+#. See which files have changed with ``git status`` (see `git status`_).
+   You'll see a listing like this one::
+
+     # On branch my-new-feature
+     # Changed but not updated:
+     #   (use "git add <file>..." to update what will be committed)
+     #   (use "git checkout -- <file>..." to discard changes in working directory)
+     #
+     #    modified:   README
+     #
+     # Untracked files:
+     #   (use "git add <file>..." to include in what will be committed)
+     #
+     #    INSTALL
+     no changes added to commit (use "git add" and/or "git commit -a")
+
+#. Check what the actual changes are with ``git diff`` (`git diff`_).
+
+#. Add any new files to version control ``git add new_file_name`` (see
+   `git add`_).
+
+#. Add any modified files that you want to commit using
+   ``git add modified_file_name``  (see `git add`_).
+
+#. Once you are ready to commit, check with ``git status`` which files are
+   about to be committed::
+
+    # Changes to be committed:
+    #   (use "git reset HEAD <file>..." to unstage)
+    #
+    #    modified:   README
+
+   Then use ``git commit -m 'A commit message'``. The ``-m`` flag just
+   signals that you're going to type a message on the command line. The `git
+   commit`_ manual page might also be useful.
+
+   It is also good practice to prefix commits with the type of change, such as
+   ``FIX:``, ``STY:``, or ``ENH:`` for fixes, style changes, or enhancements.
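+
+   For example, a prefixed commit might look like this (the message
+   itself is only illustrative)::
+
+    git commit -m 'FIX: correct typo in channel name'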
+
+#. To push the changes up to your forked repo on GitHub, do a ``git
+   push`` (see `git push`_).
+
+Asking for your changes to be reviewed or merged
+------------------------------------------------
+
+When you are ready to ask for someone to review your code and consider a merge:
+
+#. Go to the URL of your forked repo, say
+   ``http://github.com/your-user-name/mne-python``.
+
+#. Use the 'Switch Branches' dropdown menu near the top left of the page to
+   select the branch with your changes:
+
+   .. image:: _static/branch_dropdown.png
+
+#. Click on the 'Pull request' button:
+
+   .. image:: _static/pull_button.png
+
+   Enter a title for the set of changes, and some explanation of what you've
+   done. Say if there is anything you'd like particular attention for - like a
+   complicated change or some code you are not happy with.
+
+   If you don't think your request is ready to be merged, prefix ``WIP:`` to
+   the title of the pull request, and note it also in your pull request
+   message. This is still a good way of getting some preliminary code review.
+   Submitting a pull request early on in feature development can save a great
+   deal of time for you, as the code maintainers may have "suggestions" about
+   how the code should be written (features, style, etc.) that are easier to
+   implement from the start.
+
+If you are uncertain about what would or would not be appropriate to contribute
+to mne-python, don't hesitate to either send a pull request, or open an issue
+on the mne-python_ GitHub site to discuss potential changes.
+
+Some other things you might want to do
+--------------------------------------
+
+Delete a branch on GitHub
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+   # change to the master branch (if you still have one, otherwise change to another branch)
+   git checkout master
+
+   # delete branch locally
+   git branch -D my-unwanted-branch
+
+   # delete branch on GitHub
+   git push origin :my-unwanted-branch
+
+(Note the colon ``:`` before ``my-unwanted-branch``. See also
+http://github.com/guides/remove-a-remote-branch.)
+
+Several people sharing a single repository
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you want to work on some stuff with other people, where you are all
+committing into the same repository, or even the same branch, then just
+share it via GitHub.
+
+First fork mne-python into your account, as from :ref:`forking`.
+
+Then, go to your forked repository GitHub page, say
+``http://github.com/your-user-name/mne-python``
+
+Click on the 'Admin' button, and add anyone else to the repo as a
+collaborator:
+
+   .. image:: _static/pull_button.png
+
+Now all those people can do::
+
+    git clone git@github.com:your-user-name/mne-python.git
+
+Remember that links starting with ``git@`` use the ssh protocol and are
+read-write; links starting with ``git://`` are read-only.
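+
+If someone cloned with the read-only ``git://`` URL by mistake, the
+remote can be repointed at the read-write URL (a sketch, using the
+placeholder user name from above)::
+
+    git remote set-url origin git@github.com:your-user-name/mne-python.git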
+
+Your collaborators can then commit directly into that repo with the
+usual::
+
+     git commit -am 'ENH: much better code'
+     git push origin master # pushes directly into your repo
+
+Explore your repository
+^^^^^^^^^^^^^^^^^^^^^^^
+
+To see a graphical representation of the repository branches and
+commits::
+
+   gitk --all
+
+To see a linear list of commits for this branch::
+
+   git log
+
+You can also look at the `network graph visualizer`_ for your GitHub
+repo.
+
+Finally the ``lg`` alias will give you a reasonable text-based graph of the
+repository.
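+
+The ``lg`` alias is not built into git; one possible definition, added
+with ``git config``, is::
+
+   git config --global alias.lg "log --oneline --graph --decorate"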
+
+If you are making extensive changes, ``git grep`` is also very handy.
+
+.. _rebase-on-trunk:
+
+Rebasing on trunk
+^^^^^^^^^^^^^^^^^
+
+Let's say you thought of some work you'd like to do. You
+:ref:`update-mirror-trunk` and :ref:`make-feature-branch` called
+``cool-feature``. At this stage trunk is at some commit, let's call it E. Now
+you make some new commits on your ``cool-feature`` branch, let's call them A,
+B, C. Maybe your changes take a while, or you come back to them after a while.
+In the meantime, trunk has progressed from commit E to commit (say) G::
+
+          A---B---C cool-feature
+         /
+    D---E---F---G trunk
+
+At this stage you consider merging trunk into your feature branch, and you
+remember that this here page sternly advises you not to do that, because the
+history will get messy. Most of the time you can just ask for a review, and
+not worry that trunk has got a little ahead. But sometimes, the changes in
+trunk might affect your changes, and you need to harmonize them. In this
+situation you may prefer to do a rebase.
+
+Rebase takes your changes (A, B, C) and replays them as if they had been made
+to the current state of ``trunk``. In other words, in this case, it takes the
+changes represented by A, B, C and replays them on top of G. After the rebase,
+your history will look like this::
+
+                  A'--B'--C' cool-feature
+                 /
+    D---E---F---G trunk
+
+See `rebase without tears`_ for more detail.
+
+To do a rebase on trunk::
+
+    # Update the mirror of trunk
+    git fetch upstream
+
+    # Go to the feature branch
+    git checkout cool-feature
+
+    # Make a backup in case you mess up
+    git branch tmp cool-feature
+
+    # Rebase cool-feature onto trunk
+    git rebase --onto upstream/master upstream/master cool-feature
+
+In this situation, where you are already on branch ``cool-feature``, the last
+command can be written more succinctly as::
+
+    git rebase upstream/master
+
+When all looks good you can delete your backup branch::
+
+   git branch -D tmp
+
+If it doesn't look good you may need to have a look at
+:ref:`recovering-from-mess-up`.
+
+If you have made changes to files that have also changed in trunk, this may
+generate merge conflicts that you need to resolve - see the `git rebase`_ man
+page for some instructions at the end of the "Description" section. There is
+some related help on merging in the git user manual - see `resolving a
+merge`_.
+
+If your feature branch is already on GitHub and you rebase, you will have to force
+push the branch; a normal push would give an error. If the branch you rebased is
+called ``cool-feature`` and your GitHub fork is available as the remote called ``origin``,
+you use this command to force-push::
+
+   git push -f origin cool-feature
+
+Note that this will overwrite the branch on GitHub, i.e. this is one of the few ways
+you can actually lose commits with git.
+Also note that it is never allowed to force push to the main mne-python repo (typically
+called ``upstream``), because this would re-write commit history and thus cause problems
+for all others.
+
+.. _recovering-from-mess-up:
+
+Recovering from mess-ups
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes, you mess up merges or rebases. Luckily, in git it is relatively
+straightforward to recover from such mistakes.
+
+If you mess up during a rebase::
+
+   git rebase --abort
+
+If you notice you messed up after the rebase::
+
+   # Reset branch back to the saved point
+   git reset --hard tmp
+
+If you forgot to make a backup branch::
+
+   # Look at the reflog of the branch
+   git reflog show cool-feature
+
+   8630830 cool-feature@{0}: commit: BUG: io: close file handles immediately
+   278dd2a cool-feature@{1}: rebase finished: refs/heads/cool-feature onto 11ee694744f2552d
+   26aa21a cool-feature@{2}: commit: BUG: lib: make seek_gzip_factory not leak gzip obj
+   ...
+
+   # Reset the branch to where it was before the botched rebase
+   git reset --hard cool-feature@{2}
+
+Otherwise, googling the issue may be helpful (especially links to Stack
+Overflow).
+
+.. _rewriting-commit-history:
+
+Rewriting commit history
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. note::
+
+   Do this only for your own feature branches.
+
+Is there an embarrassing typo in a commit you made? Or perhaps you made
+several false starts that you would like posterity not to see.
+
+This can be done via *interactive rebasing*.
+
+Suppose that the commit history looks like this::
+
+    git log --oneline
+    eadc391 Fix some remaining bugs
+    a815645 Modify it so that it works
+    2dec1ac Fix a few bugs + disable
+    13d7934 First implementation
+    6ad92e5 * masked is now an instance of a new object, MaskedConstant
+    29001ed Add pre-nep for a copule of structured_array_extensions.
+    ...
+
+and ``6ad92e5`` is the last commit in the ``cool-feature`` branch. Suppose we
+want to make the following changes:
+
+* Rewrite the commit message for ``13d7934`` to something more sensible.
+* Combine the commits ``2dec1ac``, ``a815645``, ``eadc391`` into a single one.
+
+We do as follows::
+
+    # make a backup of the current state
+    git branch tmp HEAD
+    # interactive rebase
+    git rebase -i 6ad92e5
+
+This will open an editor with the following text in it::
+
+    pick 13d7934 First implementation
+    pick 2dec1ac Fix a few bugs + disable
+    pick a815645 Modify it so that it works
+    pick eadc391 Fix some remaining bugs
+
+    # Rebase 6ad92e5..eadc391 onto 6ad92e5
+    #
+    # Commands:
+    #  p, pick = use commit
+    #  r, reword = use commit, but edit the commit message
+    #  e, edit = use commit, but stop for amending
+    #  s, squash = use commit, but meld into previous commit
+    #  f, fixup = like "squash", but discard this commit's log message
+    #
+    # If you remove a line here THAT COMMIT WILL BE LOST.
+    # However, if you remove everything, the rebase will be aborted.
+    #
+
+To achieve what we want, we will make the following changes to it::
+
+    r 13d7934 First implementation
+    pick 2dec1ac Fix a few bugs + disable
+    f a815645 Modify it so that it works
+    f eadc391 Fix some remaining bugs
+
+This means that (i) we want to edit the commit message for ``13d7934``, and
+(ii) collapse the last three commits into one. Now we save and quit the
+editor.
+
+Git will then immediately bring up an editor for editing the commit message.
+After revising it, we get the output::
+
+    [detached HEAD 721fc64] ENH: Sophisticated feature
+     2 files changed, 199 insertions(+), 66 deletions(-)
+    [detached HEAD 0f22701] Fix a few bugs + disable
+     1 files changed, 79 insertions(+), 61 deletions(-)
+    Successfully rebased and updated refs/heads/cool-feature.
+
+and the history looks now like this::
+
+     0f22701 Fix a few bugs + disable
+     721fc64 ENH: Sophisticated feature
+     6ad92e5 * masked is now an instance of a new object, MaskedConstant
+
+If it went wrong, recovery is again possible as explained :ref:`above
+<recovering-from-mess-up>`.
+
+.. include:: links.inc
diff --git a/doc/source/customizing_git.rst b/doc/source/customizing_git.rst
new file mode 100644
index 0000000..3e44b63
--- /dev/null
+++ b/doc/source/customizing_git.rst
@@ -0,0 +1,123 @@
+.. _customizing-git:
+
+=================
+ Customizing git
+=================
+
+.. _git-config-basic:
+
+Overview
+========
+
+Your personal git_ configurations are saved in the ``.gitconfig`` file in
+your home directory.
+Here is an example ``.gitconfig`` file::
+
+  [user]
+          name = Your Name
+          email = you@yourdomain.example.com
+
+  [alias]
+          ci = commit -a
+          co = checkout
+          st = status
+          stat = status
+          br = branch
+          wdiff = diff --color-words
+
+  [core]
+          editor = vim
+
+  [merge]
+          summary = true
+
+You can edit this file directly or you can use the ``git config --global``
+command::
+
+  git config --global user.name "Your Name"
+  git config --global user.email you@yourdomain.example.com
+  git config --global alias.ci "commit -a"
+  git config --global alias.co checkout
+  git config --global alias.st "status -a"
+  git config --global alias.stat "status -a"
+  git config --global alias.br branch
+  git config --global alias.wdiff "diff --color-words"
+  git config --global core.editor vim
+  git config --global merge.summary true
+
+To set up on another computer, you can copy your ``~/.gitconfig`` file,
+or run the commands above.
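+
+For example, to copy the file to another machine (the user and host
+names are illustrative)::
+
+  scp ~/.gitconfig you@otherhost.example.com: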
+
+In detail
+=========
+
+user.name and user.email
+------------------------
+
+It is good practice to tell git_ who you are, for labeling any changes
+you make to the code.  The simplest way to do this is from the command
+line::
+
+  git config --global user.name "Your Name"
+  git config --global user.email you@yourdomain.example.com
+
+This will write the settings into your git configuration file,  which
+should now contain a user section with your name and email::
+
+  [user]
+        name = Your Name
+        email = you@yourdomain.example.com
+
+Of course you'll need to replace ``Your Name`` and ``you@yourdomain.example.com``
+with your actual name and email address.
+
+Aliases
+-------
+
+You might well benefit from some aliases to common commands.
+
+For example, you might well want to be able to shorten ``git checkout``
+to ``git co``.  Or you may want to alias ``git diff --color-words``
+(which gives a nicely formatted output of the diff) to ``git wdiff``.
+
+The following ``git config --global`` commands::
+
+  git config --global alias.ci "commit -a"
+  git config --global alias.co checkout
+  git config --global alias.st "status -a"
+  git config --global alias.stat "status -a"
+  git config --global alias.br branch
+  git config --global alias.wdiff "diff --color-words"
+
+will create an ``alias`` section in your ``.gitconfig`` file with contents
+like this::
+
+  [alias]
+          ci = commit -a
+          co = checkout
+          st = status -a
+          stat = status -a
+          br = branch
+          wdiff = diff --color-words
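+
+With these in place you can, for example, type ``git co my-new-feature``
+instead of ``git checkout my-new-feature``.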
+
+Editor
+------
+
+You may also want to make sure that your editor of choice is used::
+
+  git config --global core.editor vim
+
+Merging
+-------
+
+To enforce summaries when doing merges (``~/.gitconfig`` file again)::
+
+   [merge]
+      log = true
+
+Or from the command line::
+
+  git config --global merge.log true
+
+
+.. include:: links.inc
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
new file mode 100644
index 0000000..5ef305c
--- /dev/null
+++ b/doc/source/getting_started.rst
@@ -0,0 +1,122 @@
+.. _getting_started:
+
+Getting Started
+===============
+
+Inside the Martinos Center
+--------------------------
+For people within the MGH/MIT/HMS Martinos Center, MNE is available on the network.
+
+In a terminal do::
+
+    setenv PATH /usr/pubsw/packages/python/epd/bin:${PATH}
+
+If you use Bash, replace the previous instruction with::
+
+    export PATH=/usr/pubsw/packages/python/epd/bin:${PATH}
+
+Then start the Python interpreter with::
+
+    ipython
+
+Then type::
+
+    >>> import mne
+
+If you get a new prompt with no error messages, you should be good to go.
+Start with the :ref:`examples-index`.
+
+Outside the Martinos Center
+---------------------------
+
+MNE is written in pure Python, making it easy to set up on
+any machine with Python >= 2.6, Numpy >= 1.4, Scipy >= 0.7.2,
+and matplotlib >= 1.1.0.
+
+Some isolated functions (e.g. filtering with firwin2) require Scipy >= 0.9.
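+
+If you are unsure which versions you already have, you can check them
+from the command line, e.g.::
+
+    python -c "import numpy; print(numpy.__version__)"
+    python -c "import scipy; print(scipy.__version__)"
+    python -c "import matplotlib; print(matplotlib.__version__)"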
+
+For a fast and up to date scientific Python environment you
+can install EPD available at:
+
+http://www.enthought.com/products/epd.php
+
+EPD is free for academic purposes. If you cannot benefit from an
+academic license and don't want to pay for it, you can use
+EPD Free, which is a lightweight version (no 3D visualization
+support for example):
+
+http://www.enthought.com/products/epd_free.php
+
+To test that everything works properly, open up IPython::
+
+    ipython
+
+Although all of the examples in this documentation are in the style
+of the standard Python interpreter, the use of IPython is highly
+recommended.
+
+Now that you have a working Python environment you can install MNE.
+
+You can manually get the latest version of the code at:
+
+https://github.com/mne-tools/mne-python
+
+Then from the mne-python folder (containing a setup.py file) you can install with::
+
+    python setup.py install
+
+or, if you don't have admin access to your Python setup (permission denied when installing), use::
+
+    python setup.py install --user
+
+You can also install the latest release with easy_install::
+
+    easy_install -U mne
+
+or with pip::
+
+    pip install mne --upgrade
+
+For the latest development version (the most up to date)::
+
+    pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev
+
+To check that everything went fine, in ipython, type::
+
+    >>> import mne
+
+If you get a new prompt with no error messages, you should be good to go.
+
+If you want to use NVIDIA CUDA for filtering (can yield 3-4x speedups), you'll
+need to install the NVIDIA toolkit on your system, and then both pycuda and
+scikits.cuda, see:
+
+https://developer.nvidia.com/cuda-downloads
+http://mathema.tician.de/software/pycuda
+http://wiki.tiker.net/PyCuda/Installation/
+https://github.com/lebedov/scikits.cuda
+
+To initialize mne-python CUDA support, after installing these dependencies
+and running their associated unit tests (to ensure your installation is
+correct), you can run::
+
+    >>> mne.cuda.init_cuda() # doctest: +SKIP
+
+If you have everything installed correctly, you should see an INFO-level log
+message telling you your CUDA hardware's available memory. To have CUDA
+initialized on startup, you can do::
+
+    >>> mne.utils.set_config('MNE_USE_CUDA', 'true') # doctest: +SKIP
+
+You can test whether MNE CUDA support is working by running the associated test::
+
+    nosetests mne/tests/test_filter.py
+
+If all tests pass with none skipped, then mne-python CUDA support works.
+
+Learning Python
+---------------
+
+If you are new to Python, here is a very good place to get started:
+
+    * http://scipy-lectures.github.com
diff --git a/doc/source/git_links.inc b/doc/source/git_links.inc
new file mode 100644
index 0000000..478f7cf
--- /dev/null
+++ b/doc/source/git_links.inc
@@ -0,0 +1,64 @@
+.. This (-*- rst -*-) format file contains commonly used link targets
+   and name substitutions.  It may be included in many files,
+   therefore it should only contain link targets and name
+   substitutions.  Try grepping for "^\.\. _" to find plausible
+   candidates for this list.
+
+.. NOTE: reST targets are
+   __not_case_sensitive__, so only one target definition is needed for
+   nipy, NIPY, Nipy, etc...
+
+.. git stuff
+.. _git: http://git-scm.com/
+.. _github: http://github.com
+.. _GitHub Help: http://help.github.com
+.. _msysgit: http://code.google.com/p/msysgit/downloads/list
+.. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list
+.. _subversion: http://subversion.tigris.org/
+.. _git cheat sheet: http://github.com/guides/git-cheat-sheet
+.. _pro git book: http://progit.org/
+.. _git svn crash course: http://git-scm.com/course/svn.html
+.. _learn.github: http://learn.github.com/
+.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer
+.. _git user manual: http://schacon.github.com/git/user-manual.html
+.. _git tutorial: http://schacon.github.com/git/gittutorial.html
+.. _git community book: http://book.git-scm.com/
+.. _git ready: http://www.gitready.com/
+.. _git casts: http://www.gitcasts.com/
+.. _Fernando's git page: http://www.fperez.org/py4science/git.html
+.. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html
+.. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/
+.. _git clone: http://schacon.github.com/git/git-clone.html
+.. _git checkout: http://schacon.github.com/git/git-checkout.html
+.. _git commit: http://schacon.github.com/git/git-commit.html
+.. _git push: http://schacon.github.com/git/git-push.html
+.. _git pull: http://schacon.github.com/git/git-pull.html
+.. _git add: http://schacon.github.com/git/git-add.html
+.. _git status: http://schacon.github.com/git/git-status.html
+.. _git diff: http://schacon.github.com/git/git-diff.html
+.. _git log: http://schacon.github.com/git/git-log.html
+.. _git branch: http://schacon.github.com/git/git-branch.html
+.. _git remote: http://schacon.github.com/git/git-remote.html
+.. _git rebase: http://schacon.github.com/git/git-rebase.html
+.. _git config: http://schacon.github.com/git/git-config.html
+.. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
+.. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
+.. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git
+.. _git management: http://kerneltrap.org/Linux/Git_Management
+.. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
+.. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html
+.. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html
+.. _deleting master on github: http://matthew-brett.github.com/pydagogue/gh_delete_master.html
+.. _rebase without tears: http://matthew-brett.github.com/pydagogue/rebase_without_tears.html
+.. _resolving a merge: http://schacon.github.com/git/user-manual.html#resolving-a-merge
+.. _ipython git workflow: http://mail.scipy.org/pipermail/ipython-dev/2010-October/006746.html
+
+.. other stuff
+.. _python: http://www.python.org
+.. _spyder: http://spyder-ide.blogspot.com/
+.. _pep8: http://pypi.python.org/pypi/pep8
+.. _pyflakes: http://pypi.python.org/pypi/pyflakes
+
+.. |emdash| unicode:: U+02014
+
+.. vim: ft=rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..bb2dff5
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,48 @@
+========
+MNE Home
+========
+
+MNE is a software package for processing magnetoencephalography
+(MEG) and electroencephalography (EEG) data. 
+
+The MNE software computes cortically-constrained L2 minimum-norm
+current estimates and associated dynamic statistical parametric maps
+from MEG and EEG data, optionally constrained by fMRI. 
+
+This software includes MEG and EEG preprocessing tools, interactive
+and batch-mode modules for the forward and inverse calculations, as
+well as various data conditioning and data conversion utilities. These
+tools are provided as compiled C code for the LINUX and Mac OSX
+operating systems.
+
+In addition to the compiled C code tools, MNE Software includes a
+Matlab toolbox which facilitates access to the fif (functional image
+file) format data files employed in our software and enables
+development of custom analysis tools based on the intermediate results
+computed with the MNE tools. 
+
+The third and newest component of MNE is MNE-Python which implements
+all the functionality of the MNE Matlab tools in Python and extends
+the capabilities of the MNE Matlab tools to, e.g., frequency-domain
+and time-frequency analyses and non-parametric statistics. This
+component of MNE is presently evolving quickly and thanks to the
+adopted open development environment user contributions can be easily
+incorporated.
+
+The Matlab and Python components of MNE are provided under the
+simplified BSD license.
+
+  * `Download <http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/index.php>`_ MNE
+  * Read the :ref:`manual`.
+  * Get started with :ref:`mne_python`
+  * :ref:`command_line_tutorial`
+  * Join the MNE `mailing list <http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis>`_
+  * `Help/Feature Request/Bug Report <mailto:mne_support at nmr.mgh.harvard.edu>`_
+  * :ref:`ch_reading`
+
+.. toctree::
+   :maxdepth: 2
+
+   manual
+   mne-python
+
diff --git a/doc/source/known_projects.inc b/doc/source/known_projects.inc
new file mode 100644
index 0000000..2a39d82
--- /dev/null
+++ b/doc/source/known_projects.inc
@@ -0,0 +1,46 @@
+.. Known projects
+
+.. PROJECTNAME placeholders
+.. _PROJECTNAME: http://neuroimaging.scipy.org
+.. _`PROJECTNAME github`: http://github.com/nipy
+.. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel
+
+.. numpy
+.. _numpy: http://numpy.scipy.org
+.. _`numpy github`: http://github.com/numpy/numpy
+.. _`numpy style`: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+.. _`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion
+
+.. scipy
+.. _scipy: http://www.scipy.org
+.. _`scipy github`: http://github.com/scipy/scipy
+.. _`scipy mailing list`: http://mail.scipy.org/mailman/listinfo/scipy-dev
+
+.. nipy
+.. _nipy: http://nipy.org/nipy
+.. _`nipy github`: http://github.com/nipy/nipy
+.. _`nipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel
+
+.. ipython
+.. _ipython: http://ipython.scipy.org
+.. _`ipython github`: http://github.com/ipython/ipython
+.. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev
+
+.. dipy
+.. _dipy: http://nipy.org/dipy
+.. _`dipy github`: http://github.com/Garyfallidis/dipy
+.. _`dipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel
+
+.. nibabel
+.. _nibabel: http://nipy.org/nibabel
+.. _`nibabel github`: http://github.com/nipy/nibabel
+.. _`nibabel mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel
+
+.. marsbar
+.. _marsbar: http://marsbar.sourceforge.net
+.. _`marsbar github`: http://github.com/matthew-brett/marsbar
+.. _`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users
+
+.. Astropy
+.. _Astropy: http://astropy.org
+.. _`Astropy GitHub`: http://github.com/astropy/astropy
diff --git a/doc/source/links.inc b/doc/source/links.inc
new file mode 100644
index 0000000..20f4dcf
--- /dev/null
+++ b/doc/source/links.inc
@@ -0,0 +1,4 @@
+.. compiling links file
+.. include:: known_projects.inc
+.. include:: this_project.inc
+.. include:: git_links.inc
diff --git a/doc/source/manual.rst b/doc/source/manual.rst
new file mode 100644
index 0000000..9c39ce1
--- /dev/null
+++ b/doc/source/manual.rst
@@ -0,0 +1,28 @@
+.. _manual:
+
+Manual
+======
+
+.. toctree::
+   :maxdepth: 1
+
+   manual/intro
+   manual/list
+   manual/cookbook
+   manual/browse
+   manual/forward
+   manual/mne
+   manual/analyze
+   manual/morph
+   manual/convert
+   manual/matlab
+   manual/utilities
+   manual/sampledata
+   manual/reading
+   manual/AppA
+   manual/AppB
+   manual/AppInstall
+   manual/AppReleaseNotes
+   manual/AppEULA
+   command_line_tutorial
+
diff --git a/doc/source/manual/AppA.rst b/doc/source/manual/AppA.rst
new file mode 100644
index 0000000..3434c49
--- /dev/null
+++ b/doc/source/manual/AppA.rst
@@ -0,0 +1,377 @@
+
+
+.. _create_bem_model:
+
+=======================
+Creating the BEM meshes
+=======================
+
+.. _BABBDHAG:
+
+Using the watershed algorithm
+#############################
+
+The watershed algorithm [Ségonne *et al.*,
+2004] is part of the FreeSurfer software.
+The name of the program is mri_watershed.
+Its use in the MNE environment is facilitated by the script mne_watershed_bem,
+which accepts the following options:
+
+**\---subject  <*subject*>**
+
+    Defines the name of the subject. This can be also accomplished
+    by setting the SUBJECT environment variable.
+
+**\---overwrite**
+
+    Overwrite the results of a previous run of mne_watershed_bem.
+
+**\---atlas**
+
+    Makes mri_watershed employ
+    atlas information to correct the segmentation.
+
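+For example, to run the script for the subject ``sample``, overwriting
+the results of an earlier run::
+
+    mne_watershed_bem --subject sample --overwrite
+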
+After mne_watershed_bem has
+completed, the following files appear in the subject's ``bem/watershed`` directory:
+
+** <*subject*> _brain_surface**
+
+    Contains the brain surface triangulation.
+
+** <*subject*> _inner_skull_surface**
+
+    Contains the inner skull triangulation.
+
+** <*subject*> _outer_skull_surface**
+
+    Contains the outer skull triangulation.
+
+** <*subject*> _outer_skin_surface**
+
+    Contains the scalp triangulation.
+
+All of these surfaces are in the FreeSurfer format. In addition,
+there will be a directory called ``bem/watershed/ws`` which
+contains the brain MRI volume. Furthermore, the mne_watershed_bem script
+converts the scalp surface to fif format and saves the result to
+``bem/`` <*subject*> ``-head.fif``. The mne_analyze tool described in
+:ref:`ch_interactive_analysis` looks for this file in visualizations
+involving the scalp surface.
+
+.. _BABFCDJH:
+
+Using FLASH images
+##################
+
+This method depends on the availability of MRI data acquired
+with a multi-echo FLASH sequence at two flip angles (5 and 30 degrees).
+These data can be acquired separately from the MPRAGE data employed
+in FreeSurfer cortical reconstructions but it is strongly recommended
+that they are collected at the same time with the MPRAGEs or at
+least with the same scanner. For easy co-registration, the images
+should have the same FOV, matrix, slice thickness, gap, and slice
+orientation as the MPRAGE data. For information on suitable pulse
+sequences, see the reference [B. Fischl *et al.* and J. Jovicich *et
+al.*, 2006] in :ref:`CEGEGDEI`. At the Martinos
+Center, use of the 1.5-T Avanto scanner (Bay 2) is recommended for
+best results.
+
+Creation of the BEM meshes using this method involves the
+following steps:
+
+- Organizing the MRI data. This is facilitated
+  by the script mne_organize_dicom ,
+  see :ref:`BABEBJHI`.
+
+- Creating a synthetic 5-degree flip angle FLASH volume, register
+  it with the MPRAGE data, and run the segmentation and meshing program.
+  This step is accomplished by running the script mne_flash_bem , see :ref:`BABGICFE`.
+
+- Inspecting the meshes with tkmedit, see :ref:`BABHJBED`.
+
+.. note:: The following sections assume that you have run the appropriate setup scripts to make both MNE and FreeSurfer software available.
+
+.. _BABEBJHI:
+
+Organizing MRI data into directories
+====================================
+
+Since all images comprising the multi-echo FLASH data are
+contained in a single series, it is necessary to organize the images
+according to the echoes before proceeding to the BEM surface reconstruction.
+This is accomplished by the mne_organize_dicom script,
+which creates a directory tree with symbolic links to the original
+DICOM image files. To run mne_organize_dicom ,
+proceed as follows:
+
+- Copy all of your images or create symbolic
+  links to them in a single directory. The images must be in DICOM
+  format. We will refer to this directory as  <*source*> .
+
+- Create another directory to hold the output of mne_organize_dicom . We
+  will refer to this directory as  <*dest*> .
+
+- Change the working directory to  <*dest*> .
+
+- Say ``mne_organize_dicom``  <*source*> .
+  Depending on the total number of images in  <*source*> this
+  script may take quite a while to run. Progress is  indicated by
+  listing the number of images processed at 50-image intervals.
+
+As a result,  <*dest*> will
+contain several directories named  <*three-digit number*> _ <*protocol_name*> corresponding
+to the different series of images acquired. Spaces and parenthesis
+in protocol names will be replaced by underscores. Under each of
+these directories there are one or more directories named
+<*three-digit number*> corresponding to one or more subsets of images in this series (protocol).
+The only subset division scheme implemented in mne_organize_dicom is
+that according to different echoes, typically found in multi-echo
+FLASH data. These second level directories will contain symbolic
+links pointing to the original image data.
+
+.. note:: mne_organize_dicom was developed specifically for Siemens DICOM data. Its correct behavior with DICOM files originating from other MRI scanners has not been verified at this time.
+
+.. note:: Since mne_organize_dicom processes all images, not only the FLASH data, it may be a useful preprocessing step before the FreeSurfer reconstruction process as well.
+
+.. _BABGICFE:
+
+Creating the surface tessellations
+==================================
+
+The BEM surface segmentation and tessellation is automated
+with the script mne_flash_bem .
+It assumes that a FreeSurfer reconstruction for this subject is
+already in place. The following options can be specified:
+
+**\---help**
+
+    Prints the usage information.
+
+**\---usage**
+
+    Prints the usage information.
+
+**\---noconvert**
+
+    Skip conversion of the original MRI data. The original data are
+    not needed, and the preparatory steps 1.-3. listed below
+    can thus be skipped.
+
+**\---noflash30**
+
+    The 30-degree flip angle data are not used.
+
+**\---unwarp  <*type*>**
+
+    Run grad_unwarp with ``--unwarp``  <*type*> option on each of the converted
+    data sets.
+
+Before running mne_flash_bem do
+the following:
+
+- Run mne_organize_dicom as
+  described above.
+
+- Change to the  <*dest*> directory
+  where mne_organize_dicom created the
+  image directory structure.
+
+- Create symbolic links from the directories containing the
+  5-degree and 30-degree flip angle FLASH series to ``flash05`` and ``flash30`` , respectively:
+
+  - ``ln -s``  <*FLASH 5 series dir*> ``flash05``
+
+  - ``ln -s``  <*FLASH 30 series dir*> ``flash30``
+
+- Set the ``SUBJECTS_DIR`` and ``SUBJECT`` environment
+  variables
+
+.. note:: If mne_flash_bem is run with the ``--noflash30`` option, the flash30 directory is not needed, *i.e.*, only the 5-degree flip angle flash data are employed.
+
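+Putting the preparatory steps together, a complete session might look
+like this (bash syntax; the directory and series names are
+illustrative)::
+
+    export SUBJECTS_DIR=/data/subjects
+    export SUBJECT=sample
+    # change to the <dest> directory created by mne_organize_dicom
+    cd /data/dicom_organized
+    ln -s 004_mef_05deg flash05
+    ln -s 005_mef_30deg flash30
+    mne_flash_bem
+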
+It may take a while for mne_flash_bem to
+complete. It uses the FreeSurfer directory structure under ``$SUBJECTS_DIR/$SUBJECT`` .
+The script encapsulates the following processing steps:
+
+- It creates an mgz file corresponding
+  to each of the eight echoes in each of the FLASH directories in ``mri/flash`` .
+  The files will be called ``mef``  <*flip-angle*> _ <*echo-number*> ``.mgz`` .
+
+- If the ``--unwarp`` option is specified, run grad_unwarp and produce
+  files ``mef``  <*flip-angle*> _ <*echo-number*> ``u.mgz`` .
+  These files will be then used in the following steps.
+
+- It creates parameter maps in ``mri/flash/parameter_maps`` using mri_ms_fitparms .
+
+- It creates a synthetic 5-degree flip angle volume in ``mri/flash/parameter_maps/flash5.mgz`` using mri_synthesize .
+
+- Using fsl_rigid_register ,
+  it creates a registered 5-degree flip angle volume ``mri/flash/parameter_maps/flash5_reg.mgz`` by
+  registering ``mri/flash/parameter_maps/flash5.mgz`` to
+  the *T1* volume under ``mri`` .
+
+- Using mri_convert , it converts
+  the flash5_reg volume to COR
+  format under ``mri/flash5`` . If necessary, the T1 and brain volumes
+  are also converted into the COR format.
+
+- It runs mri_make_bem_surfaces to
+  create the BEM surface tessellations.
+
+- It creates the directory ``bem/flash`` , moves the
+  tri-format triangulations there and creates the corresponding FreeSurfer
+  surface files in the same directory.
+
+- The COR format volumes created by mne_flash_bem are
+  removed.
+
+If the ``--noflash30`` option is specified to mne_flash_bem ,
+steps 3 and 4 in the above are replaced by averaging over the different
+echo times in 5-degree flip angle data.
+
+.. _BABHJBED:
+
+Inspecting the meshes
+=====================
+
+It is advisable to check the validity of the BEM meshes before
+using them. This can be done with the help of tkmedit either
+before or after executing mne_setup_forward_model,
+see :ref:`CIHDBFEG`.
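+
+For example, with the FreeSurfer environment set up, a mesh can be
+overlaid on the T1 volume with something like this (the paths and the
+surface file name are illustrative)::
+
+    tkmedit -f $SUBJECTS_DIR/$SUBJECT/mri/T1.mgz \
+        -surface $SUBJECTS_DIR/$SUBJECT/bem/flash/inner_skull.surf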
+
+Using seglab
+############
+
+The brain segmentation provided by FreeSurfer in the directory ``mri/brain`` can
+be employed to create the inner skull surface triangulation with
+help of seglab, the Neuromag MRI segmentation tool. The description
+below assumes that the user is familiar with the seglab tool. If
+necessary, consult the seglab manual, Neuromag P/N NM20420A-A.
+
+The data set mri/brain typically
+contains tissues within or outside the skull, in particular around
+the eyes. These must be removed manually before the inner skull
+triangulation is created. The editing and triangulation can be accomplished
+as outlined below:
+
+**1. Set up the MRIs for Neuromag software access**
+
+    Run the mne_setup_mri tool as described in :ref:`BABCCEHF`.
+    As a result, the directories mri/T1-neuromag and mri/brain-neuromag
+    are set up.
+
+**2. Load the MRI data**
+
+    Open the file mri/brain-neuromag/sets/COR.fif and adjust the scaling
+    of the data.
+
+**3. Preparatory steps**
+
+    Set the minimum data value to 1 using the min3D operator.
+    Make a backup of the data with the backup3D operator.
+
+**4. Manual editing**
+
+    The maskDraw3D operation is recommended
+    for manual editing. To use it, first employ the grow3D operator
+    with threshold interval 2...255 and the seed point inside
+    the brain. Then do the editing in the slicer window as described
+    in Section 5.4.2 of the seglab manual. Note that it is enough to
+    remove the connectivity to the extracerebral tissues rather than
+    erasing them completely.
+
+**5. Grow again and mask**
+
+    Once manual editing is complete, employ the grow3D operator again
+    and do mask3D with the backup
+    data to see whether the result is satisfactory. If not, undo mask3D and
+    continue manual editing. Otherwise, undo mask3D and
+    proceed to the next step.
+
+**6. Dilation**
+
+    It is advisable to make the inner skull surface slightly bigger
+    than the brain envelope obtained in the previous step. Therefore,
+    apply the dilate3D operation
+    once or twice. Use the values 1 for nbours and 26 for nhood in the
+    first dilation and 1 and 18 in the second one, respectively.
+
+**7. Triangulation**
+
+    Triangulate the resulting object with the triangulate3D operator. Use
+    a sidelength of 5 to 6 mm. Check that the triangulation looks reasonable
+    in the 3D viewing window.
+
+**8. Save the triangulation**
+
+    Save the triangulated surface as a mesh into bem/inner_skull.tri. Select
+    unit of measure as millimeters and employ the MRI coordinate system.
+
+Using BrainSuite
+################
+
+The BrainSuite software
+running under the Windows operating system can also be used for
+BEM mesh generation. This software, written by David W. Shattuck,
+is distributed as a collaborative project between the Laboratory
+of Neuro Imaging at the University of California Los Angeles (Director:
+Dr. Arthur W. Toga) and the Biomedical Imaging Research Group at
+the University of Southern California (Director: Dr. Richard M. Leahy).
+For further information, see http://brainsuite.usc.edu/.
+
+The conversion of BrainSuite tessellation
+files to MNE software compatible formats is accomplished with the mne_convert_surface utility,
+covered in :ref:`BEHDIAJG`.
+
+The workflow needed to employ the BrainSuite tessellations
+is:
+
+**Step 1**
+
+    Using the mri_convert utility
+    available in FreeSurfer , convert
+    an MRI volume to the img (Analyze) format. This volume should be the
+    T1.mgz volume or a volume registered with T1.mgz in FreeSurfer:
+    ``mri_convert <*volume*>.mgz <*volume*>.img``
+
+**Step 2**
+
+    Transfer <*volume*> ``.img`` to a location accessible to
+    BrainSuite, running on Windows.
+
+**Step 3**
+
+    Using  <*volume*> ``.img`` as
+    input, create the tessellations of scalp, outer skull, and inner
+    skull surfaces in BrainSuite .
+
+**Step 4**
+
+    Transfer the dfs files containing the tessellations in the bem directory
+    of your subject's FreeSurfer reconstruction.
+
+**Step 5**
+
+    Go to the bem directory where you placed the two dfs files. Using
+    mne_convert_surface, convert them to the FreeSurfer surface
+    format, *e.g.*:
+    ``mne_convert_surface --dfs inner_skull.dfs --mghmri ../mri/T1.mgz --surf inner_skull_dfs.surf``
+
+**Step 6**
+
+    Using tkmedit, check that the surfaces are correct, *e.g.*:
+    ``tkmedit -f ../mri/T1.mgz -surface inner_skull_dfs.surf``
+
+**Step 7**
+
+    Using the mne_reduce_surface function
+    in Matlab, reduce the number of triangles on the surfaces to
+    10000-20000. Call the output files ``outer_skin.surf``, ``outer_skull.surf``,
+    and ``inner_skull.surf``.
+
+**Step 8**
+
+    Proceed to mne_setup_forward_model .
+    Use the ``--surf`` and ``--noswap`` options.
+
+.. note:: If left and right are flipped in BrainSuite, use the ``--flip`` option in mne_convert_surface to set the coordinate transformation correctly.
+
+.. note:: The BrainSuite scalp surface can also be used for visualization in mne_analyze, see :ref:`CHDCGHIF`.
diff --git a/doc/source/manual/AppB.rst b/doc/source/manual/AppB.rst
new file mode 100644
index 0000000..c89aa88
--- /dev/null
+++ b/doc/source/manual/AppB.rst
@@ -0,0 +1,294 @@
+
+
+.. _setup_martinos:
+
+============================
+Setup at the Martinos Center
+============================
+
+This Appendix contains information specific to the Martinos
+Center setup.
+
+.. _user_environment_martinos:
+
+User environment
+################
+
+In the Martinos Center computer network, the 2.7 version
+of MNE is located at /usr/pubsw/packages/mne/stable. To use this
+version, follow :ref:`user_environment` substituting /usr/pubsw/packages/mne/stable
+for <*MNE*> and /usr/pubsw/packages/matlab/current
+for <*Matlab*> . For most users,
+the default shell is tcsh.
+
+.. note:: A new version of MNE is built every night from the latest sources. This version is located at /usr/pubsw/packages/mne/nightly.
+
+.. _BABGFDJG:
+
+Using Neuromag software
+#######################
+
+Software overview
+=================
+
+The complete set of Neuromag software is available on the
+LINUX workstations. The programs can be accessed from the command
+line, see :ref:`BABFIEHC`. The corresponding manuals, located
+at ``$NEUROMAG_ROOT/manuals``, are listed in :ref:`BABCJJGF`.
+
+.. _BABFIEHC:
+
+.. table:: Principal Neuromag software modules.
+
+    ===========  =================================
+    Module       Description
+    ===========  =================================
+    xfit         Source modelling
+    xplotter     Data plotting
+    graph        General purpose data processor
+    mrilab       MEG-MRI integration
+    seglab       MRI segmentation
+    cliplab      Graphics clipboard
+    ===========  =================================
+
+.. _BABCJJGF:
+
+.. table:: List of Neuromag software manuals.
+
+    ===========  =========================================
+    Module       pdf
+    ===========  =========================================
+    xfit         XFit.pdf
+    xplotter     Xplotter.pdf
+    graph        GraphUsersGuide.pdf GraphReference.pdf
+    mrilab       Mrilab.pdf
+    seglab       Seglab.pdf
+    cliplab      Cliplab.pdf
+    ===========  =========================================
+
+To access the Neuromag software on the LINUX workstations
+in the Martinos Center, say (in tcsh or csh)
+
+``source /space/orsay/8/megdev/Neuromag-LINUX/neuromag_setup_csh``
+
+or in POSIX shell
+
+``. /space/orsay/8/megdev/Neuromag-LINUX/neuromag_setup_sh``
+
+Using MRIlab for coordinate system alignment
+============================================
+
+The MEG-MRI coordinate system alignment can also be accomplished with
+the Neuromag tool MRIlab, part of the standard software on Neuromag
+MEG systems.
+
+In MRIlab, the following steps are necessary for the coordinate
+system alignment:
+
+- Load the MRI description file ``COR.fif`` from ``subjects/sample/mri/T1-neuromag/sets`` through File/Open .
+
+- Open the landmark setting dialog from Windows/Landmarks .
+
+- Click on one of the coordinate setting fields on the Nasion line.
+  Click Goto . Select the crosshair
+  tool and move the crosshair to the nasion. Click Get .
+
+- Proceed similarly for the left and right auricular points.
+  Your instructor will help you with the selection of the correct
+  points.
+
+- Click OK to set the alignment.
+
+- Load the digitization data from the file ``sample_audvis_raw.fif`` or ``sample_audvis-ave.fif`` (the
+  on-line evoked-response average file) in ``MEG/sample`` through File/Import/Isotrak data . Click Make points to
+  show all the digitization data on the MRI slices.
+
+- Check that the alignment is correct by verifying that the locations
+  of the digitized points are reasonable. Adjust the landmark locations
+  using the Landmarks dialog, if necessary.
+
+- Save the aligned file to the file suggested in the dialog
+  coming up from File/Save .
+
+Mature software
+###############
+
+This section contains documentation for software components
+that are still available in the MNE software but have been replaced
+by newer programs.
+
+.. _BABDABHI:
+
+mne_compute_mne
+===============
+
+This chapter contains information about the options accepted
+by the program mne_compute_mne ,
+which is gradually becoming obsolete. All of its functions will
+eventually be included in mne_make_movie,
+see :ref:`CBBECEDE`. At this time, mne_compute_mne is
+still needed to produce time-collapsed w files unless you are willing
+to write a Matlab script of your own for this purpose.
+
+mne_compute_mne accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---inv <*name*>**
+
+    Load the inverse operator decomposition from here.
+
+**\---meas <*name*>**
+
+    Load the MEG or EEG data from this file.
+
+**\---set <*number*>**
+
+    The data set (condition) number to load. The list of data sets can
+    be seen, *e.g.*, in mne_analyze , mne_browse_raw ,
+    and xplotter .
+
+**\---bmin <*time/ms*>**
+
+    Specifies the starting time of the baseline. In order to activate
+    baseline correction, both ``--bmin`` and ``--bmax`` options
+    must be present.
+
+**\---bmax <*time/ms*>**
+
+    Specifies the finishing time of the baseline.
+
+**\---nave <*value*>**
+
+    Specifies the number of averaged epochs in the input data. If the input
+    data file is one produced by mne_process_raw or mne_browse_raw ,
+    the number of averages is correct in the file. However, if subtractions
+    or some more complicated combinations of simple averages are produced, *e.g.*,
+    by using the xplotter software, the
+    number of averages should be manually adjusted. This is accomplished
+    either by employing this flag or by adjusting the number of averages
+    in the data file with help of mne_change_nave .
+
+**\---snr <*value*>**
+
+    An estimate for the amplitude SNR. The regularization parameter will
+    be set as :math:`\lambda = 1/\text{SNR}`. If the SNR option is
+    absent, the regularization parameter will be estimated from the
+    data. The regularization parameter will then be time dependent.
+
+**\---snronly**
+
+    Only estimate SNR and output the result into a file called SNR. Each
+    line of the file contains three values: the time point in ms, the estimated
+    SNR + 1, and the regularization parameter estimated from the data
+    at this time point.
+
+**\---abs**
+
+    Calculate the absolute value of the current and the dSPM for fixed-orientation
+    data.
+
+**\---spm**
+
+    Calculate the dSPM instead of the expected current value.
+
+**\---chi2**
+
+    Calculate an approximate :math:`\chi_3^2` statistic
+    instead of the *F* statistic. This is simply
+    accomplished by multiplying the *F* statistic
+    by three.
+
+**\---sqrtF**
+
+    Take the square root of the :math:`\chi_3^2` or *F* statistic
+    before outputting the stc file.
+
+**\---collapse**
+
+    Make all frames in the stc file (or the wfile) identical. The value
+    at each source location is the maximum value of the output quantity
+    at this location over the analysis period. This option is convenient
+    for determining the correct thresholds for the rendering of the
+    final brain-activity movies.
+
+**\---collapse1**
+
+    Make all frames in the stc file (or the wfile) identical. The value
+    at each source location is the :math:`L_1` norm
+    of the output quantity at this location over the analysis period.
+
+**\---collapse2**
+
+    Make all frames in the stc file (or the wfile) identical. The value
+    at each source location is the :math:`L_2` norm
+    of the output quantity at this location over the analysis period.
+
+**\---SIcurrents**
+
+    Output true current values in SI units (Am). By default, the currents are
+    scaled so that the maximum current value is set to 50 (Am).
+
+**\---out <*name*>**
+
+    Specifies the output file name. This is the 'stem' of
+    the output file name. The actual name is derived by removing anything up
+    to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. Finally, ``.stc`` or ``.w`` is added,
+    depending on the output file type.
+
+**\---wfiles**
+
+    Use binary w-files in the output whenever possible. The noise-normalization
+    factors can be always output in this format.  The current estimates
+    and dSPMs can be output as wfiles if one of the collapse options
+    is selected.
+
+**\---pred <*name*>**
+
+    Save the predicted data into this file. This is a fif file containing
+    the predicted data waveforms, see :ref:`CHDCACDC`.
+
+**\---outputnorm <*name*>**
+
+    Output noise-normalization factors to this file.
+
+**\---invnorm**
+
+    Output inverse noise-normalization factors to the file defined by
+    the ``--outputnorm`` option.
+
+**\---dip <*name*>**
+
+    Specifies a dipole distribution snapshot file. This is a file containing the
+    current distribution at a time specified with the ``--diptime`` option.
+    The file format is the ASCII dip file format produced by the Neuromag
+    source modelling software (xfit). Therefore, the file can be loaded
+    to the Neuromag MRIlab MRI viewer to display the actual current
+    distribution. This option is only effective if the ``--spm`` option
+    is absent.
+
+**\---diptime <*time/ms*>**
+
+    Time for the dipole snapshot, see ``--dip`` option above.
+
+**\---label <*name*>**
+
+    Label to process. The label files are produced by tksurfer and specify
+    regions of interests (ROIs). A label file name should end with ``-lh.label`` for
+    left-hemisphere ROIs and with ``-rh.label`` for right-hemisphere
+    ones. The corresponding output files are tagged with ``-lh-`` <*data type*> ``.amp`` and ``-rh-`` <*data type*> ``.amp``, respectively. <*data type*> equals ``MNE`` for expected current
+    data and ``spm`` for dSPM data. Each line of the output
+    file contains the waveform of the output quantity at one of the
+    source locations falling inside the ROI.
+
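+A minimal invocation might look like this (the file names are
+illustrative)::
+
+    mne_compute_mne --inv sample-meg-inv.fif --meas sample-ave.fif \
+        --set 1 --spm --out sample_spm
+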
+.. note:: The ``--tmin`` and ``--tmax`` options, which existed in previous versions of mne_compute_mne, have been removed. mne_compute_mne can now process only the entire averaged epoch.
diff --git a/doc/source/manual/AppEULA.rst b/doc/source/manual/AppEULA.rst
new file mode 100644
index 0000000..c85d38b
--- /dev/null
+++ b/doc/source/manual/AppEULA.rst
@@ -0,0 +1,133 @@
+
+
+.. _licence:
+
+=================
+Licence agreement
+=================
+
+This appendix includes the terms of the MNE software End-User
+License Agreement (EULA).
+
+License agreement
+#################
+
+THE GENERAL HOSPITAL CORPORATION
+
+ACADEMIC RESEARCH USE
+
+SOFTWARE LICENSE AGREEMENT FOR BINARY CODE
+
+By downloading and/or using the MNE software which is the
+subject of this Agreement (the "Software"), you
+hereby accept and agree to all of the terms and conditions of this
+Agreement.  As used in this Agreement, "you" means
+the individual who clicks the "I accept" button
+required as a condition of downloading the Software and the not-for-profit
+or governmental institution or entity which employs or is otherwise
+affiliated with such individual at the time of such download (the "Institution").
+
+- *License Grant.* Subject
+  to all of the terms and conditions of this Agreement, [The General
+  Hospital Corporation, d/b/a Massachusetts General Hospital]/[The
+  Brigham and Women's Hospital, Inc.] ("Licensor") hereby
+  grants you a non-exclusive, non-transferable, non-sublicensable license
+  under Licensor's rights in the Software to copy and use the binary
+  code of the Software solely for research and educational purposes
+  under your direction at the Institution ("Research and
+  Educational Purposes," which term shall include company
+  sponsored research conducted by you in accordance with Institution's
+  policies).
+
+- *No Transfer.* You may not sell, license,
+  distribute, rent, lease, offer on an ASP or service bureau basis,
+  grant a security interest in or otherwise transfer the Software
+  to any third party or use the Software for any commercial purpose.
+
+- *Installation and Maintenance.* You are
+  solely responsible for installing and maintaining the Software and
+  for testing the Software for proper operation.  Licensor shall have
+  no obligation to provide any support, maintenance, corrections,
+  debugging, improvements, modifications, upgrades or updates of the
+  Software or notice of any of the foregoing, or otherwise assist Licensee
+  in installing, configuring, integrating, understanding or using
+  the Software.
+
+- *Attributions and Acknowledgments.* You
+  must preserve and maintain all applicable attributions, copyright
+  notices and licenses included in or applicable to the Software.
+  You agree to provide acknowledgement of Licensor and its designated
+  professional staff who participated in the creation of the Software
+  in publications or presentations in accordance with standard academic
+  practice, provided that you may not otherwise use Licensor's name,
+  logos or trademarks or the name of any individual associated with
+  Licensor, or of any funding agency, in any advertising, promotional
+  or sales material or publicity or in any document employed to obtain
+  funds or financing, or to endorse or promote any research results
+  or products related to or arising from the Software, without the
+  prior written consent of a person authorized to make such consent.
+
+- *Third Party Software.* This Agreement
+  does not grant any rights with respect to any third party software,
+  except those rights that Licensor has been authorized by a third
+  party to grant to you, and accordingly you are solely responsible
+  for obtaining any permissions from third parties which are necessary
+  to use and copy the Software.
+
+- *Compliance with Law.* You must comply
+  with all applicable governmental laws, regulations and orders, including
+  without limitation those relating to export and import control,
+  in exercising your rights under this Agreement.
+
+- *Termination.* You may terminate this
+  Agreement at any time by destroying all copies of the Software.
+  Licensor may terminate this Agreement at any time by providing
+  notice to you of such termination.  Any use or copying of the Software
+  by you which is inconsistent with the terms and conditions of this
+  Agreement shall automatically render this Agreement null and void
+  and terminate the license granted hereunder.  Upon any termination
+  of this Agreement you must stop using the Software and return or
+  destroy all copies of the Software, including any portion thereof.
+
+- *DISCLAIMERS.* YOU ACKNOWLEDGE THAT THE
+  SOFTWARE HAS BEEN DESIGNED FOR RESEARCH PURPOSES ONLY AND HAS NOT
+  BEEN REVIEWED OR APPROVED BY THE FOOD AND DRUG ADMINISTRATION OR
+  BY ANY OTHER AGENCY, AND YOU FURTHER ACKNOWLEDGE THAT CLINICAL APPLICATIONS
+  ARE NEITHER RECOMMENDED NOR ADVISED.  The Software is provided "AS
+  IS", is experimental, may contain errors and is subject to further
+  development and revision.  Licensor does not guarantee the accuracy
+  of the Software, or of any results or data.  LICENSOR SPECIFICALLY
+  DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES OF ANY KIND INCLUDING,
+  BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+  PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+
+- *LIMITATION OF LIABILITY*. IN NO EVENT
+  SHALL LICENSOR OR ANY OF ITS TRUSTEES, DIRECTORS, OFFICERS, MEDICAL OR
+  PROFESSIONAL STAFF, EMPLOYEES, STUDENTS OR  AGENTS ("LICENSOR'S
+  PERSONNEL") BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+  SPECIAL, INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES HOWEVER CAUSED
+  AND ON ANY THEORY OF LIABILITY ARISING IN ANY WAY RELATED TO THE
+  SOFTWARE, EVEN IF LICENSOR OR ANY OF LICENSOR'S PERSONNEL HAS BEEN
+  ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.  EXCEPT TO THE EXTENT
+  PROHIBITED BY LAW OR REGULATION, YOU ASSUME ALL RISK AND LIABILITY
+  FOR YOUR USE AND COPYING OF THE SOFTWARE, AND AGREE TO INDEMNIFY AND
+  HOLD HARMLESS LICENSOR AND EACH OF LICENSOR'S PERSONNEL FROM AND
+  AGAINST ANY AND ALL CLAIMS, SUITS, ACTIONS, DEMANDS AND JUDGMENTS ARISING
+  THEREFROM.
+
+- *U.S. Government Rights.* For Software
+  supported by federal funding, the license granted under this Agreement
+  is subject to the rights, conditions and limitations imposed by
+  U.S. law including without limitation 35 U.S.C. § 202 et
+  seq. and regulations pertaining thereto.
+
+- *General.* This Agreement constitutes
+  the entire understanding between you and Licensor with respect to
+  the subject matter hereof, and supersedes any prior or contemporaneous
+  oral or written agreements with respect thereto.  :ref:`CHDBAFGJ`, :ref:`ch_browse`,
+  :ref:`ch_mne`, :ref:`ch_morph`, :ref:`ch_convert`, and :ref:`ch_misc` shall survive any termination of this Agreement.  This
+  Agreement may be modified or amended only in a writing signed by
+  duly authorized representatives of both Parties hereto.  The invalidity
+  or unenforceability of any provision of this Agreement shall not
+  affect any other provision hereof.  This Agreement and the license
+  granted hereunder may not be assigned.
diff --git a/doc/source/manual/AppInstall.rst b/doc/source/manual/AppInstall.rst
new file mode 100644
index 0000000..ec35def
--- /dev/null
+++ b/doc/source/manual/AppInstall.rst
@@ -0,0 +1,174 @@
+
+
+.. _install_config:
+
+==============================
+Installation and configuration
+==============================
+
+System requirements
+###################
+
+The MNE software runs on Mac OSX and LINUX operating systems.
+The hardware and software requirements are:
+
+- Mac OSX version 10.5 (Leopard) or later.
+
+- LINUX kernel 2.6.9 or later
+
+- On both LINUX and Mac OSX, 32-bit and 64-bit Intel platforms
+  are supported. A PowerPC version on Mac OSX can be provided upon request.
+
+- At least 2 GB of memory, 4 GB or more recommended.
+
+- Disk space required for the MNE software: 80 MB
+
+- Additional open source software on Mac OSX, see :ref:`BABDBCJE`.
+
+Installation
+############
+
+The MNE software is distributed as a compressed tar archive
+(Mac OSX and LINUX) or a Mac OSX disk image (dmg).
+
+Download the software
+=====================
+
+Download the software package of interest. The file names
+follow the convention:
+
+MNE-<*version*>-<*rev*>-<*Operating system*>-<*Processor*>.<*ext*>
+
+The present version number is 2.7.0. The <*rev*> field
+is the SVN revision number at the time this package was created.
+The <*Operating system*> field
+is either Linux or MacOSX. The <*processor*> field
+is either i386 or x86_64. The <*ext*> field
+is 'gz' for compressed tar archive files and 'dmg' for
+Mac OSX disk images.
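+
+For example, a 64-bit LINUX package of the present version could be
+named along the following lines (the revision number here is
+illustrative only)::
+
+    MNE-2.7.0-3100-Linux-x86_64.tar.gz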
+
+Installing from a compressed tar archive
+========================================
+
+Go to the directory where you want the software to be installed:
+
+``cd`` <*dir*>
+
+Unpack the tar archive:
+
+``tar zxvf`` <*software package*>
+
+The name of the software directory under <*dir*> will
+be the same as the package file without the .gz extension.
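+
+For example, assuming the illustrative package name above, the
+installation under /opt would proceed as::
+
+    cd /opt
+    tar zxvf MNE-2.7.0-3100-Linux-x86_64.tar.gz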
+
+Installing from a Mac OSX disk image
+====================================
+
+- Double click on the disk image file.
+  A window opens with the installer package (<*name*>.pkg)
+  inside.
+
+- Double click the package file. The installer starts.
+
+- Follow the instructions in the installer.
+
+.. note:: The software will be installed to /Applications/<*name*> by default. If you want another location, select Choose Folder... on the Select a Destination screen in the installer.
+
+.. note:: To provide centralized support in an environment with
+
+.. _BABDBCJE:
+
+Additional software
+===================
+
+MNE uses the 'Netpbm' package (http://netpbm.sourceforge.net/)
+to create image files in formats other than tif and rgb from mne_analyze and mne_browse_raw.
+This package is usually present on LINUX systems. On Mac OSX, you
+need to install the netpbm package. The recommended way to do this
+is to use the MacPorts Project tools, see http://www.macports.org/:
+
+- If you have not installed the MacPorts
+  software, go to http://www.macports.org/install.php and follow the
+  instructions to install MacPorts.
+
+- Install the netpbm package by saying: ``sudo port install netpbm``
+
+MacPorts requires that you have the XCode developer tools
+and X11 windowing environment installed. X11 is also needed by MNE.
+For Mac OSX Leopard, we recommend using XQuartz (http://xquartz.macosforge.org/).
+As of this writing, XQuartz does not yet exist for Snow Leopard;
+the X11 included with the operating system is sufficient.
+
+.. _CIHIIBDA:
+
+Testing the performance of your OpenGL graphics
+===============================================
+
+The graphics performance of mne_analyze depends
+on your graphics software and hardware configuration. You get the
+best performance if you are using mne_analyze locally
+on a computer and the hardware acceleration capabilities are in
+use. You can check the On GLX... item
+in the help menu of mne_analyze to
+see whether the hardware acceleration is in effect. If the dialog
+popping up says Direct rendering context,
+you are using hardware acceleration. If this dialog indicates Nondirect rendering context, you are either using software
+emulation locally, rendering to a remote display, or employing a VNC
+connection. If you are rendering to a local display and get an indication
+of Nondirect rendering context,
+software emulation is in effect and you should contact your local
+computer support to enable hardware acceleration for GLX. In some
+cases, this may require acquiring a new graphics display card. Fortunately,
+relatively high-performance OpenGL-capable graphics cards are very inexpensive.
+
+There is also a utility mne_opengl_test to
+assess the graphics performance more quantitatively. This utility
+renders an inflated brain surface repeatedly, rotating it by 5 degrees
+around the *z* axis between redraws. At each
+revolution, the time spent for the full revolution is reported on
+the terminal window where mne_opengl_test was
+started from. The program renders the surface until the interrupt
+key (usually control-c) is pressed on the terminal window.
+
+mne_opengl_test is located
+in the ``bin`` directory and is thus started as:
+
+``$MNE_ROOT/bin/mne_opengl_test``
+
+On the fastest graphics cards, the time per revolution is
+well below 1 second. If this time is longer than 10 seconds, either
+the graphics hardware acceleration is not in effect or you need
+a faster graphics adapter.
+
+Obtain FreeSurfer
+#################
+
+The MNE software relies on the FreeSurfer software for cortical
+surface reconstruction and other MRI-related tasks. Please consult
+the FreeSurfer home page at ``http://surfer.nmr.mgh.harvard.edu/``.
+
+How to get started
+##################
+
+After you have installed the software, a good place to start
+is to look at the manual:
+
+- Source the correct setup script, see :ref:`user_environment` (a
+  minimal example is sketched below), and
+
+- Say: ``mne_view_manual``.
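+
+For example, in a Bourne-compatible shell these two steps might look
+as follows (the installation directory is illustrative; see
+:ref:`user_environment` for the exact setup script name)::
+
+    export MNE_ROOT=/opt/MNE-2.7.0-3100-Linux-x86_64
+    . $MNE_ROOT/bin/mne_setup_sh
+    mne_view_manual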
+
+Chapters of interest for a novice user include:
+
+- :ref:`CHDDEFAB` and :ref:`CHDBAFGJ` contain an introduction
+  to the software and setup instructions.
+
+- :ref:`ch_cookbook` is an overview of the necessary steps to
+  compute the cortically constrained minimum-norm solutions.
+
+- :ref:`ch_sample_data` is a hands-on exercise demonstrating analysis
+  of the sample data set.
+
+- :ref:`ch_reading` contains a list of useful references for
+  understanding the methods implemented in the MNE software.
diff --git a/doc/source/manual/AppReleaseNotes.rst b/doc/source/manual/AppReleaseNotes.rst
new file mode 100644
index 0000000..ef26b4e
--- /dev/null
+++ b/doc/source/manual/AppReleaseNotes.rst
@@ -0,0 +1,879 @@
+
+
+.. _release_notes:
+
+=============
+Release notes
+=============
+
+This appendix contains a brief description of the changes
+in MNE software in each major release.
+
+Release notes for MNE software 2.4
+##################################
+
+Manual
+======
+
+The manual has been significantly expanded and reorganized.
+:ref:`ch_interactive_analysis` describing mne_analyze has
+been added. :ref:`ch_sample_data` contains instructions for analyzing
+the sample data set provided with the software. Useful background
+material is listed in :ref:`ch_reading`. Almost all utility programs
+are now covered in the manual.
+
+General software changes
+========================
+
+The following overall changes have been made:
+
+- A forward solution library independent
+  of Neuromag software was written.
+
+- The MEG sensor information is now imported from the coil definition file
+  instead of being hardcoded in the software. For details, see :ref:`BJECIGEB`.
+
+- CTF and 4D Neuroimaging sensors are now supported.
+
+- The number of Neuromag-based utilities was minimized.
+
+- The LINUX port of Neuromag software modules was completely
+  separated from the MNE software and now resides under a separate
+  directory tree.
+
+- Support for topologically connected source spaces was added,
+  see :ref:`CIHCHDAE`.
+
+- A lot of bugs were fixed.
+
+File conversion utilities
+=========================
+
+The following import utilities were added:
+
+- mne_ctf2fiff to
+  convert CTF data to the fif format, see :ref:`BEHDEBCH`.
+
+- mne_tufts2fiff to convert
+  EEG data from Tufts University to fif format, see :ref:`BEHDGAIJ`.
+
+The output of the Matlab conversion utilities was changed
+to use structures. For details, see :ref:`BEHCCEBJ`, :ref:`BEHDIAJG`,
+and :ref:`convert_to_matlab`.
+
+Matlab tools to import and export w and stc files were added.
+
+mne_browse_raw
+==============
+
+Output of decimated and filtered data is now available. mne_browse_raw now fully
+supports 32-bit integer data found in CTF and new Neuromag raw data
+files.
+
+mne_analyze
+===========
+
+The following changes have been made in mne_analyze :
+
+- Curved and flat surface patches are
+  now supported.
+
+- An iterative coordinate alignment procedure was added, see
+  :ref:`CACEHGCD`.
+
+- Utility to view continuous HPI information was added, see :ref:`CACIADAI`.
+
+- Several small changes and bug fixes were done.
+
+mne_make_movie
+==============
+
+The only major change in mne_make_movie is
+the addition of support for curved and flat surface patches.
+
+Averaging
+=========
+
+The highly inefficient program mne_grand_average has
+been removed from the distribution and replaced with the combined
+use of mne_make_movie and a new
+averaging program mne_average_estimates , see :ref:`CHDFDIFE`.
+
+Release notes for MNE software 2.5
+##################################
+
+Manual
+======
+
+The MNE Matlab toolbox is now covered in a separate chapter.
+Change bars are employed to indicate changes in the chapters that
+existed in the previous version of the manual. Note that :ref:`ch_matlab` describing
+the Matlab toolbox is totally new and change bars have not been
+used there. Furthermore, :ref:`setup_martinos` now contains all the
+information specific to the Martinos Center.
+
+mne_browse_raw
+==============
+
+There are several improvements in the raw data processor mne_browse_raw/mne_process_raw :
+
+- Possibility to delete and add channel
+  selections interactively has been added. A nonstandard channel selection
+  file can be now specified on the command line.
+
+- Handling of CTF software gradient compensation has been added.
+
+- The vertical scale of the digital trigger channel is now automatically
+  set to accommodate the largest trigger value.
+
+- It is now possible to load evoked-response data sets from
+  files. Time scales of the evoked-response data and data averaged
+  in mne_browse_raw can now be
+  set from the scales dialog. :ref:`CHDHBGGH` has
+  been updated to employ mne_browse_raw in
+  viewing the averages computed from the sample raw data set.
+
+- It is now possible to create new SSP operators in mne_browse_raw ,
+  see :ref:`CACEAHEI`.
+
+- Listing of amplitude values has been added to both the strip-chart
+  and topographical displays.
+
+- Text format event files can now be loaded for easy inspection
+  of rejected epochs, for example.
+
+- Handling of derived channels has been added, see :ref:`CACFHAFH` and :ref:`CHDHJABJ`.
+
+- SSS information is now transferred to the covariance matrix
+  output files.
+
+- Neuromag processing history is included with the output files.
+
+mne_epochs2mat
+==============
+
+This new utility extracts epochs from a raw data file, applies
+a bandpass filter to them and outputs them in a format convenient
+for processing in Matlab, see :ref:`BEHFIDCB`.
+
+mne_analyze
+===========
+
+The following new features have been added:
+
+- Processing of raw data segments and easy
+  switching between multiple evoked data sets (not in the manual yet).
+
+- Sketchy surface display mode for source spaces with selection
+  triangulation information created with the ``--ico`` option
+  to mne_setup_source_space.
+
+- Rotation of the coordinate frame in the coordinate system
+  alignment dialog, see :ref:`CACEHGCD`.
+
+- Several new graphics output file formats as well as automatic
+  and snapshot output modes, see :ref:`CACIJFII`.
+
+- It is now possible to inquire timecourses from stc overlays.
+  Both labels and surface picking are supported.
+
+- Added an option to include surface vertex numbers to the timecourse output,
+  see :ref:`CACJJGFA`.
+
+- Overlays matching the scalp surface can now be loaded, see :ref:`CACFCHEC`.
+
+- The dipole display dialog has now control over the dipole
+  display properties. Multiple dipoles can be now displayed, see :ref:`CACGGAIA`.
+
+- Time stepping with cursor keys has been added.
+
+- Dynamic cursors have been added to the full view display.
+
+- The viewer display now automatically rotates to facilitate
+  fiducial picking from the head surface.
+
+mne_ctf2fiff
+============
+
+Corrected errors in compensation channel information and compensation data
+output. The transformation between the CTF and Neuromag coordinate
+frames is now included in the output file.
+
+mne_make_movie
+==============
+
+Added the ``--labelverts`` option, see :ref:`CBBHHCEF`.
+
+mne_surf2bem
+============
+
+Added the ``--shift`` option to move surface vertices
+outwards. Fixed some loopholes in topology checks. Also added the ``--innershift`` option
+to mne_setup_forward_model . For
+more information, see :ref:`CIHDBFEG` and :ref:`BEHCACCJ`.
+
+mne_forward_solution
+====================
+
+Added code to compute forward solutions for CTF data with
+software gradient compensation on.
+
+mne_inverse_operator
+====================
+
+The following changes have been made in mne_inverse_operator :
+
+- Added options to regularize the noise-covariance
+  matrix.
+
+- Added correct handling of the rank-deficient covariance matrix
+  resulting from the use of SSS.
+
+- Additional projections cannot be specified if the noise covariance matrix
+  was computed with projections on.
+
+- Bad channels can be added only in special circumstances if
+  the noise covariance matrix was computed with projections on.
+
+mne_compute_raw_inverse
+=======================
+
+This utility is now documented in :ref:`CBBCGHAH`. The
+utility mne_make_raw_inverse_operator has
+been removed from the software.
+
+Time range settings
+===================
+
+The tools mne_compute_raw_inverse , mne_convert_mne_data ,
+and mne_compute_mne no longer
+have command-line options to restrict the time range of evoked data
+input.
+
+mne_change_baselines
+====================
+
+It is now possible to process all data sets in a file at
+once. All processed data are stored in a single output file.
+
+New utilities
+=============
+
+mne_show_fiff
+-------------
+
+Replacement for the Neuromag utility show_fiff .
+This utility conforms to the standard command-line option conventions
+in MNE software. For details, see :ref:`CHDHEDEF`.
+
+mne_make_cor_set
+----------------
+
+Replaces the functionality of the Neuromag utility create_mri_set_simple to
+create a fif format description file for the FreeSurfer MRI data.
+This utility is called by the mne_setup_mri script.
+
+mne_compensate_data
+-------------------
+
+This utility applies or removes CTF software gradient compensation
+from evoked-response data, see :ref:`BEHDDFBI`.
+
+mne_insert_4D_comp
+------------------
+
+This utility merges 4D Magnes compensation data from a text
+file and the main helmet sensor data from a fif file and creates
+a new fif file :ref:`BEHGDDBH`.
+
+mne_ctf_dig2fiff
+----------------
+
+This utility reads a text format Polhemus data file, transforms
+the data into the Neuromag head coordinate system, and outputs the
+data in fif or hpts format.
+
+mne_kit2fiff
+------------
+
+The purpose of this new utility is to import data from the
+KIT MEG system, see :ref:`BEHBJGGF`.
+
+mne_make_derivations
+--------------------
+
+This new utility will take derivation data from a text file
+and convert it to fif format for use with mne_browse_raw ,
+see :ref:`CHDHJABJ`.
+
+BEM mesh generation
+===================
+
+All information concerning BEM mesh generation has been moved
+to :ref:`create_bem_model`. Utilities for BEM mesh generation using
+FLASH images have been added, see :ref:`BABFCDJH`.
+
+Matlab toolbox
+==============
+
+The MNE Matlab toolbox has been significantly enhanced. New
+features include:
+
+- Basic routines for reading and writing
+  fif files.
+
+- High-level functions to read and write evoked-response fif
+  data.
+
+- High-level functions to read raw data.
+
+- High-level routines to read source space information, covariance
+  matrices, forward solutions, and inverse operator decompositions
+  directly from fif files.
+
+The Matlab toolbox is documented in :ref:`ch_matlab`.
+
+The mne_div_w utility
+has been removed because it is now easy to perform its function
+and much more using the Matlab Toolbox.
+
+Release notes for MNE software 2.6
+##################################
+
+Manual
+======
+
+The changes briefly described below are documented in the
+relevant sections of the manual. Change bars are employed to indicate
+changes with respect to manual version 2.5. :ref:`ch_forward` now
+contains a comprehensive discussion of the various coordinate systems
+used in MEG/EEG data.
+
+Command-line options
+====================
+
+All compiled C programs now check that the command line does
+not contain any unknown options. Consequently, scripts that have
+inadvertently specified some options which are invalid will now
+fail.
+
+Changes to existing software
+============================
+
+mne_add_patch_info
+------------------
+
+- Changed option ``--in`` to ``--src`` and ``--out`` to ``--srcp`` .
+
+- Added ``--labeldir`` option.
+
+mne_analyze
+-----------
+
+New features include:
+
+- The name of the digital trigger channel
+  can be specified with the MNE_TRIGGER_CH_NAME environment variable.
+
+- Using information from the fif data files, the wall clock
+  time corresponding to the current file position is shown on the
+  status line.
+
+- mne_analyze can now be
+  controlled by mne_browse_raw to
+  facilitate interactive analysis of clinical data.
+
+- Added compatibility with Elekta-Neuromag Report Composer (cliplab) and
+  improved the quality of hardcopies.
+
+- Both in mne_browse_raw and
+  in mne_analyze , a non-standard
+  default layout can be set on a user-by-user basis, see :ref:`CACFGGCF`.
+
+- Added the ``--digtrigmask`` option.
+
+- Added new image rotation functionality using the mouse wheel
+  or trackball.
+
+- Added remote control of the FreeSurfer MRI
+  viewer (tkmedit ), see :ref:`CACCHCBF`.
+
+- Added fitting of single equivalent current dipoles and channel
+  selections, see :ref:`CHDGHIJJ`.
+
+- Added loading of FreeSurfer cortical
+  parcellation data as labels.
+
+- Added support for using the FreeSurfer average
+  brain (fsaverage) as a surrogate.
+
+- The surface selection dialog was redesigned for faster access
+  to the files and to remove problems with a large number of subjects.
+
+- A shortcut button to direct a file selector to the appropriate
+  default directory was added to several file loading dialogs.
+
+- The vertex coordinates can now be displayed, see :ref:`CHDIEHDH`.
+
+mne_average_forward_solutions
+-----------------------------
+
+EEG forward solutions are now averaged as well, see :ref:`CHDBBFCA`.
+
+mne_browse_raw and mne_process_raw
+----------------------------------
+
+Improvements in the raw data processor mne_browse_raw /mne_process_raw include:
+
+- The name of the digital trigger channel
+  can be specified with the MNE_TRIGGER_CH_NAME environment variable.
+
+- The format of the text event files was slightly changed. The
+  sample numbers are now "absolute" sample numbers
+  taking into account the initial skip in the event files. The new
+  format is indicated by an additional "pseudoevent" in
+  the beginning of the file. mne_browse_raw and mne_process_raw are
+  still compatible with the old event file format. For details, see :ref:`CACBCEGC`.
+
+- Using information from the fif data files, the wall clock
+  time corresponding to the current file position is shown on the
+  status line.
+
+- mne_browse_raw can now
+  control mne_analyze to facilitate
+  interactive analysis of clinical data.
+
+- If the length of an output raw data file exceeds the 2-Gbyte
+  fif file size limit, the output is split into multiple files.
+
+- The ``--split`` and ``--events`` options were
+  added to mne_process_raw.
+
+- The ``--allowmaxshield`` option was added to mne_browse_raw to allow
+  loading of unprocessed data with MaxShield in the Elekta-Neuromag
+  systems. Such data should never be used as input for source
+  localization.
+
+- The ``--savehere`` option was added, see :ref:`CACFAAAJ`.
+
+- The stderr parameter was
+  added to the averaging definition files, see :ref:`CACHACHH`.
+
+- Added compatibility with Elekta-Neuromag Report Composer (cliplab) and
+  improved the quality of hardcopies.
+
+- Both in mne_browse_raw and
+  in mne_analyze , a non-standard
+  default layout can be set on a user-by-user basis, see :ref:`CACFGGCF`.
+
+- mne_browse_raw now includes
+  an interactive editor to create derived channels, see :ref:`CACJIEHI`.
+
+- The menus in mne_browse_raw were
+  reorganized and a time point specification text field was added.
+
+- A possibility to keep the old projection items was added to the
+  new projection definition dialog.
+
+- Added ``--cd`` option.
+
+- Added filter buttons for raw files and Maxfilter (TM) output
+  to the open dialog.
+
+- Added possibility to create a graph-compatible projection
+  to the Save projection dialog.
+
+- Added possibility to compute a projection operator from epochs
+  specified by events.
+
+- Added the ``--keepsamplemean`` option
+  to the covariance matrix computation files.
+
+- Added the ``--digtrigmask`` option.
+
+- Added Load channel selections... item
+  to the File menu.
+
+- Added new browsing functionality using the mouse wheel or
+  trackball, see :ref:`BABIDADB`.
+
+- Added optional items to the topographical data displays, see :ref:`CACBEHCD`.
+
+- Added an event list window, see :ref:`BABFDICC`.
+
+- Added an annotator window, see :ref:`BABCIGGH`.
+
+- Keep events sorted by time.
+
+- User-defined events are automatically kept in a fif-format
+  annotation file, see :ref:`BABDFAHA`.
+
+- Added the delay parameter
+  to the averaging and covariance matrix estimation description files,
+  see :ref:`CACHACHH` and :ref:`BABECIAH`.
+
+Detailed information on these changes can be found in :ref:`ch_browse`.
+
+mne_compute_raw_inverse
+-----------------------
+
+The ``--digtrig``, ``--extra``, ``--noextra``, ``--split``, ``--labeldir``, and ``--out`` options
+were added, see :ref:`CBBCGHAH`.
+
+mne_convert_surface
+-------------------
+
+The functionality of mne_convert_dfs was
+integrated into mne_convert_surface .
+Text output as a triangle file and a file containing the
+list of vertex points was added. The Matlab output option was removed.
+Consequently,  mne_convert_dfs , mne_surface2mat ,
+and mne_list_surface_nodes were
+deleted from the distribution.
+
+mne_dump_triggers
+-----------------
+
+This obsolete utility was deleted from the distribution.
+
+mne_epochs2mat
+--------------
+
+The name of the digital trigger channel can be specified
+with the MNE_TRIGGER_CH_NAME environment variable, see :ref:`BEHFIDCB`. Added
+the ``--digtrigmask`` option.
+
+mne_forward_solution
+--------------------
+
+Added code to compute the derivatives of the forward solution with respect to
+the dipole position coordinates, see :ref:`CHDDIBAH`.
+
+mne_list_bem
+------------
+
+The ``--surfno`` option is replaced with the ``--id`` option, see :ref:`BEHBBEHJ`.
+
+mne_make_cor_set
+----------------
+
+Include data from mgh/mgz files to the output automatically.
+Include the Talairach transformations from the FreeSurfer data to
+the output file if possible. For details, see :ref:`BABBHHHE`.
+
+mne_make_movie
+--------------
+
+Added the ``--noscalebar``, ``--nocomments``, ``--morphgrade``, ``--rate``,
+and ``--pickrange`` options, see :ref:`CBBECEDE`.
+
+mne_make_source_space
+---------------------
+
+The ``--spacing`` option is now implemented in this
+program, which means mne_mris_trix is
+now obsolete. The mne_setup_source_space script
+was modified accordingly. Support for tri, dec, and dip files was dropped,
+see :ref:`BEHCGJDD`.
+
+mne_mdip2stc
+------------
+
+This utility is obsolete and was removed from the distribution.
+
+mne_project_raw
+---------------
+
+This utility is obsolete and was removed from the distribution.
+The functionality is included in mne_process_raw .
+
+mne_rename_channels
+-------------------
+
+Added the ``--revert`` option, see :ref:`CHDCFEAJ`.
+
+mne_setup_forward_model
+-----------------------
+
+Added the ``--outershift`` and ``--scalpshift`` options,
+see :ref:`CIHDBFEG`.
+
+mne_simu
+--------
+
+Added source waveform expressions and the ``--raw`` option,
+see :ref:`CHDECAFD`.
+
+mne_transform_points
+--------------------
+
+Removed the ``--tomrivol`` option.
+
+Matlab toolbox
+--------------
+
+Several new functions were added, see :ref:`ch_matlab`.
+
+.. note:: The Matlab function fiff_setup_read_raw has a significant change. The sample numbers now take into account a possible initial skip in the file, *i.e.*, the time between the start of the data acquisition and the start of saving the data to disk. The first_samp member of the returned structure indicates the initial skip in samples. If you want your own routines, which assume that the initial skip has been removed, to perform identically with the previous version, sub [...]
+
+New utilities
+=============
+
+mne_collect_transforms
+----------------------
+
+This utility collects coordinate transformation information
+from several sources into a single file, see :ref:`BABBIFIJ`.
+
+mne_convert_dig_data
+--------------------
+
+This new utility converts digitization (Polhemus) data between
+different file formats, see :ref:`BABCJEAD`.
+
+mne_edf2fiff
+------------
+
+This is a new utility to convert EEG data from EDF, EDF+,
+and BDF formats to the fif format, see :ref:`BABHDBBD`.
+
+mne_brain_vision2fiff
+---------------------
+
+This is a new utility to convert BrainVision EEG data to
+the fif format, see :ref:`BEHCCCDC`. This utility is also
+used by the mne_eximia2fiff script
+to convert EEG data from the Nexstim eXimia EEG system to the fif
+format, see :ref:`BEHGCEHH`.
+
+mne_anonymize
+-------------
+
+New utility to remove subject identifying information from
+measurement files, see :ref:`CHDIJHIC`.
+
+mne_opengl_test
+---------------
+
+New utility for testing the OpenGL graphics performance,
+see :ref:`CIHIIBDA`.
+
+mne_volume_data2mri
+-------------------
+
+Convert data defined in a volume created with mne_volume_source_space to
+an MRI overlay, see :ref:`BEHDEJEC`.
+
+mne_volume_source_space
+-----------------------
+
+Create a grid of source points within a volume, see :ref:`BJEFEHJI`. mne_volume_source_space also
+optionally creates a trilinear interpolator matrix to facilitate
+converting a distribution of values in the volume grid into an MRI
+overlay using mne_volume_data2mri ,
+see :ref:`BEHDEJEC`.
+
+mne_copy_processing_history
+---------------------------
+
+This new utility copies the processing history block from
+one data file to another, see :ref:`CJACECAH`.
+
+Release notes for MNE software 2.7
+##################################
+
+Software engineering
+====================
+
+There have been two significant changes in the software engineering
+since MNE Version 2.6:
+
+- CMake is now used in building the software
+  package and
+
+- Subversion (SVN) is now used for revision control instead
+  of Concurrent Versions System (CVS).
+
+These changes have the following effects on the distribution of the
+MNE software and the setup for individual users:
+
+- There is now a separate software package
+  for each of the platforms supported.
+
+- The software is now organized completely under standard directories (bin,
+  lib, and share). In particular, the directory setup/mne has been moved
+  to share/mne and the directories app-defaults and doc are now under
+  share. All files under share are platform independent.
+
+- The use of shared libraries has been minimized. This alleviates
+  compatibility problems across operating system versions.
+
+- The setup scripts have changed.
+
+The installation and user-level effects of the new software
+organization are discussed in :ref:`CHDBAFGJ` and :ref:`setup_martinos`.
+
+In addition, several minor bugs have been fixed in the source
+code. Most relevant changes visible to the user are listed below.
+
+Matlab tools
+============
+
+- The performance of the fiff I/O routines
+  has been significantly improved thanks to the contributions of François
+  Tadel at USC.
+
+- Label file I/O routines mne_read_label_file and mne_write_label_file as
+  well as a routine to extract time courses corresponding to a label from
+  an stc file (mne_label_time_courses) have been added.
+
+- The patch information is now read from the source space file
+  and included in the source space data structure.
+
+mne_browse_raw
+==============
+
+- Rejection criteria to detect flat channels
+  have been added, see :ref:`BABIHFBI` and :ref:`BABCGEJE`.
+
+- Possibility to detect temporal skew between trigger input
+  lines has been added, see :ref:`BABIHFBI` and :ref:`BABCGEJE`.
+
+- The ``--allowmaxshield`` option now works in the batch mode as well.
+
+- Added the ``--projevent`` option to batch mode.
+
+- It is now possible to compute an SSP operator for EEG, see :ref:`CACEAHEI`.
+
+mne_analyze
+===========
+
+- Both hemispheres can now be displayed
+  simultaneously, see :ref:`CACCABEA`.
+
+- If the source space was created with mne_make_source_space version 2.3
+  or later, the subject's surface data are automatically
+  loaded after loading the data and the inverse operator.
+
+Miscellaneous
+=============
+
+- mne_smooth_w was
+  renamed to mne_smooth and can
+  now handle both w and stc files. Say ``mne_smooth --help`` to
+  find the options.
+
+- All binaries now reside in $MNE_ROOT/bin. There are no separate bin/mne
+  and bin/admin directories.
+
+- mne_anonymize now has the
+  ``--his`` option to remove the HIS ID of the subject, see :ref:`CHDIJHIC`.
+
+- mne_check_surface now has
+  the ``--bem`` and ``--id`` options to check surfaces from a BEM fif file.
+  For details, try mne_check_surface --help.
+
+- mne_compute_raw_inverse now
+  has the ``--orignames`` option, see :ref:`CHDEIHFA`.
+
+- Added ``--headcoord`` option to mne_convert_dig_data ,
+  see :ref:`BABCJEAD`.
+
+- Added ``--talairach`` option to mne_make_cor_set ,
+  see :ref:`BABBHHHE`.
+
+- Added the ``--morph`` option to mne_setup_source_space and mne_make_source_space ,
+  see :ref:`CIHCHDAE` and :ref:`BEHCGJDD`, respectively.
+
+- Added the ``--prefix`` option to mne_morph_labels ,
+  see :ref:`CHDCEAFC`.
+
+- Added the ``--blocks`` and ``--indent`` options to mne_show_fiff ,
+  see :ref:`CHDHEDEF`.
+
+- Added the ``--proj`` option as well as map types 5 and 6 to mne_sensitivity_map ,
+  see :ref:`CHDDCBGI`.
+
+- Fixed a bug in mne_inverse_operator which
+  caused erroneous calculation of EEG-only source estimates if the
+  data were processed with Maxfilter software and sometimes caused
+  similar behavior on MEG/EEG source estimates.
+
+Release notes for MNE software 2.7.1
+####################################
+
+mne_analyze
+===========
+
+- Added a new restricted mode for visualizing
+  head position within the helmet, see :ref:`CHDJECCG`.
+
+- Added information about mne_make_scalp_surfaces to :ref:`CHDCGHIF`.
+
+mne_browse_raw
+==============
+
+- Added possibility for multiple event
+  parameters and the mask parameter in averaging and noise covariance
+  calculation, see :ref:`CACHACHH`.
+
+- Added simple conditional averaging, see :ref:`CACHACHH`.
+
+Release notes for MNE software 2.7.2
+####################################
+
+mne_add_patch_info
+==================
+
+Added the capability to compute distances between source
+space vertices, see :ref:`BEHCBCGG`.
+
+Matlab toolbox
+==============
+
+- Added new functions for stc and w
+  file I/O that employ 1-based vertex numbering inside Matlab, see Table 10.11.
+
+- mne_read_source_spaces.m now reads the inter-vertex distance
+  information optionally produced by mne_add_patch_info ,
+  see :ref:`BEHCBCGG`.
+
+Miscellaneous
+=============
+
+- Added ``--shift`` option to mne_convert_surface ,
+  see :ref:`BABEABAA`.
+
+- Added ``--alpha`` option to mne_make_movie ,
+  see :ref:`CBBBBHIF`.
+
+- Added ``--noiserank`` option to mne_inverse_operator and mne_do_inverse_operator ,
+  see :ref:`CBBDDBGF` and :ref:`CIHCFJEI`, respectively.
+
+- The fif output from mne_convert_dig_data now
+  includes the transformation between the digitizer and MNE head coordinate
+  systems if such a transformation has been requested, see :ref:`BABCJEAD`.
+  This also affects the output from mne_eximia2fiff, see :ref:`BEHGCEHH`.
+
+- Added ``--noflash30``, ``--noconvert``, and ``--unwarp`` options to mne_flash_bem ,
+  see :ref:`BABFCDJH`.
+
+Release notes for MNE software 2.7.3
+####################################
+
+Miscellaneous
+=============
+
+- Added preservation of the volume geometry
+  information in the FreeSurfer surface files.
+
+- The ``--mghmri`` option in combination with ``--surfout`` inserts
+  the volume geometry information into the output of mne_convert_surface ,
+  see :ref:`BEHDIAJG`.
+
+- Added ``--replacegeom`` option to mne_convert_surface ,
+  see :ref:`BEHDIAJG`.
+
+- Modified mne_watershed_bem and mne_flash_bem to
+  include the volume geometry information in the output. This allows
+  viewing of the output surfaces in the FreeSurfer freeview utility.
diff --git a/doc/source/manual/analyze.rst b/doc/source/manual/analyze.rst
new file mode 100644
index 0000000..cb97ce1
--- /dev/null
+++ b/doc/source/manual/analyze.rst
@@ -0,0 +1,2746 @@
+
+.. _ch_interactive_analysis:
+
+====================
+Interactive analysis
+====================
+
+Overview
+########
+
+Interactive analysis of the MEG/EEG data and source estimates
+is facilitated by the mne_analyze tool.
+Its features include:
+
+- Viewing of evoked-response data or data
+  segments in a topographical layout.
+
+- Alignment of MEG and head coordinate frames.
+
+- Display of maps of the magnetic field and electric potentials.
+
+- Computation and display of cortically-constrained minimum-norm current
+  estimates and statistical maps derived from them. The solutions can
+  be displayed on folded and inflated cortical surfaces as well as
+  on curved and flattened surface patches.
+
+- Fitting of current dipoles to the data.
+
+- Connection to tkmedit (part
+  of FreeSurfer) to display data on MRI slices.
+
+- Production of QuickTime (TM) movies and graphics snapshots
+  in several image file formats.
+
+- Connection to cliplab (part of Elekta-Neuromag software) to
+  produce graphics reports, see :ref:`CACJEFAI`.
+
+- Inquiry and saving of source waveforms at selected surface
+  points or within ROIs defined by label files.
+
+- On-line morphing of the current distributions.
+
+- Output of snapshots in w file format.
+
+- Display of overlay data delivered in w and stc file formats.
+
+- Creation of ROI (label) files.
+
+- Viewing of continuous head-position data delivered by Elekta-Neuromag
+  software.
+
+.. _CHDJECCG:
+
+Command line options
+####################
+
+Since mne_analyze is
+primarily an interactive analysis tool, there are only a few command-line
+options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---cd <*dir*>**
+
+    Change to this directory before starting.
+
+**\---subject <*name*>**
+
+    Specify the default subject name for surface loading.
+
+**\---digtrig <*name*>**
+
+    Name of the digital trigger channel. The default value is 'STI
+    014'. Underscores in the channel name will be replaced
+    by spaces.
+
+**\---digtrigmask <*number*>**
+
+    Mask to be applied to the raw data trigger channel values before considering
+    them. This option is useful if one wants to set some bits to a don't
+    care state. For example, some finger response pads keep the trigger
+    lines high if not in use, *i.e.*, a finger is
+    not in place. Yet, it is convenient to keep these devices permanently
+    connected to the acquisition system. The number can be given in
+    decimal or hexadecimal format (beginning with 0x or 0X). For example,
+    the value 255 (0xFF) means that only the lowest order byte (usually
+    trigger lines 1 - 8 or bits 0 - 7) will be considered.
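+
+    For example, a hypothetical invocation::
+
+        mne_analyze --digtrigmask 0xFF
+
+    would consider only the lowest order byte (trigger lines 1 - 8) of
+    the digital trigger channel.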
+
+**\---visualizehpi**
+
+    Start mne_analyze in the restricted *head
+    position visualization* mode. For details, see :ref:`CHDEDFAE`.
+
+**\---dig <*filename*>**
+
+    Specify a file containing the head shape digitization data. This option
+    is only usable if the *head position visualization* mode has
+    first been invoked with the ``--visualizehpi`` option.
+
+**\---hpi <*filename*>**
+
+    Specify a file containing the transformation between the MEG device
+    and head coordinate frames. This option is only usable if the *head
+    position visualization* mode has first been invoked with the
+    ``--visualizehpi`` option.
+
+**\---scalehead**
+
+    In *head position visualization* mode, scale
+    the average scalp surface according to the head surface digitization
+    data before aligning them to the scalp surface. This option is
+    recommended.
+
+**\---rthelmet**
+
+    Use the room-temperature helmet surface instead of the MEG sensor
+    surface when showing the relative position of the MEG sensors and
+    the head in the *head position visualization* mode.
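+
+For example, the head position visualization mode could be entered
+with a command along these lines (the file names are illustrative
+only)::
+
+    mne_analyze --visualizehpi --dig dig_data.fif --hpi hpi_trans.fif --scalehead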
+
+.. note:: Before starting mne_analyze the ``SUBJECTS_DIR`` environment variable has to be set.
+
+.. note:: Strictly speaking, trigger mask value zero would mean that all trigger inputs are ignored. However, for convenience, setting the mask to zero or not setting it at all has the same effect as 0xFFFFFFFF, *i.e.*, all bits set.
+
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_analyze . Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
+
+.. note:: The digital trigger channel mask can also be set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option supersedes the MNE_TRIGGER_CH_MASK environment variable.
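+
+For example, in a Bourne-compatible shell the trigger channel settings
+discussed in the notes above could be made before starting mne_analyze
+as follows (the values are illustrative)::
+
+    export MNE_TRIGGER_CH_NAME="STI 014"
+    export MNE_TRIGGER_CH_MASK=0xFF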
+
+The main window
+###############
+
+.. _CACJABJI:
+
+.. figure:: mne_analyze/main_window.png
+    :alt: main window of mne_analyze
+
+    The main window of mne_analyze.
+
+The main window of mne_analyze shown
+in :ref:`CACJABJI` has the following components:
+
+- The menu bar;
+
+- Display area for a sample response;
+
+- Display of the estimated SNR, see :ref:`CACJFFEE`;
+
+- Display of a source waveform;
+
+- Message area, time-point selection text field, an ECD fit
+  button, a text field for selecting a vertex on the surface, and
+  a message text label;
+
+- Display area for the current estimates;
+
+- Controls for the current estimate display;
+
+- Topographical display of data.
+
+The menus
+#########
+
+The File menu
+=============
+
+The File menu shown in :ref:`CACJCBFI` contains the following items:
+
+.. _CACJCBFI:
+
+.. figure:: mne_analyze/file_menu.png
+    :alt: the file menu
+    :align: center
+    :figwidth: 25%
+
+    The file menu
+
+**Open...**
+
+    Load a new data set and an inverse operator. For details, see :ref:`CACBACHB`.
+
+**Open raw...**
+
+    Load epoch data from a raw data file. For details, see :ref:`CACDEDBI`.
+
+**Switch to data set...**
+
+    If multiple data sets or epochs from a raw data file are loaded,
+    this menu item brings up a list to switch between the data sets
+    or epochs.
+
+**Change working directory...**
+
+    Change the working directory of this program. This will usually
+    be the directory where your MEG/EEG data and inverse operator are located.
+
+**Load surface...**
+
+    Load surface reconstructions for the subject whose data you are analyzing,
+    see :ref:`CACFJICC`.
+
+**Load morphing surface...**
+
+    Load surface reconstructions of another subject for morphing, see :ref:`CACGBEIB`.
+
+**Load surface patch...**
+
+    Load a curved or flattened surface patch, see :ref:`CACFJICC`.
+
+**Load morphing surface patch...**
+
+    Load a curved or flattened surface patch for morphing, see :ref:`CACGBEIB`.
+
+**Load digitizer data...**
+
+    Load digitizer data for coordinate frame alignment, see :ref:`CACEHGCD`.
+
+**View continuous HPI data...**
+
+    Load a data file containing continuous head position information, see :ref:`CACIADAI`.
+
+**Manage overlays...**
+
+    Bring up the overlay manager to import data from stc and w files, see :ref:`CACFCHEC`.
+
+**Save bad channel selection**
+
+    Save the current bad channel selection created in the topographical data
+    display, see :ref:`CACEFBDE`.
+
+**Quit**
+
+    Quit the program.
+
+The Adjust menu
+===============
+
+The contents of the Adjust menu
+are shown in :ref:`CACFDIJJ`:
+
+.. _CACFDIJJ:
+
+.. figure:: mne_analyze/adjust_menu.png
+    :alt: The Adjust menu
+
+    The Adjust menu.
+
+**Scales**
+
+    Adjust the scales of the data display.
+
+**Estimates...**
+
+    Adjust the properties of the displayed current estimates, see :ref:`CACBHDBF`.
+
+**Select trace layout...**
+
+    Select the layout for the topographical display, see :ref:`CACEFBDE`.
+
+**Lights...**
+
+    Adjust the lighting of the scenes in the main display and the viewer, see
+    :ref:`CACBHDBF` and :ref:`CACEFFJF`.
+
+**Field mapping...**
+
+    Adjust the field mapping preferences, see :ref:`CACICDGA`.
+
+**Coordinate alignment...**
+
+    Establish a coordinate transformation between the MEG and MRI coordinate
+    frames, see :ref:`CACEHGCD`.
+
+The View menu
+=============
+
+The contents of the View menu are shown in :ref:`CACBFCGF`:
+
+.. _CACBFCGF:
+
+.. figure:: mne_analyze/view_menu.png
+    :alt: The View menu
+
+    The View menu.
+
+**Show viewer...**
+
+    Loads additional surfaces and pops up the viewer window. The functions
+    available in the viewer are discussed in :ref:`CACEFFJF`.
+
+**Show MRI viewer...**
+
+    Bring up the tkmedit program
+    to view MRI slices, see :ref:`CACCHCBF`.
+
+**Show coordinates...**
+
+    Show the coordinates of a vertex, see :ref:`CHDIEHDH`.
+
+**Show timecourse manager...**
+
+    Brings up the timecourse manager if some timecourses are available.
+    Timecourses are discussed in :ref:`CACCCFHH`.
+
+The Labels menu
+===============
+
+The contents of the Labels menu
+are shown in :ref:`CACHCDCF`. ROI analysis with the help of labels
+is discussed in detail in :ref:`CACCCFHH`.
+
+.. _CACHCDCF:
+
+.. figure:: mne_analyze/labels_menu.png
+    :alt: The Labels menu
+
+    The Labels menu.
+
+The Labels menu contains the following
+items:
+
+**Load label...**
+
+    Loads one label file for ROI analysis.
+
+**Load all labels...**
+
+    Loads all label files available in a directory for ROI analysis.
+
+**Load parcellation...**
+
+    Load cortical parcellation data produced by FreeSurfer from
+    directory $SUBJECTS_DIR/$SUBJECT/label and add the cortical regions
+    defined to the label list.
+
+**Show label list...**
+
+    Shows a list of all currently loaded labels for ROI analysis.
+
+**Discard all labels**
+
+    Discard all labels loaded so far. The label list window will be
+    hidden.
+
+**Clear marked vertices**
+
+    Clear the label outline or a label created interactively.
+
+The Dipoles menu
+================
+
+The contents of the Dipoles menu are shown in :ref:`CACCJDAF`:
+
+.. _CACCJDAF:
+
+.. figure:: mne_analyze/dipoles_menu.png
+    :alt: The dipole fitting menu
+
+    The dipole fitting menu.
+
+**Setup fitting...**
+
+    Define the dipole fitting parameters, see :ref:`CACEDEGA`.
+
+**Show dipole list...**
+
+    Show the list of imported and fitted dipoles, see :ref:`CACGGAIA`.
+
+**Manage channel selections...**
+
+    Manage the selections of channels used in dipole fitting, see :ref:`CACIBHCI`.
+
+The Help menu
+=============
+
+The contents of the Help menu are shown in :ref:`help_menu_analyze`:
+
+.. _help_menu_analyze:
+
+.. figure:: mne_analyze/help_menu.png
+    :alt: The Help menu
+
+    The Help menu.
+
+**On version...**
+
+    Displays the version and compilation date of the program.
+
+**On license...**
+
+    Displays the license information.
+
+**On GLX...**
+
+    Displays information about the OpenGL rendering context. If you experience
+    poor graphics performance, check that the window that pops up from
+    here says that you have a Direct rendering context.
+    If not, either your graphics card or driver software needs an update.
+
+**Why the beep?**
+
+    In some simple error situations, mne_analyze does
+    not pop up an error dialog but refuses the action and rings the bell.
+    The reason for this can be displayed through this help menu item.
+
+.. _CACBACHB:
+
+Loading data
+############
+
+When you select Open... from
+the File menu the data loading
+dialog shown in :ref:`CACFHAIH` appears. It has four sections:
+
+- A standard file selection box.
+
+- List of available data sets. This part is automatically filled
+  in when a proper data file is selected from the file list. You can
+  select one or more data sets from this list. Multiple selection
+  works with help of the shift and control keys. If multiple data
+  sets are selected, the data set to be analyzed can be changed from
+  the data set list accessible through Switch to data set... in the File menu.
+
+- List of available inverse operator decompositions in the current
+  directory and its subdirectory called ``inv`` .
+
+- List of options:
+
+  - MRI/head transform source specifies a file to read the MRI/MEG coordinate
+    transformation information from. This is usually the inverse operator
+    file. However, you can also load data with inverse operator set
+    to <*none*> to view the data
+    as well as field and potential maps derived thereof. In this case
+    you need to specify the coordinate transformation file using the Select... button,
+    usually located in ``mri/T1-neuromag/sets`` under the subject's
+    FreeSurfer directory. The Default button
+    uses the default transformation file which must be called ``$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-trans.fif`` .
+    This can be one of the MRI description files in  ``mri/T1-neuromag/sets`` or
+    a transformation file stored from mne_analyze ,
+    see :ref:`CACEHGCD`.
+
+  - Use EEG average electrode ref. selects
+    whether the average electrode reference is applied to the data.
+    This is only available if the inverse operator is set to <*none*> .
+
+  - nave specifies the effective
+    number of averages to compute the SNR correctly. Usually your measurement
+    file contains this information.
+
+.. _CACFHAIH:
+
+.. figure:: mne_analyze/open_dialog.png
+    :alt: The open dialog
+
+    The open dialog.
+
+After the data set(s) have been selected, the following actions
+will take place:
+
+- The inverse operator will be loaded.
+
+- Baselines will be applied as specified in the scales dialog.
+
+- Projection will be applied to the data. If no inverse operator
+  is specified, the source for the projection data will be the data
+  file and the average EEG reference setting in the options. If an
+  inverse operator is included, the projection will be read from the
+  data file.
+
+- If an inverse operator is loaded, whitened data will be computed.
+
+- If an inverse operator is loaded, the SNR estimate as well
+  as the effective SNR will be computed from the whitened data and
+  displayed in the SNR window.
+
+- Waveforms will be shown in the topographical display as well
+  as in the sample channel display.
+
+If multiple data sets are loaded, each data set has the following
+individual settings:
+
+- Amplitude and time scale settings,
+
+- Baseline,
+
+- Picked time point,
+
+- Sample channel to be displayed, and
+
+- MNE display preferences, see :ref:`CACHFFIJ`.
+
+If a data set has not been previously displayed, the currently
+active settings are copied to the data set.
+
+.. note:: If you double click on an inverse operator file name displayed in the Inverse operator list, the command used to produce this file will be displayed in a message dialog.
+
+.. _CACDEDBI:
+
+Loading epochs from a raw data file
+###################################
+
+Instead of an evoked-response data file it is possible to
+load epochs of data (single trials) from a raw data file. This option
+is invoked from File/Open raw... .
+The file selection box is identical to the one used for evoked responses
+(:ref:`CACFHAIH`) except that the data set selector is replaced
+by the epoch selector shown in :ref:`CACDCGIB`.
+
+.. _CACDCGIB:
+
+.. figure:: mne_analyze/epoch_selector.png
+    :alt: The raw data epoch selector
+
+    The raw data epoch selector.
+
+The epoch selector contains the following controls:
+
+- The event specifier. Only events matching
+  this number are going to be considered.
+
+- The event source specifier. The event source can be either
+  the data file, *i.e.*, the digital trigger channel, or
+  an event data file produced with mne_browse_raw or mne_process_raw ,
+  see :ref:`CACJGIFA`. Using an event data file is useful
+  if, *e.g.*, the epochs to be processed are epileptic spikes.
+
+- The time range specification. This determines the length of
+  the epoch with respect to the selected event.
+
+Once the settings have been accepted by clicking OK ,
+the first matching epoch will be displayed. You can switch between
+epochs using the data set list accessible through Switch to data set... in the File menu.
+
+.. _CACEFBDE:
+
+Data displays
+#############
+
+The MEG and EEG signals can be viewed in two ways:
+
+- A selection of MEG or EEG channels is
+  shown in a topographical layout.
+
+- One representative channel can be selected for the Sample channel display by clicking on a channel in the
+  topographical display.
+
+In both the sample channel display and the topographical
+display, current time point can be selected with a left mouse click.
+In addition, time point of interest can be entered numerically in
+the text box at the bottom left corner of the main display.
+
+.. _CACHBJAC:
+
+The topographical display
+=========================
+
+A selection of channels is always shown in the rightmost
+part of the main display. The topographical layout to use is selected
+from Adjust/Select trace layout... ,
+which brings up a window with a list of available layouts. The system-wide
+layouts reside in $MNE_ROOT/share/mne_analyze/lout. In addition,
+any layout files residing in $HOME/.mne/lout are listed. The format
+of the layout files and selection of the default layout is discussed
+in :ref:`CACFGGCF`.
+
+Several actions can be performed with the mouse in the topographical data
+display:
+
+**Left button click**
+
+    Selects a time point of interest.
+
+**Left button click with control key**
+
+    Selects a time point of interest and selects the channel under the pointer
+    into the sample channel display.
+
+**Left button drag with shift key**
+
+    Enlarges the view to contain only channels in the selected area.
+
+**Middle button click or drag**
+
+    Marks this channel as bad and clears all previously marked bad channels.
+    This action is only available if an inverse operator is *not* loaded.
+    An inverse operator dictates the selection of bad channels. The
+    current bad channel selection can be applied to the data from File/Save bad channel selection .
+
+**Middle button click or drag with control key**
+
+    Extends the bad channel selection without clearing the previously active
+    bad channels.
+
+**Right button**
+
+    Adjusts the channel selection used for dipole fitting in the same
+    way as the middle button selects bad channels. For more information
+    on channel selections, see :ref:`CACIBHCI`.
+
+.. _CACFGHBJ:
+
+The sample channel display
+==========================
+
+The sample channel display shows one of the measurement channels
+at the upper left corner of the mne_analyze user
+interface. A time point can be selected with a left mouse click.
+In addition, the following keyboard functions are associated with
+the sample channel display:
+
+**Down**
+
+    Change the sample channel to the next channel in the scanning order.
+
+**Up**
+
+    Change the sample channel to the previous channel in the scanning order.
+
+**Right**
+
+    Move forward in time by 1 ms.
+
+**Control Right**
+
+    Move forward in time by 5 ms.
+
+**Left**
+
+    Move backward in time by 1 ms.
+
+**Control Left**
+
+    Move backward in time by 5 ms.
+
+Scale settings
+==============
+
+The scales of the topographical and sample channel display
+can be adjusted from the Scales dialog
+which is invoked by selecting Adjust/Scales... from
+the menus. The Scales dialog
+shown in :ref:`CACJJCGD` has the following entries:
+
+**Analyze range min [ms]**
+
+    Specifies the lower limit of the time range of data to be shown.
+
+**Analyze range max [ms]**
+
+    Specifies the upper limit of the time range of data to be shown.
+
+**Use full time range**
+
+    If this box is checked, all data available in the data file will
+    be shown.
+
+**Baseline min [ms]**
+
+    Specifies the lower time limit of the baseline.
+
+**Baseline max [ms]**
+
+    Specifies the upper time limit of the baseline.
+
+**Baseline in use**
+
+    Baseline subtraction can be switched on and off from this button.
+
+**MEG amp min [fT/cm]**
+
+    Lower limit of the vertical scale of planar gradiometer MEG channels.
+
+**MEG amp max [fT/cm]**
+
+    Upper limit of the vertical scale of planar gradiometer MEG channels.
+
+**MEG axmult [cm]**
+
+    The vertical scale of MEG magnetometers and axial gradiometers will
+    be obtained by multiplying the planar gradiometer vertical scale
+    limits by this value, given in centimeters.
+
+**EEG amp min [muV]**
+
+    Lower limit of the vertical scale of EEG channels.
+
+**EEG amp max [muV]**
+
+    Upper limit of the vertical scale of EEG channels.
+
+**Show stimulus channel**
+
+    Show the digital trigger channel data in the sample view together with
+    the sample channel.
+
+.. _CACJJCGD:
+
+.. figure:: mne_analyze/scales_dialog.png
+    :alt: The Scales dialog
+
+    The Scales dialog.
+
+.. _CACFJICC:
+
+The surface display
+###################
+
+In mne_analyze , the current
+estimates are visualized on inflated or folded cortical surfaces.
+There are two visualization displays: the surface display, which
+is always visible, and the 3D viewer which is invoked from the Windows/Show viewer... menu selection, see :ref:`CACEFFJF`.
+
+A total of eight surfaces or patches can be assigned to the
+surface display:
+
+- The left and right hemisphere cortical
+  surfaces for the subject whose data you are analyzing. These surfaces
+  can be the inflated, white-matter, or pial surfaces. They are loaded
+  through the File/Load surface... menu
+  selection,
+
+- The left and right hemisphere cortical surfaces of another
+  subject or an alternative representation of the cortical surface
+  of the actual subject. For example, you can switch between the inflated
+  and folded (pial or white matter) cortical surfaces very easily.
+  These surfaces are loaded from the File/Load morphing surface... menu selection.
+
+- Left and right hemisphere curved or flat cortical patches
+  for the subject you are analyzing. These patches are loaded from the File/Load surface patch... menu selection. The full cortical
+  surfaces must be loaded first before loading the patches.
+
+- Patches for another subject or another pair of patches
+  for the same subject through the File/Load morphing surface patch... menu selection. Again, the full
+  cortical surfaces must have been loaded first.
+
+.. _CHDIFFHJ:
+
+The surface selection dialog
+============================
+
+When File/Load surface... or File/Load morphing surface... is invoked, the surface selection dialog
+shown in :ref:`CACDGJDC` appears.
+
+.. _CACDGJDC:
+
+.. figure:: mne_analyze/surface_selection_dialog.png
+    :alt: The surface selection dialog
+
+    The surface selection dialog.
+
+The dialog has the following components:
+
+**List of subjects**
+
+    This list contains the subjects available in the directory set with
+    the ``SUBJECTS_DIR`` environment variable.
+
+**List of available surfaces for the selected subject**
+
+    Lists the surfaces available for the current subject. When you click on
+    an item in this list, it appears in the Selected surface text field.
+
+**x-rotation (deg)**
+
+    Specifies the initial rotation of the surface around the *x* (left
+    to right) axis. Positive angle means a counterclockwise rotation
+    when the surface is looked at from the direction of the positive *x* axis.
+    Sometimes a more pleasing visualization is obtained when these rotations are
+    specified when the surface is loaded.
+
+**y-rotation (deg)**
+
+    Specifies the initial rotation of the surface around the *y* (back
+    to front) axis.
+
+**z-rotation (deg)**
+
+    Specifies the initial rotation of the surface around the *z* (bottom
+    to top) axis.
+
+The patch selection dialog
+==========================
+
+The surface patches are loaded with the help of the patch selection
+dialog, which appears when File/Load surface patch... or File/Load morphing surface patch... is selected. This dialog,
+shown in :ref:`CACHEEJD`, contains a list of available patches
+and the possibility to rotate a flat patch counterclockwise
+by the specified number of degrees from its original orientation.
+The patch is automatically associated with the correct hemisphere
+on the basis of the first two letters in the patch name (lh = left
+hemisphere, rh = right hemisphere).
+
+.. _CACHEEJD:
+
+.. figure:: mne_analyze/patch_selection_dialog.png
+    :alt: patch selection dialog
+
+    The patch selection dialog.
+
+.. _CACCABEA:
+
+Controlling the surface display
+===============================
+
+The main surface display has a section called Adjust view , which has the controls shown in :ref:`CACCFCGJ`:
+
+**L and R**
+
+    Select the left or right hemisphere surface loaded through File/Load surface... .
+
+**B**
+
+    Display the surfaces for both hemispheres.
+
+**M**
+
+    Display the surfaces loaded through File/Load morphing surface... according to the L, R, and B hemisphere
+    selectors.
+
+**P**
+
+    Select the patch associated with the currently selected surface. For this
+    to work, either L or R must be selected.
+
+**Option menu**
+
+    Select one of the predefined view orientations, see :ref:`CACCCGDB`, below.
+
+**Arrow buttons**
+
+    Rotate the surface by increments specified in degrees in the text
+    box next to the arrows.
+
+.. _CACCFCGJ:
+
+.. figure:: mne_analyze/surface_controls.png
+    :alt: Surface controls
+
+    Surface controls.
+
+The display can also be adjusted
+using keyboard shortcuts, which are available once you click in
+the main surface display with the left mouse button to make it active:
+
+**Arrow keys**
+
+    Rotate the surface by increments specified in degrees in the Adjust View section.
+
+**+**
+
+    Enlarge the image.
+
+**-**
+
+    Reduce the image.
+
+**=**
+
+    Return to the default size.
+
+**r**
+
+    Rotate the image one full revolution around the z axis using the currently
+    specified rotation step. This is useful for producing a sequence
+    of images when automatic image saving is on, see :ref:`CACBEBGC`.
+
+**s**
+
+    Produces a raster image file which contains a snapshot of the currently
+    displayed image. For information on snapshot mode, see :ref:`CACBEBGC`.
+
+**.**
+
+    Stops the rotation invoked with the 'r' key, see
+    above.
+
+In addition, the mouse wheel or trackball can be used to
+rotate the image. If a trackball is available, *e.g.*,
+with the Apple MightyMouse, the image can be rotated up and down
+or left and right with the trackball. With a mouse wheel the image
+will rotate up and down when the wheel is rotated. Image rotation
+in the left-right direction is achieved by holding down the shift key
+when rotating the wheel. The shift key
+has the same effect on trackball operation.
+
+.. note:: The trackball and mouse wheel functionality is dependent on your X server settings. On Mac OSX these settings are normally correct by default but on a LINUX system some adjustments to the X server settings may be necessary. Consult your system administrator or Google for details.
+
+.. _CHDIEHDH:
+
+Selecting vertices
+==================
+
+When you click on the surface with the left mouse button,
+the corresponding vertex number and the associated value will be
+displayed on the message line at the bottom of the display. In addition,
+the time course at this vertex will be shown, see :ref:`CHDGHDGE`.
+You can also select a vertex by entering the vertex number into the
+text field at the bottom of the display. If the MRI viewer is displayed and Track surface location in MRI is selected in the MRI viewer control dialog, the cursor in the MRI slices
+will also follow the vertex selection, see :ref:`CACCHCBF`.
+
+The View menu choice Show coordinates... brings up a window which shows
+the coordinates of the selected vertex on the *white matter* surface, *i.e.*,
+the lh.white and rh.white FreeSurfer surfaces. If morphing surfaces
+have been loaded, the coordinates of both the subject being analyzed
+and those of the morphing subject will be shown. The Coordinates window
+includes the following lines:
+
+**MEG head**
+
+    Indicates the vertex location in the *MEG head* coordinates.
+    This entry will be present only if MEG/EEG data have been loaded.
+
+**Surface RAS (MRI)**
+
+    Indicates the vertex location in the *Surface RAS* coordinates.
+    This is the native coordinate system of the surfaces and this entry
+    will always be present.
+
+**MNI Talairach**
+
+    Shows the location in MNI Talairach coordinates. To be present,
+    the MRI data of the subject must be in the mgz format (usually true with
+    any recent FreeSurfer version) and the Talairach transformation
+    must be appropriately defined during the *FreeSurfer* reconstruction
+    workflow.
+
+**Talairach**
+
+    Shows the location in the *FreeSurfer* Talairach
+    coordinates which give a better match to the Talairach atlas.
+
+The above coordinate systems are discussed in detail in :ref:`CHDEDFIB`.
+
+.. note:: By default, the tksurfer program, part of the FreeSurfer package, shows the vertex locations on the *orig* rather than *white* surfaces. Therefore, the coordinates shown in mne_analyze and tksurfer are by default slightly different (usually by < 1 mm). To make the two programs consistent, you can start tksurfer with the ``-orig white`` option.
+
+.. _CACCCGDB:
+
+Defining viewing orientations
+=============================
+
+The list of viewing orientations available in the Adjust View section of the main surface display is controlled
+by a text file. The system-wide defaults reside in ``$MNE_ROOT/share/mne/mne_analyze/eyes`` .
+If the file ``$HOME/.mne/eyes`` exists, it is used instead.
+
+All lines in the eyes file starting with # are comments.
+The view orientation definition lines have the format:
+
+<*name*>:<*Left*>:<*Right*>:<*Left up*>:<*Right up*> ,
+
+where
+
+**<*name*>**
+
+    is the name of this viewing orientation,
+
+**<*Left*>**
+
+    specifies the coordinates of the viewing 'eye' location
+    for the left hemisphere, separated by spaces,
+
+**<*Right*>**
+
+    specifies the coordinates of the viewing location for the right
+    hemisphere,
+
+**<*Left up*>**
+
+    specifies the direction which is pointing up in the image for the left hemisphere,
+    and
+
+**<*Right up*>**
+
+    is the corresponding up vector for the right hemisphere.
+
+All values are given in a coordinate system where positive *x* points
+to the right, positive *y* to the front, and
+positive *z* up. The lengths of the vectors specified
+for each of the four items do not matter, since parallel projection is
+used and the up vectors will be automatically normalized. The up
+vectors are usually 0 0 1, *i.e.*, pointing to
+the positive *z* direction unless the view is
+directly from above or below or if some special effect is desired.
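+
+As an illustration, a hypothetical custom entry specifying a view of
+both hemispheres from directly above (with the up vectors consequently
+pointing to the front) could look like this::
+
+    # view both hemispheres from directly above
+    top:0 0 1:0 0 1:0 1 0:0 1 0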
+
+The names of viewing orientations should be less than 9 characters
+long. Otherwise, the middle pane of the main display will not be
+able to accommodate all the controls. The widths of the main window
+panes can be adjusted from the squares at the vertical sashes separating
+the panes.
+
+Adjusting lighting
+==================
+
+The scenes shown in the main surface display and the viewer,
+described in :ref:`CACEFFJF`, are lit by fixed diffuse ambient
+lighting and a maximum of eight light sources. The states, locations,
+and colors of these light sources can be adjusted from the lighting
+adjustment dialog shown in :ref:`CACDDHAI`, which can be
+accessed through the Adjust/Lights... menu
+choice. The colors of the lights can be adjusted numerically or
+using a color adjustment dialog accessible through the Color... buttons.
+
+.. _CACDDHAI:
+
+.. figure:: mne_analyze/adjust_lights.png
+    :alt: lighting adjustment dialog
+
+    The lighting adjustment dialog.
+
+.. _CACBEBGC:
+
+Producing output files
+======================
+
+.. _CACFBIHD:
+
+.. figure:: mne_analyze/hardcopy_controls.png
+    :alt: Graphics output controls
+
+    Graphics output controls.
+
+Three types of output files can be produced from the main
+surface display using the graphics output buttons shown in :ref:`CACFBIHD`:
+
+**w files (w button)**
+
+    These files are simple binary files, which contain a list of vertex numbers
+    on the cortical surface and their current data values. The w files
+    will be automatically tagged with ``-lh.w`` and ``-rh.w`` .
+    They will only contain vertices which currently have a nonzero value.
+
+**Graphics snapshots (img button)**
+
+    These files will contain an exact copy of the image in tif or rgb
+    formats. The output format and the output mode are selected from
+    the image saving dialog shown in :ref:`CACCEFGI`. For more
+    details, see :ref:`CACIJFII`. If snapshot or automatic image
+    saving mode is in effect, the img button
+    terminates this mode.
+
+**QuickTime (TM) movies (mov button)**
+
+    These files will contain a sequence of images as a QuickTime (TM) movie
+    file. The movie saving dialog shown in :ref:`CACFFBBD` specifies the
+    time range and the interval between the frames as well as the quality
+    of the movies, which is restricted to the range 25...100. The size
+    of the QuickTime file produced is approximately proportional to
+    the quality.
+
+.. _CACCEFGI:
+
+.. figure:: mne_analyze/image_dialog.png
+    :alt: File type selection in the image saving dialog
+
+    File type selection in the image saving dialog.
+
+.. _CACFFBBD:
+
+.. figure:: mne_analyze/movie_dialog.png
+    :alt: The controls in the movie saving dialog
+
+    The controls in the movie saving dialog.
+
+.. _CACIJFII:
+
+Image output modes
+==================
+
+The image saving dialog shown in :ref:`CACCEFGI` selects
+the format of the image files produced and the image output mode.
+The buttons associated with the different image formats change the file
+name filter in the dialog to display files of the desired type. However,
+the final output format is defined by the ending of the file name
+in the Selection text field as
+follows:
+
+**jpg**
+
+    JPEG (Joint Photographic Experts Group) format. Best quality jpeg is
+    always produced.
+
+**tif or tiff**
+
+    Uncompressed TIFF (Tagged Image File Format).
+
+**rgb**
+
+    RGB format.
+
+**pdf**
+
+    Portable Document Format.
+
+**png**
+
+    Portable Network Graphics format.
+
+.. note:: Only TIFF and RGB output routines are compiled into mne_analyze . For other output formats to work, the following programs must be present in your system: tifftopdf, tifftopnm, pnmtojpeg, and pnmtopng.
+
+There are three image saving modes which can be selected
+from the option menu labelled Output mode :
+
+**Single**
+
+    When OK is clicked, one file containing
+    the present image is output.
+
+**Snapshot**
+
+    A new image file is produced every time ``s`` is pressed
+    in the image window, see :ref:`CACCABEA` and :ref:`CACFDDCB`. The image file name is used as the stem of
+    the output files. For example, if the name is ``sample.jpg`` ,
+    the output files will be ``sample_shot_001.jpg`` , ``sample_shot_002.jpg`` , *etc.*
+
+**Automatic**
+
+    A new image file is produced every time the image window changes.
+    The image file name is used as the stem of the output files. For
+    example, if the name is ``sample.jpg`` , the output files
+    will be ``sample_001.jpg`` , ``sample_002.jpg`` , *etc.*
+
+.. _CACGBEIB:
+
+Morphing
+########
+
+The displayed surface distributions can be morphed to another
+subject's brain using the spherical morphing procedure,
+see :ref:`ch_morph`. In addition to the morphing surfaces loaded
+through File/Load morphing surface... , surface
+patches for the same subject can be loaded through File/Load morphing surface patch... . Switching between main and morphing
+surfaces is discussed in :ref:`CACCABEA`.
+
+Any labels displayed are visible on any of the surfaces displayed
+in the main surface display. Time points can be picked in any of
+the surfaces. As a result, the corresponding timecourses will be
+shown in the MNE amplitude window, see :ref:`CACCCFHH`.
+
+.. _CACEFFJF:
+
+The viewer
+##########
+
+.. _CACFDDCB:
+
+Overview
+========
+
+.. _CACJDFFH:
+
+.. figure:: mne_analyze/viewer.png
+    :alt: viewer window
+
+    The viewer window with a visualization of MEG and EEG contour maps.
+
+When Windows/Show viewer... is
+selected, the following additional surfaces will be loaded:
+
+- The left and right hemisphere pial surfaces,
+
+- The surface representing the inner, helmet-shaped wall of the
+  dewar on which the MEG sensors are located,
+
+- The scalp surface, and
+
+- The BEM surfaces.
+
+The scalp surface is loaded from the file ``bem/`` <*subject*>``-head.fif`` under
+the subject's FreeSurfer directory. This surface is automatically
+prepared if you use the watershed algorithm as described in :ref:`BABBDHAG`.
+If you have another source for the head triangulation you can use
+the utility mne_surf2bem to create
+the fif format scalp surface file, see :ref:`BEHCACCJ`.
+
+If a file called ``bem/`` <*subject*>``-bem.fif`` under
+the subject's FreeSurfer directory is present, mne_analyze tries
+to load the BEM surface triangulations from there. This file can
+be a symbolic link to one of the ``-bem.fif`` files created
+by mne_prepare_bem_model , see :ref:`CHDJFHEB`.
+If the BEM file contains a head surface triangulation, it will be
+used instead of the one present in the ``bem/`` <*subject*>``-head.fif`` file.
+
+Once all required surfaces have been loaded, the viewer window
+shown in :ref:`CACJDFFH` pops up. In addition to the display
+canvas, the viewer has Adjust view controls
+similar to the main surface display and options for graphics output.
+The Adjust view controls do not
+have the option menu for standard viewpoints. The output options
+only include graphics output as snapshots (img ) or as movies (mov ).
+The following additional buttons are available:
+
+**Options...**
+
+    This button pops up the viewer options window which controls the appearance
+    of the viewer window.
+
+**Rescale**
+
+    This button adjusts the contour level spacing in the magnetic field and
+    electric potential contour maps so that the number of contour lines
+    is reasonable.
+
+**Reload**
+
+    Checks the modification dates of the surface files loaded into the viewer and
+    reloads the data if the files have been changed. This is useful, *e.g.*,
+    for display of different BEM tessellations.
+
+The display can also be adjusted
+using keyboard shortcuts, which are available once you click in
+the viewer display with the left mouse button:
+
+**Arrow keys**
+
+    Rotate the surface by increments specified in degrees in the Adjust View section.
+
+**+**
+
+    Enlarge the image.
+
+**-**
+
+    Reduce the image.
+
+**=**
+
+    Return to the default size.
+
+**r**
+
+    Rotate the image one full revolution around the z axis using the currently
+    specified rotation step. This is useful for producing a sequence
+    of images when automatic image saving is on, see :ref:`CACBEBGC`.
+
+**s**
+
+    Produces an image file which contains a snapshot of the currently displayed
+    image. For information on snapshot mode, see :ref:`CACBEBGC`.
+
+**.**
+
+    Stops the rotation invoked with the 'r' key, see
+    above.
+
+The left mouse button can also be used to inquire estimated
+magnetic field and potential values on the helmet and head surfaces
+if the corresponding maps have been calculated and displayed.
+
+In addition, the mouse wheel or trackball can be used to
+rotate the image. If a trackball is available, *e.g.*,
+with the Apple MightyMouse, the image can be rotated up and down
+or left and right with the trackball. With a mouse wheel the image
+will rotate up and down when the wheel is rotated. Image rotation
+in the left-right direction is achieved by holding down the shift key
+when rotating the wheel. The shift key
+has the same effect on trackball operation.
+
+.. note:: The trackball and mouse wheel functionality is dependent on your X server settings. On Mac OSX these settings are normally correct by default but on a LINUX system some adjustments to the X server settings may be necessary. Consult your system administrator or Google for details.
+
+.. _CACHGDEA:
+
+Viewer options
+==============
+
+.. figure:: mne_analyze/viewer_options.png
+    :alt: viewer options
+
+    The viewer options window.
+
+The viewer options window shown above contains three main
+sections to control the appearance of the viewer:
+
+- Selectors for various items to show,
+
+- Options for some of the items, and
+
+- Control of the color and transparency of the items, if applicable.
+  The color can be adjusted either by entering numeric values in the
+  range 0...1 or with the help of a color editor which appears from the Color... button.
+  The transparency value has the same range as the other color components,
+  zero indicating a fully transparent (invisible) surface and one a
+  fully opaque one.
+
+The available items are:
+
+**Left hemi**
+
+    The pial surface of the left hemisphere. This surface can be made transparent.
+    Naturally, this surface will only be visible if the scalp is made
+    transparent.
+
+**Right hemi**
+
+    The pial surface of the right hemisphere.
+
+**Inner skull**
+
+    The inner skull surface. This surface can be made transparent. If parts
+    of the pial surface are outside of the inner skull surface, they will
+    be visible, indicating that the inner skull surface is erroneous,
+    since the pial surface must lie inside it. Note that this criterion is more conservative than
+    the one imposed during the computation of the forward solution since
+    the source space points are located on the white matter surface
+    rather than on the pial surface. This surface can be displayed only
+    if the BEM file is present, see :ref:`CACFDDCB`.
+
+**Outer skull**
+
+    The outer skull surface. This surface can be made transparent. This surface can
+    be displayed only if the BEM file is present and contains the outer
+    skull surface, see :ref:`CACFDDCB`.
+
+**Scalp**
+
+    The scalp surface. This surface can be made transparent. The display
+    of this surface requires that the scalp triangulation file is present,
+    see :ref:`CACFDDCB`.
+
+**Digitizer data**
+
+    The 3D digitizer data collected before the MEG/EEG acquisition. These
+    data are loaded from File/Load digitizer data... .
+    The display can be restricted to HPI coil locations and cardinal
+    landmarks with the corresponding option. The digitizer points are shown as disks
+    whose radius is equal to the distance of the corresponding point
+    from the scalp surface. Points outside the scalp are shown in red
+    and those inside in blue. Distinct shades of cold and warm colors
+    are used for the fiducial landmarks. The HPI coils are shown in
+    green. Further information on these data and their use in coordinate
+    system alignment is given in :ref:`CACEHGCD`.
+
+**Helmet**
+
+    The MEG measurement surface, *i.e.*, the inner surface
+    of the dewar.
+
+**EEG electrodes**
+
+    The EEG electrode locations. These will only be available if your data
+    set contains EEG channels.
+
+**MEG sensors**
+
+    Outlines of MEG sensors.
+
+**MEG field map**
+
+    Estimated contour map of the magnetic field component normal to the
+    helmet surface or normal to the scalp, see :ref:`CACICDGA`.
+
+**EEG potential map**
+
+    Interpolated EEG potential map on the scalp surface, see :ref:`CACICDGA`.
+
+**Activity estimates**
+
+    Current estimates on the pial surface.
+
+.. _CACICDGA:
+
+Magnetic field and electric potential maps
+##########################################
+
+Overview
+========
+
+In mne_analyze , the magnetic
+field and potential maps displayed in the viewer window are computed
+using an MNE-based interpolation technique. This approach involves
+the following steps:
+
+- Establish an inverse operator to compute
+  a minimum norm solution on a spherical surface using a spherically
+  symmetric forward model. Instead of assuming a discrete grid of
+  sources, a continuous distribution of tangential currents is employed.
+  In this case the lead field dot products can be computed in closed
+  form. Separate solutions are computed for MEG and EEG.
+
+- The normal component of the magnetic field or the electric
+  potential on the helmet or head surface is computed from the MEG-based
+  and EEG-based MNE. Since the MNE predicts the original measurements
+  accurately, it can also interpolate and extrapolate the data reliably.
+  The grid of interpolation or extrapolation points can be located
+  on the helmet or scalp surface for MEG and on the scalp surface
+  for EEG.
+
+The magnetic field and potential maps appear automatically
+whenever they are enabled from the viewer options, see :ref:`CACHGDEA`.
+
+.. _CACGFBCI:
+
+Technical description
+=====================
+
+Let :math:`x_k` be an MEG or an EEG
+signal at channel :math:`k = 1 \dotso N`. This signal
+is related to the primary current distribution :math:`J^p(r)` through
+the lead field :math:`L_k(r)`:
+
+.. math::    x_k = \int_G {L_k(r) \cdot J^p(r)}\,dG\ ,
+
+where the integration space :math:`G` in
+our case is a spherical surface. The oblique boldface characters
+denote three-component location vectors and vector fields.
+
+The inner product of two lead fields is defined as:
+
+.. math::    \langle L_j \mid L_k \rangle = \int_G {L_j(r) \cdot L_k(r)}\,dG\ .
+
+These products constitute the Gram matrix :math:`\Gamma_{jk} = \langle L_j \mid L_k \rangle`.
+The minimum-norm estimate can be expressed as a weighted sum of
+the lead fields:
+
+.. math::    J^* = w^T L\ ,
+
+where :math:`w` is a weight vector
+and :math:`L` is a vector composed of the
+continuous lead-field functions. The weights are determined by the
+requirement
+
+.. math::    x = \langle L \mid J^* \rangle = \Gamma w\ ,
+
+i.e., the estimate must predict the measured signals. Hence,
+
+.. math::    w = \Gamma^{-1} x\ .
+
+However, the Gram matrix is ill-conditioned and regularization
+must be employed to yield a stable solution. With the help of the SVD
+
+.. math::    \Gamma = U \Lambda V^T
+
+a regularized minimum-norm estimate can now be found by replacing the
+data matching condition by
+
+.. math::    x^{(p)} = \Gamma^{(p)} w^{(p)}\ ,
+
+where
+
+.. math::    x^{(p)} = (U^{(p)})^T x \text{  and  } \Gamma^{(p)} = (U^{(p)})^T \Gamma\ ,
+
+respectively. In the above, the columns of :math:`U^{(p)}` are
+the first :math:`p` left singular vectors of :math:`\Gamma`.
+The weights of the regularized estimate are
+
+.. math::    w^{(p)} = V \Lambda^{(p)} U^T x\ ,
+
+where :math:`\Lambda^{(p)}` is diagonal with
+
+.. math::    \Lambda_{jj}^{(p)} = \Bigg\{ \begin{array}{ll}
+                 1/{\lambda_j}\ , & j \leq p\\
+                 0\ , & \text{otherwise}
+             \end{array}
+
+:math:`\lambda_j` being the :math:`j` th singular
+value of :math:`\Gamma`. The truncation point :math:`p` is
+selected in mne_analyze by specifying
+a tolerance :math:`\varepsilon`, which is used to
+determine :math:`p` such that
+
+.. math::    1 - \frac{\sum_{j = 1}^p {\lambda_j}}{\sum_{j = 1}^N {\lambda_j}} < \varepsilon
+
+The extrapolated and interpolated magnetic field or potential
+distribution estimates :math:`\hat{x'}` in a virtual
+grid of sensors can now be easily computed from the regularized
+minimum-norm estimate. With
+
+.. math::    \Gamma_{jk}' = \langle L_j' \mid L_k \rangle\ ,
+
+where :math:`L_j'` are the lead fields
+of the virtual sensors,
+
+.. math::    \hat{x'} = \Gamma' w^{(p)}\ .
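+
+The following NumPy sketch illustrates these steps under simplifying
+assumptions: the Gram matrices are taken as precomputed from the
+lead-field dot products, and all names are hypothetical::
+
+    import numpy as np
+
+    def interpolate_map(gram, gram_virt, x, tol):
+        """Truncated-SVD minimum-norm interpolation (illustrative sketch).
+
+        gram      : (N, N) Gram matrix of the measurement-sensor lead fields
+        gram_virt : (M, N) cross-Gram matrix of the virtual sensors
+        x         : (N,) measured MEG or EEG signals at one time point
+        tol       : tolerance epsilon selecting the truncation point p
+        """
+        u, lam, vt = np.linalg.svd(gram)
+        # Smallest p with 1 - sum(lam[:p]) / sum(lam) < tol
+        frac = 1.0 - np.cumsum(lam) / lam.sum()
+        p = int(np.argmax(frac < tol)) + 1
+        # Regularized weights w = V Lambda^(p) U^T x, components beyond p zeroed
+        lam_inv = np.zeros_like(lam)
+        lam_inv[:p] = 1.0 / lam[:p]
+        w = np.dot(vt.T, lam_inv * np.dot(u.T, x))
+        # Interpolated/extrapolated values at the virtual sensors
+        return np.dot(gram_virt, w)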
+
+Field mapping preferences
+=========================
+
+The parameters of the field maps can be adjusted from the Field mapping preferences dialog shown in :ref:`CACGDCGA`, which is accessed through the Adjust/Field mapping... menu item.
+
+.. _CACGDCGA:
+
+.. figure:: mne_analyze/field_mapping_pref.png
+    :alt: Field mapping preferences dialog
+
+    Field mapping preferences dialog.
+
+The Field mapping preferences dialog
+has the following controls, arranged in MEG , EEG ,
+and common sections:
+
+**SVD truncation at**
+
+    Adjusts the smoothing of the field and potential patterns. This parameter
+    specifies the eigenvalue truncation point as described in :ref:`CACGFBCI`. Smaller values correspond to noisier field
+    patterns with less smoothing.
+
+**Use default origin**
+
+    The location of the origin of the spherical head model used in these computations
+    defaults to (0 0 40) mm. If this box is unchecked the origin coordinate
+    fields are enabled to enter a custom origin location. Usually the
+    default origin is appropriate.
+
+**Downsampling grade**
+
+    This option only applies to EEG potential maps and MEG field maps
+    extrapolated to the head surface and controls the number of virtual
+    electrodes or point magnetometers used in the interpolation. Allowed
+    values are: 2 (162 locations), 3 (642 locations), and 4 (2562 locations).
+    Usually the default value 3 is appropriate.
+
+**Number of smoothsteps**
+
+    This option controls how much smoothing, see :ref:`CHDEBAHH`,
+    is applied to the interpolated data before computing the contours.
+    Usually the default value is appropriate.
+
+**Reconstruction surface radius**
+
+    Distance of the spherical reconstruction surface from the sphere model
+    origin. Usually the default value is appropriate. For children it may
+    be necessary to make this value smaller.
+
+.. _CACBHDBF:
+
+Working with current estimates
+##############################
+
+.. _CACHFFIJ:
+
+Preferences
+===========
+
+The characteristics of the current estimates displayed are
+controlled from the MNE preferences dialog
+which pops up from Adjust/Estimates... .
+
+This dialog, shown in :ref:`CACJGCDH`, has the following
+controls:
+
+**SNR estimate**
+
+    This controls the regularization of the estimate, i.e., the amount
+    of allowed mismatch between the measured data and those predicted by
+    the estimated current distribution. Smaller SNR means larger allowed
+    mismatch. Typical range of SNR values is 1...7. As discussed in :ref:`CBBDJFBJ`,
+    the SNR value can be translated to the current variance values expressed
+    in the source-covariance matrix R. This translation is presented
+    as the equivalent current standard-deviation value.
+
+**Show**
+
+    This radio button box selects the quantity to display. MNE is
+    the minimum norm estimate (estimated value of the current), dSPM is the
+    noise-normalized MNE, and sLORETA is
+    another version of the noise-normalized solution which is claimed
+    to have a smaller location bias than the dSPM.
+
+**Mask with**
+
+    If MNE is selected in the Show radio
+    button box, it is possible to mask the solution with one of the
+    statistical maps. The masking map is thresholded at the value given
+    in the Threshold text field and
+    the MNE is only shown in areas with statistical values above this threshold.
+
+**Value histogram**
+
+    This part of the dialog shows the distribution of the currently
+    shown estimate values over the surface. The histogram is colored
+    to reflect the current scale settings. The fthresh , fmid ,
+    and fmax values are indicated
+    with vertical bars. The histogram is updated when the dialog is
+    popped up and when the estimate type to show changes, not at every
+    new time point selection. The Refresh button
+    makes the histogram current at any time.
+
+**Color scale**
+
+    These text fields control the color scale as described in :ref:`CACGGICI`.
+
+**Options**
+
+    Various options controlling the estimates.
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.45\linewidth}|
+.. _CACGGICI:
+.. table:: The color scale parameters.
+
+    +------------+---------------------------------------------------------+
+    | Parameter  |   Meaning                                               |
+    +============+=========================================================+
+    | fthresh    | If the value is below this level, it will not be shown. |
+    +------------+---------------------------------------------------------+
+    | fmid       | Positive values at this level will show as red.         |
+    |            | Negative values will be dark blue.                      |
+    +------------+---------------------------------------------------------+
+    | fmax       | Positive values at and above this level will be bright  |
+    |            | yellow. Negative values will be bright blue.            |
+    +------------+---------------------------------------------------------+
+    | fmult      | Apply this multiplier to the above thresholds. Default  |
+    |            | is :math:`1` for statistical maps and :math:`10^{-10}`  |
+    |            | for currents (MNE). The vertical bar locations in the   |
+    |            | histogram take this multiplier into account but the     |
+    |            | values indicated are the threshold parameters without   |
+    |            | the multiplier.                                         |
+    +------------+---------------------------------------------------------+
+    | tcmult     | The upper limit of the timecourse vertical scale will   |
+    |            | be :math:`tc_{mult}f_{mult}f_{max}`.                    |
+    +------------+---------------------------------------------------------+
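+
+For orientation, the color mapping could be sketched as follows; the
+linear ramp between fmid and fmax and the exact endpoint colors are
+assumptions of this illustration, not a specification of mne_analyze ::
+
+    def estimate_color(value, fthresh, fmid, fmax, fmult=1.0):
+        # Apply the fmult multiplier to all thresholds (see the table above)
+        lo, mid, hi = fmult * fthresh, fmult * fmid, fmult * fmax
+        v = abs(value)
+        if v < lo:
+            return None                    # below fthresh: not shown
+        t = min(max((v - mid) / (hi - mid), 0.0), 1.0)
+        if value >= 0:
+            return (1.0, t, 0.0)           # red at fmid, bright yellow at fmax
+        return (0.0, t, 0.5 + 0.5 * t)     # dark blue at fmid, bright blue at fmax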
+
+
+.. _CACJGCDH:
+
+.. figure:: mne_analyze/MNE_preferences.png
+    :alt: MNE estimate preferences
+
+    Estimate preferences dialog.
+
+The optional parameters are:
+
+**Retain sign**
+
+    With this option, the sign of the dot product between the current direction
+    and the cortical surface normal will be used as the sign of the
+    values to be displayed. This option yields meaningful data only if
+    a strict or a loose orientation constraint was used in the computation
+    of the inverse operator decomposition.
+
+**Retain normal component only**
+
+    Consider only the current component normal to the cortical mantle. This
+    option is not meaningful with completely free source orientations.
+
+**Show scale bar**
+
+    Show the color scale bar at the lower right corner of the display.
+
+**Show comments**
+
+    Show the standard comments at the lower left corner of the display.
+
+**Time integr. (ms)**
+
+    Integration time for each frame (:math:`\Delta t`).
+    Before computing the estimates, time integration will be performed
+    on the sensor data. If the time specified for a frame is :math:`t_0`,
+    the integration range will be :math:`t_0 - \Delta t/2 \leq t \leq t_0 + \Delta t/2`.
+
+**# of smooth steps**
+
+    Before display, the data will be smoothed using this number of steps,
+    see :ref:`CHDEBAHH`.
+
+**Opacity**
+
+    The range of this parameter is 0...1. The default value 1 means
+    that the map overlaid on the cortical surface is completely opaque.
+    With lower opacities the color of the cortical surface will be visible
+    to facilitate understanding the underlying folding pattern from
+    the curvature data displayed.
+
+.. _CACJFFEE:
+
+The SNR display
+===============
+
+The SNR estimate display
+shows the SNR estimated from the whitened data in red and the apparent
+SNR inferred from the mismatch between the measured and predicted
+data in green.
+
+The SNR estimate is computed from the whitened data :math:`\tilde{x}(t)`,
+related to the measured data :math:`x(t)` by
+
+.. math::    \tilde{x}(t) = C^{-1/2} x(t)\ ,
+
+where :math:`C^{-1/2}` is the whitening
+operator, introduced in :ref:`CHDDHAGE`.
+
+The computation of the apparent SNR will be explained in
+future revisions of this manual.
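+
+For illustration, one way to obtain a whitening operator from a
+noise-covariance matrix is an eigenvalue decomposition; this sketch
+ignores the rank-deficiency and projection handling performed by
+mne_analyze , and all names are hypothetical::
+
+    import numpy as np
+
+    def whitening_operator(cov, eps=1e-12):
+        """Compute C^{-1/2} from the noise-covariance matrix C (sketch)."""
+        evals, evecs = np.linalg.eigh(cov)
+        # Invert the square roots of the eigenvalues, guarding tiny ones
+        inv_sqrt = np.where(evals > eps, 1.0 / np.sqrt(np.maximum(evals, eps)), 0.0)
+        return np.dot(evecs * inv_sqrt, evecs.T)
+
+    # x: (n_channels, n_times) measured data; the whitened version is then
+    # x_tilde = np.dot(whitening_operator(C), x)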
+
+.. _CACCCFHH:
+
+Inquiring timecourses
+#####################
+
+.. _CHDGHDGE:
+
+Timecourses at vertices
+=======================
+
+Timecourses at individual vertices can be inquired by clicking
+on a desired point on the surface with the left mouse button. If
+the control key was down at the time of a click, the timecourse
+will be added to the timecourse manager but left off. With both
+control and shift down, the timecourse will be added to the timecourse
+manager and switched on. For more information on the timecourse
+manager, see :ref:`CACDIAAD`.
+
+The timecourses are affected by the Retain sign and Retain normal component only settings in the MNE preferences dialog , see :ref:`CACHFFIJ`.
+
+Timecourses at labels
+=====================
+
+The labels provide a means to interrogate timecourse information
+from ROIs. The label files can be created in mne_analyze ,
+see :ref:`CACJCFJJ`, or in tksurfer ,
+which is part of the FreeSurfer software. For mne_analyze , left-hemisphere
+and right-hemisphere label files should be named <*name*> ``-lh.label`` and <*name*> ``-rh.label`` ,
+respectively.
+
+Individual label files can be loaded from Labels/Load label... . All label files in a directory can be
+loaded from Labels/Load all labels... .
+Once labels are loaded, the label list shown in :ref:`CACJJGEF` appears. Each
+time a new label is added to the list, the names will be sorted
+into alphabetical order. This list can also be brought up from Labels/Show label list . The list can be cleared from Labels/Discard all labels .
+
+.. warning:: Because of the format of the label files, mne_analyze cannot certify that the label files loaded belong to the cortical surfaces of the present subject.
+
+When a label is selected from the label list, the corresponding
+timecourse appears. The Keep button
+stores the timecourse to the timecourse manager, :ref:`CACDIAAD`.
+
+.. _CACJJGEF:
+
+.. figure:: mne_analyze/label_list.png
+    :alt: label list
+
+    The label list.
+
+The timecourse shown in the MNE amplitude window
+is a compound measure of all timecourses within a label. The following
+measures are available (a sketch of these computations follows the list):
+
+**Average**
+
+    Compute the average over all label vertices at each time point.
+
+**Maximum**
+
+    Compute the maximum absolute value over all vertices at each time point.
+    If the data are signed, the value is assigned the sign of the value
+    at the maximum vertex. This may make the timecourse jump from positive
+    to negative abruptly if vertices with different signs are included
+    in the label.
+
+**L2 norm (sample by sample)**
+
+    Compute the :math:`l_2` norm over the values
+    in the vertices at each time point.
+
+**Pick vertex with largest L2 norm over time**
+
+    Compute the :math:`l_2` norm over time in
+    each vertex and show the time course at the vertex with the largest
+    norm.
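+
+The sketch below, with hypothetical names, shows how these compound
+measures could be computed from an array of estimate values at the
+label's vertices::
+
+    import numpy as np
+
+    def label_timecourse(data, measure):
+        """data: (n_vertices, n_times) estimate values within the label."""
+        if measure == 'average':
+            return data.mean(axis=0)
+        if measure == 'maximum':
+            # Maximum absolute value per time point, signed if data are signed
+            idx = np.abs(data).argmax(axis=0)
+            return data[idx, np.arange(data.shape[1])]
+        if measure == 'l2_sample':
+            # L2 norm over vertices, sample by sample
+            return np.sqrt((data ** 2).sum(axis=0))
+        if measure == 'l2_vertex':
+            # Timecourse at the vertex with the largest L2 norm over time
+            return data[(data ** 2).sum(axis=1).argmax()]
+        raise ValueError(measure)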
+
+.. _CACDIAAD:
+
+The timecourse manager
+======================
+
+The timecourse manager shown in :ref:`CACEDEJI` has
+the following controls for each timecourse stored:
+
+.. _CACEDEJI:
+
+.. figure:: mne_analyze/timecourse_manager.png
+    :alt: timecourse manager
+
+    The timecourse manager.
+
+**Numbered checkbox**
+
+    Switches the display of this timecourse on and off.
+
+**Color...**
+
+    This button shows the color of the timecourse curve. The color can be
+    adjusted from the color editor which appears when the button is pressed.
+
+**Save...**
+
+    Saves the timecourse. If a single vertex is selected, the time course file
+    will contain some comment lines starting with the percent sign,
+    one row of time point values in seconds and another with the data
+    values. The format of the timecourse data is explained in :ref:`CACJJGFA`, below.
+
+**Forget**
+
+    Delete this timecourse from memory.
+
+.. _CACJJGFA:
+
+Label timecourse files
+----------------------
+
+When a timecourse corresponding to a label is saved, the default
+is to save the displayed single timecourse in a format identical
+to the vertex timecourses. If Save all timecourses within the label is selected, the Time-by-time output option changes the output to be listed
+time by time rather than vertex by vertex, Include coordinates adds the vertex location information to
+the output file, and Include vertex numbers adds
+the indices of picked vertices to the output, see :ref:`CACHBBFD`.
+The vertex-by-vertex output format is summarized in :ref:`CACEFHIJ`.
+
+.. _CACHBBFD:
+
+.. figure:: mne_analyze/save_label_timecourse.png
+    :alt: Label timecourse saving options
+
+    Label timecourse saving options.
+
+.. _CACEFHIJ:
+
+.. table:: Vertex-by-vertex output format. :math:`n_p` is the number of vertices, :math:`n_t` is the number of time points, :math:`n_{com}` is the number of comment lines, :math:`t_1 \dotso t_{n_t}` indicate the times in milliseconds, :math:`p` is a vertex number, :math:`x_p y_p z_p` are the coordinates of vertex :math:`p` in millimeters, and :math:`v_p^{(1)} \dotso v_p^{(n_t)}` are the values at vertex :math:`p`.  Items in brackets are only included if *Include coordinates* is active. Items in braces are only included if *Include vertex numbers* is active.
+
+    ===================================  ======================================================
+    Line                                 Contents
+    ===================================  ======================================================
+    :math:`1-n_{com}`                    Comment lines beginning with %
+    :math:`n_{com}+1`                    :math:`\{0.0\}[0.0\ 0.0\ 0.0] t_1 \dotso t_{n_t}`
+    :math:`(n_{com}+2)-(n_p+n_{com}+1)`  :math:`\{p\}[x_p y_p z_p]v_p^{(1)} \dotso v_p^{(n_t)}`
+    ===================================  ======================================================
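+
+As a sketch, a file written with both Include coordinates and Include vertex numbers active could
+be parsed as follows (hypothetical helper, not part of the software)::
+
+    def read_label_timecourses(fname):
+        # Skip the comment lines beginning with % and split the numeric rows
+        rows = [list(map(float, line.split()))
+                for line in open(fname) if not line.startswith('%')]
+        times = rows[0][4:]                       # times in milliseconds
+        vertices = [int(r[0]) for r in rows[1:]]  # vertex numbers
+        coords = [r[1:4] for r in rows[1:]]       # vertex locations in mm
+        values = [r[4:] for r in rows[1:]]        # one timecourse per vertex
+        return times, vertices, coords, values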
+
+.. _CACJCFJJ:
+
+Creating new label files
+========================
+
+It is easy to create new label files in mne_analyze.
+For this purpose, an inflated surface should be visible in the main
+display. Follow these steps:
+
+- Clear all previously selected vertices
+  either by choosing Labels/Clear marked vertices or
+  by doing a right button click on the surface display with the shift key
+  down.
+
+- Mark vertices on the surface with right button click or by
+  right button drag. The vertices should be defined in the desired
+  order on the new label outline. The outline will follow the shortest
+  path along the surface. The shortest path will be calculated along
+  the white matter surface. Note that sometimes the shortest paths
+  appear to be unintuitive on the inflated surface.
+
+- Do a right button click with control key down inside the label.
+  The outline will be completed and shown as a yellow line. The inside
+  of the label will be filled and shown in green. A file selection
+  box will appear to save the label. Enter the stem of the file name
+  here. The file name will be augmented with ``-lh.label`` or ``-rh.label`` ,
+  depending on the hemisphere on which the label is specified.
+
+.. _CACFCHEC:
+
+Overlays
+########
+
+.. _CACIGHEJ:
+
+.. figure:: mne_analyze/overlay_management.png
+    :alt: The overlay management dialog
+
+    The overlay management dialog.
+
+In addition to source estimates derived from MEG and EEG
+data, mne_analyze can be used
+to display other surface-based data. These overlay data can be imported
+from w and stc files containing single time slice (static) and dynamic
+data (movies), respectively. These data files can be produced by mne_make_movie ,
+FreeSurfer software, and custom programs or Matlab scripts.
+
+The names of the files to be imported should end with ``-`` <*hemi*> .<*type*> , where <*hemi*> indicates
+the hemisphere (``lh`` or ``rh`` ) and <*type*> is ``w`` or ``stc`` .
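+
+Such files can also be inspected programmatically; for example, recent
+versions of the mne-python package provide a reader (the file name
+below is hypothetical)::
+
+    import mne
+
+    # Read a dynamic (stc) overlay; data has shape (n_vertices, n_times)
+    stc = mne.read_source_estimate('mydata-lh.stc')
+    print(stc.data.shape, stc.times[0], stc.times[-1])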
+
+Overlays are managed from the dialog shown in :ref:`CACIGHEJ` which is invoked from File/Manage overlays... .
+
+This dialog contains the following
+controls:
+
+**List of overlays loaded**
+
+    Lists the names of the overlays loaded so far.
+
+**Load w...**
+
+    Load a static overlay from a w file. In the open dialog it is possible to
+    specify whether this file contains data for the cortical surface
+    or for the scalp. Scalp overlays can be viewed in the viewer window.
+
+**Load stc...**
+
+    Load a dynamic overlay from an stc file. In the open dialog it is
+    possible to specify whether this file contains data for the cortical
+    surface or for the scalp. Scalp overlays can be viewed in the viewer window.
+
+**Delete**
+
+    Delete the selected overlay from memory.
+
+**Time scale slider**
+
+    Will be activated if a dynamic overlay is selected. Changes the
+    current time point.
+
+**Overlay type is**
+
+    Selects the type of the data in the current overlay. Different default color
+    scales are provided for each overlay type.
+
+**Value histogram**
+
+    Shows the distribution of the values in the current overlay. For
+    large stc files this may take a while to compute since all time
+    points are included. The histogram is colored to reflect the current
+    scale settings. The fthresh , fmid ,
+    and fmax values are indicated
+    with vertical bars.
+
+**Color scale**
+
+    Sets the color scale of the current overlay. To activate the values, press Show .
+    For information on color scale settings, see :ref:`CACGGICI`.
+
+**Options**
+
+    Display options. This is a subset of the options in the MNE preferences dialog. For details, see :ref:`CACHFFIJ`.
+
+**Show**
+
+    Show the selected overlay and assign the settings to the current overlay.
+
+**Apply to all**
+
+    Apply the current settings to all loaded overlays.
+
+It is also possible to inquire timecourses of vertices and
+labels from dynamic (stc) cortical overlays in the same way as from
+original data and store the results in text files. If a static overlay
+(w file) or a scalp overlay is selected, the timecourses are picked
+from the data loaded, if available.
+
+.. _CHDGHIJJ:
+
+Fitting current dipoles
+#######################
+
+Starting from MNE software version 2.6, mne_analyze includes
+routines for fitting current dipoles to the data. At present mne_analyze is
+limited to fitting a single equivalent current dipole (ECD) at one
+time point. The parameters affecting the dipole fitting procedure
+are described in :ref:`CACEDEGA`. The results are shown in
+the dipole list (:ref:`CACGGAIA`). The selection of channels
+included can be adjusted interactively or by predefined selections
+as described in :ref:`CACIBHCI`.
+
+.. warning:: The current dipole fitting has been added recently and has not been tested comprehensively. Especially fitting dipoles to EEG data may be unreliable.
+
+.. _CACEDEGA:
+
+Dipole fitting parameters
+=========================
+
+Prior to fitting current dipoles, the fitting parameters
+must be set with the Dipole fitting preferences dialog
+shown in :ref:`CACFEDEJ`. The dialog is brought up from the Setup fitting... choice in the Dipoles menu.
+This dialog contains three sections: Forward model , Modalities ,
+and Noise estimate .
+
+The Forward model section
+specifies the forward model to be used:
+
+**Sphere model origin x/y/z [mm]**
+
+    Specifies the origin of the spherically symmetric conductor model in
+    MEG/EEG head coordinates, see :ref:`BJEBIBAI`.
+
+**EEG scalp radius [mm]**
+
+    Specifies the radius of the outermost shell in the EEG sphere model. For
+    details, see :ref:`CHDIAFIG`.
+
+**EEG sphere model name**
+
+    Specifies the name of the EEG sphere model to use. For details,
+    see :ref:`CHDIAFIG`.
+
+**BEM model**
+
+    Selects the boundary-element model to use. The button labeled with ... brings
+    up a file-selection dialog to select the BEM file. An existing selection
+    can be cleared with the Unset button.
+    If EEG data are included in fitting, this must be a three-compartment
+    model. Note that the sphere model is used even with a BEM model
+    in effect, see :ref:`CHDFGIEI`.
+
+**Accurate field calculation**
+
+    Switches on the more accurate geometry definition of MEG coils, see :ref:`BJEIAEIE`.
+    In dipole fitting, there is very little difference between the *accurate* and *normal* coil
+    geometry definitions.
+
+The Modalities section
+defines which kind of data (MEG/EEG) are used in fitting. If an
+inverse operator is loaded with the data, this section is fixed and
+greyed out. You can further restrict the selection of channels used
+in dipole fitting with the help of the channel selections discussed in :ref:`CACIBHCI`.
+
+The Noise estimate section
+of the dialog contains the following items:
+
+**Noise covariance**
+
+    Selects the file containing the noise-covariance matrix. If an inverse operator
+    is loaded, the default is the inverse operator file. The button labeled
+    with ... brings up a file-selection
+    dialog to select the noise covariance matrix file. An existing selection
+    can be cleared with the Unset button.
+
+**Omit off-diagonal terms**
+
+    If a noise covariance matrix is selected, this choice omits the
+    off-diagonal terms from it. This means that individual noise estimates for
+    each channel are used but correlations among channels are not taken
+    into account.
+
+**Regularization**
+
+    Regularize the noise covariance before using it in whitening by
+    adding a multiple of an identity matrix to the diagonal. This is
+    discussed in more detail in :ref:`CBBHEGAB`. Especially if
+    EEG is included in fitting it is advisable to enter a non-zero value
+    (around 0.1) here.
+
+**Planar fixed [fT/cm]**
+
+    In the absence of a noise covariance matrix selection, a diagonal noise
+    covariance with fixed values on the diagonal is used. This entry
+    specifies the fixed value of the planar gradiometers.
+
+**Axial fixed [fT]**
+
+    If a noise covariance matrix file is not specified, this entry specifies a
+    fixed diagonal noise covariance matrix value for axial gradiometers
+    and magnetometers.
+
+**EEG fixed [muV]**
+
+    If a noise covariance matrix file is not specified, this entry specifies a
+    fixed diagonal noise covariance matrix value for the EEG channels.
+
+.. _CACFEDEJ:
+
+.. figure:: mne_analyze/dipole_parameters.png
+    :alt: The dipole fitting preferences dialog
+
+    The dipole fitting preferences dialog.
+
+.. _CHDFGIEI:
+
+The dipole fitting algorithm
+============================
+
+When the dipole fitting preferences dialog is closed and
+the values have been modified, the following preparatory calculations
+take place:
+
+- If EEG data are included in fitting,
+  the EEG sphere model specification corresponding to EEG sphere model name is loaded and scaled to
+  the EEG scalp radius .
+
+- If a boundary-element model is used, the additional data depending
+  on the sensor locations are computed.
+
+- The noise covariance matrix is composed according to the specifications
+  in the Dipole fitting preferences dialog.
+
+- The spatially whitened forward solution is computed in a grid
+  of locations to establish the initial guess when a dipole is fitted.
+  If a BEM is in use, the grid will be confined to the inner skull
+  volume. For a sphere model, a spherical volume with an 80-mm radius,
+  centered at the sphere model origin, will be employed. The dipole
+  grid will be rectangular with a 10-mm spacing between the closest
+  dipole locations. Any locations closer than 20 mm to the center
+  of mass of the grid volume will be excluded as well as those closer
+  than 10 mm to the surface. Note that this guess grid is only used
+  for establishing the initial guess; the actual dipole fitting procedure
+  does not constrain the solution to this grid.
+
+When the Fit ECD button
+in the tool bar is clicked with a time point selected from the
+response, the optimal Equivalent Current Dipole parameters (location,
+orientation, and amplitude) are determined using the following
+algorithm (a simplified sketch follows the list):
+
+- An initial guess for the location of
+  the dipole is determined using the grid of locations determined
+  in step 4., above. At each guess dipole location, the least squares
+  error between the measured data and a dipole at that location is
+  evaluated and the location corresponding to the smallest error is
+  used as the initial guess location. In this process, the dipole
+  amplitude parameters do not need to be explicitly calculated.
+
+- Using the Nelder-Mead simplex optimization algorithm, an optimal dipole
+  location is determined with the sphere model used as the forward
+  model. Again, the dipole amplitude parameters are not explicitly present
+  in the fitting procedure.
+
+- A second optimization iteration using the boundary-element
+  model (if available) or the sphere model as the forward model is
+  conducted. The reason for repeating the optimization even with the
+  sphere model is to reduce the likelihood of having been stuck in
+  a local minimum of the least squares error criterion.
+
+- The optimal dipole amplitude parameters are determined for
+  the optimal dipole location obtained in steps 2. and 3.
+
+- The dipole parameters are reported in the dipole list discussed
+  in :ref:`CACGGAIA`.
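+
+A drastically simplified sketch of steps 1.-4. is given below. The
+helper ``gain_white_at(pos)``, returning the whitened forward solution
+for the three dipole components at a location, stands in for the
+sphere/BEM computations and is hypothetical, as are all other names::
+
+    import numpy as np
+    from scipy.optimize import minimize
+
+    def fit_ecd(guess_grid, x_white, gain_white_at, min_sv_ratio=0.2):
+        """guess_grid: (n_guess, 3); x_white: (n_channels,) whitened data."""
+        def residual(pos):
+            g = gain_white_at(pos)                 # (n_channels, 3)
+            u, s, vt = np.linalg.svd(g, full_matrices=False)
+            # Omit the weakest dipole component if it is poorly visible
+            keep = 3 if s[2] / s[0] >= min_sv_ratio else 2
+            proj = np.dot(u[:, :keep], np.dot(u[:, :keep].T, x_white))
+            return np.sum((x_white - proj) ** 2)
+
+        # Step 1: scan the guess grid; amplitudes never appear explicitly
+        pos = guess_grid[int(np.argmin([residual(p) for p in guess_grid]))]
+        # Steps 2-3: Nelder-Mead simplex refinement of the location only
+        pos = minimize(residual, pos, method='Nelder-Mead').x
+        # Step 4: amplitude parameters by least squares at the final location
+        q = np.linalg.lstsq(gain_white_at(pos), x_white)[0]
+        return pos, q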
+
+Additional notes:
+
+- The noise covariance matrix is always
+  applied to the data and the forward solution as appropriate to correctly
+  weight the different types of MEG channels and EEG. Depending on
+  the dipole fitting settings, the noise covariance may be either
+  a diagonal matrix or a full matrix including the correlations.
+
+- Using the SVD of the whitened gain matrix of three dipole
+  components at a given location, the component producing the weakest
+  signal amplitude is omitted if the ratio of the smallest and largest
+  singular values is less than 0.2.
+
+- The present MNE software package also contains a batch-mode
+  dipole fitting program called mne_dipole_fit .
+  This piece of software is not yet documented here. However, ``mne_dipole_fit --help`` lists the command-line options, which correspond directly
+  to the interactive dipole fitting options discussed here.
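+
+A sketch of the component pruning mentioned in the second note, with
+illustrative names only::
+
+    import numpy as np
+
+    def prune_weak_component(G_whitened, tol=0.2):
+        """Drop the weakest dipole component at one location.
+
+        G_whitened is the (n_channels, 3) whitened gain matrix of the
+        three orthogonal dipole components.
+        """
+        U, s, Vt = np.linalg.svd(G_whitened, full_matrices=False)
+        if s[-1] / s[0] < tol:
+            # Keep only the two strongest source orientations
+            return G_whitened @ Vt[:2].T
+        return G_whitened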
+
+.. _CACGGAIA:
+
+The dipole list
+===============
+
+.. _CACGGFEJ:
+
+.. figure:: mne_analyze/dipole_list.png
+    :alt: dipole list
+
+    The dipole list.
+
+The dipole list dialog shown in :ref:`CACGGFEJ` contains
+the parameters of the dipoles fitted. In addition, it is possible
+to import current dipole locations from the Neuromag source modelling
+program xfit to mne_analyze . Dipoles
+can be imported in two ways:
+
+- Bring up the dipole list window from Windows/Show dipole list... . Drag and drop selected dipoles
+  from one of the xfit dipole lists
+  to this list using the middle mouse button.
+
+- Drag and drop dipoles from one of the xfit dipole
+  lists over the main surface display. The dipole list will appear
+  and contain the dropped dipoles.
+
+The buttons at the bottom of the dialog perform the following
+functions:
+
+**Done**
+
+    Hide
+    the dialog.
+
+**Show**
+
+    Show
+    the currently selected dipoles as specified in Display options ,
+    see below.
+
+**Save**
+
+    Save the selected (or all) dipoles.
+    If the file name specified in the file selection dialog that pops
+    up ends with ``.bdip`` , the dipole data will be saved in
+    the binary bdip format compatible with
+    the Neuromag xfit software, otherwise,
+    a text format output will be used. In the text file, comments will
+    be included on lines starting with the percent sign so that the
+    text format can be easily loaded into Matlab.
+
+**Clear**
+
+    Clear
+    the selected dipoles from the list.
+
+When you double-click on one of the dipoles, or select several
+dipoles and click Show , points
+on the surface displayed in the vicinity of the dipoles will be
+painted according to the specifications given in the Options section of
+the dialog:
+
+**Color**
+
+    By default, the dipoles are marked in green with transparency (alpha)
+    set to 0.5. If you click on one of the dipoles, you can adjust the
+    color of this dipole by editing the color values or from the color editor
+    appearing when you click Color... .
+    When you click Apply , the new
+    color values are attached to the selected dipole.
+
+**Max. distance for dipoles to show (mm)**
+
+    If this option is on, only dipoles which are closer to the surface
+    than the distance specified in the adjacent text field are displayed.
+
+**Paint all points closer than (mm)**
+
+    Instead of indicating only the point closest to the dipole, all points
+    closer than the distance given in the text field will be painted
+    if this option is on. This choice is useful for understanding the
+    shape of the neighborhood of a dipole on the cortical surface.
+
+**Number of smooth steps**
+
+    This option spreads out the dipole marking by the given number of smooth
+    steps to make the dipoles more clearly visible. A suitable choice
+    is 3 or 4.
+
+**Keep previous dipoles**
+
+    If this option is on, previously marked dipoles are not cleared
+    from the display before new ones are shown.
+
+.. note:: The surface must be loaded to display dipole locations. To calculate the distance from the dipoles to the white matter surface, the white matter tessellation is loaded as needed. Depending on the precise location of the fitted dipole, the spot indicating the dipole site may easily appear on a different wall of a fissure than could be expected. The fissural walls can be far apart from each other in the inflated view of the cortex even if they are physically [...]
+
+.. _CACIBHCI:
+
+Channel selections
+==================
+
+As mentioned in :ref:`CACHBJAC`, the right mouse button
+in the topographical display of channels can be used to restrict
+the selection of channels taken into account in dipole fitting.
+In addition, the channel selections can be manipulated in the channel
+selection window, which pops up from Dipoles/Manage channel selections... . Initially this dialog contains
+the selections defined in either $HOME/.mne/mne_analyze.sel or $MNE_ROOT/share/mne/mne_analyze/mne_analyze.sel,
+the personal file taking precedence over the system-wide default.
+The Save button in this dialog
+saves the current set of channel selections to the personal selection
+file. The format of this file is identical to the channel selection
+file in mne_browse_raw .
+
+When a channel selection file is in effect, the variances
+of the unselected channels are increased by a factor of 900. This
+means that unselected channels receive virtually no weight in the
+least-squares error function or, equivalently, that they are considered
+to be 30 times more noisy than their true noise value. Since this
+implementation of channel selections requires recomputation of the
+initial guess candidate data discussed in :ref:`CHDFGIEI`,
+above, changing the selection may take some time,
+especially if a BEM is used for the forward calculation.
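+
+In terms of the noise covariance this weighting is equivalent to the
+following sketch (illustrative only)::
+
+    import numpy as np
+
+    def inflate_unselected(noise_cov, selected):
+        """Downweight unselected channels for dipole fitting.
+
+        noise_cov : (n, n) noise covariance matrix
+        selected  : boolean array, True for channels in the selection
+        """
+        cov = noise_cov.copy()
+        idx = np.where(~selected)[0]
+        # Variance x 900 <=> noise amplitude x 30 <=> negligible weight
+        cov[idx, idx] *= 900.0
+        return cov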
+
+.. note:: Please note that when making a channel selection in the topographical displays, the channels not present in a particular layout are also affected. For example, if you select channels in a layout showing the Vectorview planar gradiometers, the magnetometer channels and EEG channels will be unselected.
+
+.. _CACEHGCD:
+
+Coordinate frame alignment
+##########################
+
+The MRI-MEG coordinate frame alignment tools included in mne_analyze utilize
+the 3D digitizer (Polhemus) data acquired at the beginning of each
+MEG/EEG session and the scalp surface triangulation shown in the
+viewer window. To access the coordinate frame alignment tools:
+
+- Load digitizer data. You can either
+  load a data set containing digitizer information or load digitizer
+  data from a file through the File/Load digitizer data... menu choice.
+
+- Set up the viewer window and make it visible, see :ref:`CACEFFJF`. The viewer options should be set to show the
+  digitizer data, see :ref:`CACHGDEA`.
+
+- Bring up the Adjust coordinate alignment dialog from Adjust/Coordinate alignment... .
+
+.. figure:: mne_analyze/adjust_alignment.png
+    :alt: The coordinate frame alignment dialog
+
+    The coordinate frame alignment dialog.
+
+The coordinate frame alignment dialog contains the following
+sections:
+
+- Buttons for picking the fiducial points
+  from the scalp surface and one for setting an initial alignment
+  using these points. When one of the fiducials is selected, the viewer
+  display automatically rotates to a suitable orientation to make
+  the corresponding fiducial accessible.
+
+- Controls for fine tuning the alignment. These include movements
+  along the three orthogonal coordinate axes and rotations around
+  them. The buttons marked L and R indicate
+  rotations in counterclockwise and clockwise directions, respectively.
+  The amount of movement (mm) or rotation (degrees) is given in the
+  text fields next to the adjustment buttons.
+
+- Access to an automatic alignment procedure, which employs
+  the Iterative Closest Point (ICP) algorithm.
+
+- Listing of the current coordinate transformation.
+
+- Buttons for discarding outlier points (Discard... ), and for
+  saving and loading the coordinate transformation.
+
+The saving and loading choices are:
+
+**Save default**
+
+    Saves a file which contains the MEG/MRI coordinate transformation
+    only. The file name is generated from the name of the file from which
+    the digitization data were loaded by replacing the ending ``.fif`` with ``-trans.fif`` .
+    If this file already exists, it will be overwritten without any
+    questions asked.
+
+**Save MRI set**
+
+    This option searches for a file called COR.fif in $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets.
+    The file is copied to COR-<*username*>-<*date*>-<*time*>.fif
+    and the current MEG/MRI coordinate transformation as well as the
+    fiducial locations in MRI coordinates are inserted.
+
+**Save...**
+
+    Saves a file which contains the MEG/MRI coordinate transformation
+    only. The ending ``-trans.fif`` is recommended. The file
+    selection dialog has a button to allow overwriting an existing file.
+
+**Load...**
+
+    Loads the MEG/MRI coordinate transformation from the file specified.
+
+The MEG/MRI coordinate transformation files are employed
+in the forward calculations. The convenience script mne_do_forward_solution described in :ref:`BABCHEJD` uses
+a search sequence which is compatible with the file naming conventions
+described above. It is recommended that the ``-trans.fif`` files
+saved with the Save default and Save... options
+in the mne_analyze alignment
+dialog be used because then the $SUBJECTS_DIR/$SUBJECT directory
+will be composed of files which depend on the subject's
+anatomy only, not on the MEG/EEG data to be analyzed.
+
+Each iteration step of the Iterative Closest Point (ICP) algorithm consists of
+two matching procedures:
+
+- For each digitizer point, transformed
+  from MEG to the MRI coordinate frame using the current coordinate
+  transformation, the closest point on the triangulated surface is
+  determined.
+
+- The best coordinate transformation aligning the digitizer
+  points with the closest points on the head surface is computed.
+
+These two steps are iterated the designated number of times.
+If the Try to keep nasion in place option
+is on, the present location of the nasion receives a strong weight
+in the second part of each iteration step so that nasion movements
+are discouraged.
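+
+Schematically, one such alignment could be written as below, assuming a
+hypothetical ``closest_point_on_surface`` helper and omitting the nasion
+weighting::
+
+    import numpy as np
+
+    def fit_rigid(src, dst):
+        """Least-squares rigid transform mapping src onto dst (Kabsch)."""
+        src_c, dst_c = src.mean(0), dst.mean(0)
+        U, _, Vt = np.linalg.svd((src - src_c).T @ (dst - dst_c))
+        R = Vt.T @ U.T
+        if np.linalg.det(R) < 0:        # guard against reflections
+            Vt[-1] *= -1
+            R = Vt.T @ U.T
+        return R, dst_c - R @ src_c
+
+    def icp(dig, closest_point_on_surface, R, t, n_iter=20):
+        for _ in range(n_iter):
+            moved = dig @ R.T + t
+            # Step 1: closest surface point for each digitizer point
+            matches = np.array([closest_point_on_surface(p) for p in moved])
+            # Step 2: best rigid transform aligning the two point sets
+            R, t = fit_rigid(dig, matches)
+        return R, t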
+
+.. note:: One possible practical approach to coordinate frame alignment is discussed in :ref:`CHDIJBIG`.
+
+.. _CHDCGHIF:
+
+Using high-resolution head surface tessellations
+==================================================
+
+The newest version of FreeSurfer contains a script called mkheadsurf which
+can be used for coordinate alignment purposes. For more information,
+try ``mkheadsurf --help`` . This script produces a file
+called ``surf/lh.smseghead`` , which can be converted into
+a fif file using mne_surf2bem.
+
+Suggested usage:
+
+- Set the SUBJECTS_DIR correctly.
+
+- Run mkheadsurf: ``mkheadsurf -subjid`` <*subject*> .
+
+- Go to the directory ``$SUBJECTS_DIR/`` <*subject*> ``/bem`` .
+
+- Convert the head surface file: ``mne_surf2bem --surf ../surf/lh.smseghead --id 4 --check --fif`` <*subject*> ``-head-dense.fif``
+
+- Rename the existing head surface file to <*subject*> ``-head-sparse.fif``
+
+- Copy <*subject*> ``-head-dense.fif`` to <*subject*> ``-head.fif``
+
+- Click Reload in the viewer
+  window.
+
+After this you can switch between the dense and smooth head
+surface tessellations by copying either <*subject*> ``-head-dense.fif`` or <*subject*> ``-head-sparse.fif`` to <*subject*> ``-head.fif`` .
+
+If you have Matlab software available on your system, you
+can also benefit from the script mne_make_scalp_surfaces .
+This script invokes mkheadsurf and
+subsequently decimates the resulting surface using the mne_reduce_surface function
+in the MNE Matlab toolbox, which in turn invokes the reducepatch
+Matlab function. As a result, the $SUBJECTS_DIR/$SUBJECT/bem directory
+will contain 'dense', 'medium',
+and 'sparse' scalp surface tessellations. The
+dense tessellation contains the output of mkheadsurf while
+the medium and sparse tessellations comprise 30,000 and 2,500 triangles,
+respectively. You can then make a symbolic link of one of these
+to <*subject*> ``-head.fif`` .
+The medium grade tessellation is an excellent compromise between
+geometric accuracy and speed in the coordinate system alignment.
+
+.. note:: While the dense head surface tessellation may help in coordinate frame alignment, it will slow down the operation of the viewer window considerably. Furthermore, it cannot be used in forward modelling due to the huge number of triangles. For the BEM, the dense tessellation does not provide much benefit because the potential distributions are quite smooth and widespread on the scalp.
+
+.. _CACJJBGF:
+
+Using fiducial points identified by other software
+==================================================
+
+If you have identified the three fiducial points in software
+outside mne_analyze , it is possible
+to display this information on the head surface visualization. To
+do this, you need to copy the file containing the fiducial location
+information in MRI (surface RAS) coordinates to $SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-fiducials.fif.
+There are three supported ways to create this file:
+
+- Use the mne_make_fiducial_file.m Matlab
+  function (not yet written) to create this file.
+
+- Copy an MRI description file with the MEG-MRI coordinate transformation
+  created with MRIlab (typically $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets/COR-<*date*>.fif)
+  to $SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-fiducials.fif.
+
+- For the average subject, fsaverage ,
+  copy the fsaverage-fiducials.fif file provided with mne_analyze
+  in place, see :ref:`CACGEAFI`.
+
+.. _CACIADAI:
+
+Viewing continuous HPI data
+###########################
+
+.. _CACFHFGJ:
+
+.. figure:: mne_analyze/cont_hpi_data.png
+    :alt: Continuous HPI data overview
+
+    Continuous HPI data overview.
+
+The newest versions of Neuromag software allow continuous
+acquisition of signals from the HPI coils. On the basis of these
+data the relative position of the dewar and the head can be computed
+a few times per second. The resulting location data are expressed in
+the form of unit quaternions (see http://mathworld.wolfram.com/Quaternion.html)
+and a translation.
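+
+A unit quaternion (q0, q1, q2, q3) together with a translation vector
+fully specifies the rigid device-to-head transformation. Assembling the
+corresponding 4 x 4 matrix is standard quaternion algebra (this sketch is
+not code from the Neuromag software)::
+
+    import numpy as np
+
+    def quat_trans_to_matrix(q, t):
+        """4 x 4 rigid transform from unit quaternion q and translation t."""
+        q0, q1, q2, q3 = q / np.linalg.norm(q)   # enforce unit norm
+        R = np.array([
+            [1 - 2*(q2*q2 + q3*q3), 2*(q1*q2 - q0*q3),     2*(q1*q3 + q0*q2)],
+            [2*(q1*q2 + q0*q3),     1 - 2*(q1*q1 + q3*q3), 2*(q2*q3 - q0*q1)],
+            [2*(q1*q3 - q0*q2),     2*(q2*q3 + q0*q1),     1 - 2*(q1*q1 + q2*q2)]])
+        T = np.eye(4)
+        T[:3, :3], T[:3, 3] = R, t
+        return T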
+
+The continuous HPI data can be loaded through the File/View continuous HPI data... menu item, which pops up
+a standard file selection dialog. If the file specified ends with ``.fif`` , a
+fif file containing the continuous coordinate transformation information
+is expected. Otherwise, a text log file is read. Both types of files are
+produced by the Neuromag maxfilter software.
+
+Once the data have been successfully loaded, the dialog shown
+in :ref:`CACFHFGJ` appears. It contains the following information:
+
+- Currently selected time point and an overview
+  of the data at the current time point.
+
+- MEG device to MEG head coordinate transformation at the current time
+  point and the incremental transformation from the initial time point
+  to the current one.
+
+- Graphical display of the data.
+
+- Controls for the graphical display.
+
+The overview items are:
+
+**GOF**
+
+    Geometric mean of the goodness of fit values of the HPI coils at
+    this time point.
+
+**Origin movement**
+
+    The distance between the head coordinate origins at the first and current
+    time points.
+
+**Angular velocity**
+
+    Estimated current angular velocity of the head.
+
+**Coil movements**
+
+    Comparison of the sensor locations between the first and current time
+    points. The minimum, maximum, average, and median sensor movements
+    are listed.
+
+The graphical display contains the following data:
+
+- The geometric mean of the HPI coil goodness
+  of fits (red curve). The scale for this curve is always 0.9...1.0.
+
+- The average coil (sensor) movement value (blue curve). The
+  scale is adjustable from the buttons below the display.
+
+- The estimated angular velocity (deg/s, green curve). The scale
+  is adjustable from the buttons below the display.
+
+- The current time point indicated with a black cursor.
+
+The slider below the display can be used to select the time
+point. If you click on the slider, the current time can be adjusted
+with the arrow keys. The current head position with respect to the
+sensor array is shown in the viewer window if it is visible, see :ref:`CACEFFJF`. Note that a complete set of items listed above
+is only available if a data file has been previously loaded, see :ref:`CACBACHB`.
+
+.. _CACCHCBF:
+
+Working with the MRI viewer
+###########################
+
+.. _CHDEGEHE:
+
+.. figure:: mne_analyze/mri_viewer.png
+    :alt: MRI viewer window
+
+    The MRI viewer control window.
+
+Selecting Show MRI viewer... from
+the View menu starts the FreeSurfer MRI
+viewer program tkmedit to work
+in conjunction with mne_analyze . After
+a few moments, tkmedit appears with
+the current subject's T1 MRI data shown, together with the MRI viewer
+control window shown in :ref:`CHDEGEHE`. Note that
+the tkmedit user interface is
+initially hidden. The surfaces of a subject must be loaded before
+starting the MRI viewer.
+
+The MRI viewer control window contains
+the following items:
+
+**Show MRI viewer user interface**
+
+    If this item is checked, the tkmedit user
+    interface window will be shown.
+
+**Track surface location in MRI**
+
+    With this item checked, the cursor in the MRI data window follows the
+    current (clicked) location in the surface display or the viewer. Note that for
+    the *viewer* window the surface location will be
+    inquired from the surface closest to the viewer. The MEG helmet
+    surface will not be considered. For example, if you click at an
+    EEG electrode location with the scalp surface displayed, the location
+    of that electrode on the scalp will be shown. The cortical surface
+    locations are inquired from the white matter surface.
+
+**Show dipole locations in MRI**
+
+    If this option is selected, whenever a dipole is displayed in the
+    surface view using the dipole list dialog discussed in :ref:`CACGGAIA`, the cursor will also move to the same location
+    in the MRI data window.
+
+**Show digitizer data in MRI**
+
+    If digitizer data are loaded, this option shows the locations with green
+    diamonds in the MRI data.
+
+**Interpolate voxels**
+
+    Toggles trilinear interpolation in the MRI data on and off.
+
+**Max. intensity projection**
+
+    Shows a maximum-intensity projection of the MRI data. This is useful
+    in conjunction with the Show digitizer data in MRI option to evaluate the MEG/MRI coordinate
+    alignment.
+
+**Recenter MRI display**
+
+    Brings the cursor to the center of the MRI data.
+
+**Show surface data in MRI**
+
+    This button creates an MRI data set containing the surface data
+    displayed and overlays it on the MRI slices shown in the MRI viewer.
+
+**Show segmentation data in MRI**
+
+    If available, the standard automatically generated segmentation
+    volume (mri/aparc+aseg) is overlaid on the MRI using the standard FreeSurfer
+    color lookup table ($FREESURFER_HOME/FreeSurferColorLUT.txt). As
+    a result, the name of the brain structure or region of cortex at
+    the current location of the cursor will be reported if the tkmedit user
+    interface is visible. After the segmentation is loaded this button
+    toggles the display of the segmentation on and off.
+
+**Show command input and output**
+
+    Allows sending tcl commands to tkmedit and
+    shows the responses received. The tkmedit tcl scripting
+    commands are discussed at https://surfer.nmr.mgh.harvard.edu/fswiki/TkMeditGuide/TkMeditReference/TkMeditScripting.
+
+.. _CACGEAFI:
+
+Working with the average brain
+##############################
+
+The FreeSurfer software includes an average subject (fsaverage)
+with a cortical surface reconstruction. In some cases, the average
+subject can be used as a surrogate if the MRIs of a subject are
+not available.
+
+The MNE software comes with additional files which facilitate
+the use of the average subject in conjunction with mne_analyze .
+These files are located in the directory $MNE_ROOT/mne/setup/mne_analyze/fsaverage:
+
+**fsaverage_head.fif**
+
+    The approximate head surface triangulation for fsaverage.
+
+**fsaverage_inner_skull-bem.fif**
+
+    The approximate inner skull surface for fsaverage.
+
+**fsaverage-fiducials.fif**
+
+    The locations of the fiducial points (LPA, RPA, and nasion) in MRI coordinates,
+    see :ref:`CACJJBGF`.
+
+**fsaverage-trans.fif**
+
+    Contains a default MEG-MRI coordinate transformation suitable for fsaverage.
+    For details of using the default transformation, see :ref:`CACBACHB`.
+
+.. _CACJEFAI:
+
+Compatibility with cliplab
+##########################
+
+The following graphics displays are compatible with the Elekta-Neuromag
+report composer cliplab :
+
+- The main surface display area in the
+  main window, see :ref:`CACFJICC`.
+
+- The viewer, see :ref:`CACEFFJF`.
+
+- The sample channel display, see :ref:`CACFGHBJ`.
+
+- The topographical data display, see :ref:`CACHBJAC`.
+
+- The SNR time course display, see :ref:`CACJFFEE`.
+
+- The source time course display, see :ref:`CACCCFHH`.
+
+The graphics can be dragged and dropped from these windows
+to one of the cliplab view areas
+using the middle mouse button. Because the topographical display
+area has another function (bad channel selection) tied to the middle
+mouse button, the graphics are transferred by doing a middle mouse
+button drag and drop from the label showing the current time underneath
+the display area itself.
+
+.. note:: The cliplab drag-and-drop functionality requires that you have the proprietary Elekta-Neuromag analysis software installed. mne_analyze is compatible with cliplab versions 1.2.13 and later.
+
+.. _CHDEDFAE:
+
+Visualizing the head position
+#############################
+
+When mne_analyze is invoked
+with the ``--visualizehpi`` option, a simplified user interface shown
+in :ref:`CHDJJGII` is displayed. This interface consists only
+of the viewer window. This *head position visualization* mode
+can be used with existing data files but is most useful for showing
+immediate feedback of the head position during experiments with
+an Elekta-Neuromag MEG system.
+
+.. _CHDJJGII:
+
+.. figure:: mne_analyze/visualize_hpi.png
+    :alt: Snapshot of mne_analyze in the head position visualization mode
+
+    Snapshot of mne_analyze in the head position visualization mode.
+
+As described in :ref:`CHDJECCG`, the head position
+visualization mode can be customized with the --dig, --hpi, --scalehead,
+and --rthelmet options. For this mode to be useful, the --dig and
+--hpi options are mandatory. If existing saved data are viewed,
+both of these can point to an average or raw data file. For on-line
+operation with the Elekta-Neuromag systems, the following files
+should be used:
+
+``--dig /neuro/dacq/meas_info/isotrak --hpi /neuro/dacq/meas_info/hpi_result``
+
+.. note:: Since MNE software runs only on LINUX and Mac OS X platforms, one usually needs to NFS mount the volume containing the /neuro directory to another system and access these files remotely. However, Neuromag has indicated that future versions of their acquisition software will run on the LINUX platform as well and the complication of remote operation can then be avoided.
+
+When mne_analyze starts
+in the head position visualization mode and the --dig and --hpi
+options have been specified, the following sequence of operations takes
+place:
+
+- The digitizer data, the coordinate transformation
+  between the MEG device and head coordinate frames, as well as the
+  average head surface provided with the MNE software are loaded.
+
+- If the ``--scalehead`` option is invoked, the average head surface
+  is scaled to the approximate size of the subject's head
+  by fitting a sphere to the digitizer points and to the head surface points
+  lying above the plane of the fiducial landmarks, respectively. The
+  standard head surface is then scaled by the ratio of the radii
+  of these two best-fitting spheres (see the sketch after this list). Without --scalehead, the standard
+  head surface is used as is, without scaling.
+
+- The known positions of (scaled) fiducial landmarks are matched
+  with those available in the digitizer data. This initial alignment
+  is then refined using the ICP algorithm, see :ref:`CACEHGCD`.
+  This automatic procedure has been found to be accurate enough for
+  visualization purposes.
+
+- Using the coordinate transformation thus established between
+  the coordinate system of the scalp surface (MRI coordinates) and
+  the MEG head coordinates together with the coordinate transformation
+  between the MEG head and device coordinate frames established with
+  HPI, the position of the MEG helmet surface is shown in the viewer
+  window.
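+
+The sphere fit used for the ``--scalehead`` scaling in step 2 can be
+sketched as follows; this is the standard algebraic least-squares sphere
+fit, not necessarily the exact formulation used by mne_analyze::
+
+    import numpy as np
+
+    def fit_sphere_radius(points):
+        """Least-squares (algebraic) sphere fit; returns the radius."""
+        A = np.hstack([2 * points, np.ones((len(points), 1))])
+        b = (points ** 2).sum(axis=1)
+        x, *_ = np.linalg.lstsq(A, b, rcond=None)
+        center = x[:3]
+        return np.sqrt(x[3] + center @ center)
+
+    def head_scale_factor(digitizer_points, head_surface_points):
+        """Ratio of the two best-fitting sphere radii."""
+        return (fit_sphere_radius(digitizer_points) /
+                fit_sphere_radius(head_surface_points))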
+
+If the ``--rthelmet`` option was present, the room-temperature
+helmet surface is shown instead of the MEG sensor surface. The digitizer
+and HPI data files are reloaded and steps 1 - 4 above are
+repeated when the Reload HPI button
+is pressed. The comment lines in the viewer window show information
+about the digitizer and HPI data files as well as the location of the
+MEG device coordinate origin in the MEG head coordinate system.
+
+.. note:: The appearance of the viewer visualization can be customized using the Options... button, see :ref:`CACHGDEA`. Since only the scalp and MEG device surfaces are loaded, only a limited number of options is active. The display can also be saved as an image from the img button, see :ref:`CACBEBGC`.
diff --git a/doc/source/manual/browse.rst b/doc/source/manual/browse.rst
new file mode 100644
index 0000000..8673354
--- /dev/null
+++ b/doc/source/manual/browse.rst
@@ -0,0 +1,2633 @@
+
+
+.. _ch_browse:
+
+===================
+Processing raw data
+===================
+
+Overview
+########
+
+The raw data processor mne_browse_raw is
+designed for simple raw data viewing and processing operations. In
+addition, the program is capable of off-line averaging and estimation
+of covariance matrices. mne_browse_raw can
+be also used to view averaged data in the topographical layout.
+Finally, mne_browse_raw can communicate
+with mne_analyze described in :ref:`ch_interactive_analysis` to
+calculate current estimates from raw data interactively.
+
+mne_browse_raw also has
+an alias, mne_process_raw . If mne_process_raw is
+invoked, no user interface appears. Instead, command line options
+are used to specify the filtering parameters as well as averaging
+and covariance-matrix estimation command files for batch processing. This
+chapter discusses both mne_browse_raw and mne_process_raw .
+
+.. _CACHCFEG:
+
+Command-line options
+####################
+
+This section first describes the options common to mne_browse_raw and mne_process_raw .
+Thereafter, options unique to the interactive (mne_browse_raw)
+and batch (mne_process_raw) modes are
+listed.
+
+.. _BABBGJEA:
+
+Common options
+==============
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---cd <*dir*>**
+
+    Change to this directory before starting.
+
+**\---raw <*name*>**
+
+    Specifies the raw data file to be opened. This option is required
+    for the batch version, mne_process_raw. If
+    a raw data file is not specified for the interactive version, mne_browse_raw ,
+    an empty interactive browser will open.
+
+**\---grad <*number*>**
+
+    Apply software gradient compensation of the given order to the data loaded
+    with the ``--raw`` option. This option is effective only
+    for data acquired with the CTF and 4D Magnes MEG systems. If orders
+    different from zero are requested for Neuromag data, an error message appears
+    and data are not loaded. Any compensation already existing in the
+    file can be undone or changed to another order by using an appropriate ``--grad`` option.
+    Possible orders are 0 (No compensation), 1 - 3 (CTF data), and 101
+    (Magnes data). The same compensation will be applied to all data
+    files loaded by mne_process_raw . For mne_browse_raw ,
+    this applies only to the data file loaded by specifying the ``--raw`` option.
+    For interactive data loading, the software gradient compensation
+    is specified in the corresponding file selection dialog, see :ref:`CACDCHAJ`.
+
+**\---filtersize <*size*>**
+
+    Adjust the length of the FFT to be applied in filtering. The number will
+    be rounded up to the next power of two. If the size is :math:`N`,
+    the corresponding length of time is :math:`N/f_s`,
+    where :math:`f_s` is the sampling frequency
+    of your data. The filtering procedure includes overlapping tapers
+    of length :math:`N/2` so that the total FFT
+    length will actually be :math:`2N`. This
+    value cannot be changed after the program has been started.
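+
+    As a worked example of the arithmetic above::
+
+        import math
+
+        fs = 600.0            # sampling frequency (Hz), example value
+        requested = 5000      # value given with --filtersize
+
+        N = 2 ** math.ceil(math.log2(requested))  # next power of two: 8192
+        print(N / fs)         # corresponding length of time in seconds
+        print(N // 2)         # length of the overlapping tapers in samples
+        print(2 * N)          # total FFT length actually used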
+
+**\---highpass <*value/Hz*>**
+
+    Highpass filter frequency limit. If this is too low with respect
+    to the selected FFT length, the data will not be highpass filtered. It
+    is best to experiment with the interactive version to find the lowest applicable
+    filter for your data. This value can be adjusted in the interactive
+    version of the program. The default is 0, *i.e.*,
+    no highpass filter apart from that used during the acquisition will
+    be in effect.
+
+**\---highpassw <*value/Hz*>**
+
+    The width of the transition band of the highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`f_s / (2N)`. This
+    value cannot be adjusted in the interactive version of the program.
+
+**\---lowpass <*value/Hz*>**
+
+    Lowpass filter frequency limit. This value can be adjusted in the interactive
+    version of the program. The default is 40 Hz.
+
+**\---lowpassw <*value/Hz*>**
+
+    The width of the transition band of the lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+**\---eoghighpass <*value/Hz*>**
+
+    Highpass filter frequency limit for EOG. If this is too low with respect
+    to the selected FFT length, the data will not be highpass filtered.
+    It is best to experiment with the interactive version to find the
+    lowest applicable filter for your data. This value can be adjusted in
+    the interactive version of the program. The default is 0, *i.e.*,
+    no highpass filter apart from that used during the acquisition will
+    be in effect.
+
+**\---eoghighpassw <*value/Hz*>**
+
+    The width of the transition band of the EOG highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`f_s / (2N)`.
+    This value cannot be adjusted in the interactive version of the
+    program.
+
+**\---eoglowpass <*value/Hz*>**
+
+    Lowpass filter frequency limit for EOG. This value can be adjusted in
+    the interactive version of the program. The default is 40 Hz.
+
+**\---eoglowpassw <*value/Hz*>**
+
+    The width of the transition band of the EOG lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+**\---filteroff**
+
+    Do not filter the data. This initial value can be changed in the
+    interactive version of the program.
+
+**\---digtrig <*name*>**
+
+    Name of the composite digital trigger channel. The default value
+    is 'STI 014'. Underscores in the channel name
+    will be replaced by spaces.
+
+**\---digtrigmask <*number*>**
+
+    Mask to be applied to the trigger channel values before considering them.
+    This option is useful if one wants to set some bits in a don't care
+    state. For example, some finger response pads keep the trigger lines
+    high if not in use, *i.e.*, a finger is not in
+    place. Yet, it is convenient to keep these devices permanently connected
+    to the acquisition system. The number can be given in decimal or
+    hexadecimal format (beginning with 0x or 0X). For example, the value
+    255 (0xFF) means that only the lowest order byte (usually trigger
+    lines 1 - 8 or bits 0 - 7) will be considered.
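+
+    The masking itself is a bitwise AND applied to each trigger value
+    before it is interpreted, for example::
+
+        mask = 0xFF          # --digtrigmask 0xFF: trigger lines 1 - 8 only
+        raw_value = 0x1404   # example value with "don't care" high bits set
+        print(raw_value & mask)   # -> 4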
+
+.. note:: Multiple raw data files can be specified for mne_process_raw .
+
+.. note:: Strictly speaking, trigger mask value zero would mean that all trigger inputs are ignored. However, for convenience, setting the mask to zero or not setting it at all has the same effect as 0xFFFFFFFF, *i.e.*, all bits set.
+
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_browse_raw or mne_process_raw . Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
+
+.. note:: The digital trigger channel mask can also be set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option supersedes the MNE_TRIGGER_CH_MASK environment variable.
+
+.. _CACCHAGA:
+
+Interactive mode options
+========================
+
+These options apply to the interactive (mne_browse_raw)
+version only.
+
+**\---allowmaxshield**
+
+    Allow loading of unprocessed Elekta-Neuromag data with MaxShield
+    on. These kinds of data should never be used for source localization
+    without further processing with Elekta-Neuromag software.
+
+**\---deriv <*name*>**
+
+    Specifies the name of a derivation file. This overrides the use
+    of a standard derivation file, see :ref:`CACFHAFH`.
+
+**\---sel <*name*>**
+
+    Specifies the channel selection file to be used. This overrides
+    the use of the standard channel selection files, see :ref:`CACCJEJD`.
+
+.. _CACFAAAJ:
+
+Batch-mode options
+==================
+
+These options apply to the batch-mode version, mne_process_raw only.
+
+**\---proj <*name*>**
+
+    Specify the name of the file containing a signal-space
+    projection (SSP) operator. If ``--proj`` options are present,
+    the data file is not consulted for an SSP operator. The operator
+    corresponding to average EEG reference is always added if EEG data
+    are present.
+
+**\---projon**
+
+    Activate the projections loaded. One of the options ``--projon`` or ``--projoff`` must
+    be present on the mne_process_raw command line.
+
+**\---projoff**
+
+    Deactivate the projections loaded. One of the options ``--projon`` or ``--projoff`` must
+    be present on the mne_process_raw command line.
+
+**\---makeproj**
+
+    Estimate the noise subspace from the data and create a new signal-space
+    projection operator instead of using one attached to the data file
+    or those specified with the ``--proj`` option. The following
+    eight options define the parameters of the noise subspace estimation. More
+    information on the signal-space projection can be found in :ref:`CACCHABI`.
+
+**\---projevent <*no*>**
+
+    Specifies the events which identify the time points of interest
+    for projector calculation. When this option is present, ``--projtmin`` and ``--projtmax`` are
+    relative to the time point of the event rather than the whole raw
+    data file.
+
+**\---projtmin <*time/s*>**
+
+    Specify the beginning time for the calculation of the covariance matrix
+    which serves as the basis for the new SSP operator. This option
+    is required with ``--projevent`` and defaults to the beginning
+    of the raw data file otherwise. This option is effective only if ``--makeproj`` or ``--saveprojtag`` options
+    are present.
+
+**\---projtmax <*time/s*>**
+
+    Specify the ending time for the calculation of the covariance matrix which
+    serves as the basis for the new SSP operator. This option is required
+    with ``--projevent`` and defaults to the end of the raw data
+    file otherwise. This option is effective only if ``--makeproj`` or ``--saveprojtag`` options
+    are present.
+
+**\---projngrad <*number*>**
+
+    Number of SSP components to include for planar gradiometers (default
+    = 5). This value is system dependent. For example, in a well-shielded
+    quiet environment, no planar gradiometer projections are usually
+    needed.
+
+**\---projnmag <*number*>**
+
+    Number of SSP components to include for magnetometers / axial gradiometers
+    (default = 8). This value is system dependent. For example, in a
+    well-shielded quiet environment, 3 - 4 components are needed
+    while in a noisy environment with light shielding even more than
+    8 components may be necessary.
+
+**\---projgradrej <*value/ fT/cm*>**
+
+    Rejection limit for planar gradiometers in the estimation of the covariance
+    matrix from which the new SSP operator is derived. The default
+    value is 2000 fT/cm. Again, this value is system dependent.
+
+**\---projmagrej <*value/ fT*>**
+
+    Rejection limit for magnetometers / axial gradiometers in the estimation of the covariance
+    matrix from which the new SSP operator is derived. The default value
+    is 3000 fT. Again, this value is system dependent.
+
+**\---saveprojtag <*tag*>**
+
+    This option defines the names of files to hold the SSP operator.
+    If this option is present the ``--makeproj`` option is
+    implied. The SSP operator file name is formed by removing the trailing ``.fif`` or ``_raw.fif`` from
+    the raw data file name and appending <*tag*> ``.fif``
+    to this stem. The recommended value for <*tag*> is ``-proj`` .
+
+**\---saveprojaug**
+
+    Specify this option if you want to use the projection operator file output
+    in the Elekta-Neuromag Signal processor (graph) software.
+
+**\---eventsout <*name*>**
+
+    List the digital trigger channel events to the specified file. By default,
+    only transitions from zero to a non-zero value are listed. If multiple
+    raw data files are specified, an equal number of ``--eventsout`` options
+    should be present. If the file name ends with .fif, the output will
+    be in fif format, otherwise a text event file will be output.
+
+**\---allevents**
+
+    List all transitions to the file specified with the ``--eventsout`` option.
+
+**\---events <*name*>**
+
+    Specifies the name of a fif or text format event file (see :ref:`CACBCEGC`) to be associated with a raw data file to be
+    processed. If multiple raw data files are specified, the number
+    of ``--events`` options can be smaller or equal to the
+    number of raw data files. If it is equal, the event filenames will
+    be associated with the raw data files in the order given. If it
+    is smaller, the remaining raw data files for which an event file
+    is not specified will *not* have an event file associated
+    with them. The event file format is recognized from the file name:
+    if it ends with ``.fif`` , the file is assumed to be in
+    fif format, otherwise a text file is expected.
+
+**\---ave <*name*>**
+
+    Specifies the name of an off-line averaging description file. For details
+    of the format of this file, please consult :ref:`CACBBDGC`.
+    If multiple raw data files are specified, the number of ``--ave`` options
+    can be smaller or equal to the number of raw data files. If it is
+    equal, the averaging description file names will be associated with
+    the raw data files in the order given. If it is smaller, the last
+    description file will be used for the remaining raw data files.
+
+**\---saveavetag <*tag*>**
+
+    If this option is present and averaging is invoked with the ``--ave`` option,
+    the outfile and logfile options in the averaging description file
+    are ignored. Instead, trailing ``.fif`` or ``_raw.fif`` is
+    removed from the raw data file name and <*tag*> ``.fif`` or <*tag*> ``.log`` is appended
+    to create the output and log file names, respectively.
+
+**\---gave <*name*>**
+
+    If multiple raw data files are specified as input and averaging
+    is requested, the grand average over all data files will be saved
+    to <*name*> .
+
+**\---cov <*name*>**
+
+    Specify the name of a description file for covariance matrix estimation. For
+    details of the format of this file, please see :ref:`CACEBACG`.
+    If multiple raw data files are specified, the number of ``--cov`` options can
+    be smaller or equal to the number of raw data files. If it is equal, the
+    description file names will be associated with the raw data
+    files in the order given. If it is smaller, the last description
+    file will be used for the remaining raw data files.
+
+**\---savecovtag <*tag*>**
+
+    If this option is present and covariance matrix estimation is invoked with
+    the ``--cov`` option, the outfile and logfile options in
+    the covariance estimation description file are ignored. Instead,
+    trailing ``.fif`` or ``_raw.fif`` is removed from
+    the raw data file name and <*tag*> .fif or <*tag*> .log
+    is appended to create the output and log file names, respectively.
+    For compatibility with other MNE software scripts, ``--savecovtag -cov`` is recommended.
+
+**\---savehere**
+
+    If the ``--saveavetag`` and ``--savecovtag`` options
+    are used to generate the output file names, the resulting files
+    will go to the same directory as raw data by default. With this
+    option the output files will be generated in the current working
+    directory instead.
+
+**\---gcov <*name*>**
+
+    If multiple raw data files are specified as input and covariance matrix estimation
+    is requested, the grand average over all data files will be saved
+    to <*name*> . The details of
+    the covariance matrix estimation are given in :ref:`CACHAAEG`.
+
+**\---save <*name*>**
+
+    Save a filtered and optionally down-sampled version of the data
+    file to <*name*> . If multiple
+    raw data files are specified, an equal number of ``--save`` options
+    should be present. If <*filename*> ends
+    with ``.fif`` or ``_raw.fif`` , these endings are
+    deleted. After these modifications, ``_raw.fif`` is inserted
+    after the remaining part of the file name. If the file is split
+    into multiple parts (see ``--split`` option below), the
+    additional parts will be called <*name*> ``-`` <*number*> ``_raw.fif``
+
+**\---split <*size/MB*>**
+
+    Specifies the maximum size of the raw data files saved with the ``--save`` option.
+    By default, the output is split into files which are just below
+    2 GB so that the fif file maximum size is not exceeded.
+
+**\---anon**
+
+    Do not include any subject information in the output files created with
+    the ``--save`` option.
+
+**\---decim <*number*>**
+
+    The data are decimated by this factor before saving to the file
+    specified with the ``--save`` option. For decimation to
+    succeed, the data must be lowpass filtered to less than a third of
+    the sampling frequency effective after decimation.
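+
+    For example, the rule above amounts to the following check::
+
+        fs = 600.0       # original sampling frequency (Hz)
+        decim = 4        # --decim factor
+        lowpass = 40.0   # --lowpass setting (Hz)
+
+        fs_new = fs / decim
+        # Decimation succeeds only if the lowpass is below fs_new / 3
+        assert lowpass < fs_new / 3.0, "apply a lower lowpass filter first"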
+
+The user interface
+##################
+
+.. figure:: mne_browse_raw/windows_menu-7.png
+    :alt: The user interface of mne_browse_raw
+    :figwidth: 100%
+    :width: 100%
+
+    The user interface of mne_browse_raw
+
+The mne_browse_raw user
+interface contains the following areas:
+
+- The menu bar.
+
+- The data display area.
+
+- Viewing and averaging tools.
+
+- Message line.
+
+The viewing and averaging tools allow quick browsing of the
+raw data with triggers, adding new triggers, and averaging on a
+single trigger.
+
+The File menu
+#############
+
+.. _CACDCHAJ:
+
+Open
+====
+
+Selecting Open from the File
+menu pops up the dialog shown in :ref:`CACBHGFE`.
+
+The Raw files and Maxfilter output buttons change the file name filter to include
+names which end with ``_raw.fif`` or ``sss.fif`` ,
+respectively, to facilitate selection of original raw files or those
+processed with the Neuromag Maxfilter (TM) software.
+
+The options under Software gradient compensation allow
+selection of the compensation grade for the data. These selections
+apply to the CTF data only. The standard choices are No compensation and Third-order gradient. If
+other than No compensation is
+attempted for non-CTF data, an error will be issued. The compensation
+selection affects the averages and noise-covariance matrices subsequently
+computed. The desired compensation takes effect independent of the
+compensation state of the data in the file, *i.e.*,
+already compensated data can be uncompensated and vice versa. For more
+information on software gradient compensation please consult :ref:`BEHDDFBI`.
+
+The Keep the initial skip button
+controls how the initial segment of data not stored in the raw data
+file is handled. During the MEG acquisition data are collected continuously
+but saving to the raw data file is controlled by the Record raw button. Initial skip refers to the segment
+of data between the start of the recording and the first activation
+of Record raw . If Keep initial skip is set, this empty segment is taken into
+account in timing, otherwise time zero is set to the beginning of
+the data stored to disk.
+
+When a raw data file is opened, the digital trigger channel
+is scanned for events. For large files this may take a while.
+
+.. note:: After scanning the trigger channel for events, mne_browse_raw and mne_process_raw produce a fif file containing the event information. This file will be called <*raw data file name without fif extension*> ``-eve.fif`` . If the same raw data file is opened again, this file will be consulted for event information, thus making it unnecessary to scan through the file for trigger line events.
+
+.. note:: You can produce the fif event file by running mne_process_raw as follows: ``mne_process_raw --raw`` <*raw data file*> . The fif format event files can be read and written with the mne_read_events and mne_write_events functions in the MNE Matlab toolbox, see :ref:`ch_matlab`.
+
+.. _CACBHGFE:
+
+.. figure:: mne_browse_raw/open_dialog.png
+    :alt: Open dialog
+
+    The Open dialog.
+
+.. _BABJEJDG:
+
+Open evoked
+===========
+
+This menu item brings up a standard file selection dialog
+to load evoked-response data from files. All data sets from a file
+are loaded automatically and displayed in the average view window,
+see :ref:`CACDADBA`. The data loaded are affected by the
+scale settings, see :ref:`CACBEHCD`, the filter, see :ref:`CACCDBBG`, and the options selected in the Manage averages dialog, see :ref:`CACJFADF`.
+
+.. _CACBDDIE:
+
+Save
+====
+
+It is possible to save filtered and projected data into a
+new raw data file. When you invoke the save option from the file
+menu, you will be prompted for the output file name and a down-sampling
+factor. The sampling frequency after down-sampling must be at least
+three times the lowpass filter corner frequency. The output will
+be split into files which are just below 2 GB so that the fif file
+maximum size is not exceeded.
+
+If <*filename*> ends
+with ``.fif`` or ``_raw.fif`` , these endings are
+deleted. After these modifications, ``_raw.fif`` is inserted
+after the remaining part of the file name. If the file is split
+into multiple parts, the additional parts will be called <*name*> ``-`` <*number*> ``_raw.fif`` .
+For downsampling and saving options in mne_process_raw ,
+see :ref:`CACFAAAJ`.
+
+Change working directory
+========================
+
+Brings up a file selection dialog which allows changing of
+the working directory.
+
+.. _CACDFJDA:
+
+Read projection
+===============
+
+Selecting Read projection... from
+the File menu pops up a dialog
+to enter a name of a file containing a signal-space projection operator
+to be applied to the data. There is an option to keep existing projection
+items.
+
+.. note:: Whenever EEG channels are present in the data, a projection item corresponding to the average EEG reference is automatically added.
+
+Save projection
+===============
+
+The Save projection... item
+in the File menu pops up a dialog
+to save the present projection operator into a file. Normally, the
+EEG average reference projection is not included. If you want to
+include it, mark the Include EEG average reference option.
+If your MEG projection includes items for both magnetometers and
+gradiometers and you want to use the projection operator file output
+from here in the Neuromag Signal processor (graph) software,
+mark the Enforce compatibility with graph option.
+
+Apply bad channels
+==================
+
+Applies the current selection of bad channels to the currently
+open raw file.
+
+Load events (text)
+==================
+
+Reads a text format event file. For more information on events,
+see :ref:`BABDFAHA`.
+
+Load events (fif)
+=================
+
+Reads a fif format event file. For more information on events,
+see :ref:`BABDFAHA`.
+
+.. _CACJGIFA:
+
+Save events (text)
+==================
+
+Brings up a dialog to save all or selected types of events
+into a text file. This file can be edited and used in the averaging
+and covariance matrix estimation as an input file to specify the
+time points of events, see :ref:`CACBCEGC`. For more information
+on events, see :ref:`BABDFAHA`.
+
+Save events (fif)
+=================
+
+Save the events in fif format. These binary event files can
+be read and written with the mne_read_events and mne_write_events functions
+in the MNE Matlab toolbox, see :ref:`ch_matlab`. For more information
+on events, see :ref:`BABDFAHA`.
+
+.. _CACFHAFH:
+
+Load derivations
+================
+
+This menu choice allows loading of channel derivation data
+files created with the mne_make_derivations utility,
+see :ref:`CHDHJABJ`, or using the interactive derivations
+editor in mne_browse_raw , see :ref:`CACJIEHI`. The most common use of derivations is to calculate
+differences between EEG channels, *i.e.*, bipolar
+EEG data. Since any number of channels can be included in a derivation
+with arbitrary weights, other applications are possible as well.
+Before a derivation is accepted for use, the following criteria have
+to be met:
+
+- All channels to be combined into a single
+  derivation must have identical units of measure.
+
+- All channels in a single derivation have to be of the same
+  kind, *e.g.*, MEG channels or EEG channels.
+
+- All channels specified in a derivation have to be present
+  in the currently loaded data set.
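+
+Conceptually, a derived channel is simply a weighted sum of recorded
+channels. A minimal sketch, with hypothetical channel names::
+
+    import numpy as np
+
+    def derive(data, ch_names, weights):
+        """Weighted sum of channels, e.g. a bipolar EEG pair.
+
+        data     : (n_channels, n_samples) array
+        ch_names : list of channel names matching the rows of data
+        weights  : e.g. {'EEG 001': 1.0, 'EEG 002': -1.0}
+        """
+        out = np.zeros(data.shape[1])
+        for name, w in weights.items():
+            out += w * data[ch_names.index(name)]
+        return out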
+
+Multiple derivation data files can be loaded by specifying
+the Keep previous derivations option in
+the dialog that specifies the derivation file to be loaded. After
+a derivation file has been successfully loaded, a list of available
+derivations will be shown in a message dialog.
+
+Each of the derived channels has a name specified when the
+derivation file was created. The derived channels can be included
+in channel selections, see :ref:`CACCJEJD`. At present, derived
+channels cannot be displayed in topographical data displays. Derived
+channels are not included in averages or noise covariance matrix
+estimation.
+
+.. note:: If the file ``$HOME/.mne/mne_browse_raw-deriv.fif`` exists and contains derivation data, it is loaded automatically when mne_browse_raw starts unless the ``--deriv`` option has been used to specify a nonstandard derivation file, see :ref:`CACCHAGA`.
+
+Save derivations
+================
+
+Saves the current derivations into a file.
+
+Load channel selections
+=======================
+
+This choice loads a new set of channel selections. The default
+directory for the selections is $HOME/.mne. If this directory does
+not exist, it will be created before bringing up the file selection
+dialog to load the selections.
+
+.. _CACDDCGF:
+
+Save channel selections
+=======================
+
+This choice brings up a dialog to save the current channel
+selections. This is particularly useful if the standard set of selections
+has been modified as explained in :ref:`CACCJEJD`. The default
+directory for the selections is $HOME/.mne. If this directory does
+not exist, it will be created before bringing up the file selection
+dialog to save the selections. Note that all currently existing
+selections will be saved, not just those added to the ones initially
+loaded.
+
+Quit
+====
+
+Exits the program without questions asked.
+
+The Adjust menu
+###############
+
+.. _CACCDBBG:
+
+Filter
+======
+
+Selecting Filter... from
+the Adjust menu pops up the dialog
+shown in :ref:`CACCEEGI`.
+
+.. _CACCEEGI:
+
+.. figure:: mne_browse_raw/filter_dialog.png
+    :alt: filter adjustment dialog
+    :align: center
+    :figwidth: 55%
+    :width: 400
+
+    The filter adjustment dialog.
+
+The items in the dialog have the following functions:
+
+**Highpass (Hz)**
+
+    The half-amplitude point of the highpass filter. The width of the transition
+    from zero to one can be specified with the ``--highpassw`` command-line
+    option, see :ref:`CACHCFEG`. The lowest feasible highpass value
+    is constrained by the length of the filter and the sampling frequency.
+    You will be informed when you press OK or Apply if
+    the selected highpass could not be realized. The default value zero means
+    no highpass filter is applied in addition to the analog highpass
+    present in the data.
+
+**Lowpass (Hz)**
+
+    The half-amplitude point of the lowpass filter.
+
+**Lowpass transition (Hz)**
+
+    The width of the :math:`\cos^2`-shaped transition
+    from one to zero, centered at the Lowpass value.
+
+**Filter active**
+
+    Selects whether or not the filter is applied to the data.
+
+The filter is realized in the frequency domain and has a
+zero phase shift. When a filter is in effect, the value of the first
+sample in the file is subtracted from the data to correct for an
+initial dc offset. This procedure also eliminates any filter artifacts
+in the beginning of the data.
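+
+In outline, such a filter multiplies the spectrum by a real, non-negative
+transfer function, which guarantees zero phase shift. A minimal sketch
+(the actual implementation uses the overlapping tapers described under
+the ``--filtersize`` option)::
+
+    import numpy as np
+
+    def zero_phase_lowpass(x, fs, f_lp, f_width):
+        """Lowpass with a cos^2 transition of width f_width at f_lp."""
+        x = x - x[0]                    # remove the initial dc offset
+        X = np.fft.rfft(x)
+        f = np.fft.rfftfreq(len(x), 1.0 / fs)
+        H = np.ones_like(f)
+        lo, hi = f_lp - f_width / 2.0, f_lp + f_width / 2.0
+        trans = (f >= lo) & (f <= hi)
+        H[trans] = np.cos(0.5 * np.pi * (f[trans] - lo) / (hi - lo)) ** 2
+        H[f > hi] = 0.0
+        # A real, symmetric transfer function introduces no phase shift
+        return np.fft.irfft(X * H, n=len(x))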
+
+.. note:: The filter affects both the raw data and evoked-response data loaded from files. However, the averages computed in mne_browse_raw and shown in the topographical display are not refiltered if the filter is changed after the average was computed.
+
+.. _CACBEHCD:
+
+Scales
+======
+
+Selecting Scales... from
+the Adjust menu pops up the dialog
+shown in :ref:`CACBJGBA`.
+
+.. _CACBJGBA:
+
+.. figure:: mne_browse_raw/scales_dialog.png
+    :alt: Scales dialog
+    :figwidth: 100%
+    :width: 100%
+
+    The Scales dialog.
+
+The items in the dialog have the following functions:
+
+**MEG (fT/cm)**
+
+    Sets the scale for MEG planar gradiometer channels in fT/cm. All scale
+    values are defined from zero to maximum, *i.e.*,
+    the viewports where signals are plotted have the limits ± <*scale value*> .
+
+**MEG axmult (cm)**
+
+    The scale for MEG magnetometers and axial gradiometers is defined
+    by multiplying the gradiometer scale by this number, yielding units
+    of fT.
+
+**EEG** (:math:`\mu V`)
+
+    The scale for EEG channels in :math:`\mu V`.
+
+**EOG** (:math:`\mu V`)
+
+    The scale for EOG channels in :math:`\mu V`.
+
+**ECG (mV)**
+
+    The scale for ECG channels in mV.
+
+**EMG (mV)**
+
+    The scale for EMG channels in mV.
+
+**MISC (V)**
+
+    The scale for MISC channels in V.
+
+**Time span (s)**
+
+    The length of raw data displayed in the main window at a time.
+
+**Show stimulus markers**
+
+    Draw vertical lines at time points where the digital trigger channel has
+    a transition from zero to a nonzero value.
+
+**Segment min. time (s)**
+
+    It is possible to show data segments in the topographical (full
+    view) layout, see below. This parameter sets the starting time point,
+    relative to the selected time, to be displayed.
+
+**Segment max. time (s)**
+
+    This parameter sets the ending time point, relative to the current time,
+    to be displayed in the topographical layout.
+
+**Show segments in full view**
+
+    Switches on the display of data segments in the topographical layout.
+
+**Show segments in sample view**
+
+    Switches on the display of data segments in a "sidebar" to
+    the right of the main display.
+
+**Show channel names**
+
+    Show the names of the channels in the topographical displays.
+
+**Text size**
+
+    Size of the channel number text as a fraction of the height of each viewport.
+
+**Show viewport frames**
+
+    Show the boundaries of the viewports in the topographical displays.
+
+**Show zeroline and zerolevel**
+
+    Show the zero level, *i.e.*, the baseline level
+    in the topographical displays. In addition, the zero time point
+    is indicated in the average views if it falls within the time range, *i.e.*,
+    if the minimum of the time scale is negative and the maximum is
+    positive.
+
+**Scale magnification for averages**
+
+    For average displays, the scales are made more sensitive by this
+    factor.
+
+**Average display baseline min (ms)**
+
+    Sets the lower time limit for the average display baseline. This
+    setting does not affect the averages stored.
+
+**Average display baseline max (ms)**
+
+    Sets the upper time limit for the average display baseline. This
+    setting does not affect the averages stored.
+
+**Use average display baseline**
+
+    Switches the application of a baseline to the displayed averages
+    on and off.
+
+**Average time range min (ms)**
+
+    Sets the minimum time for the average display. This setting is inactive
+    if Autoscale time range is on.
+
+**Average time range max (ms)**
+
+    Sets the maximum time for the average data display. This setting
+    is inactive if Autoscale time range is
+    on.
+
+**Autoscale time range**
+
+    Set the average display time range automatically to be long enough to
+    accommodate all data.
+
+Colors
+======
+
+Shows a dialog which allows changes to the default colors
+of various display items.
+
+.. _CACJIEHI:
+
+Derivations
+===========
+
+Brings up the interactive derivations editor. This editor
+can be used to add or modify derived channels, *i.e.*,
+linear combinations of signals actually recorded. Channel derivations
+can be also created and modified using the mne_make_derivations tool,
+see :ref:`CHDHJABJ`. The interactive editor contains two main
+areas:
+
+- Interactive tools for specifying a linear
+  combination of channels. This tool is limited to combining up to five
+  channels in each of the derivations. Clicking Add after
+  defining the name of the new derivation, the weights of the component
+  channels, and their names adds the corresponding arithmetic expression
+  to the text area.
+
+- Text area which contains the currently defined derivations
+  as arithmetic expressions in a format identical to that used by mne_make_derivations .
+  These expressions can be manually edited before accepting the new
+  set of derivations. Initially, the text area will contain the derivations
+  already defined.
+
+The Define button interprets
+the arithmetic expressions in the text area as new derivations and
+closes the dialog. The Cancel button
+closes the dialog without any change in the derivations.
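+
+Numerically, a derived channel is just a weighted sum of recorded
+channels. The following Python fragment is a minimal sketch of this
+computation; the data array, channel names, and weights are hypothetical
+and not part of the MNE software::
+
+    import numpy as np
+
+    def apply_derivation(data, ch_names, weights):
+        """Compute one derived channel as a weighted sum of recorded ones.
+
+        data     : (n_channels, n_samples) array of measured signals
+        ch_names : channel names, in the same order as the rows of data
+        weights  : dict mapping channel names to weights, e.g.
+                   {'EEG 001': 1.0, 'EEG 002': -1.0} for a bipolar pair
+        """
+        deriv = np.zeros(data.shape[1])
+        for name, weight in weights.items():
+            deriv += weight * data[ch_names.index(name)]
+        return deriv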
+
+The recommended workflow for defining derived EEG channels and
+associated selections interactively involves the following steps:
+
+- If desired, EEG channels can be relabeled
+  with descriptive names using the mne_rename_channels utility,
+  see :ref:`CHDCFEAJ`. It is strongly recommended that you
+  keep a copy of the channel alias file used by mne_rename_channels .
+  If necessary, you can then easily return to the original channel
+  names by running mne_rename_channels again
+  with the ``--revert`` option.
+
+- Load the data file into mne_browse_raw and
+  use the interactive derivations editor to create the desired derived
+  channels. These are usually differences between the signals in two
+  EEG electrodes.
+
+- Save the derivations from the file menu.
+
+- If desired, move the derivations file to the standard location
+  (``$HOME/.mne/mne_browse_raw-deriv.fif`` ).
+
+- Create new channel selections employing the original and derived channels
+  using the channel selection tool described in :ref:`CACCJEJD`.
+
+- Save the new channel selections from the file menu.
+
+- If desired, change the order of the channels in the selections
+  in the selection file by editing it in a text editor and move it
+  to the standard location ``$HOME/.mne/mne_browse_raw.sel`` .
+
+.. _CACCJEJD:
+
+Selection
+=========
+
+Brings up a dialog to select channels to be shown in the
+main raw data display. This dialog also allows modification of the
+set of channel selections as described below.
+
+By default, the available selections are defined by the file ``$MNE_ROOT/share/mne/mne_browse_raw/mne_browse_raw.sel`` .
+This default channel selection file can be modified by copying the
+file into ``$HOME/.mne/mne_browse_raw.sel`` . The format
+of this text file should be self-explanatory.
+
+.. _CACIHFFH:
+
+.. figure:: mne_browse_raw/channel_selection.png
+    :alt: channel selection dialog
+
+    The channel selection dialog.
+
+The channel selection dialog is shown in :ref:`CACIHFFH`.
+The number of items in the selection list depends on the contents
+of your selection file. If the list has the keyboard focus you can
+easily move from one selection to another with the up and down arrow
+keys.
+
+The two buttons below the channel selection buttons facilitate
+the modification of the selections:
+
+**Add...**
+
+    Brings up the selection dialog shown in :ref:`CACFECED` to
+    create new channel selections.
+
+**Omit current**
+
+    Delete the current channel selection. Deletion only affects the
+    selections in the memory of the program. To save the changes permanently
+    into a file, use Save channel selections... in
+    the File menu, see :ref:`CACDDCGF`.
+
+.. _CACFECED:
+
+.. figure:: mne_browse_raw/new_selection.png
+    :alt: Dialog to create a new channel selection
+
+    Dialog to create a new channel selection.
+
+The components of the selection creation dialog shown in :ref:`CACFECED` have the following functions:
+
+**List of channel names**
+
+    The channels selected from this list will be included in the new channel
+    selection. A selection can be extended with the control key. A range
+    of channels can be selected with the shift key. The list contains
+    both the original channels actually present in the file and the names
+    of the channels in currently loaded derivation data, see :ref:`CACFHAFH`.
+
+**Regexp:**
+
+    This provides another way to select channels. By entering a regular
+    expression as defined in IEEE Standard 1003.2 (POSIX.2), all channels
+    matching it will be selected and added to the present selection.
+    An empty expression deselects all items in the channel list. Some
+    useful regular expressions are listed in :ref:`CACHCHDJ`.
+    In the present version, regular expression matching does not consider
+    the derived channels.
+
+**Name:**
+
+    This text field specifies the name of the new selection.
+
+**Select**
+
+    Select the channels specified by the regular expression. The same effect
+    can be achieved by pressing return in the Regexp: text field.
+
+**Add**
+
+    Add a new channel selection which contains the channels selected from
+    the channel name list. The name of the selection is specified with
+    the Name: text field.
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.45\linewidth}|
+.. _CACHCHDJ:
+.. table:: Examples of regular expressions for channel selections
+
+    +--------------------+----------------------------------------------+
+    | Regular expression | Meaning                                      |
+    +====================+==============================================+
+    | ``MEG``            | Selects all MEG channels.                    |
+    +--------------------+----------------------------------------------+
+    | ``EEG``            | Selects all EEG channels.                    |
+    +--------------------+----------------------------------------------+
+    | ``MEG.*1$``        | Selects all MEG channels whose names end     |
+    |                    | with the number 1, *i.e.*, all magnetometer  |
+    |                    | channels.                                    |
+    +--------------------+----------------------------------------------+
+    | ``MEG.*[2,3]$``    | Selects all MEG gradiometer channels.        |
+    +--------------------+----------------------------------------------+
+    | ``EEG|STI 014``    | Selects all EEG channels and stimulus        |
+    |                    | channel STI 014.                             |
+    +--------------------+----------------------------------------------+
+    | ``^M``             | Selects all channels whose names begin with  |
+    |                    | the letter M.                                |
+    +--------------------+----------------------------------------------+
+
+.. note:: The interactive tool for creating the channel selections does not allow you to change the order of the selected channels from that given by the list of channels. However, the ordering can be easily changed by manually editing the channel selection file in a text editor.
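+
+To test a pattern outside the dialog, the matching behavior can be
+approximated with Python's re module, which agrees with POSIX.2 for the
+simple patterns listed above. The channel names below are hypothetical::
+
+    import re
+
+    ch_names = ['MEG 0111', 'MEG 0112', 'MEG 0113', 'EEG 001', 'STI 014']
+
+    # Like the dialog, match anywhere in the name (no implicit anchoring).
+    pattern = re.compile('MEG.*1$')
+    selected = [name for name in ch_names if pattern.search(name)]
+    print(selected)  # ['MEG 0111']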
+
+.. _CACFGGCF:
+
+Full view layout
+================
+
+Shows a selection of available layouts for the topographical
+views (full view and average display). The system-wide layout files
+reside in ``$MNE_ROOT/share/mne/mne_analyze/lout`` . In
+addition any layout files residing in ``$HOME/.mne/lout`` are
+listed. The default layout is Vectorview-grad. If there is a layout
+file in the user's private layout directory ending with ``-default.lout`` ,
+that layout will be used as the default instead. The Default button
+returns to the default layout.
+
+The format of the layout files is:
+
+  | <*plot area limits*>
+  | <*viewport definition #1*>
+  | ...
+  | <*viewport definition #N*>
+
+The <*plot area limits*> define
+the size of the plot area (:math:`x_{min}\ x_{max}\ y_{min}\ y_{max}`) which should accommodate all view ports. When the layout is used, the
+plot area will preserve its aspect ratio; if the plot window has
+a different aspect ratio, there will be empty space on the sides.
+
+The viewports define the locations of the individual channels
+in the plot. Each viewport definition consists of
+
+  <*number*> :math:`x_0\ y_0` <*width*> <*height*> <*name*> [: <*name*> ] ...
+
+where <*number*> is a viewport number (not used by the MNE software), :math:`x_0` and :math:`y_0` are
+the coordinates of the lower-left corner of the viewport, <*width*> and <*height*> are
+the viewport dimensions, and <*name*> is
+a name of a channel. Multiple channel names can be specified by
+separating them with a colon.
+
+When a measurement channel name is matched to a layout channel
+name, all spaces are removed from the channel names and both
+the layout channel name and the data channel name are converted
+to lower case. In addition, anything including and after a hyphen
+(-) is omitted. The latter convention facilitates using CTF MEG
+system data, which has the serial number of the system appended
+to the channel name with a dash. Removal of the spaces is important
+for the Neuromag Vectorview data because newer systems do not have
+spaces in the channel names like the original Vectorview systems
+did.
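+
+As an illustration of the file format and of the name-matching rules
+above, here is a rough Python sketch of a layout reader; it is not the
+parser used by the MNE programs::
+
+    def normalize(name):
+        """Normalize a channel name for layout matching: remove spaces,
+        convert to lower case, drop anything from the first hyphen on."""
+        return name.replace(' ', '').lower().split('-')[0]
+
+    def read_layout(fname):
+        """Read a layout file: plot area limits, then one viewport/line."""
+        with open(fname) as fid:
+            lines = [line.strip() for line in fid if line.strip()]
+        limits = tuple(float(x) for x in lines[0].split()[:4])
+        viewports = []
+        for line in lines[1:]:
+            fields = line.split()
+            x0, y0, width, height = (float(x) for x in fields[1:5])
+            names = [n.strip() for n in ' '.join(fields[5:]).split(':')]
+            viewports.append((x0, y0, width, height, names))
+        return limits, viewports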
+
+.. note:: The mne_make_eeg_layout utility can be employed to create a layout file matching the positioning of EEG electrodes, see :ref:`CHDDGDJA`.
+
+.. _CACDDIDH:
+
+Projection
+==========
+
+Lists the currently available signal-space projection (SSP)
+vectors and allows the activation and deactivation of items. For
+more information on SSP, see :ref:`CACCHABI`.
+
+Compensation
+============
+
+Brings up a dialog to select software gradient compensation.
+This overrides the choice made when the file was opened. For details, see :ref:`CACDCHAJ`, above.
+
+.. _CACBIAHD:
+
+Averaging preferences
+=====================
+
+
+.. _CACCFFAH:
+
+.. figure:: mne_browse_raw/average_pref.png
+    :alt: Averaging preferences
+    :figwidth: 35%
+    :width: 300
+
+    Averaging preferences.
+
+Selecting Averaging preferences... from
+the Adjust menu pops up the dialog
+shown in :ref:`CACCFFAH`. These settings apply only to the
+simple averages calculated with the help of the tools residing just below
+the main raw data display, see :ref:`CACDFGAE`. These settings
+are also applied when a covariance matrix is computed to create
+an SSP operator as described in :ref:`CACEAHEI` and in the
+computation of a covariance matrix from raw data, see :ref:`BABJEIGJ`.
+
+The items in the dialog have the following functions:
+
+**Starting time (ms)**
+
+    Beginning time of the epoch to be averaged (relative to the trigger).
+
+**Ending time (ms)**
+
+    Ending time of the epoch to be averaged.
+
+**Ignore around stimulus (ms)**
+
+    Ignore this many milliseconds on both sides of the trigger when considering
+    the epoch. This parameter is useful for ignoring large stimulus
+    artefacts, *e.g.*, from electrical somatosensory
+    stimulation.
+
+**MEG grad rejection (fT/cm)**
+
+    Rejection criterion for MEG planar gradiometers. If the peak-to-peak
+    value of any planar gradiometer channel within the epoch exceeds this
+    value, the epoch will be omitted. A negative value turns off rejection
+    for a particular channel type (see the sketch after this list).
+
+**MEG mag rejection (fT)**
+
+    Rejection criterion for MEG magnetometers and axial gradiometers.
+
+**EEG rejection** (:math:`\mu V`)
+
+    Rejection criterion for EEG channels.
+
+**EOG rejection** (:math:`\mu V`)
+
+    Rejection criterion for EOG channels.
+
+**ECG rejection (mV)**
+
+    Rejection criterion for ECG channels.
+
+**MEG grad no signal (fT/cm)**
+
+    Signal detection criterion for MEG planar gradiometers. The peak-to-peak
+    value of all planar gradiometer signals must exceed this value
+    for the epoch to be included. This criterion allows rejection of data
+    with saturated or otherwise dysfunctional channels.
+
+**MEG mag no signal (fT)**
+
+    Signal detection criterion for MEG magnetometers and axial gradiometers.
+
+**EEG no signal** (:math:`\mu V`)
+
+    Signal detection criterion for EEG channels.
+
+**EOG no signal** (:math:`\mu V`)
+
+    Signal detection criterion for EOG channels.
+
+**ECG no signal (mV)**
+
+    Signal detection criterion for ECG channels.
+
+**Fix trigger skew**
+
+    This option has the same effect as the FixSkew parameter
+    in averaging description files, see :ref:`BABIHFBI`.
+
+**Trace color**
+
+    The color assigned for the averaged traces in the display can be adjusted
+    by pressing this button.
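+
+The rejection and no-signal criteria above amount to simple
+peak-to-peak tests. The following Python sketch, with hypothetical
+array inputs, shows the logic for one channel type::
+
+    import numpy as np
+
+    def epoch_acceptable(epoch, reject, flat):
+        """Peak-to-peak artifact test for channels of one type.
+
+        epoch  : (n_channels, n_samples) array for the extracted epoch
+        reject : rejection limit; a negative value disables rejection
+        flat   : no-signal limit; every channel must exceed it (0 = off)
+        """
+        ptp = epoch.max(axis=1) - epoch.min(axis=1)
+        if reject >= 0 and np.any(ptp > reject):
+            return False   # at least one channel exceeds the limit
+        if np.any(ptp < flat):
+            return False   # at least one channel is flat or saturated
+        return True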
+
+The Process menu
+################
+
+Averaging
+=========
+
+The Average... menu item
+pops up a file selection dialog to access a description file for
+batch-mode averaging. The structure of these files is described
+in :ref:`CACBBDGC`. All parameters for the averaging are
+taken from the description file, *i.e.*, the
+parameters set in the averaging preferences dialog (:ref:`CACBIAHD`) do not affect the result.
+
+Estimation of a covariance matrix
+=================================
+
+The Compute covariance... menu
+item pops up a file selection dialog to access a description file
+which specifies the options for the estimation of a covariance matrix.
+The structure of these files is described in :ref:`CACEBACG`.
+
+.. _BABJEIGJ:
+
+Estimation of a covariance matrix from raw data
+===============================================
+
+The Compute raw data covariance... menu
+item pops up a dialog which specifies a time range for raw data
+covariance matrix estimation and the file to hold the result. If
+a covariance matrix is computed in this way, the rejection parameters
+specified in averaging preferences are in effect. For description
+of the rejection parameters, see :ref:`CACBIAHD`. The time
+range can be also selected interactively from the main raw data
+display by doing a range selection with shift left button drag.
+
+.. _CACEAHEI:
+
+Creating a new SSP operator
+===========================
+
+The Create a new SSP operator... menu
+choice computes a new SSP operator as discussed in :ref:`BABFFCHF`.
+
+.. _BABHAGHF:
+
+.. figure:: mne_browse_raw/new_ssp.png
+    :alt: Time range specification for SSP operator calculation
+
+    Time range specification for SSP operator calculation
+
+When Create a new SSP operator... is selected,
+the window shown in :ref:`BABHAGHF` pops up. It allows
+the specification of a time range to be employed in the calculation
+of a raw data covariance matrix. The time range can also be selected
+interactively from the main raw data display by doing a range selection
+with shift left button drag. Normally, you should use empty room
+data for this computation. For the estimation of the covariance
+matrix any existing projection will be temporarily switched off. Remember
+to inspect your data for bad channels and select an appropriate filter
+setting before creating a new SSP operator. The artifact rejection parameters
+specified in the averaging preferences will be applied in the covariance
+matrix calculation, see :ref:`CACBIAHD`.
+
+Instead of using continuous raw data, it is also possible
+to employ short epochs around triggers (events) in the calculation
+of the new SSP operator by specifying a positive event number in
+the time specification dialog. This option is very useful, *e.g.*,
+to remove MCG/ECG artifacts from the data to facilitate detection
+of epileptic spikes:
+
+- Select left or right temporal channels
+  to the display.
+
+- Mark several peaks of the MCG signal in the data: click on
+  the first one and control click on the subsequent ones to extend
+  the selection.
+
+- Select an event number next to the Picked to button in the tool bar, see :ref:`CACDFGAE`,
+  and click Picked to . As a result
+  the lines marking the events will change color (by default from
+  green to blue) indicating transition to user-created events.
+
+- Specify an epoch time range to be employed and the event number selected
+  in the previous step for the SSP operator calculation.
+
+Once the parameters are set, click Compute to
+calculate a covariance matrix according to your specifications.
+Once the covariance matrix is ready, the parts corresponding to
+magnetometer or axial gradiometer, planar gradiometer, and EEG channels
+are separated and the corresponding eigenvectors and eigenvalues
+are computed. Once complete, a projection selector with eight magnetometer
+eigenvectors, five planar gradiometer eigenvectors, three EEG eigenvectors,
+as well as the existing projection items is displayed.
+
+Using the projection selector, you can experiment to see which vectors
+have a significant effect on the noise level of the data. You should
+strive to use a minimal number of vectors. When the selection
+is complete, you can click Accept to
+introduce this selection of vectors as the new projection operator. Discard abandons
+the set of calculated vectors. Whenever EEG channels are present
+in the data, a projection item corresponding to the average EEG
+reference is automatically added when a new projection operator
+is introduced. More information on the SSP method can be found in :ref:`CACCHABI`.
+
+.. note:: The new projection data created in mne_browse_raw is not automatically copied to the data file. You need to create a standalone projection file from File/Save projection... to save the new projection data and load it manually after the data file has been loaded if you want to include it in any subsequent analysis.
+
+.. note:: The command-line options for mne_process_raw allow calculation of the SSP operator from continuous data in the batch mode, see :ref:`CACFAAAJ`.
+
+.. _BABDJGGJ:
+
+The Windows menu
+################
+
+The Windows menu contains
+the following items:
+
+**Show full view...**
+
+    Brings up the topographical display of epochs extracted from the raw
+    data, see :ref:`CACDADBA`.
+
+**Show averages...**
+
+    Brings up the topographical display showing averaged data. These data
+    may include data averaged in the current mne_browse_raw session
+    or those loaded from files, see :ref:`BABJEJDG`.
+
+**Show event list...**
+
+    Brings up a window containing a list of the currently defined events. When
+    you click on an event in the list, the event is selected, a green cursor appears
+    at the event, and the event is brought to the middle of the raw
+    data display. The event list can also be restricted to user-defined
+    events (annotations) and user-defined events can be deleted. For
+    further information, see :ref:`BABDFAHA`.
+
+**Show annotator...**
+
+    Brings up a window which allows adding new events to the data with
+    annotations or comments. For details, see :ref:`BABDFAHA`.
+
+**Manage averages...**
+
+    Brings up a dialog to control the averaged data sets, see :ref:`CACJFADF`.
+
+**Start mne_analyze...**
+
+    Start interaction between mne_browse_raw and mne_analyze .
+    For details, see :ref:`CACGHEGC`.
+
+**Show head position**
+
+    Starts mne_analyze in the head position visualization mode and shows
+    the relative position of the MEG sensor array and the head using
+    the data in the presently open raw data file. For more details on
+    the head position visualization mode, see the mne_analyze documentation.
+
+**Quit mne_analyze...**
+
+    Quits the mne_analyze program
+    started with Start mne_analyze...
+
+The Help menu
+#############
+
+The contents of the Help menu
+is shown in :ref:`help_menu_browse`:
+
+.. _help_menu_browse:
+
+.. figure:: mne_browse_raw/help_menu.png
+    :alt: Help menu
+
+    The Help menu.
+
+**On version...**
+
+    Displays the version and compilation date of the program.
+
+**On license...**
+
+    Displays the license information.
+
+**About current data...**
+
+    Displays essential information about the currently loaded data set.
+
+**Why the beep?**
+
+    In some simple error situations, mne_browse_raw does
+    not pop up an error dialog but refuses the action and rings the
+    bell. The reason for this can be displayed through this help menu
+    item.
+
+The raw data display
+####################
+
+The main data display shows a section of the raw data in
+a strip-chart recorder format. The names of the channels displayed
+are shown on the left. The selection of channels is controlled from
+the selection dialog, see :ref:`CACCJEJD`. The length of
+the data section displayed is controlled from the scales dialog
+(:ref:`CACBEHCD`) and the filtering from the filter dialog (:ref:`CACCDBBG`). A signal-space projection can be applied
+to the data by loading a projection operator (:ref:`CACDFJDA`).
+The selection of the projection operator items is controlled from
+the projection dialog described in :ref:`CACDDIDH`.
+
+The control and browsing functions of the main data display
+are:
+
+**Selection of bad channels**
+
+    If you click on a channel name the corresponding channel is marked bad
+    or reinstated as an acceptable one. A channel marked bad is not considered
+    in the artefact rejection procedures in averaging and it is omitted
+    from the signal-space projection operations.
+
+**Browsing**
+
+    Browsing through the data. The section of data displayed can be selected
+    from the scroll bar at the bottom of the display. Additional browsing
+    functionality is discussed in :ref:`BABIDADB`. In addition, if the strip-chart
+    display has the keyboard focus, you can scroll back and forth with
+    the page up and page down keys.
+
+**Selection of time points**
+
+    When you click on the data with the left button, a vertical marker appears.
+    If Show segments in full view and/or Show segments in sample view is active in the scales
+    dialog (see :ref:`CACBEHCD`), a display of an epoch of data
+    specified in the scales dialog will appear. For more information
+    on full view, see :ref:`CACDADBA`. Multiple time points can
+    be selected by holding the control key down when clicking. If multiple
+    time points are selected several samples will be shown in the sample
+    and/or full view, aligned at the picked time point. The tool bar
+    offers functions to operate on the selected time points, see :ref:`CACDFGAE`.
+
+**Range selection**
+
+    Range selection. If you drag on the signals with the left mouse
+    button and the shift key down, a range of times will be selected
+    and displayed in the sample and/or full view. Note: All previous
+    selections are cleared by this operation.
+
+**Saving a copy of the display**
+
+    The right mouse button invokes a popup menu which allows saving of
+    the display in various formats. Best quality is achieved with the Illustrator
+    format. This format has the benefit that it is object oriented and
+    can be edited in Adobe Illustrator.
+
+**Drag and drop**
+
+    Graphics can be moved to one of the Elekta-Neuromag report composer
+    (cliplab ) view areas with the
+    middle mouse button.
+
+.. note:: When selecting bad channels, switch the signal-space projection off from the projection dialog. Otherwise bad channels may not be easily recognizable.
+
+.. note:: The cliplab drag-and-drop functionality requires that you have the proprietary Elekta-Neuromag analysis software installed. mne_browse_raw is compatible with cliplab versions 1.2.13 and later.
+
+.. _BABIDADB:
+
+Browsing data
+=============
+
+If the strip-chart display has
+the input focus (click on it if you are unsure), the keyboard and
+mouse can be used to browse the data as follows:
+
+**Up and down arrow keys**
+
+    Activate the previous or next selection in the selection list.
+
+**Left and right arrow keys**
+
+    If a single time point is selected (green line), move the time point forward
+    and backward by :math:`\pm 1` ms. If the shift
+    key is down, the time point is moved by :math:`\pm 10` ms.
+    If the control key is down (with or without shift), the time point
+    is moved by :math:`\pm 100` ms. If mne_browse_raw is
+    controlling mne_analyze (see :ref:`CACGHEGC`), the mne_analyze displays
+    will be updated accordingly. If the picked time point falls outside
+    the currently displayed section of data, the display will be automatically
+    scrolled backwards or forwards as needed.
+
+**Rotate the mouse wheel or rotate the trackball up/down**
+
+    Activate the previous or next selection in the selection list.
+
+**Rotate the trackball left/right or rotate the wheel with shift down**
+
+    Scroll backward or forward in the data by one screen. With the Alt key (Command or Apple key
+    on a Mac keyboard), the amount of scrolling will be :math:`1` s instead
+    of the length of one screen. If the shift key is held down with the
+    trackball, both left/right and up/down movements scroll the data
+    in time.
+
+.. note:: The trackball and mouse wheel functionality is dependent on your X server settings. On Mac OSX these settings are normally correct by default but on a LINUX system some adjustments to the X server settings may be necessary. Consult your system administrator or Google for details.
+
+.. _BABDFAHA:
+
+Events and annotations
+######################
+
+.. _BABJGEDF:
+
+Overview
+========
+
+In mne_browse_raw and mne_process_raw *events* mark
+interesting time points in the data. When a raw data file is opened,
+a standard event file is consulted for the list of events. If this
+file is not present, the digital trigger channel, defined by the ``--digtrig`` option
+or the ``MNE_TRIGGER_CH_NAME`` environment variable is
+scanned for events. For more information, see :ref:`BABBGJEA` and :ref:`CACDCHAJ`.
+
+In addition to the events detected on the trigger channel,
+it is possible to associate user-defined events to the data, either
+by marking data points interactively as described in :ref:`BABCIGGH` or by loading event data from files, see :ref:`BABDGBHI`. Especially if there is a comment associated
+with a user-defined event, we will sometimes call it an *annotation*.
+
+If a data file has annotations (user-defined events) associated
+with it in mne_browse_raw , information
+about them is automatically saved to an annotation file when a data file is closed, *i.e.*,
+when you quit mne_browse_raw or
+load a new data file. This annotation file is called <*raw data file name without fif extension*> ``-annot.fif`` and
+will be stored in the same directory as the raw data file. Therefore,
+write permission to this directory is required to save the annotation
+file.
+
+Both the events defined by the trigger channel and the user-defined
+events have three properties:
+
+- The *time* when the
+  event occurred.
+
+- The *value* on the trigger channel just
+  before the change and now. For user-defined events the value before
+  is always zero and the current value is user defined and does not
+  necessarily reflect a change on the trigger channel. The trigger
+  channel events may also indicate changes between two non-zero values
+  and from a non-zero to zero. The event list described in :ref:`BABFDICC` shows only transitions from zero to a non-zero
+  value. Similarly, the Jump to item
+  in the tool bar, described in :ref:`CACDFGAE`, only detects
+  transitions from zero to a nonzero value.
+
+- An optional *comment* text, which is especially
+  helpful in associating user-defined events with real-world activity, *e.g.*,
+  the subject closing or opening his/her eyes or an epileptic patient
+  showing indications of a seizure.
+
+.. _BABFDICC:
+
+The event list
+==============
+
+The Windows/Show event list... menu
+choice shows a window containing a list of currently defined events.
+The list can be restricted to user-defined events by checking User-defined events only . When an event is selected from the
+list, the main display jumps to the corresponding time. If a user-defined
+event is selected, it can be deleted with the Delete a user-defined event button.
+
+.. _BABDGBHI:
+
+Loading and saving event files
+==============================
+
+Using the Load/Save events choices in the file menu, events
+can be saved in text and fif formats, see :ref:`CACBCEGC`,
+below. The loading dialogs have the following options:
+
+**Match comment with**
+
+    Only those events which contain comments and in which the comment
+    matches the entered text are loaded. This filtering option is useful, *e.g.*,
+    in loading averaging or covariance matrix computation log files,
+    see :ref:`BABIHFBI` and :ref:`BABCGEJE`.
+    If the word *omit* is entered as the filter,
+    only events corresponding to discarded epochs are loaded and the
+    reason for rejection can be investigated in detail.
+
+**Add as user events**
+
+    Add the events as if they were user-defined events. As a result,
+    the annotation file saved next time mne_browse_raw closes
+    this raw file will contain these events.
+
+**Keep existing events**
+
+    By default, the events loaded will replace the currently defined
+    ones. With this option checked, the loaded event will be merged
+    with the currently existing ones.
+
+The event saving dialogs have the
+following options controlling the data saved:
+
+**Save events read from the data file**
+
+    Save only those events which are not designated as user-defined. These
+    are typically the events corresponding to changes in the digital
+    trigger channel. Another possible source for these events is an event
+    file manually loaded *without* the Add as user events option.
+
+**Save events created here**
+
+    Save the user-defined events.
+
+**Save all trigger line transitions**
+
+    By default only those events which are associated with a transition from
+    zero to a non-zero value are saved. These include the user-defined
+    events and leading edges of pulses on the trigger line. When this
+    option is selected, all events included with the two above options are
+    saved, regardless of the type of transition indicated (zero to non-zero,
+    non-zero to another non-zero value, and non-zero value to zero).
+
+.. note:: If you have a text format event file whose content you want to include as user-defined events and create the automatic annotation file described in :ref:`BABJGEDF`, proceed as follows:
+
+- Load the event file with the option Add as user events set.
+
+- Open another data file or quit mne_browse_raw .
+
+- Optionally remove unnecessary events using the event list
+  dialog.
+
+The directory in which the raw data file resides now contains
+an annotation file which will be automatically loaded each time
+the data file is opened. A text format event file suitable for this
+purpose can be created manually, extracted from an EDF+ file using
+the ``--tal`` option in mne_edf2fiff discussed
+in :ref:`BABHDBBD`, or produced by custom software used during
+data acquisition.
+
+.. _BABCIGGH:
+
+Defining annotated events
+=========================
+
+The Windows/Show annotator... shows
+a window to add annotated user-defined events. In this window, the
+buttons in first column mark one or more selected time points with
+the event number shown in the second column with an associated comment
+specified in the third column. Marking also occurs when return is
+pressed on any of the second and third column text fields.
+
+When the dialog is brought up for the first time, the file
+``$HOME/.mne/mne_browse_raw.annot`` is consulted for the definitions
+of the second and third column values, *i.e.*,
+event numbers and comments. You can save the current definitions
+with the Save defs button and
+reload the annotation definition file with Load defs . The annotation definition file may contain comment
+lines starting with '%' or '#' and
+data lines which contain an event number and an optional comment,
+separated from the event number by a colon.
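+
+For illustration, an annotation definition file could look like this;
+the event numbers and comments are arbitrary::
+
+    % Annotator definitions: <event number>[:<comment>]
+    128:eyes closed
+    129:eyes open
+    130:possible seizure activity
+    # an event number without a comment is also allowed
+    131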
+
+.. note:: If you want to add a user-defined event without a comment, you can use the Picked to item in the tool bar, described in :ref:`CACDFGAE`.
+
+.. _CACBCEGC:
+
+Event files
+===========
+
+A text format event file contains information about transitions
+on the digital trigger line in a raw data file. Any lines beginning
+with the pound sign (``#`` ) are considered as comments.
+The format of the event file data is:
+
+ <*sample*> <*time*> <*from*> <*to*> <*text*>
+
+where
+
+** <*sample*>**
+
+    is
+    the sample number. This sample number takes into account the initial
+    empty space in a raw data file as indicated by the FIFF_FIRST_SAMPLE
+    and/or FIFF_DATA_SKIP tags in the beginning of raw data. Therefore,
+    the event file contents are independent of the Keep initial skip setting in the open dialog.
+
+** <*time*>**
+
+    is
+    the time from the beginning of the file to this sample in seconds.
+
+** <*from*>**
+
+    is
+    the value of the digital trigger channel at <*sample*> -1.
+
+** <*to*>**
+
+    is
+    the value of the digital trigger channel at <*sample*> .
+
+** <*text*>**
+
+    is
+    an optional annotation associated with the event. This comment will
+    be displayed in the event list and on the message line when you
+    move to an event.
+
+When an event file is read back, the <*sample*> value
+will be primarily used to specify the time. If you want the <*time*> to
+be converted to the sample number instead, specify a negative value
+for <*sample*> .
+
+Each event file starts with a "pseudo event" where
+both <*from*> and <*to*> fields
+are equal to zero.
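+
+This format is easy to process with scripts. The following Python
+sketch, illustrative only, reads a text event file according to the
+description above, skipping comments and the initial pseudo event::
+
+    def read_event_file(fname):
+        """Read a text-format event file into a list of tuples."""
+        events = []
+        with open(fname) as fid:
+            for line in fid:
+                line = line.strip()
+                if not line or line.startswith('#'):
+                    continue
+                fields = line.split(None, 4)
+                sample, time = int(fields[0]), float(fields[1])
+                from_, to = int(fields[2]), int(fields[3])
+                comment = fields[4] if len(fields) > 4 else ''
+                if from_ == 0 and to == 0:
+                    continue   # the initial pseudo event
+                events.append((sample, time, from_, to, comment))
+        return events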
+
+.. warning:: In previous versions of the MNE software, the event files did not contain the initial empty pseudo event. In addition, the sample numbers did not take into account the initial empty space in the raw data files. The present version of MNE software is still backwards compatible with the old version of the event files and interprets the sample numbers appropriately. However, the recognition of the old and new event file formats depends on the initial pseudo event.
+
+.. note:: If you have created Matlab, Excel or other scripts to process the event files, they may need revision to include the initial pseudo event in order for mne_browse_raw and mne_process_raw to recognize the edited event files correctly.
+
+.. note:: Events can also be stored in fif format. This format can be read and written with the Matlab toolbox functions mne_read_events and mne_write_events.
+
+.. _CACDFGAE:
+
+The tool bar
+############
+
+.. _CACCFEGH:
+
+.. figure:: mne_browse_raw/toolbar.png
+    :alt: tool bar controls
+    :figwidth: 100%
+    :width: 100%
+
+    The tool bar controls.
+
+The tool bar controls are shown in :ref:`CACCFEGH`.
+They perform the following functions:
+
+**start/s**
+
+    Allows specification of the starting time of the display as a numeric value.
+    Note that this value will be rounded to the time of the nearest sample
+    when you press return. If you click on this text field, you can also
+    change the time with the up and down cursor keys (1/10 of the window
+    size), and the page up and down (or control up and down cursor)
+    keys (one window size).
+
+**Remove dc**
+
+    Remove the dc offset from the signals for display. This does not affect
+    the data used for averaging and noise-covariance matrix estimation.
+
+**Keep dc**
+
+    Return to the original true dc levels.
+
+**Jump to**
+
+    Enter a value of a trigger to be searched for. The arrow buttons
+    jump to the next event of this kind. A selection is also automatically
+    created and displayed as requested in the scales dialog, see :ref:`CACBEHCD`. If the '+' button is active,
+    previous selections are kept, otherwise they are cleared.
+
+**Picked to**
+
+    Create user-defined events with this event number at all picked time points.
+    It is also possible to add annotated user events with the help of the
+    annotation dialog. For further information, see :ref:`BABDFAHA`.
+
+**Forget**
+
+    Forget desired user events.
+
+**Average**
+
+    Compute an average time-locked to this event.
+
+The tool bar status line shows the starting time and the
+length of the window in seconds as well as the cursor time point.
+The dates and times in parentheses show the corresponding wall-clock
+times in the time zone where mne_browse_raw is
+run.
+
+.. note:: The wall-clock times shown are based on the information in the fif file and may be offset from the true acquisition time by about 1 second. This offset is constant throughout the file. The times reflect the time zone setting of the computer used to analyze the data rather than the one used to acquire them.
+
+.. _CACDADBA:
+
+Topographical data displays
+###########################
+
+Segments of data can be shown in a topographical layout in the Full view window, which can be requested from the Scales dialog
+or from the Windows menu. Another
+similar display is available to show the averaged data. The topographical
+layout to use is selected from Adjust/Full view layout... ,
+which brings up a window with a list of available layouts. The default
+layouts reside in ``$MNE_ROOT/share/mne/mne_analyze/lout`` .
+In addition any layout files residing in ``$HOME/.mne/lout`` are listed.
+The format of the layout files is the same as for the Neuromag programs xplotter and xfit .
+A custom EEG layout can be easily created with the mne_make_eeg_layout utility,
+see :ref:`CHDDGDJA`.
+
+Several actions can be performed with the mouse in the topographical data
+display:
+
+**Left button**
+
+    Shows the time and the channel name at the cursor at the bottom
+    of the window.
+
+**Left button drag with shift key**
+
+    Enlarge the view to contain only channels in the selected area.
+
+**Right button**
+
+    Brings up a popup menu which gives a choice of graphics output formats
+    for the current topographical display. Best quality is achieved
+    with the Illustrator format. This format has the benefit that it
+    is object oriented and can be edited in Adobe Illustrator.
+
+**Middle button**
+
+    Drag and drop graphics to one of the cliplab view
+    areas.
+
+.. note:: The cliplab drag-and-drop functionality requires that you have the proprietary Elekta-Neuromag analysis software installed. mne_browse_raw is compatible with cliplab versions 1.2.13 and later.
+
+.. note:: The graphics output files will contain a text line stating the time and vertical scales if the zero level/time and/or viewport frames have been switched on in the scales dialog, see :ref:`CACBEHCD`.
+
+.. _CACBBDGC:
+
+Description files for off-line averaging
+########################################
+
+For averaging tasks more complex than those involving only
+one trigger, the averaging parameters are specified with the help of
+a text file. This section describes the format of this file. A sample
+averaging file can be found in ``$MNE_ROOT/share/mne/mne_browse_raw/templates`` .
+
+Overall format
+==============
+
+Any line beginning with the pound sign (#) in this description
+file is a comment. Each parameter in the description file is defined
+by a keyword usually followed by a value. Text values consisting
+of multiple words, separated by spaces, must be included in quotation
+marks. The case of the keywords in the file does not matter. The
+ending ``.ave`` is suggested for the average description
+files.
+
+The general format of the description file is::
+
+    average {
+        <common parameters>
+        category {
+            <category definition parameters>
+        }
+        
+        ...
+    }
+
+The file may contain arbitrarily many categories. The word ``category`` is interchangeable
+with ``condition``.
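+
+For concreteness, a minimal two-condition description file might look
+as follows; the file names, event numbers, time limits, and rejection
+values are purely illustrative, and the parameters are described in the
+following sections::
+
+    average {
+        outfile     sample-ave.fif
+        logfile     sample-ave.log
+        gradReject  3000e-13
+        magReject   4e-12
+        eegReject   100e-6
+        name        "Sample averages"
+
+        category {
+            event   1
+            tmin    -0.2
+            tmax    0.5
+            bmin    -0.2
+            bmax    0.0
+            name    "Condition 1"
+        }
+        category {
+            event   2
+            tmin    -0.2
+            tmax    0.5
+            bmin    -0.2
+            bmax    0.0
+            name    "Condition 2"
+        }
+    }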
+
+.. warning:: Due to a bug that existed in some versions of the Neuromag acquisition software, the trigger line 8 is incorrectly decoded on trigger channel STI 014. This can be fixed by running mne_fix_stim14 on the raw data file before using mne_browse_raw or mne_process_raw . The bug has been fixed on Nov. 10, 2005.
+
+.. _BABIHFBI:
+
+Common parameters
+=================
+
+The average definition starts with the common parameters.
+They include:
+
+**outfile <*name*>**
+
+    The name of the file where the averages are to be stored. In interactive
+    mode, this can be omitted. The resulting average structure can be
+    viewed and stored from the Manage averages window.
+
+**eventfile <*name*>**
+
+    Optional file to contain event specifications. If this file is present, the
+    trigger events in the raw data file are ignored and this file is
+    consulted instead. The event file format is recognized from the
+    file name: if it ends with ``.fif`` , the file is assumed
+    to be in fif format, otherwise a text file is expected. The text event
+    file format is described in :ref:`CACBCEGC`.
+
+**logfile <*name*>**
+
+    This optional file will contain detailed information about the averaging
+    process. In the interactive mode, the log information can be viewed
+    from the Manage averages window.
+
+**gradReject <*value / T/m*>**
+
+    Rejection limit for MEG gradiometer channels. If the peak-to-peak amplitude
+    within the extracted epoch exceeds this value on any of the gradiometer
+    channels, the epoch will be omitted from the average.
+
+**magReject <*value / T*>**
+
+    Rejection limit for MEG magnetometer and axial gradiometer channels.
+    If the peak-to-peak amplitude within the extracted epoch exceeds
+    this value on any of the magnetometer or axial gradiometer channels,
+    the epoch will be omitted from the average.
+
+**eegReject <*value / V*>**
+
+    Rejection limit for EEG channels. If the peak-to-peak amplitude within
+    the extracted epoch exceeds this value on any of the EEG channels,
+    the epoch will be omitted from the average.
+
+**eogReject <*value / V*>**
+
+    Rejection limit for EOG channels. If the peak-to-peak amplitude within
+    the extracted epoch exceeds this value on any of the EOG channels,
+    the epoch will be omitted from the average.
+
+**ecgReject <*value / V*>**
+
+    Rejection limit for ECG channels. If the peak-to-peak amplitude within
+    the extracted epoch exceeds this value on any of the ECG channels,
+    the epoch will be omitted from the average.
+
+**gradFlat <*value / T/m*>**
+
+    Signal detection criterion for MEG planar gradiometers. The peak-to-peak
+    value of all planar gradiometer signals must exceed this value
+    for the epoch to be included. This criterion allows rejection of data
+    with saturated or otherwise dysfunctional channels. The default value
+    is zero, *i.e.*, no rejection.
+
+**magFlat <*value / T*>**
+
+    Signal detection criterion for MEG magnetometers and axial gradiometers
+    channels.
+
+**eegFlat <*value / V*>**
+
+    Signal detection criterion for EEG channels.
+
+**eogFlat <*value / V*>**
+
+    Signal detection criterion for EOG channels.
+
+**ecgFlat <*value / V*>**
+
+    Signal detection criterion for ECG channels.
+
+**stimIgnore <*time / s*>**
+
+    Ignore this many seconds on both sides of the trigger when considering
+    the epoch. This parameter is useful for ignoring large stimulus artefacts, *e.g.*,
+    from electrical somatosensory stimulation.
+
+**fixSkew**
+
+    Since the sampling of data and the stimulation devices are usually not
+    synchronized, all trigger input bits may not turn on at the same sample.
+    If this option is included in the off-line averaging description
+    file, the following procedure is used to counteract this: if there is
+    a transition from zero to a nonzero value on the digital trigger channel
+    at sample :math:`n`, the following sample
+    will be checked for a transition from this nonzero value to another
+    nonzero value. If such an event pair is found, the two events will
+    be jointly considered as a transition from zero to the second non-zero
+    value. With the fixSkew option, mne_browse_raw/mne_process_raw behaves
+    like the Elekta-Neuromag on-line averaging and Maxfilter (TM) software.
+    A sketch of this procedure is shown at the end of this section.
+
+**name <*text*>**
+
+    A descriptive name for this set of averages. If the name contains multiple
+    words, enclose it in quotation marks "like this".
+    The name will appear in the average manager window listing in the
+    interactive version of the program and as a comment in the processed
+    data section in the output file.
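+
+The following Python sketch illustrates the fixSkew procedure described
+above on a list of (sample, from, to) trigger transitions; it is a
+simplified illustration, not the actual implementation::
+
+    def fix_skew(transitions):
+        """Merge transition pairs caused by one-sample trigger skew."""
+        fixed, k = [], 0
+        while k < len(transitions):
+            sample, from_, to = transitions[k]
+            if (from_ == 0 and to != 0 and k + 1 < len(transitions)
+                    and transitions[k + 1][0] == sample + 1
+                    and transitions[k + 1][1] == to
+                    and transitions[k + 1][2] != 0):
+                # Two bits turned on one sample apart: treat the pair as
+                # one transition from zero to the final nonzero value.
+                fixed.append((sample, 0, transitions[k + 1][2]))
+                k += 2
+            else:
+                fixed.append((sample, from_, to))
+                k += 1
+        return fixed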
+
+.. _CACHACHH:
+
+Category definition
+===================
+
+A category (condition) is defined by the parameters listed
+in this section.
+
+**event <*number*>**
+
+    The zero time point of an epoch to be averaged is defined by a transition
+    from zero to this number on the digital trigger channel. The interpretation
+    of the values on the trigger channel can be further modified by
+    the ignore and mask keywords. If multiple event parameters are present
+    for a category, all specified events will be included in the average.
+
+**ignore <*number*>**
+
+    If this parameter is specified, the selected bits of the trigger channel
+    values are masked out (set to zero) prior to checking for the existence
+    of an event. For example, to ignore the values of trigger input lines three
+    and eight, specify ``ignore 132`` (:math:`2^2 + 2^7 = 132`). The bit
+    arithmetic is illustrated in the sketch after this list.
+
+**mask <*number*>**
+
+    Works similarly to ignore except that a mask specifies the trigger channel
+    bits to be included. For example, to look at trigger input lines
+    one to three only, ignoring others, specify ``mask 7``
+    (:math:`2^0 + 2^1 + 2^2 = 7`).
+
+**prevevent <*number*>**
+
+    Specifies the event that is required to occur immediately before
+    the event(s) specified with event parameter(s)
+    in order for averaging to occur. Only one previous event number
+    can be specified.
+
+**prevignore <*number*>**
+
+    Works like ignore but for the
+    events specified with prevevent .
+    If prevignore and prevmask are
+    missing, the mask implied by ignore and mask is
+    applied to prevevent as well.
+
+**prevmask <*number*>**
+
+    Works like mask but for the events
+    specified with prevevent . If prevignore and prevmask are
+    missing, the mask implied by ignore and mask is
+    applied to prevevent as well.
+
+**nextevent <*number*>**
+
+    Specifies the event that is required to occur immediately after
+    the event(s) specified with event parameter(s)
+    in order for averaging to occur. Only one next event number can
+    be specified.
+
+**nextignore <*number*>**
+
+    Works like ignore but for the
+    events specified with nextevent .
+    If nextignore and nextmask are
+    missing, the mask implied by ignore and mask is
+    applied to nextevent as well.
+
+**nextmask <*number*>**
+
+    Works like mask but for the events
+    specified with nextevent . If nextignore and nextmask are
+    missing, the mask implied by ignore and mask is
+    applied to nextevent as well.
+
+**delay <*time / s*>**
+
+    Adds a delay to the time of the occurrence of an event. Therefore,
+    if this parameter is positive, the zero time point of the epoch
+    will be later than the time of the event and, correspondingly, if
+    the parameter is negative, the zero time point of the epoch will
+    be earlier than the event. By default, there will be no delay.
+
+**tmin <*time / s*>**
+
+    Beginning time point of the epoch.
+
+**tmax <*time / s*>**
+
+    End time point of the epoch.
+
+**bmin <*time / s*>**
+
+    Beginning time point of the baseline. If both ``bmin`` and ``bmax`` parameters
+    are present, the baseline defined by this time range is subtracted
+    from each epoch before they are added to the average.
+
+**basemin <*time / s*>**
+
+    Synonym for bmin.
+
+**bmax <*time / s*>**
+
+    End time point of the baseline.
+
+**basemax <*time / s*>**
+
+    Synonym for bmax.
+
+**name <*text*>**
+
+    A descriptive name for this category. If the name contains multiple words,
+    enclose it in quotation marks "like this". The
+    name will appear in the average manager window listing in the interactive
+    version of the program and as a comment in the averaging category section
+    of the output file.
+
+**abs**
+
+    Calculate the absolute values of the data in the epoch before adding it to
+    the average.
+
+**stderr**
+
+    The standard error of mean will be computed for this category and included
+    in the output fif file.
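+
+The ignore and mask parameters operate on the trigger channel value as
+bit masks: trigger input line :math:`k` corresponds to bit :math:`2^{k-1}`.
+A short Python illustration with a hypothetical trigger value::
+
+    value = 134             # lines two, three, and eight on: 2 + 4 + 128
+
+    ignore = 132            # 2**2 + 2**7: drop lines three and eight
+    print(value & ~ignore)  # 2: only line two remains
+
+    mask = 7                # 2**0 + 2**1 + 2**2: keep lines one to three
+    print(value & mask)     # 6: lines two and three remain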
+
+.. note:: Specification of the baseline limits no longer implies the estimation of the standard error of mean. Instead, the stderr parameter is required to invoke this option.
+
+.. _CACEBACG:
+
+Description files for covariance matrix estimation
+##################################################
+
+Covariance matrix estimation is controlled by another description
+file, very similar to the average definition. An example of a covariance
+description file can be found in the directory ``$MNE_ROOT/share/mne/mne_browse_raw/templates`` .
+
+Overall format
+==============
+
+Any line beginning with the pound sign (#) in this description
+file is a comment. Each parameter in the description file is defined
+by a keyword usually followed by a value. Text values consisting
+of multiple words, separated by spaces, must be included in quotation
+marks. The case of the keywords in the file does not matter. The
+ending ``.cov`` is suggested for the covariance-matrix
+description files.
+
+The general format of the description file is::
+
+    cov {
+        <*common parameters*>
+        def {
+            <*covariance definition parameters*>
+        }
+        ...
+    }
+
+The file may contain arbitrarily many covariance definitions,
+starting with ``def`` .
+
+.. warning:: Due to a bug that existed in some versions of the Neuromag acquisition software, the trigger line 8 is incorrectly decoded on trigger channel STI 014. This can be fixed by running mne_fix_stim14 on the raw data file before using mne_browse_raw or mne_process_raw . This bug has been fixed in the acquisition software at the Martinos Center on Nov. 10, 2005.
+
+.. _BABCGEJE:
+
+Common parameters
+=================
+
+The covariance matrix definition starts with the common parameters.
+They include:
+
+**outfile <*name*>**
+
+    The name of the file where the covariance matrix is to be stored. This
+    parameter is mandatory.
+
+**eventfile <*name*>**
+
+    Optional file to contain event specifications. This file can be
+    either in fif or text format (see :ref:`CACBCEGC`). The event
+    file format is recognized from the file name: if it ends with ``.fif`` ,
+    the file is assumed to be in fif format, otherwise a text file is
+    expected. If this parameter is present, the trigger events in the
+    raw data file are ignored and this event file is consulted instead.
+    The event file format is described in :ref:`CACBCEGC`.
+
+**logfile <*name*>**
+
+    This optional file will contain detailed information about the covariance
+    matrix estimation process. In the interactive mode, the log information
+    can be viewed from the Manage averages window.
+
+**gradReject <*value / T/m*>**
+
+    Rejection limit for MEG gradiometer channels. If the peak-to-peak amplitude
+    within the extracted epoch exceeds this value on any of the gradiometer
+    channels, the epoch will be omitted from the average.
+
+**magReject <*value / T*>**
+
+    Rejection limit for MEG magnetometer and axial gradiometer channels.
+    If the peak-to-peak amplitude within the extracted epoch exceeds
+    this value on any of the magnetometer or axial gradiometer channels,
+    the epoch will be omitted from the average.
+
+**eegReject <*value / V*>**
+
+    Rejection limit for EEG channels. If the peak-to-peak amplitude within
+    the extracted epoch exceeds this value on any of the EEG channels,
+    the epoch will be omitted from the average.
+
+**eogReject <*value / V*>**
+
+    Rejection limit for EOG channels. If the peak-to-peak amplitude within
+    the extracted epoch exceeds this value on any of the EOG channels,
+    the epoch will be omitted from the average.
+
+**ecgReject <*value / V*>**
+
+    Rejection limit for ECG channels. If the peak-to-peak amplitude within
+    the extracted epoch exceeds this value on any of the ECG channels,
+    the epoch will be omitted from the average.
+
+**gradFlat <*value / T/m*>**
+
+    Signal detection criterion for MEG planar gradiometers. The peak-to-peak
+    value of all planar gradiometer signals must exceed this value
+    for the epoch to be included. This criterion allows rejection of data
+    with saturated or otherwise dysfunctional channels. The default value
+    is zero, *i.e.*, no rejection.
+
+**magFlat <*value / T*>**
+
+    Signal detection criterion for MEG magnetometers and axial gradiometers
+    channels.
+
+**eegFlat <*value / V*>**
+
+    Signal detection criterion for EEG channels.
+
+**eogFlat <*value / V*>**
+
+    Signal detection criterion for EOG channels.
+
+**ecgFlat <*value / V*>**
+
+    Signal detection criterion for ECG channels.
+
+**stimIgnore <*time / s*>**
+
+    Ignore this many seconds on both sides of the trigger when considering
+    the epoch. This parameter is useful for ignoring large stimulus artefacts, *e.g.*,
+    from electrical somatosensory stimulation.
+
+**fixSkew**
+
+    Since the sampling of data and the stimulation devices are usually not
+    synchronized, all trigger input bits may not turn on at the same sample.
+    If this option is included in the off-line averaging description
+    file, the following procedure is used to counteract this: if there is
+    a transition from zero to a nonzero value on the digital trigger channel
+    at sample :math:`n`, the following sample
+    will be checked for a transition from this nonzero value to another
+    nonzero value. If such an event pair is found, the two events will
+    be jointly considered as a transition from zero to the second non-zero
+    value.
+
+**keepsamplemean**
+
+    The means at individual samples will *not* be
+    subtracted in the estimation of the covariance matrix. For details,
+    see :ref:`BABHJDEJ`. This parameter is effective only for
+    estimating the covariance matrix from epochs. It is recommended
+    to specify this option. However, for compatibility with previous
+    MNE releases, keepsamplemean is
+    not on by default.
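+
+As a sketch of what keepsamplemean controls: with the epochs stacked
+in an array, the covariance can be accumulated with or without first
+subtracting the mean at each sample. The NumPy fragment below is a
+hypothetical illustration; the exact normalization is discussed
+in :ref:`BABHJDEJ`::
+
+    import numpy as np
+
+    def raw_epoch_covariance(epochs, keepsamplemean=True):
+        """Estimate a channel covariance from stacked epochs.
+
+        epochs : (n_epochs, n_channels, n_samples) array
+        """
+        if not keepsamplemean:
+            # Subtract the across-epoch mean at each individual sample.
+            epochs = epochs - epochs.mean(axis=0, keepdims=True)
+        n_epochs, n_channels, n_samples = epochs.shape
+        data = epochs.transpose(1, 0, 2).reshape(n_channels, -1)
+        return data @ data.T / data.shape[1]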
+
+.. _BABECIAH:
+
+Covariance definitions
+======================
+
+The covariance definitions starting with ``def`` specify the
+epochs to be included in the estimation of the covariance matrix.
+
+**event <*number*>**
+
+    The zero time point of an epoch to be averaged is defined by a transition
+    from zero to this number on the digital trigger channel. The interpretation
+    of the values on the trigger channel can be further modified by
+    the ignore and mask keywords. If multiple event parameters are present
+    in a definition, all specified events will be included. If the event
+    parameter is missing or set to zero, the covariance matrix is computed
+    over a section of the raw data, defined by the ``tmin`` and ``tmax`` parameters.
+
+**ignore <*number*>**
+
+    If this parameter is specified, the selected bits of the trigger channel
+    values are masked out (set to zero) prior to checking for the existence of
+    an event. For example, to ignore the values of trigger input lines three
+    and eight, specify ``ignore 132`` (:math:`2^2 + 2^7 = 132`).
+
+**mask <*number*>**
+
+    Works similarly to ignore except that a mask specifies the trigger channel
+    bits to be included. For example, to look at trigger input lines
+    one to three only, ignoring others, specify ``mask 7``
+    (:math:`2^0 + 2^1 + 2^2 = 7`).
+
+**delay <*time / s*>**
+
+    Adds a delay to the time of the occurrence of an event. Therefore,
+    if this parameter is positive, the zero time point of the epoch
+    will be later than the time of the event and, correspondingly, if
+    the parameter is negative, the zero time point of the epoch will
+    be earlier than the time of the event. By default, there will be
+    no delay.
+
+**tmin <*time / s*>**
+
+    Beginning time point of the epoch. If the ``event`` parameter
+    is zero or missing, this defines the beginning point of the raw
+    data range to be included.
+
+**tmax <*time / s*>**
+
+    End time point of the epoch. If the ``event`` parameter
+    is zero or missing, this defines the end point of the raw data range
+    to be included.
+
+**bmin <*time / s*>**
+
+    It is possible to remove a baseline from the epochs before they
+    are included in the covariance matrix estimation. This parameter
+    defines the starting point of the baseline. This feature can be
+    employed to avoid overestimation of noise in the presence of low-frequency drifts.
+    Setting ``bmin`` and ``bmax`` is always recommended
+    for epoch-based covariance matrix estimation.
+
+**basemin <*time / s*>**
+
+    Synonym for bmin.
+
+**bmax <*time / s*>**
+
+    End time point of the baseline, see above.
+
+**basemax <*time / s*>**
+
+    Synonym for bmax.
+
+.. _CACJFADF:
+
+Managing averages
+#################
+
+This selection pops up a dialog which allows the management
+of computed averages. The controls in the dialog, shown in :ref:`CACEFABD`, allow the following:
+
+- Select which categories (conditions)
+  are displayed in the average view.
+
+- Select the colors of the traces.
+
+- View the log information accumulated in the averaging process.
+
+- Save the averaged data.
+
+- Set the active vectors for signal-space projection if
+  the data were loaded from a file.
+
+- Set the current software gradient compensation for data
+  loaded from a file.
+
+.. _CACEFABD:
+
+.. figure:: mne_browse_raw/manage_averages_dialog.png
+    :alt: dialog for managing available averages
+
+    The dialog for managing available averages.
+
+In the example of :ref:`CACEFABD`, the first item
+is an average computed within mne_browse_raw,
+the second one contains data loaded from a file with signal-space
+projection data available, the third one demonstrates multiple data
+sets loaded from a file with neither projection nor software gradient
+compensation available, and the last one is a data set loaded from a file
+with software gradient compensation data present. Note that this
+is a scrolled window and some of the loaded data may be below
+or above the current view area.
+
+.. _CACCHABI:
+
+The Signal-Space Projection (SSP) method
+########################################
+
+The Signal-Space Projection (SSP) method is one approach to the rejection
+of external disturbances in software. This section presents some
+relevant details of the method.
+
+General concepts
+================
+
+Unlike many other noise-cancellation approaches, SSP does
+not require additional reference sensors to record the disturbance
+fields. Instead, SSP relies on the fact that the magnetic field
+distributions generated by the sources in the brain have spatial
+distributions sufficiently different from those generated by external
+noise sources. Furthermore, it is implicitly assumed that the linear
+space spanned by the significant external noise patterns has a low
+dimension.
+
+Without loss of generality we can always decompose any :math:`n`-channel
+measurement :math:`b(t)` into its signal and
+noise components as
+
+.. math::    b(t) = b_s(t) + b_n(t)
+
+Further, if we know that :math:`b_n(t)` is
+well characterized by a few field patterns :math:`b_1 \dotso b_m`,
+we can express the disturbance as
+
+.. math::    b_n(t) = Uc_n(t) + e(t)\ ,
+
+where the columns of :math:`U` constitute
+an orthonormal basis for :math:`b_1 \dotso b_m`, :math:`c_n(t)` is
+an :math:`m`-component column vector, and
+the error term :math:`e(t)` is small and does
+not exhibit any consistent spatial distributions over time, *i.e.*, :math:`C_e = E \{e e^T\} = I`.
+Subsequently, we will call the column space of :math:`U` the
+noise subspace. The basic idea of SSP is that we can actually find
+a small basis set :math:`b_1 \dotso b_m` such that the
+conditions described above are satisfied. We can now construct the
+orthogonal complement operator
+
+.. math::    P_{\perp} = I - UU^T
+
+and apply it to :math:`b(t)` yielding
+
+.. math::    P_{\perp}b(t) \approx P_{\perp}b_s(t)\ ,
+
+since :math:`P_{\perp}b_n(t) = P_{\perp}Uc_n(t) \approx 0`. The projection operator :math:`P_{\perp}` is
+called the signal-space projection operator and generally provides
+considerable rejection of noise, suppressing external disturbances
+by a factor of 10 or more. The effectiveness of SSP depends on two
+factors:
+
+- The basis set :math:`b_1 \dotso b_m` should
+  be able to characterize the disturbance field patterns completely
+  and
+
+- The angles between the noise subspace spanned by :math:`b_1 \dotso b_m` and the
+  signal vectors :math:`b_s(t)` should be as close
+  to :math:`\pi / 2` as possible.
+
+If the first requirement is not satisfied, some noise will
+leak through because :math:`P_{\perp}b_n(t) \neq 0`. If any
+of the brain signal vectors :math:`b_s(t)` is
+close to the noise subspace, not only the noise but also the signal
+will be attenuated by the application of :math:`P_{\perp}` and,
+consequently, there might be little gain in signal-to-noise ratio. :ref:`CACFGIEC` demonstrates the effect of SSP on the Vectorview
+magnetometer data. After the elimination of a three-dimensional
+noise subspace, the absolute value of the noise is dampened approximately
+by a factor of 10 and the covariance matrix becomes diagonally dominant.
+
+Since the signal-space projection modifies the signal vectors
+originating in the brain, it is necessary to apply the projection
+to the forward solution in the course of inverse computations. This
+is accomplished by mne_inverse_operator as
+described in :ref:`CBBDDBGF`. For more information on SSP,
+please consult the references listed in :ref:`CEGIEEBB`.
+
+.. _CACFGIEC:
+
+.. figure:: pics/proj-off-on.png
+    :alt: example of the effect of SSP
+
+    An example of the effect of SSP
+    
+    The covariance matrix :math:`C_n` of noise data on the 102 Vectorview magnetometers was computed (a) before and (b) after the application of SSP with three-dimensional noise subspace. The plotted quantity is :math:`\sqrt {|(C_n)_{jk}|}`. Note that the vertical scale in (b) is ten times smaller than in (a).
+
+.. _BABFFCHF:
+
+Estimation of the noise subspace
+================================
+
+As described above, application of SSP requires the estimation
+of the signal vectors :math:`b_1 \dotso b_m` constituting
+the noise subspace. The most common approach, also implemented in mne_browse_raw, is
+to compute a covariance matrix of empty room data, compute its eigenvalue
+decomposition, and employ the eigenvectors corresponding to the
+highest eigenvalues as a basis for the noise subspace. It is also
+customary to use a separate set of vectors for magnetometers and
+gradiometers in the Vectorview system.
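+
+A minimal numpy sketch of this procedure, assuming ``C`` is a covariance
+matrix estimated from empty room data and ``m`` is the desired noise
+subspace dimension (both names are hypothetical), could read::
+
+    import numpy as np
+
+    def ssp_projector(C, m):
+        """Projector on the orthogonal complement of the noise subspace."""
+        eigval, eigvec = np.linalg.eigh(C)   # eigenvalues in ascending order
+        U = eigvec[:, -m:]                   # m largest eigenvectors
+        return np.eye(C.shape[0]) - U @ U.T  # P = I - U U^T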
+
+EEG average electrode reference
+===============================
+
+In the computation of EEG-based source estimates, the MNE
+software employs the average-electrode reference, which means that
+the average over all electrode signals :math:`v_1 \dotso v_p` is
+subtracted from each :math:`v_j`:
+
+.. math::    v_{j}' = v_j - \frac{1}{p} \sum_{k} v_k\ .
+
+It is easy to see that the above equation actually corresponds
+to the projection:
+
+.. math::    v' = (I - uu^T)v\ ,
+
+where
+
+.. math::    u = \frac{1}{\sqrt{p}}[1\ ...\ 1]^T\ .
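+
+The equivalence is easy to verify numerically; a small numpy sketch with a
+hypothetical electrode count::
+
+    import numpy as np
+
+    p = 60                                    # number of EEG electrodes
+    v = np.random.randn(p)                    # one sample of EEG data
+    u = np.ones((p, 1)) / np.sqrt(p)
+    P = np.eye(p) - u @ u.T
+    assert np.allclose(P @ v, v - v.mean())   # projection == average reference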
+
+.. _CACHAAEG:
+
+Covariance matrix estimation
+############################
+
+This section describes how the covariance matrices are computed
+for raw data and epochs.
+
+Continuous raw data
+===================
+
+If a covariance matrix is computed from raw data, the data
+are checked for artefacts in 200-sample pieces. Let us collect the
+accepted :math:`M` samples from all channels into the
+vectors :math:`s_j,\ j = 1, \dotsc ,M`. The estimate of the covariance
+matrix is then computed as:
+
+.. math::    \hat{C} = \frac{1}{M - 1} \sum_{j = 1}^M {(s_j - \bar{s})(s_j - \bar{s})}^T
+
+where
+
+.. math::    \bar{s} = \frac{1}{M} \sum_{j = 1}^M s_j
+
+is the average of the signals over all times. Note that no
+attempt is made to correct for low-frequency drifts in the data.
+If the contribution of any frequency band is not desired in the
+covariance matrix estimate, a suitable band-pass filter should be
+applied.
+
+For actual computations, it is convenient to rewrite the
+expression for the covariance matrix as
+
+.. math::    \hat{C} = \frac{1}{M - 1} \sum_{j = 1}^M {s_j s_j^T} - \frac{M}{M - 1} \bar{s} \bar{s}^T
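+
+In numpy terms the estimate can be accumulated one accepted piece at a
+time using the rewritten expression; ``pieces``, an iterable of
+(channels x samples) arrays, is hypothetical::
+
+    import numpy as np
+
+    def raw_covariance(pieces, n_channels):
+        SS = np.zeros((n_channels, n_channels))   # running sum of s s^T
+        S = np.zeros(n_channels)                  # running sum of s
+        M = 0                                     # number of samples
+        for x in pieces:
+            SS += x @ x.T
+            S += x.sum(axis=1)
+            M += x.shape[1]
+        s_bar = S / M
+        return SS / (M - 1) - (M / (M - 1)) * np.outer(s_bar, s_bar)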
+
+.. _BABHJDEJ:
+
+Epochs
+======
+
+The calculation of the covariance matrix is slightly more
+complicated in the epoch mode. If the ``bmin`` and ``bmax`` parameters
+are specified in the covariance matrix description file (see :ref:`BABECIAH`), baseline correction is first applied to each
+epoch.
+
+Let the vectors
+
+.. math::    s_{rpj}\ ;\ p = 1 \dotsc P_r\ ;\ j = 1 \dotsc N_r\ ;\ r = 1 \dotsc R 
+
+be the samples from all channels in the baseline corrected epochs
+used to calculate the covariance matrix. In the above, :math:`P_r` is
+the number of accepted epochs in category :math:`r`, :math:`N_r` is
+the number of samples in the epochs of category :math:`r`,
+and :math:`R` is the number of categories.
+
+If the recommended ``keepsamplemean`` option
+is specified in the covariance matrix definition file, the baseline
+correction is applied to the epochs but the means at individual
+samples are not subtracted. Thus the covariance matrix will be computed
+as:
+
+.. math::    \hat{C} = \frac{1}{N_C} \sum_{r,p,j} {s_{rpj} s_{rpj}^T}\ ,
+
+where
+
+.. math::    N_C = \sum_{r = 1}^R N_r P_r\ .
+
+If keepsamplemean is *not* specified,
+we estimate the covariance matrix as
+
+.. math::    \hat{C} = \frac{1}{N_C} \sum_{r = 1}^R \sum_{j = 1}^{N_r} \sum_{p = 1}^{P_r} {(s_{rpj} - \bar{s_{rj}})(s_{rpj} - \bar{s_{rj}})^T}\ ,
+
+where
+
+.. math::    \bar{s_{rj}} = \frac{1}{P_r} \sum_{p = 1}^{P_r} s_{rpj}
+
+and
+
+.. math::    N_C = \sum_{r = 1}^R {N_r (P_r - 1)}\ ,
+
+which reflects the fact that :math:`N_r` means
+are computed for category :math:`r`. It
+is easy to see that the expression for the covariance matrix estimate
+can be cast into a more convenient form
+
+.. math::    \hat{C} = \frac{1}{N_C} \sum_{r,p,j} {s_{rpj} s_{rpj}^T} - \frac{1}{N_C} \sum_r P_r \sum_j {\bar{s_{rj}} \bar{s_{rj}}^T}\ .
+
+Subtraction of the means at individual samples is useful
+if it can be expected that the evoked response from the previous stimulus
+extends into the baseline period of the next one.
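+
+Both variants are easy to express for a single category; in this sketch
+``epochs`` is a hypothetical array of shape (epochs, channels, samples)
+containing the baseline-corrected data::
+
+    import numpy as np
+
+    def epoch_covariance(epochs, keepsamplemean=True):
+        P, n_ch, N = epochs.shape
+        if not keepsamplemean:
+            epochs = epochs - epochs.mean(axis=0)   # subtract sample means
+        S = epochs.transpose(1, 0, 2).reshape(n_ch, P * N)
+        N_C = N * P if keepsamplemean else N * (P - 1)
+        return S @ S.T / N_C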
+
+Combination of covariance matrix estimates
+==========================================
+
+Let us assume that we have computed multiple covariance matrix
+estimates :math:`\hat{C_1} \dotso \hat{C_Q}` with corresponding degrees
+of freedom :math:`N_1 \dotso N_Q`. We can combine these
+matrices together as
+
+.. math::    C = \sum_q {\alpha_q \hat{C}_q}\ ,
+
+where
+
+.. math::    \alpha_q = \frac{N_q}{\sum_q {N_q}}\ .
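+
+For example, in numpy (the helper function is hypothetical)::
+
+    import numpy as np
+
+    def combine_covariances(C_list, dof_list):
+        """Weight each covariance estimate by its degrees of freedom."""
+        N = np.asarray(dof_list, dtype=float)
+        alpha = N / N.sum()
+        return sum(a * C for a, C in zip(alpha, C_list))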
+
+SSP information included with covariance matrices
+=================================================
+
+If a signal-space projection was active when a covariance matrix
+was calculated, information about the projections applied is included
+with the covariance matrix when it is saved. These projection data
+are read by mne_inverse_operator and
+applied to the forward solution as well, as appropriate. Inclusion
+of the projections in the covariance matrix limits the possibilities
+to use the ``--bad`` and ``--proj`` options in mne_inverse_operator,
+see :ref:`CBBDDBGF`.
+
+.. _CACGHEGC:
+
+Interacting with mne_analyze
+############################
+
+To facilitate interactive analysis of raw data, mne_browse_raw can
+run mne_analyze as a child process.
+In this mode, mne_analyze is "remote controlled" by mne_browse_raw and
+will also send replies to mne_browse_raw to
+keep the two programs synchronized. A practical application of this
+communication is to instantly view field or potential maps and
+cortically-constrained source estimates computed from raw data.
+
+The subordinate mne_analyze is
+started and stopped from Start mne_analyze and Quit mne_analyze in the Windows menu,
+respectively. The following settings are communicated between the
+two processes:
+
+**The raw data file**
+
+    If a new raw data file is opened and a subordinate mne_analyze is active,
+    the name of the raw data file is communicated to mne_analyze and
+    a simplified version of the open dialog appears in mne_analyze allowing
+    selection of an inverse operator or a MEG/MRI coordinate transformation.
+    If a raw data file is already open in mne_browse_raw when mne_analyze is
+    started, the open dialog appears immediately.
+
+**Time point**
+
+    When a new time point is selected in mne_browse_raw, the mne_analyze time
+    point selection is updated accordingly. Time point selection in mne_analyze is
+    not transferred to mne_browse_raw .
+
+**Scales**
+
+    The vertical scales are kept synchronized between the two programs.
+    In addition, the settings of the sample time limits are communicated
+    from mne_browse_raw to mne_analyze .
+
+**Filter**
+
+    The filter settings are kept synchronized.
diff --git a/doc/source/manual/convert.rst b/doc/source/manual/convert.rst
new file mode 100644
index 0000000..67c5f45
--- /dev/null
+++ b/doc/source/manual/convert.rst
@@ -0,0 +1,2312 @@
+
+
+.. _ch_convert:
+
+===============
+Data conversion
+===============
+
+Overview
+########
+
+This chapter describes the data conversion utilities included
+with the MNE software.
+
+.. _BEHIAADG:
+
+Importing data from other MEG/EEG systems
+#########################################
+
+This section describes the utilities to convert data from
+other MEG/EEG systems into the fif format.
+
+Importing 4-D Neuroimaging data
+===============================
+
+The newest version of the 4-D Magnes software includes the possibility
+to export data in the fif format. Please consult the documentation of the Magnes
+system for details of this export utility. However, the exported
+fif file does not include information about the compensation channels
+and the weights to be applied to realize software gradient compensation.
+To augment the Magnes fif files with the necessary information,
+the MNE software includes the utilities mne_insert_4D_comp, mne_create_comp_data,
+and mne_add_to_meas_info.
+
+As a result, the complete 4D Magnes data conversion process
+involves the following steps:
+
+1. Export the raw data fif file from the
+   Magnes system.
+
+2. If the data come from a Magnes system where the primary (helmet) sensors
+   are gradiometers instead of magnetometers, run mne_fix_mag_coil_types with
+   the ``--magnes`` option to correct the channel information
+   in the file, see :ref:`CHDGAAJC`.
+
+3. Export a text file containing the Magnes compensation sensor
+   data.
+
+4. Create a text file containing the appropriate compensation
+   channel weights.
+
+5. Run mne_insert_4D_comp with
+   the files created in steps 1. and 3. to merge the compensation channel
+   data with the original Magnes fif file.
+
+6. Run mne_create_comp_data on
+   the file created in step 4. to make a fif file containing the compensation
+   weights.
+
+7. Run mne_add_to_meas_info with
+   the fif files created in steps 5. and 6. as input to produce a
+   complete fif file containing all the necessary data.
+
+.. note:: Including the compensation channel data is recommended but not mandatory. If the data saved in the Magnes system are already compensated, there will be a small error in the forward calculations whose significance has not been carefully evaluated at this time.
+
+.. _BEHDEBCH:
+
+Importing CTF data
+==================
+
+The MNE software includes a utility mne_ctf2fiff,
+based on the BrainStorm Matlab code by Richard Leahy, John Mosher,
+and Sylvain Baillet, to convert data in a CTF ds directory to the fif
+format.
+
+The command-line options of mne_ctf2fiff are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---verbose**
+
+    Produce a verbose listing of the conversion process to stdout.
+
+**\---ds <*directory*>**
+
+    Read the data from this directory.
+
+**\---omit <*filename*>**
+
+    Read the names of channels to be omitted from this text file. Enter one
+    channel name per line. The names should match exactly with those
+    listed in the CTF data structures. By default, all channels are included.
+
+**\---fif <*filename*>**
+
+    The name of the output file. If the length of the raw data exceeds
+    the 2-GByte fif file limit, several output files will be produced.
+    These additional 'extension' files will be tagged
+    with ``_001.fif`` , ``_002.fif`` , etc.
+
+**\---evoked**
+
+    Produce an evoked-response fif file instead of a raw data file.
+    Each trial in the CTF data file is included as a separate category
+    (condition). The maximum number of samples in each trial is limited
+    to 25000.
+
+**\---infoonly**
+
+    Write only the measurement info to the output file, do not include data.
+
+During conversion, the following files are consulted from
+the ds directory:
+
+** <*name*> .res4**
+
+    This file contains most of the header information pertaining to the acquisition.
+
+** <*name*> .hc**
+
+    This file contains the HPI coil locations in sensor and head coordinates.
+
+** <*name*> .meg4**
+
+    This file contains the actual MEG data. If the data are split across several
+    files due to the 2-GByte file size restriction, the 'extension' files
+    are called <*name*> ``.`` <*number*> ``_meg4`` .
+
+** <*name*> .eeg**
+
+    This is an optional input file containing the EEG electrode locations. More
+    details are given below.
+
+If the <*name*> ``.eeg`` file,
+produced from the Polhemus data file with CTF software, is present,
+it is assumed to contain lines with the format:
+
+ <*number*> <*name*> <*x/cm*> <*y/cm*> <*z/cm*>
+
+The field <*number*> is
+a sequential number to be assigned to the converted data point in
+the fif file. <*name*> is either
+a name of an EEG channel, one of ``left`` , ``right`` ,
+or ``nasion`` to indicate a fiducial landmark, or any word
+which is not a name of any channel in the data. If <*name*> is
+a name of an EEG channel available in the data, the location is
+included in the Polhemus data as an EEG electrode location and
+inserted as the location of that EEG electrode. If the name is one
+of the fiducial landmark names, the point is included in the Polhemus
+data as a fiducial landmark. Otherwise, the point is included as
+an additional head surface point.
+
+The standard ``eeg`` file produced by CTF software
+does not contain the fiducial locations. If desired, they can be
+manually copied from the ``pos`` file which was the source
+of the ``eeg`` file.
+
+.. note:: In newer CTF data the EEG position information may be present in the ``res4`` file. If the ``eeg`` file is present, the positions given there take precedence over the information in the ``res4`` file.
+
+.. note:: mne_ctf2fiff converts both epoch mode and continuous raw data files into raw data fif files. It is not advisable to use epoch mode files with time gaps between the epochs because the data will be discontinuous in the resulting fif file, with jumps at the junctions between epochs. These discontinuities produce artefacts if the raw data is filtered in mne_browse_raw, mne_process_raw, or graph.
+
+.. note:: The conversion process includes a transformation from the CTF head coordinate system convention to that used in the Neuromag systems.
+
+.. _BEHBABFA:
+
+Importing CTF Polhemus data
+===========================
+
+The CTF MEG systems store the Polhemus digitization data
+in text files. The utility mne_ctf_dig2fiff was
+created to convert these data files into the fif and hpts formats.
+
+The input data to mne_ctf_dig2fiff is
+a text file, which contains the coordinates of the digitization
+points in centimeters. The first line should contain a single number
+which is the number of points listed in the file. Each of the following
+lines contains a sequential number of the point, followed by the
+three coordinates. mne_ctf_dig2fiff ignores
+any text following the :math:`z` coordinate
+on each line. If the ``--numfids`` option is specified,
+the first three points indicate the three fiducial locations (1
+= nasion, 2 = left auricular point, 3 = right auricular point).
+Otherwise, the input file must end with three lines beginning with ``left`` , ``right`` ,
+or ``nasion`` to indicate the locations of the fiducial
+landmarks, respectively.
+
+.. note:: The sequential numbers should be unique within a file. In particular, the numbers 1, 2, and 3 must not appear more than once if the ``--numfids`` option is used.
+
+The command-line options for mne_ctf_dig2fiff are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---dig <*name*>**
+
+    Specifies the input data file in CTF output format.
+
+**\---numfids**
+
+    Fiducial locations are numbered instead of labeled, see above.
+
+**\---hpts <*name*>**
+
+    Specifies the output hpts file. The format of this text file is
+    described in :ref:`CJADJEBH`.
+
+**\---fif <*name*>**
+
+    Specifies the output fif file.
+
+.. _BEHDDFBI:
+
+Applying software gradient compensation
+=======================================
+
+Since the software gradient compensation employed in CTF
+systems is a reversible operation, it is possible to change the
+compensation status of CTF data in the data files as desired. This
+section contains information about the technical details of the
+compensation procedure and a description of mne_compensate_data ,
+which is a utility to change the software gradient compensation
+state in evoked-response data files.
+
+The fif files containing CTF data converted using the utility mne_ctf2fiff contain
+several compensation matrices which are employed to suppress external disturbances
+with help of the reference channel data. The reference sensors are
+located further away from the brain than the helmet sensors and
+are thus measuring mainly the external disturbances rather than magnetic
+fields originating in the brain. Most often, a compensation matrix
+corresponding to a scheme nicknamed *Third-order gradient
+compensation* is employed.
+
+Let us assume that the data contain :math:`n_1` MEG
+sensor channels, :math:`n_2` reference sensor
+channels, and :math:`n_3` other channels.
+The data from all channels can be concatenated into a single vector
+
+.. math::    x = [x_1^T x_2^T x_3^T]^T\ ,
+
+where :math:`x_1`, :math:`x_2`,
+and :math:`x_3` are the data vectors corresponding
+to the MEG sensor channels, reference sensor channels, and other
+channels, respectively. The data before and after compensation,
+denoted here by :math:`x_{(0)}` and :math:`x_{(k)}`, respectively,
+are related by
+
+.. math::    x_{(k)} = M_{(k)} x_{(0)}\ ,
+
+where the composite compensation matrix is
+
+.. math::    M_{(k)} = \begin{bmatrix}
+		I_{n_1} & C_{(k)} & 0 \\
+		0 & I_{n_2} & 0 \\
+		0 & 0 & I_{n_3}
+		\end{bmatrix}\ .
+
+In the above, :math:`C_{(k)}` is an :math:`n_1` by :math:`n_2` compensation
+data matrix corresponding to compensation "grade" :math:`k`.
+It is easy to see that
+
+.. math::    M_{(k)}^{-1} = \begin{bmatrix}
+		I_{n_1} & -C_{(k)} & 0 \\
+		0 & I_{n_2} & 0 \\
+		0 & 0 & I_{n_3}
+		\end{bmatrix}\ .
+
+To convert from compensation grade :math:`p` to :math:`k`, one
+can simply multiply the inverse of one compensation matrix
+by another and apply the product to the data:
+
+.. math::    x_{(k)} = M_{(k)} M_{(p)}^{-1} x_{(p)}\ .
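+
+Written out for the partitioned data above, only the MEG sensor block
+changes: the grade-:math:`p` MEG data :math:`x_1` become
+:math:`x_1 + (C_{(k)} - C_{(p)}) x_2`. A numpy sketch of this grade change
+(the function and variable names are hypothetical)::
+
+    import numpy as np
+
+    def change_grade(x1, x2, C_k, C_p):
+        """Convert MEG data x1 from compensation grade p to grade k.
+
+        x1: (n1, T) MEG sensor data, x2: (n2, T) reference sensor data,
+        C_k, C_p: (n1, n2) compensation matrices of the two grades.
+        """
+        return x1 + (C_k - C_p) @ x2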
+
+This operation is performed by mne_compensate_data ,
+which has the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in <*name*>**
+
+    Specifies the input data file.
+
+**\---out <*name*>**
+
+    Specifies the output data file.
+
+**\---grad <*number*>**
+
+    Specifies the desired compensation grade in the output file. The value
+    can be 1, 2, 3, or 101. The values starting from 101 will be used
+    for 4D Magnes compensation matrices.
+
+.. note:: Only average data is included in the output. Evoked-response data files produced with mne_browse_raw or mne_process_raw may include standard errors of mean, which cannot be re-compensated using the above method and are thus omitted.
+
+.. note:: Raw data cannot be compensated using mne_compensate_data. For this purpose, load the data into mne_browse_raw or mne_process_raw, specify the desired compensation grade, and save a new raw data file.
+
+.. _BEHGDDBH:
+
+Importing Magnes compensation channel data
+==========================================
+
+At present, it is not possible to include reference channel
+data in fif files containing 4D Magnes data directly using the conversion
+utilities available for the Magnes systems. However, it is possible
+to export the compensation channel signals in text format and merge
+them with the MEG helmet channel data using mne_insert_4D_comp.
+This utility has the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in <*name*>**
+
+    Specifies the input fif file containing the helmet sensor data.
+
+**\---out <*name*>**
+
+    Specifies the output fif file which will contain both the helmet
+    sensor data and the compensation channel data.
+
+**\---ref <*name*>**
+
+    Specifies a text file containing the reference sensor data.
+
+Each line of the reference sensor data file contains the
+following information:
+
+**epoch #**
+
+    is
+    always one,
+
+**time/s**
+
+    time point of this sample,
+
+**data/T**
+
+    the reference channel data
+    values.
+
+The standard locations of the MEG (helmet) and compensation
+sensors in a Magnes WH3600 system are listed in ``$MNE_ROOT/share/mne/Magnes_WH3600.pos``. mne_insert_4D_comp matches
+the helmet sensor positions in this file with those present in the
+input data file and transforms the standard compensation channel
+locations accordingly to be included in the output. Since a standard
+position file is only provided for the Magnes WH3600, mne_insert_4D_comp only
+works for that type of system.
+
+The fif files exported from the Magnes systems may contain
+a slightly smaller number of samples than originally acquired because
+the total number of samples may not be evenly divisible by a reasonable
+buffer size used for the fif raw data file. Therefore, the
+reference channel data may contain more samples
+than the fif file. The superfluous samples will be omitted from
+the end.
+
+.. _BEHBIIFF:
+
+Creating software gradient compensation data
+============================================
+
+The utility mne_create_comp_data was
+written to create software gradient compensation weight data for
+4D Magnes fif files. This utility takes a text file containing the
+compensation data as input and writes the corresponding fif file
+as output. This file can be merged into the fif file containing
+4D Magnes data with the utility mne_add_to_meas_info .
+
+The command line options of mne_create_comp_data are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in <*name*>**
+
+    Specifies the input text file containing the compensation data.
+
+**\---kind <*value*>**
+
+    The compensation type to be stored in the output file with the data. This
+    value defaults to 101 for the Magnes compensation and does not need
+    to be changed.
+
+**\---out <*name*>**
+
+    Specifies the output fif file containing the compensation channel weight
+    matrix :math:`C_{(k)}`, see :ref:`BEHDDFBI`.
+
+The format of the text-format compensation data file is:
+
+ <*number of MEG helmet channels*> <*number of compensation channels included*>
+ <*cname_1*> <*cname_2*> ...
+ <*name_1*> <*weights*>
+ <*name_2*> <*weights*> ...
+
+In the above, <*name_k*> denote the
+names of the MEG helmet channels and <*cname_k*>
+those of the compensation channels, respectively. If the channel
+names contain spaces, they must be surrounded by quotes, for example, ``"MEG 0111"``.
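+
+For instance, a hypothetical file describing two helmet channels and two
+compensation channels (all channel names and weights below are made up)
+could read::
+
+    2 2
+    "MC 001" "MC 002"
+    "MEG 0111" 0.05 -0.02
+    "MEG 0112" 0.01 0.04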
+
+.. _BEHBJGGF:
+
+Importing KIT MEG system data
+=============================
+
+The utility mne_kit2fiff was
+created in collaboration with Alec Maranz and Asaf Bachrach to import
+their MEG data acquired with the 160-channel KIT MEG system into the MNE
+software.
+
+To import the data, the following input files are mandatory:
+
+- The Polhemus data file (elp file)
+  containing the locations of the fiducials and the head-position
+  indicator (HPI) coils. These data are usually given in the CTF/4D
+  head coordinate system. However, mne_kit2fiff does
+  not rely on this assumption. This file can be exported directly from
+  the KIT system.
+
+- A file containing the locations of the HPI coils in the MEG
+  device coordinate system. These data are used together with the elp file
+  to establish the coordinate transformation between the head and
+  device coordinate systems. This file can be produced easily by manually
+  editing one of the files exported by the KIT system.
+
+- A sensor data file (sns file)
+  containing the locations and orientations of the sensors. This file
+  can be exported directly from the KIT system.
+
+.. note:: The output fif file will use the Neuromag head    coordinate system convention, see :ref:`BJEBIBAI`. A coordinate    transformation between the CTF/4D head coordinates and the Neuromag    head coordinates is included. This transformation can be read with    MNE Matlab Toolbox routines, see :ref:`ch_matlab`.
+
+The following input files are optional:
+
+- A head shape data file (hsp file)
+  containing locations of additional points from the head surface.
+  These points must be given in the same coordinate system as that
+  used for the elp file and the
+  fiducial locations must be within 1 mm from those in the elp file.
+
+- A raw data file containing the raw data values, sample by
+  sample, as text. If this file is not specified, the output fif file
+  will only contain the measurement info block.
+
+By default mne_kit2fiff includes
+the first 157 channels, assumed to be the MEG channels, in the output
+file. The compensation channel data are not converted by default
+but can be added, together with other channels, with the ``--add`` option.
+The channels from 160 onwards are designated as miscellaneous input
+channels (MISC 001, MISC 002, etc.). The channel names and types
+of these channels can be afterwards changed with the mne_rename_channels utility,
+see :ref:`CHDCFEAJ`. In addition, it is possible to synthesize
+the digital trigger channel (STI 014) from available analog
+trigger channel data, see the ``--stim`` option, below.
+The synthesized trigger channel data value at sample :math:`k` will
+be:
+
+.. math::    s(k) = \sum_{p = 1}^n {t_p(k) 2^{p - 1}}\ ,
+
+where :math:`t_p(k)` are the thresholded values derived
+from the input channel data :math:`d_p(k)`:
+
+.. math::    t_p(k) = \Bigg\{ \begin{array}{l}
+		 0 \text{  if  } d_p(k) \leq t\\
+		 1 \text{  if  } d_p(k) > t
+	     \end{array}\ .
+
+The threshold value :math:`t` can
+be adjusted with the ``--stimthresh`` option, see below.
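+
+A numpy sketch of this synthesis, with ``analog`` a hypothetical
+(channels x samples) array of the selected analog trigger channels::
+
+    import numpy as np
+
+    def synthesize_trigger(analog, t=1.0):
+        """Compose a digital trigger channel from analog channels."""
+        bits = (analog > t).astype(int)           # t_p(k)
+        weights = 2 ** np.arange(bits.shape[0])   # 2^(p - 1), p = 1..n
+        return weights @ bits                     # s(k), one value per sample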
+
+mne_kit2fiff accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---elp <*filename*>**
+
+    The name of the file containing the locations of the fiducials and
+    the HPI coils. This option is mandatory.
+
+**\---hsp <*filename*>**
+
+    The name of the file containing the locations of the fiducials and additional
+    points on the head surface. This file is optional.
+
+**\---sns <*filename*>**
+
+    The name of the file containing the sensor locations and orientations. This
+    option is mandatory.
+
+**\---hpi <*filename*>**
+
+    The name of a text file containing the locations of the HPI coils
+    in the MEG device coordinate frame, given in millimeters. The order of
+    the coils in this file does not have to be the same as that in the elp file.
+    This option is mandatory.
+
+**\---raw <*filename*>**
+
+    Specifies the name of the raw data file. If this file is not specified, the
+    output fif file will only contain the measurement info block.
+
+**\---sfreq <*value/Hz*>**
+
+    The sampling frequency of the data. If this option is not specified, the
+    sampling frequency defaults to 1000 Hz.
+
+**\---lowpass <*value/Hz*>**
+
+    The lowpass filter corner frequency used in the data acquisition.
+    If not specified, this value defaults to 200 Hz.
+
+**\---highpass <*value/Hz*>**
+
+    The highpass filter corner frequency used in the data acquisition.
+    If not specified, this value defaults to 0 Hz (DC recording).
+
+**\---out <*filename*>**
+
+    Specifies the name of the output fif format data file. If this file
+    is not specified, no output is produced but the elp , hpi ,
+    and hsp files are processed normally.
+
+**\---stim <*chs*>**
+
+    Specifies a colon-separated list of numbers of channels to be used
+    to synthesize a digital trigger channel. These numbers refer to
+    the scanning order channels as listed in the sns file,
+    starting from one. The digital trigger channel will be the last
+    channel in the file. If this option is absent, the output file will
+    not contain a trigger channel.
+
+**\---stimthresh <*value*>**
+
+    The threshold value used when synthesizing the digital trigger channel,
+    see above. Defaults to 1.0.
+
+**\---add <*chs*>**
+
+    Specifies a colon-separated list of numbers of channels to include between
+    the 157 default MEG channels and the digital trigger channel. These
+    numbers refer to the scanning order channels as listed in the sns file,
+    starting from one.
+
+.. note:: The mne_kit2fiff utility has not been extensively tested yet.
+
+.. _BABHDBBD:
+
+Importing EEG data saved in the EDF, EDF+, or BDF format
+========================================================
+
+Overview
+--------
+
+The mne_edf2fiff utility allows
+conversion of EEG data from the EDF, EDF+, and BDF formats to the fif
+format. Documentation for these three input formats can be found
+at:
+
+**EDF:**
+
+    http://www.edfplus.info/specs/edf.html
+
+**EDF+:**
+
+    http://www.edfplus.info/specs/edfplus.html
+
+**BDF:**
+
+    http://www.biosemi.com/faq/file_format.htm
+
+EDF (European Data Format) and EDF+ are 16-bit formats while
+BDF is a 24-bit variant of this format used by the EEG systems manufactured
+by a company called BioSemi.
+
+None of these formats supports electrode location information
+or head shape digitization information. Therefore, this information
+has to be provided separately. Presently the hpts and elp file formats
+are supported for including digitization data. For information on these
+formats, see :ref:`CJADJEBH` and http://www.sourcesignal.com/formats_probe.html.
+Note that it is mandatory to have the three fiducial locations (nasion
+and the two auricular points) included in the digitization data.
+Using the locations of the fiducial points, the digitization data
+are converted to the MEG head coordinate system employed in the
+MNE software, see :ref:`BJEBIBAI`. In the comparison of the
+channel names, only the initial segment up to the first '-' (dash)
+in the EDF/EDF+/BDF channel name is significant.
+
+The EDF+ files may contain an annotation channel which can
+be used to store trigger information. The Time-stamped Annotation
+Lists (TALs) on the annotation  data can be converted to a trigger
+channel (STI 014) using an annotation map file which associates
+an annotation label with a number on the trigger channel. The TALs
+can be listed with the ``--listtal`` option,
+see below.
+
+.. warning:: The data samples in a BDF file are represented in a 3-byte (24-bit) format. Since 3-byte raw data buffers are not presently supported in the fif format, these data will be changed to 4-byte integers in the conversion. Since the maximum size of a fif file is 2 GBytes, the maximum size of a BDF file to be converted is approximately 1.5 GBytes.
+
+.. warning:: The EDF/EDF+/BDF formats support channel-dependent sampling rates. This feature is not supported by mne_edf2fiff. However, the annotation channel in the EDF+ format can have a different sampling rate. The annotation channel data are not included in the output fif file.
+
+Using mne_edf2fiff
+------------------
+
+The command-line options of mne_edf2fiff are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---edf <*filename*>**
+
+    Specifies the name of the raw data file to process.
+
+**\---tal <*filename*>**
+
+    List the time-stamped annotation list (TAL) data from an EDF+ file here.
+    This output is useful to assist in creating the annotation map file,
+    see the ``--annotmap`` option, below.
+    This output file is an event file compatible with mne_browse_raw and mne_process_raw,
+    see :ref:`ch_browse`. In addition, the mapping between TAL
+    labels and trigger numbers provided by the ``--annotmap`` option is
+    employed to assign trigger numbers in the event file produced. In
+    the absence of the ``--annotmap`` option, the default trigger number 1024
+    is used.
+
+**\---annotmap <*filename*>**
+
+    Specify a file which maps the labels of the TALs to numbers on a trigger
+    channel (STI 014) which will be added to the output file if this
+    option is present. This annotation map file
+    may contain comment lines starting with the '%' or '#' characters.
+    The data lines contain a label-number pair, separated by a colon.
+    For example, a line 'Trigger-1:9' means that each
+    annotation labeled with the text 'Trigger-1' will
+    be translated to the number 9 on the trigger channel.
+
+**\---elp <*filename*>**
+
+    Specifies the name of an electrode location file. This file
+    is in the "probe" file format used by the *Source
+    Signal Imaging, Inc.* software. For a description of the
+    format, see http://www.sourcesignal.com/formats_probe.html. Note
+    that some other software packages may produce electrode-position
+    files with the elp ending not
+    conforming to the above specification. As discussed above, the fiducial
+    marker locations, optional in the "probe" file
+    format specification, are mandatory for mne_edf2fiff.
+    When this option is encountered on the command line, any previously
+    specified hpts file will be ignored.
+
+**\---hpts <*filename*>**
+
+    Specifies the name of an electrode position file in  the hpts format discussed
+    in :ref:`CJADJEBH`. The mandatory entries are the fiducial marker
+    locations and the EEG electrode locations. It is recommended that
+    electrode (channel) names instead of numbers are used to label the
+    EEG electrode locations. When this option is encountered on the
+    command line, any previously specified elp file
+    will be ignored.
+
+**\---meters**
+
+    Assumes that the digitization data in an hpts file
+    is given in meters instead of millimeters.
+
+**\---fif <*filename*>**
+
+    Specifies the name of the fif file to be output.
+
+Post-conversion tasks
+---------------------
+
+This section outlines additional steps to be taken after an
+EDF/EDF+/BDF file has been converted to the fif format for use in MNE:
+
+- Some of the channels may not have a
+  digitized electrode location associated with them. If these channels
+  are used for EOG or EMG measurements, their channel types should
+  be changed to the correct ones using the mne_rename_channels utility,
+  see :ref:`CHDCFEAJ`. EEG channels which do not have a location
+  associated with them should be assigned to be MISC channels.
+
+- After the channel types are correctly defined, a topographical
+  layout file can be created for mne_browse_raw and mne_analyze using
+  the mne_make_eeg_layout utility,
+  see :ref:`CHDDGDJA`.
+
+- The trigger channel name in BDF files is "Status".
+  This must be specified with the ``--digtrig`` option or with help of
+  the MNE_TRIGGER_CH_NAME environment variable when mne_browse_raw or mne_process_raw is
+  invoked, see :ref:`BABBGJEA`.
+
+- Only the two least significant bytes on the "Status" channel
+  of BDF files are significant as trigger information. The ``--digtrigmask 0xff``
+  option or the MNE_TRIGGER_CH_MASK environment variable should be used
+  to specify this to mne_browse_raw and mne_process_raw,
+  see :ref:`BABBGJEA`.
+
+.. _BEHDGAIJ:
+
+Importing EEG data saved in the Tufts University format
+=======================================================
+
+The utility mne_tufts2fiff was
+created in collaboration with Phillip Holcomb and Annette Schmid
+from Tufts University to import their EEG data to the MNE software.
+
+The Tufts EEG data is included in three files:
+
+- The raw data file containing the acquired
+  EEG data. The name of this file ends with the suffix ``.raw`` .
+
+- The calibration raw data file. This file contains known calibration
+  signals and is required to bring the data to physical units. The
+  name of this file ends with the suffix ``c.raw`` .
+
+- The electrode location information file. The name of this
+  file ends with the suffix ``.elp`` .
+
+The utility mne_tufts2fiff has
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---raw <*filename*>**
+
+    Specifies the name of the raw data file to process.
+
+**\---cal <*filename*>**
+
+    The name of the calibration data file. If calibration data are missing, the
+    calibration coefficients will be set to unity.
+
+**\---elp <*filename*>**
+
+    The name of the electrode location file. If this file is missing,
+    the electrode locations will be unspecified. This file is in the "probe" file
+    format used by the *Source Signal Imaging, Inc.* software.
+    For description of the format, see http://www.sourcesignal.com/formats_probe.html.
+    The fiducial marker locations, optional in the "probe" file
+    format specification, are mandatory for mne_tufts2fiff. Note
+    that some other software packages may produce electrode-position
+    files with the elp ending not
+    conforming to the above specification.
+
+.. note::
+
+    The conversion process includes a transformation from the Tufts head coordinate system convention to that used in the Neuromag systems.
+
+.. _BEHCCCDC:
+
+Importing BrainVision EEG data
+==============================
+
+The utility mne_brain_vision2fiff was
+created to import BrainVision EEG data. This utility also helps
+to import the eXimia (Nexstim) TMS-compatible EEG system data to
+the MNE software. The utility uses an optional fif file containing
+the head digitization data to allow source modeling. The MNE Matlab
+toolbox contains the function fiff_write_dig_file to
+write a digitization file based on digitization data available in
+another format, see :ref:`ch_matlab`.
+
+.. note::
+
+    mne_brain_vision2fiff reads events from the ``vmrk`` file referenced in the
+    ``vhdr`` file, but it only includes events whose "Type" is ``Stimulus`` and
+    whose "description" is given by ``S<number>``. All other events are ignored.
+
+
+The command-line options of mne_brain_vision2fiff are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---header <*name*>**
+
+    The name of the BrainVision header file. The extension of this file
+    is ``vhdr`` . The header file typically refers to a marker
+    file (``vmrk`` ) which is automatically processed and a
+    digital trigger channel (STI 014) is formed from the marker information.
+    The ``vmrk`` file is ignored if the ``--eximia`` option
+    is present.
+
+**\---dig <*name*>**
+
+    The name of the fif file containing the digitization data.
+
+**\---orignames**
+
+    Use the original EEG channel labels. If this option is absent the EEG
+    channels will be automatically renamed to EEG 001, EEG 002, *etc.*
+
+**\---eximia**
+
+    Interpret this as an eXimia data file. The first three channels
+    will be thresholded and interpreted as trigger channels. The composite
+    digital trigger channel will be composed in the same way as in the mne_kit2fiff utility,
+    see :ref:`BEHBJGGF`, above. In addition, the fourth channel
+    will be assigned as an EOG channel. This option is normally used
+    by the mne_eximia2fiff script,
+    see :ref:`BEHGCEHH`.
+
+**\---split <*size/MB*>**
+
+    Split the output data into several files which are no more than <*size*> MB.
+    By default, the output is split into files which are just below
+    2 GB so that the fif file maximum size is not exceeded.
+
+**\---out <*filename*>**
+
+    Specifies the name of the output fif format data file. If <*filename*> ends
+    with ``.fif`` or ``_raw.fif`` , these endings are
+    deleted. After these modifications, ``_raw.fif`` is inserted
+    after the remaining part of the file name. If the file is split
+    into multiple parts, the additional parts will be called
+    <*name*> ``-`` <*number*> ``_raw.fif`` .
+
+.. _BEHGCEHH:
+
+Converting eXimia EEG data
+==========================
+
+EEG data from the Nexstim eXimia system can be converted
+to the fif format with help of the mne_eximia2fiff script.
+It creates a BrainVision ``vhdr`` file and calls mne_brain_vision2fiff.
+Usage:
+
+``mne_eximia2fiff`` [``--dig`` dfile ] [``--orignames`` ] file1 file2 ...
+
+where file1 file2 ...
+are eXimia ``nxe`` files and the ``--orignames`` option
+is passed on to mne_brain_vision2fiff .
+If you want to convert all data files in a directory, say
+
+``mne_eximia2fiff *.nxe``
+
+The optional file specified with the ``--dig`` option is assumed
+to contain digitizer data from the recording in the Nexstim format.
+The resulting fif data file will contain these data converted to
+the fif format as well as the coordinate transformation between
+the eXimia digitizer and MNE head coordinate systems.
+
+.. note:: This script converts raw data files only.
+
+.. _BABCJEAD:
+
+Converting digitization data
+############################
+
+The mne_convert_dig_data utility
+converts Polhemus digitization data between different file formats.
+The input formats are:
+
+**fif**
+
+    The
+    standard format used in MNE. The digitization data are typically
+    present in the measurement files.
+
+**hpts**
+
+    A text format which is a translation
+    of the fif format data, see :ref:`CJADJEBH` below.
+
+**elp**
+
+    A text format produced by the *Source
+    Signal Imaging, Inc.* software. For description of this "probe" format,
+    see http://www.sourcesignal.com/formats_probe.html.
+
+The data can be output in fif and hpts formats.
+Only the last command-line option specifying an input file will
+be honored. Zero or more output file options can be present on the
+command line.
+
+.. note:: The elp and hpts input    files may contain textual EEG electrode labels. They will not be    copied to the fif format output.
+
+The command-line options of mne_convert_dig_data are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---fif <*name*>**
+
+    Specifies the name of an input fif file.
+
+**\---hpts <*name*>**
+
+    Specifies the name of an input hpts file.
+
+**\---elp <*name*>**
+
+    Specifies the name of an input elp file.
+
+**\---fifout <*name*>**
+
+    Specifies the name of an output fif file.
+
+**\---hptsout <*name*>**
+
+    Specifies the name of an output hpts file.
+
+**\---headcoord**
+
+    The fif and hpts input
+    files are assumed to contain data in the MNE head coordinate system,
+    see :ref:`BJEBIBAI`. With this option present, the data are
+    transformed to the MNE head coordinate system with the help of the fiducial
+    locations in the data. Use this option if the assumption does not hold or
+    if you are unsure about the definition of the coordinate system
+    of the fif and hpts input
+    data. This option is implied with elp input
+    files. If this option is present, the fif format output file will contain
+    the transformation between the original digitizer data coordinates and
+    the MNE head coordinate system.
+
+.. _CJADJEBH:
+
+The hpts format
+===============
+
+The hpts format digitizer
+data file may contain comment lines starting with the pound sign
+(#) and data lines of the form:
+
+ <*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
+
+where
+
+** <*category*>**
+
+    defines the type of points. Allowed categories are: hpi, cardinal (fiducial), eeg,
+    and extra, corresponding to head-position
+    indicator coil locations, cardinal landmarks, EEG electrode locations,
+    and additional head surface points, respectively. Note that tkmedit does not
+    recognize fiducial as an
+    alias for cardinal.
+
+** <*identifier*>**
+
+    identifies the point. The identifiers are usually sequential numbers. For
+    cardinal landmarks, 1 = left auricular point, 2 = nasion, and 3
+    = right auricular point. For EEG electrodes, identifier = 0 signifies
+    the reference electrode. Some programs (not tkmedit )
+    accept electrode labels as identifiers in the eeg category.
+
+** <*x/mm*> , <*y/mm*> , <*z/mm*>**
+
+    Location of the point, usually in the MEG head coordinate system, see :ref:`BJEBIBAI`.
+    Some programs have options to accept coordinates in meters instead
+    of millimeters. With ``--meters`` option, mne_transform_points lists
+    the coordinates in meters.
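+
+A short hypothetical example (all coordinates below are made up, given in
+millimeters)::
+
+    # category  identifier   x/mm    y/mm    z/mm
+    cardinal    2             0.0    85.0     0.0
+    cardinal    1           -75.0     0.0     0.0
+    cardinal    3            75.0     0.0     0.0
+    hpi         1            55.0    60.0    25.0
+    eeg         1             0.0    90.0    45.0
+    extra       1            15.0    25.0   110.0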
+
+.. _BEHDEJEC:
+
+Converting volumetric data into an MRI overlay
+##############################################
+
+With help of the mne_volume_source_space utility
+(:ref:`BJEFEHJI`) it is possible to create a source space which
+is defined within a volume rather than a surface. If the ``--mri`` option
+was used in mne_volume_source_space , the
+source space file contains an interpolator matrix which performs
+a trilinear interpolation into the voxel space of the MRI volume
+specified.
+
+At present, the MNE software does not include facilities
+to compute volumetric source estimates. However, it is possible
+to calculate forward solutions in the volumetric grid and use the
+MNE Matlab toolbox to read the forward solution. It is then possible
+to compute, *e.g.*, volumetric beamformer solutions
+in Matlab and output the results into w or stc files.
+The purpose of mne_volume_data2mri is
+to produce MRI overlay data compatible with the FreeSurfer MRI viewers
+(in the mgh or mgz formats) from such w or stc files.
+
+mne_volume_data2mri accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---src <*filename*>**
+
+    The name of the volumetric source space file created with mne_volume_source_space .
+    The source space must have been created with the ``--mri`` option,
+    which adds the appropriate sparse trilinear interpolator matrix
+    to the source space.
+
+**\---w <*filename*>**
+
+    The name of a w file to convert
+    into an MRI overlay.
+
+**\---stc <*filename*>**
+
+    The name of the stc file to convert
+    into an MRI overlay. If this file has many time frames, the output
+    file may be huge. Note: If both ``--w`` and ``--stc`` are
+    specified, ``--w`` takes precedence.
+
+**\---scale <*number*>**
+
+    Multiply the stc or w by
+    this scaling constant before producing the overlay.
+
+**\---out <*filename*>**
+
+    Specifies the name of the output MRI overlay file. The name must end
+    with either ``.mgh`` or ``.mgz`` identifying the
+    uncompressed and compressed FreeSurfer MRI formats, respectively.
+
+.. _BEHBHIDH:
+
+Listing source space data
+#########################
+
+The utility mne_list_source_space outputs
+the source space information into text files suitable for loading
+into the Neuromag MRIlab software.
+
+The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---src <*name*>**
+
+    The source space to be listed. This can be either the output from mne_make_source_space
+    (`*src.fif`), output from the forward calculation (`*fwd.fif`), or
+    the output from the inverse operator decomposition (`*inv.fif`).
+
+**\---mri <*name*>**
+
+    A file containing the transformation between the head and MRI coordinates
+    is specified with this option. This file can be either a Neuromag
+    MRI description file, the output from the forward calculation (`*fwd.fif`),
+    or the output from the inverse operator decomposition (`*inv.fif`).
+    If this file is included, the output will be in head coordinates.
+    Otherwise the source space will be listed in MRI coordinates.
+
+**\---dip <*name*>**
+
+    Specifies the 'stem' for the Neuromag text format
+    dipole files to be output. Two files will be produced: <*stem*> -lh.dip
+    and <*stem*> -rh.dip. These correspond
+    to the left and right hemisphere part of the source space, respectively.
+    This source space data can be imported to MRIlab through the File/Import/Dipoles menu
+    item.
+
+**\---pnt <*name*>**
+
+    Specifies the 'stem' for Neuromag text format
+    point files to be output. Two files will be produced: <*stem*> -lh.pnt
+    and <*stem*> -rh.pnt. These correspond
+    to the left and right hemisphere part of the source space, respectively.
+    This source space data can be imported to MRIlab through the File/Import/Strings menu
+    item.
+
+**\---exclude <*name*>**
+
+    Exclude the source space points defined by the given FreeSurfer 'label' file
+    from the output. The name of the file should end with ``-lh.label``
+    if it refers to the left hemisphere and with ``-rh.label`` if
+    it lists points in the right hemisphere, respectively.
+
+**\---include <*name*>**
+
+    Include only the source space points defined by the given FreeSurfer 'label' file
+    to the output. The file naming convention is the same as described
+    above under the ``--exclude`` option. The 'include' labels are
+    processed before the 'exclude' labels.
+
+**\---all**
+
+    Include all nodes in the output files instead of only those active
+    in the source space. Note that the output files will be huge if
+    this option is active.
+
+.. _BEHBBEHJ:
+
+Listing BEM mesh data
+#####################
+
+The utility mne_list_bem outputs
+the BEM meshes in text format. The default output data contains
+the *x*, *y*, and *z* coordinates
+of the vertices, listed in millimeters, one vertex per line.
+
+The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---bem <*name*>**
+
+    The BEM file to be listed. The file name normally ends with -bem.fif or -bem-sol.fif .
+
+**\---out <*name*>**
+
+    The output file name.
+
+**\---id <*number*>**
+
+    Identify the surface to be listed. The surfaces are numbered starting from
+    the innermost surface. Thus, for a three-layer model the surface numbers
+    are: 4 = scalp, 3 = outer skull, 1 = inner skull.
+    The default value is 4.
+
+**\---gdipoli**
+
+    List the surfaces in the format required by Thom Oostendorp's
+    gdipoli program. This is also the default input format for mne_surf2bem .
+
+**\---meters**
+
+    List the surface coordinates in meters instead of millimeters.
+
+**\---surf**
+
+    Write the output in the binary FreeSurfer format.
+
+**\---xfit**
+
+    Write a file compatible with xfit. This is the same effect as using
+    the options ``--gdipoli`` and ``--meters`` together.
+
+.. _BEHDIAJG:
+
+Converting surface data between different formats
+#################################################
+
+The utility mne_convert_surface converts
+surface data files between different formats.
+
+.. note:: The MNE Matlab toolbox functions enable    reading of FreeSurfer surface files directly. Therefore, the ``--mat``   option has been removed. The dfs file format conversion functionality    has been moved here from mne_convert_dfs .    Consequently, mne_convert_dfs has    been removed from MNE software.
+
+.. _BABEABAA:
+
+Command-line options
+====================
+
+mne_convert_surface accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---fif <*name*>**
+
+    Specifies a fif format input file. The first surface (source space)
+    from this file will be read.
+
+**\---tri <*name*>**
+
+    Specifies a text format input file. The format of this file is described in :ref:`BEHDEFCD`.
+
+**\---meters**
+
+    The unit of measure for the vertex locations in text input files
+    is meters instead of the default millimeters. This option does not
+    have any effect on the interpretation of the FreeSurfer surface
+    files specified with the ``--surf`` option.
+
+**\---swap**
+
+    Swap the ordering of the triangle vertices. The standard convention in
+    the MNE software is to have the vertices in text format files ordered
+    so that the vector cross product of the vectors from vertex 1 to
+    2 and 1 to 3 gives the direction of the outward surface normal. This
+    is also called the counterclockwise ordering. If your text input file
+    does not comply with this right-hand rule, use the ``--swap`` option.
+    This option does not have any effect on the interpretation of the FreeSurfer surface
+    files specified with the ``--surf`` option.
+
+**\---surf <*name*>**
+
+    Specifies a FreeSurfer format
+    input file.
+
+**\---dfs <*name*>**
+
+    Specifies the name of a dfs file to be converted. The surfaces produced
+    by BrainSuite are in the dfs format.
+
+**\---mghmri <*name*>**
+
+    Specifies a mgh/mgz format MRI data file which will be used to define
+    the coordinate transformation to be applied to the data read from
+    a dfs file to bring it to the FreeSurfer MRI
+    coordinates, *i.e.*, the coordinate system of
+    the MRI stack in the file. In addition, this option can be used
+    to insert "volume geometry" information into the FreeSurfer
+    surface file output (``--surfout`` option). If the input file already
+    contains the volume geometry information, ``--replacegeom`` is needed
+    to override the input volume geometry and to proceed to writing
+    the data.
+
+**\---replacegeom**
+
+    Replaces existing volume geometry information. Used in conjunction
+    with the ``--mghmri`` option described above.
+
+**\---fifmri <*name*>**
+
+    Specifies a fif format MRI description file which will be used to define
+    the coordinate transformation to be applied to the data read from
+    a dfs file to bring it to the same coordinate system as the MRI stack
+    in the file.
+
+**\---trans <*name*>**
+
+    Specifies the name of a text file which contains the coordinate
+    transformation to be applied to the data read from the dfs file
+    to bring it to the MRI coordinates, see below. This option is rarely
+    needed.
+
+**\---flip**
+
+    By default, the dfs surface nodes are assumed to be in a right-anterior-superior
+    (RAS) coordinate system with its origin at the left-posterior-inferior
+    (LPI) corner of the MRI stack. Sometimes the dfs file has left and
+    right flipped. This option reverses this flip, *i.e.*,
+    assumes the surface coordinate system is left-anterior-superior
+    (LAS) with its origin in the right-posterior-inferior (RPI) corner
+    of the MRI stack.
+
+**\---shift <*value/mm*>**
+
+    Shift the surface vertices in the direction of the surface normals
+    by this amount before saving the surface.
+
+**\---surfout <*name*>**
+
+    Specifies a FreeSurfer format output file.
+
+**\---fifout <*name*>**
+
+    Specifies a fif format output file.
+
+**\---triout <*name*>**
+
+    Specifies an ASCII output file that will contain the surface data
+    in the triangle file format described in :ref:`BEHDEFCD`.
+
+**\---pntout <*name*>**
+
+    Specifies an ASCII output file which will contain the vertex numbers only.
+
+**\---metersout**
+
+    With this option the ASCII output will list the vertex coordinates
+    in meters instead of millimeters.
+
+**\---swapout**
+
+    Defines the vertex ordering of ASCII triangle files to be output.
+    For details, see ``--swap`` option, above.
+
+**\---smfout <*name*>**
+
+    Specifies a smf (Simple Model Format) output file. For details of this
+    format, see http://people.scs.fsu.edu/~burkardt/data/smf.txt.
+
+.. note:: Multiple output options can be specified to produce outputs in several different formats with a single invocation of mne_convert_surface .
+
+The coordinate transformation file specified with the ``--trans`` should contain
+a 4 x 4 coordinate transformation matrix:
+
+.. math::    T = \begin{bmatrix}
+		R_{11} & R_{12} & R_{13} & x_0 \\
+		R_{21} & R_{22} & R_{23} & y_0 \\
+		R_{31} & R_{32} & R_{33} & z_0 \\
+		0 & 0 & 0 & 1
+		\end{bmatrix}
+
+defined so that if the augmented location vectors in the
+dfs file and MRI coordinate systems are denoted by :math:`r_{dfs} = [x_{dfs} y_{dfs} z_{dfs} 1]^T` and :math:`r_{MRI} = [x_{MRI} y_{MRI} z_{MRI} 1]^T`,
+respectively,
+
+.. math::    r_{MRI} = Tr_{dfs}
+
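+For illustration, a minimal Python sketch of applying such a
+transformation (the file name and the example point are hypothetical)::
+
+    import numpy as np
+
+    # 4 x 4 homogeneous transformation from a --trans style text file
+    T = np.loadtxt('dfs-mri-trans.txt')
+
+    r_dfs = np.array([10.0, 20.0, 30.0, 1.0])   # augmented dfs location
+    r_mri = T @ r_dfs                            # r_MRI = T r_dfs
+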
+.. _BABBHHHE:
+
+Converting MRI data into the fif format
+#######################################
+
+The utility mne_make_cor_set creates
+a fif format MRI description
+file optionally including the MRI data using FreeSurfer MRI volume
+data as input. The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---dir <*directory*>**
+
+    Specifies a directory containing the MRI volume in COR format. Any
+    previous ``--mgh`` options are cancelled when this option
+    is encountered.
+
+**\---withdata**
+
+    Include the pixel data in the output file. This option is implied
+    with the ``--mgh`` option.
+
+**\---mgh <*name*>**
+
+    An MRI volume file in mgh or mgz format.
+    The ``--withdata`` option is implied with this type of
+    input. Furthermore, the :math:`T_3` transformation,
+    the Talairach transformation :math:`T_4` from
+    the talairach.xfm file referred to in the MRI volume, and the
+    fixed transforms :math:`T_-` and :math:`T_+` will be
+    added to the output file. For a definition of the coordinate transformations,
+    see :ref:`CHDEDFIB`.
+
+**\---talairach <*name*>**
+
+    Take the Talairach transform from this file instead of the one specified
+    in mgh/mgz files.
+
+**\---out <*name*>**
+
+    Specifies the output file, which is a fif-format MRI description
+    file.
+
+.. _BABBIFIJ:
+
+Collecting coordinate transformations into one file
+###################################################
+
+The utility mne_collect_transforms collects
+coordinate transform information from various sources and saves
+them into a single fif file. The coordinate transformations used
+by MNE software are summarized in Figure 5.1. The output
+of mne_collect_transforms may
+include all transforms referred to therein except for the sensor
+coordinate system transformations :math:`T_{s_1} \dotso T_{s_n}`.
+The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---meas <*name*>**
+
+    Specifies a measurement data file which provides :math:`T_1`.
+    A forward solution or an inverse operator file can also be specified
+    as implied by Table 5.1.
+
+**\---mri <*name*>**
+
+    Specifies an MRI description or a standalone coordinate transformation
+    file produced by mne_analyze which
+    provides :math:`T_2`. If the ``--mgh`` option
+    is not present mne_collect_transforms also
+    tries to find :math:`T_3`, :math:`T_4`, :math:`T_-`,
+    and :math:`T_+` from this file.
+
+**\---mgh <*name*>**
+
+    An MRI volume file in mgh or mgz format.
+    This file provides :math:`T_3`. The transformation :math:`T_4` will
+    be read from the talairach.xfm file referred to in the MRI volume.
+    The fixed transforms :math:`T_-` and :math:`T_+` will
+    also be created.
+
+**\---out <*name*>**
+
+    Specifies the output file. If this option is not present, the collected transformations
+    will be output on screen but not saved.
+
+.. _BEHCHGHD:
+
+Converting an ncov covariance matrix file to fiff
+#################################################
+
+The ncov file format was used to store the noise-covariance
+matrix file. The MNE software requires that the covariance matrix
+files are in fif format. The utility mne_convert_ncov converts
+ncov files to fif format.
+
+The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---ncov <*name*>**
+
+    The ncov file to be converted.
+
+**\---meas <*name*>**
+
+    A fif format measurement file used to assign channel names to the noise-covariance
+    matrix elements. This file should have precisely the same channel
+    order within MEG and EEG as the ncov file. Typically, both the ncov
+    file and the measurement file are created by the now mature off-line
+    averager, meg_average .
+
+.. _BEHCDBHG:
+
+Converting a lisp covariance matrix to fiff
+###########################################
+
+The utility mne_convert_lspcov converts a LISP-format noise-covariance file,
+produced by the Neuromag signal processor graph, into fif format.
+
+The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---lspcov <*name*>**
+
+    The LISP noise-covariance matrix file to be converted.
+
+**\---meas <*name*>**
+
+    A fif format measurement file used to assign channel names to the noise-covariance
+    matrix elements. This file should have precisely the same channel
+    order within MEG and EEG as the LISP-format covariance matrix file.
+
+**\---out <*name*>**
+
+    The name of a fif format output file. The file name should end with
+    -cov.fif .
+
+**\---outasc <*name*>**
+
+    The name of a text format output file. No information about the channel
+    names is included. The covariance matrix is listed row by row. This
+    file can be loaded into MATLAB, for example.
+
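+For example, a minimal Python sketch for reading the text format
+output (the file name is hypothetical)::
+
+    import numpy as np
+
+    # The covariance matrix is listed row by row, without channel names
+    cov = np.loadtxt('sample-cov.txt')
+    assert cov.shape[0] == cov.shape[1]   # expect a square nchan x nchan matrix
+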
+.. _BEHCCEBJ:
+
+The MNE data file conversion tool
+#################################
+
+This utility, called mne_convert_mne_data ,
+allows the conversion of various fif files related to the MNE computations
+to other formats. The two principal purposes of this utility are
+to facilitate development of new analysis approaches with Matlab
+and conversion of the forward model and noise covariance matrix
+data into evoked-response type fif files, which can be accessed
+and displayed with the Neuromag source modelling software.
+
+.. note:: Most of the functions of mne_convert_mne_data are now covered by the MNE Matlab toolbox described in :ref:`ch_matlab`. This toolbox is recommended to avoid creating additional files occupying disk space.
+
+.. _BEHCICCF:
+
+Command-line options
+====================
+
+The command-line options recognized
+by mne_convert_mne_data are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---fwd <*name*>**
+
+    Specify the name of the forward solution file to be converted. Channels
+    specified with the ``--bad`` option will be excluded from
+    the file.
+
+**\---fixed**
+
+    Convert the forward solution to the fixed-orientation mode before outputting
+    the converted file. With this option only the field patterns corresponding
+    to a dipole aligned with the estimated cortex surface normal are
+    output.
+
+**\---surfsrc**
+
+    When outputting a free-orientation forward model (three orthogonal dipole
+    components present) rotate the dipole coordinate system at each
+    source node so that the two tangential dipole components are output
+    first, followed by the field corresponding to the dipole aligned
+    with the estimated cortex surface normal. The orientation of the
+    first two dipole components in the tangential plane is arbitrarily selected
+    to create an orthogonal coordinate system.
+
+**\---noiseonly**
+
+    When creating a 'measurement' fif file, do not
+    output a forward model file, just the noise-covariance matrix.
+
+**\---senscov <*name*>**
+
+    Specifies the fif file containing a sensor covariance matrix to
+    be included with the output. If no other input files are specified,
+    only the covariance matrix is output.
+
+**\---srccov <*name*>**
+
+    Specifies the fif file containing the source covariance matrix to
+    be included with the output. Only diagonal source covariance files
+    can be handled at the moment.
+
+**\---bad <*name*>**
+
+    Specifies the name of the file containing the names of the channels to
+    be omitted, one channel name per line. This does not affect the output
+    of the inverse operator since the channels have been already selected
+    when the file was created.
+
+**\---fif**
+
+    Output the forward model and the noise-covariance matrix into 'measurement' fif
+    files. The forward model files are tagged with <*modalities*> ``-meas-fwd.fif`` and
+    the noise-covariance matrix files with <*modalities*> ``-meas-cov.fif`` .
+    Here, modalities is ``-meg`` if MEG is included, ``-eeg`` if
+    EEG is included, and ``-meg-eeg`` if both types of signals
+    are present. The inclusion of modalities is controlled by the ``--meg`` and ``--eeg`` options.
+
+**\---mat**
+
+    Output the data into MATLAB mat files. This is the default. The
+    forward model files are tagged with <*modalities*> ``-fwd.mat`` forward model
+    and noise-covariance matrix output, with ``-inv.mat`` for inverse
+    operator output, and with ``-inv-meas.mat`` for combined inverse
+    operator and measurement data output, respectively. The meaning
+    of <*modalities*> is the same
+    as in the fif output, described above.
+
+**\---tag <*name*>**
+
+    By default, all variables in the Matlab output files start with
+    ``mne\_``. This option allows changing this prefix to <*name*> _.
+
+**\---meg**
+
+    Include MEG channels from the forward solution and noise-covariance
+    matrix.
+
+**\---eeg**
+
+    Include EEG channels from the forward solution and noise-covariance
+    matrix.
+
+**\---inv <*name*>**
+
+    Output the inverse operator data from the specified file into a
+    mat file. The source and noise covariance matrices as well as active channels
+    have been previously selected when the inverse operator was created
+    with mne_inverse_operator . Thus
+    the options ``--meg`` , ``--eeg`` , ``--senscov`` , ``--srccov`` , ``--noiseonly`` ,
+    and ``--bad`` do not affect the output of the inverse operator.
+
+**\---meas <*name*>**
+
+    Specifies the file containing measurement data to be output together with
+    the inverse operator. The channels corresponding to the inverse operator
+    are automatically selected from the file if the ``--inv``
+    option is present. Otherwise, the channel selection given with the ``--sel`` option will
+    be taken into account.
+
+**\---set <*number*>**
+
+    Select the data set to be output from the measurement file.
+
+**\---bmin <*value/ms*>**
+
+    Specifies the baseline minimum value setting for the measurement signal
+    output.
+
+**\---bmax <*value/ms*>**
+
+    Specifies the baseline maximum value setting for the measurement signal
+    output.
+
+.. note:: The ``--tmin`` and ``--tmax`` options which existed in previous versions of mne_convert_mne_data have been removed. If output of measurement data is requested, the entire averaged epoch is now included.
+
+Guide to combining options
+==========================
+
+The combination of options is quite complicated. The table in :ref:`BEHDCIII` should be
+helpful in determining the combination of options appropriate for your needs.
+
+
+.. tabularcolumns:: |p{0.38\linewidth}|p{0.1\linewidth}|p{0.2\linewidth}|p{0.3\linewidth}|
+.. _BEHDCIII:
+.. table:: Guide to combining mne_convert_mne_data options.
+
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | Desired output                      | Format  | Required options         | Optional options      |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | forward model                       | fif     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---surfsrc           |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    |                                     |         |   \---fif                |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | forward model                       | mat     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---surfsrc           |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | forward model and sensor covariance | mat     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---surfsrc           |
+    |                                     |         |   \---senscov <*name*>   |                       |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | sensor covariance                   | fif     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       |                       |
+    |                                     |         |   \---senscov <*name*>   |                       |
+    |                                     |         |   \---noiseonly          |                       |
+    |                                     |         |   \---fif                |                       |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | sensor covariance                   | mat     |   \---senscov <*name*>   | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | sensor covariance eigenvalues       | text    |   \---senscov <*name*>   | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       |                       |
+    |                                     |         |   \---eig                |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | evoked MEG/EEG data                 | mat     |   \---meas <*name*>      | \---sel <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---set <*number*>    |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | evoked MEG/EEG data and forward     | mat     |   \---meas <*name*>      | \---bad <*name*>      |
+    | model                               |         |   \---fwd <*name*>       | \---set <*number*>    |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | inverse operator data               | mat     |   \---inv <*name*>       |                       |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | inverse operator data and evoked    | mat     |   \---inv <*name*>       |                       |
+    | MEG/EEG data                        |         |   \---meas <*name*>      |                       |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+
+Matlab data structures
+======================
+
+The Matlab output provided by mne_convert_mne_data is
+organized in structures, listed in :ref:`BEHCICCA`. The fields
+occurring in these structures are listed in :ref:`BABCBIGF`.
+
+The symbols employed in variable size descriptions are:
+
+**nloc**
+
+    Number of source locations.
+
+**nsource**
+
+    Number of sources. For fixed orientation sources nsource = nloc,
+    whereas nsource = 3*nloc for free orientation sources.
+
+**nchan**
+
+    Number of measurement channels.
+
+**ntime**
+
+    Number of time points in the measurement data.
+
+.. _BEHCICCA:
+.. table:: Matlab structures produced by mne_convert_mne_data.
+
+    ===============  =======================================
+    Structure        Contents
+    ===============  =======================================
+    <*tag*> _meas      Measured data
+    <*tag*> _inv       The inverse operator decomposition
+    <*tag*> _fwd       The forward solution
+    <*tag*> _noise     A standalone noise-covariance matrix
+    ===============  =======================================
+
+The prefix given with the ``--tag`` option is indicated by <*tag*> , see :ref:`BEHCICCF`. Its default value is MNE.
+
+
+.. tabularcolumns:: |p{0.14\linewidth}|p{0.13\linewidth}|p{0.73\linewidth}|
+.. _BABCBIGF:
+.. table:: The fields of Matlab structures.
+
+
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | Variable              | Size            | Description                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | fwd                   | nsource x nchan | The forward solution, one source on each row. For free     |
+    |                       |                 | orientation sources, the fields of the three orthogonal    |
+    |                       |                 | dipoles for each location are listed consecutively.        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_names              | nchan (string)  | String array containing the names of the channels included |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_types              | nchan x 2       | The first column lists the types of the channels (1 = MEG, |
+    |                       |                 | 2 = EEG). The second column lists the coil types, see      |
+    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
+    |                       |                 | this value equals one.                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
+    |                       |                 | values specify the origin of the sensor coordinate system  |
+    |                       |                 | or the location of the electrode. For MEG channels, the    |
+    |                       |                 | following nine numbers specify the *x*, *y*, and           |
+    |                       |                 | *z*-direction unit vectors of the sensor coordinate system.|
+    |                       |                 | For EEG electrodes the first unit vector specifies the     |
+    |                       |                 | location of the reference electrode. If the reference is   |
+    |                       |                 | not specified this value is all zeroes. The remaining unit |
+    |                       |                 | vectors are irrelevant for EEG electrodes.                 |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file. The  |
+    |                       |                 | unit of the data is listed in the first column (T = 112,   |
+    |                       |                 | T/m = 201, V = 107). At present, the second column will be |
+    |                       |                 | always zero, *i.e.*, no unit multiplier.                   |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_cals               | nchan x 2       | Even if the data comes from the conversion already         |
+    |                       |                 | calibrated, the original calibration factors are included. |
+    |                       |                 | The first column is the range member of the fif data       |
+    |                       |                 | structures while the second is the cal member. To get      |
+    |                       |                 | calibrated values in the units given in ch_units from the  |
+    |                       |                 | raw data, the data must be multiplied with the product of  |
+    |                       |                 | range and cal.                                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sfreq                 | 1               | The sampling frequency in Hz.                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | highpass              | 1               | Highpass filter frequency (Hz)                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_loc            | nloc x 3        | The source locations given in the coordinate frame         |
+    |                       |                 | indicated by the coord_frame member.                       |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_ori            | nsource x 3     | The source orientations                                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_selection      | nsource x 2     | Indication of the sources selected from the complete source|
+    |                       |                 | spaces. Each row contains the number of the source in the  |
+    |                       |                 | complete source space (starting with 0) and the source     |
+    |                       |                 | space number (1 or 2). These numbers refer to the order the|
+    |                       |                 | two hemispheres were listed when mne_make_source_space was |
+    |                       |                 | invoked. mne_setup_source_space lists the left hemisphere  |
+    |                       |                 | first.                                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | coord_frame           | string          | Name of the coordinate frame employed in the forward       |
+    |                       |                 | calculations. Possible values are 'head' and 'mri'.        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | mri_head_trans        | 4 x 4           | The coordinate frame transformation from MRI coordinates   |
+    |                       |                 | to the MEG 'head' coordinates.                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
+    |                       |                 | coordinates to the MEG head coordinates                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | noise_cov             | nchan x nchan   | The noise covariance matrix                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_cov            | nsource         | The elements of the diagonal source covariance matrix.     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sing                  | nchan           | The singular values of                                     |
+    |                       |                 | :math:`A = C_0^{-^1/_2} G R^{^1/_2} = U \Lambda V^T`       |
+    |                       |                 | with :math:`R` selected so that                            |
+    |                       |                 | :math:`\text{trace}(AA^T) / \text{trace}(I) = 1`           |
+    |                       |                 | as discussed in :ref:`CHDDHAGE`                            |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | eigen_fields          | nchan x nchan   | The rows of this matrix are the left singular vectors of   |
+    |                       |                 | :math:`A`, i.e., the columns of :math:`U`, see above.      |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | eigen_leads           | nchan x nsource | The rows of this matrix are the right singular vectors of  |
+    |                       |                 | :math:`A`, i.e., the columns of :math:`V`, see above.      |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | noise_eigenval        | nchan           | In terms of :ref:`CHDDHAGE`, eigenvalues of :math:`C_0`,   |
+    |                       |                 | i.e., not scaled with number of averages.                  |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | noise_eigenvec        | nchan           | Eigenvectors of the noise covariance matrix. In terms of   |
+    |                       |                 | :ref:`CHDDHAGE`, :math:`U_C^T`.                            |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | data                  | nchan x ntime   | The measured data. One row contains the data at one time   |
+    |                       |                 | point.                                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | times                 | ntime           | The time points in the above matrix in seconds             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nave                  | 1               | Number of averages as listed in the data file.             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meas_times            | ntime           | The time points in seconds.                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+
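+As a rough illustration of accessing these structures outside Matlab,
+a minimal Python sketch using scipy (the file name is hypothetical,
+and the default ``MNE`` tag is assumed)::
+
+    from scipy.io import loadmat
+
+    m = loadmat('sample-meas.mat', squeeze_me=True, struct_as_record=False)
+    meas = m['MNE_meas']          # the <tag>_meas structure
+
+    print(meas.sfreq)             # sampling frequency in Hz
+    print(meas.data.shape)        # nchan x ntime measured data
+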
+.. _convert_to_matlab:
+
+Converting raw data to Matlab format
+####################################
+
+The utility mne_raw2mat converts
+all or selected channels from a raw data file to a Matlab mat file.
+In addition, this utility can provide information about the raw
+data file so that the raw data can be read directly from the original
+fif file using Matlab file I/O routines.
+
+.. note:: The MNE Matlab toolbox described in :ref:`ch_matlab` provides direct access to raw fif files without a need for conversion to mat file format first. Therefore, it is recommended that you use the Matlab toolbox rather than mne_raw2mat which creates large files occupying disk space unnecessarily.
+
+Command-line options
+====================
+
+mne_raw2mat accepts the
+following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---raw <*name*>**
+
+    Specifies the name of the raw data fif file to convert.
+
+**\---mat <*name*>**
+
+    Specifies the name of the destination Matlab file.
+
+**\---info**
+
+    With this option present, only information about the raw data file
+    is included. The raw data itself is omitted.
+
+**\---sel <*name*>**
+
+    Specifies a text file which contains the names of the channels to include
+    in the output file, one channel name per line. If the ``--info`` option
+    is specified, ``--sel`` does not have any effect.
+
+**\---tag <*tag*>**
+
+    By default, all Matlab variables included in the output file start
+    with ``mne\_``. This option changes the prefix to <*tag*> _.
+
+Matlab data structures
+======================
+
+The Matlab files output by mne_raw2mat can
+contain two data structures, <*tag*>_raw and <*tag*>_raw_info .
+If the ``--info`` option is specified, the file contains the
+latter structure only.
+
+The <*tag*>_raw structure
+contains only one field, data , which
+is a matrix containing the raw data. Each row of this matrix constitutes
+the data from one channel in the original file. The data type of
+this matrix is the same as that of the original data (2-byte signed integer,
+4-byte signed integer, or single-precision float).
+
+The fields of the <*tag*>_raw_info structure
+are listed in :ref:`BEHFDCIH`. Further explanation of the bufs field
+is provided in :ref:`BEHJEIHJ`.
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.15\linewidth}|p{0.6\linewidth}|
+.. _BEHFDCIH:
+.. table:: The fields of the raw data info structure.
+
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | Variable              | Size            | Description                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | orig_file             | string          | The name of the original fif file specified with the       |
+    |                       |                 | ``--raw`` option.                                          |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nchan                 | 1               | Number of channels.                                        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nsamp                 | 1               | Total number of samples                                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | bufs                  | nbuf x 4        | Present if the ``--info`` option was specified on the      |
+    |                       |                 | command line. For details, see :ref:`BEHJEIHJ`.            |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sfreq                 | 1               | The sampling frequency in Hz.                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | highpass              | 1               | Highpass filter frequency (Hz)                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_names              | nchan (string)  | String array containing the names of the channels included |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_types              | nchan x 2       | The first column lists the types of the channels (1 = MEG, |
+    |                       |                 | 2 = EEG). The second column lists the coil types, see      |
+    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
+    |                       |                 | this value equals one.                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file.      |
+    |                       |                 | The unit of the data is listed in the first column         |
+    |                       |                 | (T = 112, T/m = 201, V = 107). At present, the second      |
+    |                       |                 | column will be always zero, *i.e.*, no unit multiplier.    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
+    |                       |                 | values specify the origin of the sensor coordinate system  |
+    |                       |                 | or the location of the electrode. For MEG channels, the    |
+    |                       |                 | following nine numbers specify the *x*, *y*, and           |
+    |                       |                 | *z*-direction unit vectors of the sensor coordinate system.|
+    |                       |                 | For EEG electrodes the first vector after the electrode    |
+    |                       |                 | location specifies the location of the reference electrode.|
+    |                       |                 | If the reference is not specified this value is all zeroes.|
+    |                       |                 | The remaining unit vectors are irrelevant for EEG          |
+    |                       |                 | electrodes.                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_cals               | nchan x 2       | The raw data output by mne_raw2mat are uncalibrated.       |
+    |                       |                 | The first column is the range member of the fiff data      |
+    |                       |                 | structures while the second is the cal member. To get      |
+    |                       |                 | calibrated data values in the units given in ch_units from |
+    |                       |                 | the raw data, the data must be multiplied with the product |
+    |                       |                 | of range and cal .                                         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
+    |                       |                 | coordinates to the MEG head coordinates.                   |
+    +-----------------------+-----------------+------------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.1\linewidth}|p{0.6\linewidth}|
+.. _BEHJEIHJ:
+.. table:: The bufs member of the raw data info structure.
+
+    +-----------------------+-------------------------------------------------------------------------+
+    | Column                | Contents                                                                |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 1                     | The raw data type (2 or 16 = 2-byte signed integer, 3 = 4-byte signed   |
+    |                       | integer, 4 = single-precision float). All data in the fif file are      |
+    |                       | written in the big-endian byte order. The raw data are stored sample by |
+    |                       | sample.                                                                 |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 2                     | Byte location of this buffer in the original fif file.                  |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 3                     | First sample of this buffer. Since raw data storing can be switched on  |
+    |                       | and off during the acquisition, there might be gaps between the end of  |
+    |                       | one buffer and the beginning of the next.                               |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 4                     | Number of samples in the buffer.                                        |
+    +-----------------------+-------------------------------------------------------------------------+
+
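+As an illustration of such direct access, a minimal Python sketch that
+reads the first buffer (file names are hypothetical; the sketch assumes
+single-precision float data and that the byte location points at the
+first data value of the buffer)::
+
+    import numpy as np
+    from scipy.io import loadmat
+
+    info = loadmat('sample-info.mat', squeeze_me=True,
+                   struct_as_record=False)['mne_raw_info']
+    nchan = int(info.nchan)
+
+    kind, offset, first, nsamp = info.bufs[0]   # columns of the bufs matrix
+    assert int(kind) == 4                       # 4 = single-precision float
+
+    with open(info.orig_file, 'rb') as f:
+        f.seek(int(offset))
+        # big-endian values, stored sample by sample (nchan values per sample)
+        buf = np.fromfile(f, dtype='>f4', count=int(nsamp) * nchan)
+    data = buf.reshape(int(nsamp), nchan).T     # nchan x nsamp, uncalibrated
+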
+.. _BEHFIDCB:
+
+Converting epochs to Matlab format
+##################################
+
+The utility mne_epochs2mat converts
+epoch data including all or selected channels from a raw data file
+to a simple binary file with an associated description file in Matlab
+mat file format. With the help of the description file, a Matlab program
+can easily read the epoch data from the simple binary file. Signal
+space projection and bandpass filtering can be optionally applied
+to the raw data prior to saving the epochs.
+
+.. note:: The MNE Matlab toolbox described in :ref:`ch_matlab` provides direct access to raw fif files without conversion with mne_epochs2mat first. Therefore, it is recommended that you use the Matlab toolbox rather than mne_epochs2mat which creates large files occupying disk space unnecessarily. An exception to this is the case where you apply a filter to the data and save the band-pass filtered epochs.
+
+Command-line options
+====================
+
+mne_epochs2mat accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---raw <*name*>**
+
+    Specifies the name of the raw data fif file to use as input.
+
+**\---mat <*name*>**
+
+    Specifies the name of the destination file. Anything following the last
+    period in the file name will be removed before composing the output
+    file name. The binary epoch file will be called <*trimmed name*> ``.epochs`` and
+    the corresponding Matlab description file will be <*trimmed name*> ``_desc.mat`` .
+
+**\---tag <*tag*>**
+
+    By default, all Matlab variables included in the description file
+    start with ``mne\_``. This option changes the prefix to <*tag*> _.
+
+**\---events <*name*>**
+
+    The file containing the event definitions. This can be a text or
+    fif format file produced by mne_process_raw or mne_browse_raw ,
+    see :ref:`CACBCEGC`. With the help of this file it is possible
+    to select virtually any data segment from the raw data file. If
+    this option is missing, the digital trigger channel in the raw data
+    file or a fif format event file produced automatically by mne_process_raw or mne_browse_raw is
+    consulted for event information.
+
+**\---event <*name*>**
+
+    Event number identifying the epochs of interest.
+
+**\---tmin <*time/ms*>**
+
+    The starting point of the epoch with respect to the event of interest.
+
+**\---tmax <*time/ms*>**
+
+    The endpoint of the epoch with respect to the event of interest.
+
+**\---sel <*name*>**
+
+    Specifies a text file which contains the names of the channels to include
+    in the output file, one channel name per line. If the ``--inv`` option
+    is specified, ``--sel`` is ignored. If neither ``--inv`` nor ``--sel`` is
+    present, all MEG and EEG channels are included. The digital trigger
+    channel can be included with the ``--includetrig`` option, described
+    below.
+
+**\---inv <*name*>**
+
+    Specifies an inverse operator, which will be employed in two ways. First,
+    the channels included to output will be those included in the inverse
+    operator. Second, any signal-space projection operator present in
+    the inverse operator file will be applied to the data. This option
+    cancels the effect of ``--sel`` and ``--proj`` options.
+
+**\---digtrig <*name*>**
+
+    Name of the composite digital trigger channel. The default value
+    is 'STI 014'. Underscores in the channel name
+    will be replaced by spaces.
+
+**\---digtrigmask <*number*>**
+
+    Mask to be applied to the trigger channel values before considering them.
+    This option is useful if one wants to set some bits in a don't care
+    state. For example, some finger response pads keep the trigger lines
+    high if not in use, *i.e.*, a finger is not in
+    place. Yet, it is convenient to keep these devices permanently connected
+    to the acquisition system. The number can be given in decimal or
+    hexadecimal format (beginning with 0x or 0X). For example, the value
+    255 (0xFF) means that only the lowest order byte (usually trigger
+    lines 1 - 8 or bits 0 - 7) will be considered.
+
+**\---includetrig**
+
+    Add the digital trigger channel to the list of channels to output.
+    This option should not be used if the trigger channel is already
+    included in the selection specified with the ``--sel`` option.
+
+**\---filtersize <*size*>**
+
+    Adjust the length of the FFT to be applied in filtering. The number will
+    be rounded up to the next power of two. If the size is :math:`N`,
+    the corresponding length of time is :math:`^N/_{f_s}`,
+    where :math:`f_s` is the sampling frequency
+    of your data. The filtering procedure includes overlapping tapers
+    of length :math:`^N/_2` so that the total FFT
+    length will actually be :math:`2N`. The default
+    value is 4096.
+
+**\---highpass <*value/Hz*>**
+
+    Highpass filter frequency limit. If this is too low with respect
+    to the selected FFT length and data file sampling frequency, the
+    data will not be highpass filtered. You can experiment with the
+    interactive version to find the lowest applicable filter for your
+    data. This value can be adjusted in the interactive version of the
+    program. The default is 0, i.e., no highpass filter in effect.
+
+**\---highpassw <*value/Hz*>**
+
+    The width of the transition band of the highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`^{f_s}/_{(2N)}`.
+
+**\---lowpass <*value/Hz*>**
+
+    Lowpass filter frequency limit. This value can be adjusted in the interactive
+    version of the program. The default is 40 Hz.
+
+**\---lowpassw <*value/Hz*>**
+
+    The width of the transition band of the lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+**\---filteroff**
+
+    Do not filter the data.
+
+**\---proj <*name*>**
+
+    Include signal-space projection (SSP) information from this file.
+    If the ``--inv`` option is present, ``--proj`` has
+    no effect.
+
+.. note:: Baseline has not been subtracted from the epochs. This has to be done in subsequent processing with Matlab if so desired.
+
+.. note:: Strictly speaking, trigger mask value zero would mean that all trigger inputs are ignored. However, for convenience, setting the mask to zero or not setting it at all has the same effect as 0xFFFFFFFF, *i.e.*, all bits set.
+
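+The mask is applied as a bitwise AND with each trigger channel value;
+for example, a minimal Python sketch (the values are hypothetical)::
+
+    mask = 0xFF              # consider only trigger lines 1 - 8 (bits 0 - 7)
+    raw_value = 0x2005       # upper bits held high, e.g., by a response pad
+    print(raw_value & mask)  # prints 5, the event code actually considered
+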
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_browse_raw or mne_process_raw . Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
+
+.. note:: The digital trigger channel mask can also be set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option supersedes the MNE_TRIGGER_CH_MASK environment variable.
+
+The binary epoch data file
+==========================
+
+mne_epochs2mat saves the
+epoch data extracted from the raw data file in a simple binary file.
+The data are stored as big-endian single-precision floating point
+numbers. Assuming that each of the total of :math:`p` epochs
+contains :math:`n` channels and :math:`m` time
+points, the data :math:`s_{jkl}` are ordered
+as
+
+.. math::    s_{111} \dotso s_{1n1} s_{211} \dotso s_{mn1} \dotso s_{mnp}\ ,
+
+where the first index stands for the time point, the second
+for the channel, and the third for the epoch number, respectively.
+The data are not calibrated, i.e., the calibration factors present
+in the Matlab description file have to be applied to get to physical
+units as described below.
+
+.. note:: The maximum size of an epoch data file is 2 Gbytes, *i.e.*, 0.5 Gsamples.
+
+Matlab data structures
+======================
+
+The Matlab description files output by mne_epochs2mat contain
+a data structure <*tag*>_epoch_info .
+The fields of this structure are listed in :ref:`BEHIFJIJ`.
+Further explanation of the epochs member
+is provided in :ref:`BEHHAGHE`.
+
+
+.. tabularcolumns:: |p{0.15\linewidth}|p{0.15\linewidth}|p{0.6\linewidth}|
+.. _BEHIFJIJ:
+.. table:: The fields of the raw data info structure.
+
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | Variable              | Size            | Description                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | orig_file             | string          | The name of the original fif file specified with the       |
+    |                       |                 | ``--raw`` option.                                          |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | epoch_file            | string          | The name of the epoch data file produced by mne_epochs2mat.|
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nchan                 | 1               | Number of channels.                                        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nepoch                | 1               | Total number of epochs.                                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | epochs                | nepoch x 5      | Description of the content of the epoch data file,         |
+    |                       |                 | see :ref:`BEHHAGHE`.                                       |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sfreq                 | 1               | The sampling frequency in Hz.                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | highpass              | 1               | Highpass filter frequency (Hz)                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_names              | nchan (string)  | String array containing the names of the channels included |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_types              | nchan x 2       | The first column lists the types of the channels (1 = MEG, |
+    |                       |                 | 2 = EEG). The second column lists the coil types, see      |
+    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
+    |                       |                 | this value equals one.                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file.      |
+    |                       |                 | The unit of the data is listed in the first column         |
+    |                       |                 | (T = 112, T/m = 201, V = 107). At present, the second      |
+    |                       |                 | column will be always zero, *i.e.*, no unit multiplier.    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
+    |                       |                 | values specify the origin of the sensor coordinate system  |
+    |                       |                 | or the location of the electrode. For MEG channels, the    |
+    |                       |                 | following nine numbers specify the *x*, *y*, and           |
+    |                       |                 | *z*-direction unit vectors of the sensor coordinate        |
+    |                       |                 | system. For EEG electrodes the first vector after the      |
+    |                       |                 | electrode location specifies the location of the reference |
+    |                       |                 | electrode. If the reference is not specified this value is |
+    |                       |                 | all zeroes. The remaining unit vectors are irrelevant for  |
+    |                       |                 | EEG electrodes.                                            |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_cals               | nchan x 2       | The raw data output by mne_raw2mat are not calibrated.     |
+    |                       |                 | The first column is the range member of the fif data       |
+    |                       |                 | structures while the second is the cal member. To obtain   |
+    |                       |                 | calibrated data values in the units given in ch_units,     |
+    |                       |                 | the raw data must be multiplied by the product of range    |
+    |                       |                 | and cal.                                                   |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
+    |                       |                 | coordinates to the MEG head coordinates.                   |
+    +-----------------------+-----------------+------------------------------------------------------------+
+
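+In other words, if :math:`x_{raw}` denotes an uncalibrated value stored
+in the epoch data file, the corresponding calibrated value in the units
+given in ch_units is
+
+.. math::    x_{cal} = \text{range} \cdot \text{cal} \cdot x_{raw}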
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.6\linewidth}|
+.. _BEHHAGHE:
+.. table:: The epochs member of the raw data info structure.
+
+    +---------------+------------------------------------------------------------------+
+    | Column        | Contents                                                         |
+    +---------------+------------------------------------------------------------------+
+    | 1             | The raw data type (2 or 16 = 2-byte signed integer, 3 = 4-byte   |
+    |               | signed integer, 4 = single-precision float). The epoch data are  |
+    |               | written using the big-endian byte order. The data are stored     |
+    |               | sample by sample.                                                |
+    +---------------+------------------------------------------------------------------+
+    | 2             | Byte location of this epoch in the binary epoch file.            |
+    +---------------+------------------------------------------------------------------+
+    | 3             | First sample of this epoch in the original raw data file.        |
+    +---------------+------------------------------------------------------------------+
+    | 4             | First sample of the epoch with respect to the event.             |
+    +---------------+------------------------------------------------------------------+
+    | 5             | Number of samples in the epoch.                                  |
+    +---------------+------------------------------------------------------------------+
+
+.. note:: For source modelling purposes, it is recommended that the MNE Matlab toolbox, see :ref:`ch_matlab`, is employed to read the measurement info instead of using the channel information in the raw data info structure described in :ref:`BEHIFJIJ`.
diff --git a/doc/source/manual/cookbook.rst b/doc/source/manual/cookbook.rst
new file mode 100644
index 0000000..a55e8aa
--- /dev/null
+++ b/doc/source/manual/cookbook.rst
@@ -0,0 +1,1066 @@
+
+
+.. _ch_cookbook:
+
+============
+The Cookbook
+============
+
+Overview
+########
+
+This section describes the typical workflow needed to produce
+the minimum-norm estimate movies using the MNE software. The workflow
+is summarized in :ref:`CIHBIIAH`.
+
+.. _CIHBIIAH:
+
+.. figure:: pics/Flowchart.png
+    :alt: MNE Workflow Flowchart
+    :align: center
+
+    Workflow of the MNE software
+    
+    References in parentheses indicate sections and chapters of this manual.
+
+Selecting the subject
+#####################
+
+Before starting the data analysis, set up the environment
+variable SUBJECTS_DIR to select the directory under which the anatomical
+MRI data are stored. Optionally, set SUBJECT to the name of the
+subject's MRI data directory under SUBJECTS_DIR. With this
+setting you can avoid entering the ``--subject`` option common to many
+MNE programs and scripts. In the following sections, files in the
+FreeSurfer directory hierarchy are usually referred to without specifying
+the leading directories. Thus, bem/msh-7-src.fif refers
+to the file $SUBJECTS_DIR/$SUBJECT/bem/msh-7-src.fif.
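+
+For example, with a Bourne-compatible shell these variables could be set
+as follows (the directory name is only a hypothetical example; csh-family
+shells use ``setenv`` instead):
+
+``export SUBJECTS_DIR=/data/subjects``
+
+``export SUBJECT=duck_donald``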
+
+It is also recommended that the FreeSurfer environment
+is set up before using the MNE software.
+
+.. _CHDBBCEJ:
+
+Cortical surface reconstruction with FreeSurfer
+###############################################
+
+The first processing stage is the creation of various surface
+reconstructions with FreeSurfer.
+The recommended FreeSurfer workflow
+is summarized on the FreeSurfer wiki pages: https://surfer.nmr.mgh.harvard.edu/fswiki/RecommendedReconstruction.
+Please refer to the FreeSurfer wiki pages
+(https://surfer.nmr.mgh.harvard.edu/fswiki/) and other FreeSurfer documentation
+for more information.
+
+.. note:: Only the latest (4.0.X and later) FreeSurfer distributions contain a version of tkmedit which is compatible with mne_analyze, see :ref:`CACCHCBF`.
+
+.. _BABCCEHF:
+
+Setting up the anatomical MR images for MRIlab
+##############################################
+
+If you have the Neuromag software installed, the Neuromag
+MRI viewer, MRIlab, can be used to access the MRI slice data created
+by FreeSurfer. In addition, the
+Neuromag MRI directories can be used for storing the MEG/MRI coordinate
+transformations created with mne_analyze,
+see :ref:`CACEHGCD`. During the computation of the forward
+solution, mne_do_forward_solution searches
+for the MEG/MRI coordinate transformation in the Neuromag MRI directories, see :ref:`BABCHEJD`. The fif files created by mne_setup_mri can
+be loaded into Matlab with the fiff_read_mri function,
+see :ref:`ch_matlab`.
+
+These functions require running the script mne_setup_mri, which
+requires that the subject is set with the ``--subject`` option
+or by the SUBJECT environment variable. The script processes one
+or more MRI data sets from ``$SUBJECTS_DIR/$SUBJECT/mri``;
+by default these are T1 and brain. This default can be changed by
+specifying the sets with one or more ``--mri`` options.
+
+The script creates the directories ``mri/`` <*name*> ``-neuromag/slices`` and ``mri/`` <*name*> ``-neuromag/sets``.
+If the input data set is in COR format, mne_setup_mri makes
+symbolic links from the COR files in the directory ``mri/`` <*name*> into ``mri/`` <*name*> ``-neuromag/slices``,
+and creates a corresponding fif file COR.fif in ``mri/`` <*name*> ``-neuromag/sets``.
+This "description file" contains references to
+the actual MRI slices.
+
+If the input MRI data are stored in the newer mgz format,
+the file created in the ``mri/`` <*name*> ``-neuromag/sets`` directory
+will include the MRI pixel data as well. If available, the coordinate
+transformations to allow conversion between the MRI (surface RAS)
+coordinates and MNI and FreeSurfer Talairach coordinates are copied
+to the MRI description file. mne_setup_mri invokes mne_make_cor_set ,
+described in :ref:`BABBHHHE` to convert the data.
+
+For example:
+
+``mne_setup_mri --subject duck_donald --mri T1``
+
+This command processes the MRI data set T1 for subject duck_donald.
+
+.. note:: If the SUBJECT environment variable is set, it is usually sufficient to run mne_setup_mri without any options.
+
+.. note:: If the name specified with the ``--mri`` option contains a slash, the MRI data are accessed from the directory specified, and the ``SUBJECT`` and ``SUBJECTS_DIR`` environment variables as well as the ``--subject`` option are ignored.
+
+.. _CIHCHDAE:
+
+Setting up the source space
+###########################
+
+This stage consists of the following:
+
+- Creating a suitable decimated dipole
+  grid on the white matter surface.
+
+- Creating the source space file in fif format.
+
+- Creating ASCII versions of the source space file for viewing
+  with MRIlab.
+
+All of the above is accomplished with the convenience script mne_setup_source_space. This
+script assumes that:
+
+- The anatomical MRI processing has been
+  completed as described in :ref:`CHDBBCEJ`.
+
+- The environment variable SUBJECTS_DIR is set correctly.
+
+The script accepts the following options:
+
+**\---subject <*subject*>**
+
+    Defines the name of the subject. If the environment variable SUBJECT
+    is set correctly, this option is not required.
+
+**\---morph <*name*>**
+
+    Name of a subject in SUBJECTS_DIR. If this option is present, the source
+    space will be first constructed for the subject defined by the --subject
+    option or the SUBJECT environment variable and then morphed to this
+    subject. This option is useful if you want to create source spaces
+    for several subjects and want to directly compare the data across
+    subjects at the source space vertices without any morphing procedure
+    afterwards. The drawback of this approach is that the spacing between
+    source locations in the "morph" subject is not going
+    to be as uniform as it would be without morphing.
+
+**\---spacing <*spacing/mm*>**
+
+    Specifies the grid spacing for the source space in mm. If not set,
+    a default spacing of 7 mm is used. Either the default or a 5-mm
+    spacing is recommended.
+
+**\---ico <*number*>**
+
+    Instead of using the traditional method for cortical surface decimation
+    it is possible to create the source space using the topology of
+    a recursively subdivided icosahedron (<*number*> > 0)
+    or an octahedron (<*number*> < 0).
+    This method uses the cortical surface inflated to a sphere as a
+    tool to find the appropriate vertices for the source space. The
+    benefit of the ``--ico`` option is that the source space
+    will have triangulation information for the decimated vertices included, which
+    future versions of MNE software may be able to utilize. The number
+    of triangles increases by a factor of four in each subdivision,
+    starting from 20 triangles in an icosahedron and 8 triangles in an
+    octahedron. Since the number of vertices on a closed surface is :math:`n_{vert} = (n_{tri} + 4)/2`,
+    the number of vertices in the *k*\ th subdivision of
+    an icosahedron and an octahedron are :math:`10 \cdot 4^k + 2` and :math:`4^{k + 1} + 2`, respectively.
+    The recommended values for <*number*> and
+    the corresponding number of source space locations are listed in :ref:`BABGCDHA`.
+
+**\---surface <*name*>**
+
+    Name of the surface under the surf directory to be used. Defaults
+    to 'white'. ``mne_setup_source_space`` looks
+    for files ``rh.`` <*name*> and ``lh.`` <*name*> under
+    the ``surf`` directory.
+
+**\---overwrite**
+
+    An existing source space file with the same name is overwritten only
+    if this option is specified.
+
+**\---cps**
+
+    Compute the cortical patch statistics. This is needed if current-density estimates
+    are computed, see :ref:`CBBDBHDI`. If the patch information is
+    available in the source space file the surface normal is considered to
+    be the average normal calculated over the patch instead of the normal
+    at each source space location. The calculation of this information
+    takes a considerable amount of time because of the large number
+    of Dijkstra searches involved.
+
+.. _BABGCDHA:
+
+.. table:: Recommended subdivisions of an icosahedron and an octahedron for the creation of source spaces. The approximate source spacing and corresponding surface area have been calculated assuming a 1000-cm2 surface area per hemisphere.
+
+    ==========  ========================  =====================  ===============================
+    <*number*>  Sources per hemisphere    Source spacing / mm    Surface area per source / mm2
+    ==========  ========================  =====================  ===============================
+    -5          1026                      9.9                    97
+    4           2562                      6.2                    39
+    -6          4098                      4.9                    24
+    5           10242                     3.1                    9.8
+    ==========  ========================  =====================  ===============================
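+
+As a quick consistency check of the vertex-count formulas given with the
+``--ico`` option above, the icosahedron row with <*number*> = 5 follows
+directly from
+
+.. math::    n_{vert} = 10 \cdot 4^5 + 2 = 10242\ ,
+
+in agreement with the table.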
+
+For example, to create the reconstruction geometry for Donald
+Duck with a 5-mm spacing between the grid points, say
+
+``mne_setup_source_space --subject duck_donald --spacing 5``
+
+As a result, the following files are created in the ``bem`` directory:
+
+- <*subject*>-<*spacing*>- ``src.fif`` containing
+  the source space description in fif format.
+
+- <*subject*>-<*spacing*>- ``lh.pnt`` and <*subject*>-<*spacing*>- ``rh.pnt`` containing
+  the source space points in MRIlab compatible ASCII format.
+
+- <*subject*>-<*spacing*>- ``lh.dip`` and <*subject*>-<*spacing*>- ``rh.dip`` containing
+  the source space points in MRIlab compatible ASCII format. These
+  files contain 'dipoles', *i.e.*,
+  both source space points and cortex normal directions.
+
+- If cortical patch statistics is requested, another source
+  space file called <*subject*>-<*spacing*> ``p-src.fif`` will
+  be created.
+
+.. note:: <*spacing*> will be the suggested source spacing in millimeters if the ``--spacing`` option is used. For source spaces based on the *k*\ th subdivision of an icosahedron or an octahedron, <*spacing*> will be replaced by ``ico-`` k or ``oct-`` k, respectively.
+
+.. note:: After the geometry is set up it is possible to check that the source space points are located on the cortical surface. This can easily be done by loading the ``COR.fif`` file from ``mri/T1-neuromag/sets`` into MRIlab and by subsequently overlaying the corresponding pnt or dip files using Import/Strings or Import/Dipoles from the File menu, respectively.
+
+.. note:: If the SUBJECT environment variable is set correctly, it is usually sufficient to run ``mne_setup_source_space`` without any options.
+
+.. _CHDBJCIA:
+
+Creating the BEM model meshes
+#############################
+
+Calculation of the forward solution using the boundary-element
+model (BEM) requires that the surfaces separating regions of different
+electrical conductivities are tessellated with suitable surface
+elements. Our BEM software employs triangular tessellations. Therefore,
+prerequisites for BEM calculations are the segmentation of the MRI
+data and the triangulation of the relevant surfaces.
+
+For MEG computations, a reasonably accurate solution can
+be obtained by using a single-compartment BEM assuming the shape
+of the intracranial volume. For EEG, the standard model contains
+the intracranial space, the skull, and the scalp.
+
+At present, no bulletproof method exists for creating the
+triangulations. Feasible approaches are described in :ref:`create_bem_model`.
+
+.. _BABDBBFC:
+
+Setting up the triangulation files
+==================================
+
+The segmentation algorithms described in :ref:`create_bem_model` produce
+either FreeSurfer surfaces or triangulation
+data in text format. Before proceeding to the creation of the boundary
+element model, standard files (or symbolic links created with the ``ln -s`` command) have to be present in the subject's ``bem`` directory.
+If you are employing ASCII triangle files, the standard file names
+are:
+
+**inner_skull.tri**
+
+    Contains the inner skull triangulation.
+
+**outer_skull.tri**
+
+    Contains the outer skull triangulation.
+
+**outer_skin.tri**
+
+    Contains the head surface triangulation.
+
+The corresponding names for FreeSurfer surfaces
+are:
+
+**inner_skull.surf**
+
+    Contains the inner skull triangulation.
+
+**outer_skull.surf**
+
+    Contains the outer skull triangulation.
+
+**outer_skin.surf**
+
+    Contains the head surface triangulation.
+
+.. note:: Different methods can be employed for the creation of the individual surfaces. For example, it may turn out that the watershed algorithm produces a better quality skin surface than the segmentation approach based on the FLASH images. If this is the case, ``outer_skin.surf`` can be set to point to the corresponding watershed output file while the other surfaces can be picked from the FLASH segmentation data.
+
+.. note:: The triangulation files can include the name of the subject as a prefix ``<*subject name*>-``, *e.g.*, ``duck-inner_skull.surf``.
+
+.. note:: The mne_convert_surface utility described in :ref:`BEHDIAJG` can be used to convert text format triangulation files into the FreeSurfer surface format.
+
+.. note:: "Aliases" created with    the Mac OSX finder are not equivalent to symbolic links and do not    work as such for the UNIX shells and MNE programs.
+
+.. _CIHDBFEG:
+
+Setting up the boundary-element model
+#####################################
+
+This stage sets up the subject-dependent data for computing
+the forward solutions:
+
+- The fif format boundary-element model
+  geometry file is created. This step also checks that the input surfaces
+  are complete and that they are topologically correct, *i.e.*,
+  that the surfaces do not intersect and that the surfaces are correctly
+  ordered (outer skull surface inside the scalp and inner skull surface
+  inside the outer skull). Furthermore, the range of triangle sizes
+  on each surface is reported. For the three-layer model, the minimum
+  distance between the surfaces is also computed.
+
+- Text files containing the boundary surface vertex coordinates are
+  created.
+
+- The geometry-dependent BEM solution data are computed. This step
+  takes several minutes to complete and can be optionally omitted.
+
+This step assigns the conductivity values to the BEM compartments.
+For the scalp and the brain compartments, the default is 0.3 S/m.
+The default skull conductivity is 50 times smaller, *i.e.*,
+0.006 S/m. Recent publications, see :ref:`CEGEGDEI`, report
+skull conductivity ratios ranging from 1:15 (Oostendorp *et
+al.*, 2000) to 1:25 - 1:50 (Slew *et al.*,
+2009, Gonçalves *et al.*, 2003). The
+MNE default ratio 1:50 is based on the typical values reported in
+(Gonçalves *et al.*, 2003), since their
+approach is based on a comparison of SEF/SEP measurements in a BEM model.
+The variability across publications may depend on individual variations
+but, more importantly, on the precision of the skull compartment
+segmentation.
+
+This processing stage is automated with the script mne_setup_forward_model. This
+script assumes that:
+
+- The anatomical MRI processing has been
+  completed as described in :ref:`CHDBBCEJ`.
+
+- The BEM model meshes have been created as outlined in :ref:`CHDBJCIA`.
+
+- The environment variable SUBJECTS_DIR is set correctly.
+
+mne_setup_forward_model accepts
+the following options:
+
+**\---subject <*subject*>**
+
+    Defines the name of the subject. This can be also accomplished
+    by setting the SUBJECT environment variable.
+
+**\---surf**
+
+    Use the FreeSurfer surface files instead of the default ASCII triangulation
+    files. Please consult :ref:`BABDBBFC` for the standard file
+    naming scheme.
+
+**\---noswap**
+
+    Traditionally, the vertices of the triangles in 'tri' files
+    have been ordered so that, seen from the outside of the triangulation,
+    the vertices are ordered in clockwise fashion. The fif files, however,
+    employ the more standard convention with the vertices ordered counterclockwise.
+    Therefore, mne_setup_forward_model by
+    default reverses the vertex ordering before writing the fif file.
+    If, for some reason, you have counterclockwise-ordered tri files
+    available, this behavior can be turned off by defining ``--noswap``.
+    When the fif file is created, the vertex ordering is checked and
+    the process is aborted if it is incorrect after taking into account
+    the state of the swapping. Should this happen, try to run mne_setup_forward_model again including
+    the ``--noswap`` flag. In particular, if you employ the seglab software
+    to create the triangulations (see :ref:`create_bem_model`), the ``--noswap`` flag
+    is required. This option is ignored if ``--surf`` is specified.
+
+**\---ico <*number*>**
+
+    This option is relevant (and required) only with the ``--surf`` option and
+    if the surface files have been produced by the watershed algorithm.
+    The watershed triangulations are isomorphic with an icosahedron,
+    which has been recursively subdivided five times to yield 20480 triangles.
+    However, this number of triangles results in a long computation
+    time even in a workstation with generous amounts of memory. Therefore,
+    the triangulations have to be decimated. Specifying ``--ico 4`` yields 5120 triangles per surface while ``--ico 3`` results
+    in 1280 triangles. The recommended choice is ``--ico 4`` .
+
+**\---homog**
+
+    Use a single-compartment model (brain only) instead of a three-layer one
+    (scalp, skull, and brain). Only the ``inner_skull.tri`` triangulation
+    is required. This model is usually sufficient for MEG but invalid
+    for EEG. If you are employing MEG data only, this option is recommended
+    because of faster computation times. If this flag is specified,
+    the options ``--brainc``, ``--skullc``, and ``--scalpc`` are irrelevant.
+
+**\---brainc <*conductivity/ S/m*>**
+
+    Defines the brain compartment conductivity. The default value is 0.3 S/m.
+
+**\---skullc <*conductivity/ S/m*>**
+
+    Defines the skull compartment conductivity. The default value is 0.006 S/m
+    corresponding to a conductivity ratio 1/50 between the brain and
+    skull compartments.
+
+**\---scalpc <*conductivity/ S/m*>**
+
+    Defines the scalp compartment conductivity. The default value is 0.3 S/m.
+
+**\---innershift <*value/mm*>**
+
+    Shift the inner skull surface outwards along the vertex normal directions
+    by this amount.
+
+**\---outershift <*value/mm*>**
+
+    Shift the outer skull surface outwards along the vertex normal directions
+    by this amount.
+
+**\---scalpshift <*value/mm*>**
+
+    Shift the scalp surface outwards along the vertex normal directions by
+    this amount.
+
+**\---nosol**
+
+    Omit the BEM model geometry dependent data preparation step. This
+    can be done later by running mne_setup_forward_model without the ``--nosol`` option.
+
+**\---model <*name*>**
+
+    Name for the BEM model geometry file. The model will be created in
+    the directory bem as <*name*> ``-bem.fif``. If
+    this option is missing, standard model names will be used (see below).
+
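+For example, assuming FreeSurfer surface files with the standard names
+discussed in :ref:`BABDBBFC` are in place, a three-layer model for
+subject duck_donald could be set up with:
+
+``mne_setup_forward_model --subject duck_donald --surf --ico 4``
+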
+As a result of running the mne_setup_forward_model script, the
+following files are created in the ``bem`` directory:
+
+- BEM model geometry specifications <*subject*>-<*ntri-scalp*>-<*ntri-outer_skull*>-<*ntri-inner_skull*>- ``bem.fif`` or <*subject*>-<*ntri-inner_skull*> ``-bem.fif`` containing
+  the BEM geometry in fif format. The latter file is created if the ``--homog``
+  option is specified. Here, <*ntri-xxx*> indicates
+  the number of triangles on the corresponding surface.
+
+- <*subject*>-<*surface name*>-<*ntri*> ``.pnt`` files
+  are created for each of the surfaces present in the BEM model. These
+  can be loaded into MRIlab to check the location of the surfaces.
+
+- <*subject*>-<*surface name*>-<*ntri*> ``.surf`` files
+  are created for each of the surfaces present in the BEM model. These
+  can be loaded into tkmedit to check
+  the location of the surfaces.
+
+- The BEM 'solution' file containing the geometry
+  dependent solution data will be produced with the same name as the
+  BEM geometry specifications with the ending ``-bem-sol.fif``.
+  These files also contain all the information in the ``-bem.fif`` files.
+
+After the BEM is set up it is advisable to check that the
+BEM model meshes are correctly positioned. This can easily be done
+by loading the COR.fif file
+from mri/T1-neuromag/sets into
+MRIlab and by subsequently overlaying the corresponding pnt files
+using Import/Strings from the File menu.
+
+.. note:: The FreeSurfer format BEM surfaces can also be viewed with the tkmedit program which is part of the FreeSurfer distribution.
+
+.. note:: If the SUBJECT environment variable is set, it is usually sufficient to run ``mne_setup_forward_model`` without any options for the three-layer model and with the ``--homog`` option for the single-layer model. If the input files are FreeSurfer surfaces, ``--surf`` and ``--ico 4`` are required as well.
+
+.. note:: With the help of the ``--nosol`` option it is possible to create candidate BEM geometry data files quickly and do the checking with respect to the anatomical MRI data. When the result is satisfactory, mne_setup_forward_model can be run without ``--nosol`` to invoke the time-consuming calculation of the solution file as well.
+
+.. note:: The triangle meshes created by the seglab program have counterclockwise vertex ordering and thus require the ``--noswap`` option.
+
+.. note:: Up to this point all processing stages depend on the anatomical (geometrical) information only and thus remain identical across different MEG studies.
+
+Setting up the MEG/EEG analysis directory
+#########################################
+
+The remaining steps require that the actual MEG/EEG data
+are available. It is recommended that a new directory is created
+for the MEG/EEG data processing. The raw data files collected should not be
+copied there but rather referred to with symbolic links created
+with the ``ln -s`` command. Averages calculated
+on-line can be either copied or referred to with links.
+
+.. note:: If you don't know how to create a directory, how to make symbolic links, or how to copy files from the shell command line, this is a perfect time to learn these basic skills from other users or from a suitable elementary book before proceeding.
+
+Preprocessing the raw data
+##########################
+
+The following MEG and EEG data preprocessing steps are recommended:
+
+- The coding problems on the trigger channel
+  STI 014 may have to be fixed, see :ref:`BABCDBDI`.
+
+- EEG electrode location information and MEG coil types may
+  need to be fixed, see :ref:`BABCDFJH`.
+
+- The data may be optionally downsampled to facilitate subsequent
+  processing, see :ref:`BABDGFFG`.
+
+- Bad channels in the MEG and EEG data must be identified, see :ref:`BABBHCFG`.
+
+- The data have to be filtered to the desired passband. If mne_browse_raw or mne_process_raw is
+  employed to calculate the offline averages and covariance matrices,
+  this step is unnecessary since the data are filtered on the fly.
+  For information on these programs, please consult :ref:`ch_browse`.
+
+- For evoked-response analysis, the data have to be re-averaged
+  off line, see :ref:`BABEAEDF`.
+
+.. _BABCDBDI:
+
+Cleaning the digital trigger channel
+====================================
+
+The calibration factor of the digital trigger channel used
+to be set to a value much smaller than one by the Neuromag data
+acquisition software. Especially to facilitate viewing of raw data
+in graph, it is advisable to change the calibration factor to one.
+Furthermore, the eighth bit of the trigger word is coded incorrectly
+in the original raw files. Both problems can be corrected by saying:
+
+``mne_fix_stim14`` <*raw file*>
+
+More information about mne_fix_stim14 is
+available in :ref:`CHDBFDIC`. It is recommended that this
+fix is included as the first raw data processing step. Note, however,
+the mne_browse_raw and mne_process_raw always sets
+the calibration factor to one internally.
+
+.. note:: If your data file was acquired on or after November 10, 2005 on the Martinos Center Vectorview system, it is not necessary to use mne_fix_stim14.
+
+.. _BABCDFJH:
+
+Fixing channel information
+==========================
+
+There are two potential discrepancies in the channel information
+which need to be fixed before proceeding:
+
+- EEG electrode locations may be incorrect
+  if more than 60 EEG channels are acquired.
+
+- The magnetometer coil identifiers are not always correct.
+
+These potential problems can be fixed with the utilities mne_check_eeg_locations and mne_fix_mag_coil_types,
+see :ref:`CHDJGGGC` and :ref:`CHDGAAJC`.
+
+.. _BABBHCFG:
+
+Designating bad channels
+========================
+
+Sometimes some MEG or EEG channels are not functioning properly
+for various reasons. These channels should be excluded from the
+analysis by marking them bad using the mne_mark_bad_channels utility,
+see :ref:`CHDDHBEE`. Especially if a channel does not show
+a signal at all (flat), it is most important to exclude it from the
+analysis, since its noise estimate will be unrealistically low and
+thus the current estimate calculations will give a strong weight
+to the zero signal on the flat channels, and the estimates will essentially vanish.
+It is also important to exclude noisy channels because they can
+possibly affect others when signal-space projections or EEG average electrode
+reference is employed. Noisy bad channels can also adversely affect
+off-line averaging and noise-covariance matrix estimation by causing
+unnecessary rejections of epochs.
+
+Recommended ways to identify bad channels are:
+
+- Observe the quality of data during data
+  acquisition and make notes of observed malfunctioning channels to
+  your measurement protocol sheet.
+
+- View the on-line averages and check the condition of the channels.
+
+- Compute preliminary off-line averages with artefact rejection,
+  signal-space projection, and EEG average electrode reference computation
+  off and check the condition of the channels.
+
+- View raw data in mne_process_raw or
+  the Neuromag signal processor graph without
+  signal-space projection or EEG average electrode reference computation
+  and identify bad channels.
+
+.. note:: It is strongly recommended that bad channels are identified and marked in the original raw data files. If present in the raw data files, the bad channel selections will be automatically transferred to averaged files, noise-covariance matrices, forward solution files, and inverse operator decompositions.
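+
+For example, if the names of the bad channels have been collected, one
+per line, into a text file (here hypothetically called ``bads.txt``),
+a command along the following lines could be used to mark them in a raw
+data file; see :ref:`CHDDHBEE` for the exact calling sequence:
+
+``mne_mark_bad_channels --bad bads.txt duck_audvis_raw.fif``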
+
+.. _BABDGFFG:
+
+Downsampling the MEG/EEG data
+=============================
+
+The minimum practical sampling frequency of the Vectorview
+system is 600 Hz. Lower sampling frequencies are allowed but result
+in elevated noise level in the data. It is advisable to lowpass
+filter and downsample the large raw data files often emerging in
+cognitive and patient studies to speed up subsequent processing.
+This can be accomplished with the mne_process_raw and mne_browse_raw software
+modules. For details, see :ref:`CACFAAAJ` and :ref:`CACBDDIE`.
+
+.. note:: It is recommended that the original raw file is called <*name*>_raw.fif and the downsampled version <*name*>_ds_raw.fif.
+
+.. _BABEAEDF:
+
+Off-line averaging
+==================
+
+The recommended tools for off-line averaging are mne_browse_raw and mne_process_raw. mne_browse_raw is
+an interactive program for averaging and noise-covariance matrix
+computation. It also includes routines for filtering so that the
+downsampling and filtering steps can be skipped. Therefore, with mne_browse_raw you
+can produce the off-line average and noise-covariance matrix estimates
+directly. The batch-mode version of mne_browse_raw is
+called mne_process_raw. Detailed
+information on mne_browse_raw and mne_process_raw can
+be found in :ref:`ch_browse`.
+
+.. _CHDBEHDC:
+
+Aligning the coordinate frames
+##############################
+
+The calculation of the forward solution requires knowledge
+of the relative location and orientation of the MEG/EEG and MRI
+coordinate systems. The MEG/EEG head coordinate system is defined
+in :ref:`BJEBIBAI`. The conversion tools included in the MNE
+software take care of the idiosyncrasies of the coordinate frame
+definitions in different MEG and EEG systems so that the fif files
+always employ the same definition of the head coordinate system.
+
+Ideally, the head coordinate frame has a fixed orientation
+and origin with respect to the head anatomy. Therefore, a single
+MRI-head coordinate transformation for each subject should be sufficient.
+However, as explained in :ref:`BJEBIBAI`, the head coordinate
+frame is defined by identifying the fiducial landmark locations,
+making the origin and orientation of the head coordinate system
+slightly user dependent. As a result, the most conservative choice
+for the definition of the coordinate transformation computation
+is to re-establish it for each experimental session, *i.e.*,
+each time when new head digitization data are employed.
+
+The interactive source analysis software mne_analyze provides
+tools for coordinate frame alignment, see :ref:`ch_interactive_analysis`. :ref:`CHDIJBIG` also
+contains tips for using mne_analyze for
+this purpose.
+
+Another useful tool for the coordinate system alignment is MRIlab,
+the Neuromag MEG-MRI integration tool. Section 3.3.1 of the MRIlab User's
+Guide, Neuromag P/N NM20419A-A contains a detailed description of
+this task. Employ the images in the set ``mri/T1-neuromag/sets/COR.fif`` for
+the alignment. Check the alignment carefully using the digitization
+data included in the measurement file as described in Section 5.3.1
+of the above manual. Save the aligned description file in the same
+directory as the original description file without the alignment
+information but under a different name.
+
+.. warning:: This step is extremely important. If the alignment of the coordinate frames is inaccurate all subsequent processing steps suffer from the error. Therefore, this step should be performed by the person in charge of the study or by a trained technician. Written or photographic documentation of the alignment points employed during the MEG/EEG acquisition can also be helpful.
+
+.. _BABCHEJD:
+
+Computing the forward solution
+##############################
+
+After the MRI-MEG/EEG alignment has been set, the forward
+solution, *i.e.*, the magnetic fields and electric
+potentials at the measurement sensors and electrodes due to dipole
+sources located on the cortex, can be calculated with help of the
+convenience script mne_do_forward_solution .
+This utility accepts the following options:
+
+**\---subject <*subject*>**
+
+    Defines the name of the subject. This can be also accomplished
+    by setting the SUBJECT environment variable.
+
+**\---src <*name*>**
+
+    Source space name to use. This option overrides the ``--spacing`` option. The
+    source space is searched first from the current working directory
+    and then from ``$SUBJECTS_DIR/`` <*subject*> /bem.
+    The source space file must be specified exactly, including the ``fif`` extension.
+
+**\---spacing <*spacing/mm*> or ``ico-`` <*number*> or ``oct-`` <*number*>**
+
+    This is an alternate way to specify the name of the source space
+    file. For example, if ``--spacing 6`` is given on the command
+    line, the source space files searched for are ./<*subject*> -6-src.fif
+    and ``$SUBJECTS_DIR/$SUBJECT/bem/`` <*subject*> -6-src.fif.
+    The first file found is used. Spacing defaults to 7 mm.
+
+**\---bem <*name*>**
+
+    Specifies the BEM to be used. The name of the file can be any of <*name*>, <*name*> -bem.fif, or <*name*> -bem-sol.fif.
+    The file is searched for from the current working directory and
+    from ``bem`` . If this option is omitted, the most recent
+    BEM file in the ``bem`` directory is used.
+
+**\---mri <*name*>**
+
+    The name of the MRI description file containing the MEG/MRI coordinate
+    transformation. This file was saved as part of the alignment procedure
+    outlined in :ref:`CHDBEHDC`. The file is searched for from
+    the current working directory and from ``mri/T1-neuromag/sets`` .
+    The search order for MEG/MRI coordinate transformations is discussed
+    below.
+
+**\---trans <*name*>**
+
+    The name of a text file containing the 4 x 4 matrix for the coordinate transformation
+    from head to mri coordinates, see below. If the option ``--trans`` is
+    present, the ``--mri`` option is not required. The search
+    order for MEG/MRI coordinate transformations is discussed below.
+
+**\---meas <*name*>**
+
+    This file is the measurement fif file or an off-line average file
+    produced thereof. It is recommended that the average file is employed for
+    evoked-response data and the original raw data file otherwise. This
+    file provides the MEG sensor locations and orientations, the EEG
+    electrode locations, and the coordinate transformation between
+    the MEG device coordinates and MEG head-based coordinates.
+
+**\---fwd <*name*>**
+
+    This file will contain the forward solution as well as the coordinate transformations,
+    sensor and electrode location information, and the source space
+    data. A name of the form <*name*> ``-fwd.fif`` is
+    recommended. If this option is omitted the forward solution file
+    name is automatically created from the measurement file name and
+    the source space name.
+
+**\---destdir <*directory*>**
+
+    Optionally specifies a directory where the forward solution will
+    be stored.
+
+**\---mindist <*dist/mm*>**
+
+    Omit source space points closer than this value to the inner skull surface.
+    Any source space points outside the inner skull surface are automatically
+    omitted. The use of this option ensures that numerical inaccuracies
+    for very superficial sources do not cause unexpected effects in
+    the final current estimates. Suitable value for this parameter is
+    of the order of the size of the triangles on the inner skull surface.
+    If you employ the seglab software
+    to create the triangulations, this value should be about equal to
+    the desired side length of the triangles.
+
+**\---megonly**
+
+    Omit EEG forward calculations.
+
+**\---eegonly**
+
+    Omit MEG forward calculations.
+
+**\---all**
+
+    Compute the forward solution for all vertices on the source space.
+
+**\---overwrite**
+
+    Overwrite the possibly existing forward model file.
+
+**\---help**
+
+    Show usage information for the script.
+
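+For example, a minimal invocation for an MEG-only forward solution based
+on the 5-mm source space created above could be (the average file name
+is hypothetical):
+
+``mne_do_forward_solution --subject duck_donald --meas duck_auditory-ave.fif --spacing 5 --megonly``
+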
+The MEG/MRI transformation is determined by the following
+search sequence:
+
+- If the ``--mri`` option was
+  present, the file is looked for literally as specified, in the directory
+  of the measurement file specified with the ``--meas`` option,
+  and in the directory $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets.
+  If the file is not found, the script exits with an error message.
+
+- If the ``--trans`` option was present, the file is
+  looked up literally as specified. If the file is not found, the
+  script exits with an error message.
+
+- If neither the ``--mri`` nor the ``--trans`` option
+  was present, the following default search sequence is engaged:
+
+  - The ``.fif`` ending in the
+    measurement file name is replaced by ``-trans.fif`` . If
+    this file is present, it will be used.
+
+  - The newest file whose name ends with ``-trans.fif`` in
+    the directory of the measurement file is looked up. If such a file
+    is present, it will be used.
+
+  - The newest file whose name starts with ``COR-`` in
+    directory $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets is looked
+    up. If such a file is present, it will be used.
+
+  - If all the above searches fail, the script exits with an error
+    message.
+
+This search sequence is designed to work well with the MEG/MRI
+transformation files output by mne_analyze ,
+see :ref:`CACEHGCD`. It is recommended that the -trans.fif files
+saved with the Save default and Save... options in
+the mne_analyze alignment dialog
+are used, because then the $SUBJECTS_DIR/$SUBJECT directory will
+be composed of files which are dependent on the subject's
+anatomy only, not on the MEG/EEG data to be analyzed.
+
+.. note:: If the standard MRI description file and BEM file selections are appropriate and the 7-mm source space grid spacing is appropriate, only the ``--meas`` option is necessary. If EEG data is not used, the ``--megonly`` option should be included.
+
+.. note:: If it is conceivable that the current-density transformation will be incorporated into the inverse operator, specify a source space with patch information for the forward computation. This is not mandatory but saves a lot of time when the inverse operator is created, since the patch information does not need to be created at that stage.
+
+.. note:: The MEG head to MRI transformation matrix specified with the ``--trans`` option should be a text file containing a 4-by-4 matrix:
+
+.. math::    T = \begin{bmatrix}
+		R_{11} & R_{12} & R_{13} & x_0 \\
+		R_{21} & R_{22} & R_{23} & y_0 \\
+		R_{31} & R_{32} & R_{33} & z_0 \\
+		0 & 0 & 0 & 1
+		\end{bmatrix}
+
+defined so that if the augmented location vectors in the MEG head
+and MRI coordinate systems are denoted by :math:`r_{head} = [x_{head}\ y_{head}\ z_{head}\ 1]^T` and
+:math:`r_{MRI} = [x_{MRI}\ y_{MRI}\ z_{MRI}\ 1]^T`,
+respectively, then
+
+.. math::    r_{MRI} = T r_{head}
+
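+As a purely illustrative example, a ``--trans`` file specifying the
+identity transformation, *i.e.*, stating that the head and MRI coordinate
+systems coincide, would contain the four lines:
+
+::
+
+    1 0 0 0
+    0 1 0 0
+    0 0 1 0
+    0 0 0 1
+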
+.. note:: It is not possible to calculate an EEG forward solution with a single-layer BEM.
+
+.. _BABDEEEB:
+
+Setting up the noise-covariance matrix
+######################################
+
+The MNE software employs an estimate of the noise-covariance
+matrix to weight the channels correctly in the calculations. The
+noise-covariance matrix provides information about field and potential
+patterns representing uninteresting noise sources of either human
+or environmental origin.
+
+The noise covariance matrix can be calculated in several
+ways:
+
+- Employ the individual epochs during
+  off-line averaging to calculate the full noise covariance matrix.
+  This is the recommended approach for evoked responses.
+
+- Employ empty room data (collected without the subject) to
+  calculate the full noise covariance matrix. This is recommended
+  for analyzing ongoing spontaneous activity.
+
+- Employ a section of continuous raw data collected in the presence
+  of the subject to calculate the full noise covariance matrix. This
+  is the recommended approach for analyzing epileptic activity. The
+  data used for this purpose should be free of technical artifacts
+  and epileptic activity of interest. The length of the data segment
+  employed should be at least 20 seconds. One can also use a long
+  (> 200 s) segment of data with epileptic spikes present provided
+  that the spikes occur infrequently and that the segment is apparently
+  stationary with respect to background brain activity.
+
+The new raw data processing tools, mne_browse_raw and mne_process_raw, include
+computation of noise-covariance matrices both from raw data and
+from individual epochs. For details, see :ref:`ch_browse`.
+
+.. _CIHCFJEI:
+
+Calculating the inverse operator decomposition
+##############################################
+
+The MNE software doesn't calculate the inverse operator
+explicitly but rather computes an SVD of a matrix composed of the
+noise-covariance matrix, the result of the forward calculation,
+and the source covariance matrix. This approach has the benefit
+that the regularization parameter ('SNR') can
+be adjusted easily when the final source estimates or dSPMs are
+computed. For mathematical details of this approach, please consult :ref:`CBBDJFBJ`.
+
+This computation stage is facilitated by the convenience
+script mne_do_inverse_operator . It
+invokes the program mne_inverse_operator with
+appropriate options, derived from the command line of mne_do_inverse_operator .
+
+mne_do_inverse_operator assumes
+the following options:
+
+**\---fwd <*name of the forward solution file*>**
+
+    This is the forward solution file produced in the computation step described
+    in :ref:`BABCHEJD`.
+
+**\---meg**
+
+    Employ MEG data in the inverse calculation. If neither ``--meg`` nor ``--eeg`` is
+    set, only MEG channels are included.
+
+**\---eeg**
+
+    Employ EEG data in the inverse calculation. If neither ``--meg`` nor ``--eeg`` is
+    set, only MEG channels are included.
+
+**\---fixed**
+
+    Use fixed source orientations normal to the cortical mantle. By default,
+    the source orientations are not constrained. If ``--fixed`` is specified,
+    the ``--loose`` flag is ignored.
+
+**\---loose <*amount*>**
+
+    Use a 'loose' orientation constraint. This means
+    that the source covariance matrix entries corresponding to the current
+    component normal to the cortex are set equal to one and the transverse
+    components are set to <*amount*> .
+    The recommended value for <*amount*> is 0.1...0.6.
+
+**\---depth**
+
+    Employ depth weighting with the standard settings. For details,
+    see :ref:`CBBDFJIE` and :ref:`CBBDDBGF`.
+
+**\---bad <*name*>**
+
+    Specifies a text file to designate bad channels, with one channel name
+    (like MEG 1933) on each line of the file. Be sure to include both
+    noisy and flat (non-functioning) channels in the list. If bad channels
+    were designated using mne_mark_bad_channels in
+    the measurement file which was specified with the ``--meas`` option when
+    the forward solution was computed, the bad channel information will
+    be automatically included. Also, any bad channel information in
+    the noise-covariance matrix file will be included.
+
+**\---noisecov <*name*>**
+
+    Name of the noise-covariance matrix file computed with one of the methods
+    described in :ref:`BABDEEEB`. By default, the script looks
+    for a file whose name is derived from the forward solution file
+    by replacing its ending ``-`` <*anything*> ``-fwd.fif`` by ``-cov.fif`` .
+    If this file contains a projection operator, which will be automatically
+    attached to the noise-covariance matrix by mne_browse_raw and mne_process_raw,
+    no ``--proj`` option is necessary because mne_inverse_operator will
+    automatically include the projectors from the noise-covariance matrix
+    file. For backward compatibility, --senscov can be used as a synonym
+    for --noisecov.
+
+**\---noiserank <*value*>**
+
+    Specifies the rank of the noise covariance matrix explicitly rather than
+    trying to reduce it automatically. This option is seldom needed.
+
+**\---megreg <*value*>**
+
+    Regularize the MEG part of the noise-covariance matrix by this amount.
+    Suitable values are in the range 0.05...0.2. For details, see :ref:`CBBHEGAB`.
+
+**\---eegreg <*value*>**
+
+    Like ``--megreg`` but applies to the EEG channels.
+
+**\---diagnoise**
+
+    Omit the off-diagonal terms of the noise covariance matrix. This option
+    is irrelevant to most users.
+
+**\---fmri <*name*>**
+
+    With the help of this w file, an *a priori* weighting
+    can be applied to the source covariance matrix. The source of the weighting
+    is usually fMRI but may be also some other data, provided that the weighting can
+    be expressed as a scalar value on the cortical surface, stored in
+    a w file. It is recommended that this w file is appropriately smoothed (see :ref:`CHDEBAHH`)
+    in mne_analyze, tksurfer, or
+    with mne_smooth_w to contain
+    nonzero values at all vertices of the triangular tessellation of
+    the cortical surface. The name of the file given is used as a stem of
+    the w files. The actual files should be called <*name*> ``-lh.pri`` and <*name*> ``-rh.pri`` for
+    the left and right hemisphere weight files, respectively. The application
+    of the weighting is discussed in :ref:`CBBDIJHI`.
+
+**\---fmrithresh <*value*>**
+
+    This option is mandatory and has an effect only if a weighting function
+    has been specified with the ``--fmri`` option. If the value
+    in the *a priori* files falls below this threshold
+    at a particular source space point, the source covariance matrix
+    values are multiplied by the value specified with the ``--fmrioff`` option
+    (default 0.1). Otherwise it is left unchanged.
+
+**\---fmrioff <*value*>**
+
+    The value by which the source covariance elements are multiplied
+    if the *a priori* weight falls below the threshold
+    set with ``--fmrithresh`` , see above.
+
+**\---srccov <*name*>**
+
+    Use this diagonal source covariance matrix. By default the source covariance
+    matrix is a multiple of the identity matrix. This option is irrelevant
+    to most users.
+
+**\---proj <*name*>**
+
+    Include signal-space projection information from this file.
+
+**\---inv <*name*>**
+
+    Save the inverse operator decomposition here. By default, the script looks
+    for a file whose name is derived from the forward solution file by
+    replacing its ending ``-fwd.fif`` by <*options*> ``-inv.fif``, where
+    <*options*> includes options ``--meg``, ``--eeg``, and ``--fixed`` with the double
+    dashes replaced by single ones.
+
+**\---destdir <*directory*>**
+
+    Optionally specifies a directory where the inverse operator will
+    be stored.
+
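+For example, assuming the default noise-covariance matrix file name
+discussed under ``--noisecov`` above, an MEG-only inverse operator with a
+loose orientation constraint and depth weighting could be computed with
+(the forward solution file name is hypothetical):
+
+``mne_do_inverse_operator --fwd duck_auditory-5-fwd.fif --meg --loose 0.2 --depth``
+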
+.. note:: If bad channels are included in the calculation, strange results may ensue. Therefore, it is recommended that the data to be analyzed is carefully inspected to assign the bad channels correctly.
+
+.. note:: For convenience, the MNE software includes bad-channel designation files which can be used to ignore all magnetometer or all gradiometer channels in Vectorview measurements. These files are called ``vv_grad_only.bad`` and ``vv_mag_only.bad``, respectively. Both files are located in ``$MNE_ROOT/share/mne/templates``.
+
+Analyzing the data
+##################
+
+Once all the preprocessing steps described above have been
+completed, the inverse operator computed can be applied to the MEG
+and EEG data and the results can be viewed and stored in several
+ways:
+
+- The interactive analysis tool mne_analyze can
+  be used to explore the data and to produce quantitative analysis
+  results, screen snapshots, and QuickTime (TM) movie files.
+  For comprehensive information on mne_analyze ,
+  please consult :ref:`ch_interactive_analysis`.
+
+- The command-line tool mne_make_movie can
+  be invoked to produce QuickTime movies and snapshots. mne_make_movie can
+  also output the data in the stc (movies) and w (snapshots) formats
+  for subsequent processing. Furthermore, subject-to-subject morphing
+  is included in mne_make_movie to
+  facilitate cross-subject averaging and comparison of data among
+  subjects. mne_make_movie is described
+  in :ref:`CBBECEDE`.
+
+- The command-line tool mne_make_movie can
+  be employed to interrogate the source estimate waveforms from labels
+  (ROIs).
+
+- The mne_make_movie tool
+  can also be used to create movies from stc files and to resample
+  stc files in time.
+
+- The mne_compute_raw_inverse tool
+  can be used to produce fif files containing source estimates at
+  selected ROIs. The input data file can be either a raw data or evoked
+  response MEG/EEG file, see :ref:`CBBCGHAH`.
+
+- Using the MNE Matlab toolbox, it is possible to perform many
+  of the above operations in Matlab using your own Matlab code based
+  on the MNE Matlab toolbox. For more information on the MNE Matlab
+  toolbox, see :ref:`ch_matlab`.
+
+- It is also possible to average the source estimates across
+  subjects as described in :ref:`ch_morph`.
diff --git a/doc/source/manual/forward.rst b/doc/source/manual/forward.rst
new file mode 100644
index 0000000..eef1946
--- /dev/null
+++ b/doc/source/manual/forward.rst
@@ -0,0 +1,1337 @@
+
+
+.. _ch_forward:
+
+====================
+The forward solution
+====================
+
+Overview
+########
+
+This chapter covers the definitions of the different coordinate
+systems employed in MNE software and FreeSurfer, the details of
+the computation of the forward solutions, and the associated low-level
+utilities.
+
+.. _CHDEDFIB:
+
+MEG/EEG and MRI coordinate systems
+##################################
+
+The coordinate systems used in MNE software (and FreeSurfer)
+and their relationships are depicted in :ref:`CHDFFJIJ`.
+Except for the *Sensor coordinates*, all of the
+coordinate systems are Cartesian and have the "RAS" (Right-Anterior-Superior)
+orientation, *i.e.*, the :math:`x` axis
+points to the right, the :math:`y` axis
+to the front, and the :math:`z` axis up.
+
+.. _CHDFFJIJ:
+
+.. figure:: pics/CoordinateSystems.png
+    :alt: MEG/EEG and MRI coordinate systems
+
+    MEG/EEG and MRI coordinate systems
+
+    The coordinate transforms present in the fif files in MNE and the FreeSurfer files as well as those set to fixed values are indicated with :math:`T_x`, where :math:`x` identifies the transformation.
+
+The coordinate systems related
+to MEG/EEG data are:
+
+**Head coordinates**
+
+    This is a coordinate system defined with help of the fiducial landmarks
+    (nasion and the two auricular points). In fif files, EEG electrode
+    locations are given in this coordinate system. In addition, the head
+    digitization data acquired in the beginning of an MEG, MEG/EEG,
+    or EEG acquisition are expressed in head coordinates. For details,
+    see :ref:`CHDEDFIB`.
+
+**Device coordinates**
+
+    This is a coordinate system tied to the MEG device. The relationship
+    of the Device and Head coordinates is determined during an MEG measurement
+    by feeding current to three to five head-position
+    indicator (HPI) coils and by determining their locations with respect
+    to the MEG sensor array from the magnetic fields they generate.
+
+**Sensor coordinates**
+
+    Each MEG sensor has a local coordinate system defining the orientation
+    and location of the sensor. With help of this coordinate system,
+    the numerical integration data needed for the computation of the
+    magnetic field can be expressed conveniently as discussed in :ref:`BJEIAEIE`. The channel information data in the fif files
+    contain the information to specify the coordinate transformation
+    between the coordinates of each sensor and the MEG device coordinates.
+
+The coordinate systems related
+to MRI data are:
+
+**Surface RAS coordinates**
+
+    The FreeSurfer surface data are expressed in this coordinate system. The
+    origin of this coordinate system is at the center of the conformed
+    FreeSurfer MRI volumes (usually 256 x 256 x 256 isotropic 1-mm3 voxels)
+    and the axes are oriented along the axes of this volume. The BEM
+    surface and the locations of the sources in the source space are
+    usually expressed in this coordinate system in the fif files. In
+    this manual, the *Surface RAS coordinates* are
+    usually referred to as *MRI coordinates* unless
+    there is need to specifically discuss the different MRI-related
+    coordinate systems.
+
+**RAS coordinates**
+
+    This coordinate system has axes identical to the Surface RAS coordinates but the location of the origin
+    is different and defined by the original MRI data, *i.e.*,
+    the origin is in a scanner-dependent location. There is hardly any
+    need to refer to this coordinate system explicitly in the analysis
+    with the MNE software. However, since the Talairach coordinates,
+    discussed below, are defined with respect to *RAS coordinates* rather
+    than the *Surface RAS coordinates*, the RAS coordinate
+    system is implicitly involved in the transformation between Surface RAS coordinates and the two *Talairach* coordinate
+    systems.
+
+**MNI Talairach coordinates**
+
+    The definition of this coordinate system is discussed, *e.g.*,
+    in http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach. This
+    transformation is determined during the FreeSurfer reconstruction
+    process.
+
+**FreeSurfer Talairach coordinates**
+
+    The problem with the MNI Talairach coordinates is that the linear MNI
+    Talairach transform does not match the brains completely to the Talairach
+    brain. This is probably because the Talairach atlas brain is a rather
+    odd shape, and as a result, it is difficult to match a standard brain
+    to the atlas brain using an affine transform. As a result, the MNI
+    brains are slightly larger (in particular higher, deeper and longer)
+    than the Talairach brain. The differences are larger as you get
+    further from the middle of the brain, towards the outside. The FreeSurfer
+    Talairach coordinates mitigate this problem by adding an additional
+    transformation, defined separately for negative and positive MNI
+    Talairach :math:`z` coordinates. These two
+    transformations, denoted by :math:`T_-` and :math:`T_+` in :ref:`CHDFFJIJ`, are fixed as discussed in http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach
+    (*Approach 2*).
+
+The different coordinate systems are related by coordinate
+transformations depicted in :ref:`CHDFFJIJ`. The arrows and
+coordinate transformation symbols (:math:`T_x`)
+indicate the transformations actually present in the FreeSurfer
+files. Generally,
+
+.. math::    \begin{bmatrix}
+		x_2 \\
+		y_2 \\
+		z_2 \\
+		1
+	        \end{bmatrix} = T_{12} \begin{bmatrix}
+		x_1 \\
+		y_1 \\
+		z_1 \\
+		1
+	        \end{bmatrix} = \begin{bmatrix}
+		R_{11} & R_{12} & R_{13} & x_0 \\
+		R_{21} & R_{22} & R_{23} & y_0 \\
+		R_{31} & R_{32} & R_{33} & z_0 \\
+		0 & 0 & 0 & 1
+	        \end{bmatrix} \begin{bmatrix}
+		x_1 \\
+		y_1 \\
+		z_1 \\
+		1
+	        \end{bmatrix}\ ,
+
+where :math:`x_k`, :math:`y_k`, and :math:`z_k` are the location
+coordinates in two coordinate systems, :math:`T_{12}` is
+the coordinate transformation from coordinate system "1" to "2",
+:math:`x_0`, :math:`y_0`, and :math:`z_0` give the location of the origin
+of coordinate system "1" in coordinate system "2",
+and :math:`R_{jk}` are the elements of the rotation
+matrix relating the two coordinate systems. The coordinate transformations
+are present in different files produced by FreeSurfer and MNE as
+summarized in :ref:`CHDJDEDJ`. The fixed transformations :math:`T_-` and :math:`T_+` are:
+
+.. math::    T_{-} = \begin{bmatrix}
+		0.99 & 0 & 0 & 0 \\
+		0 & 0.9688 & 0.042 & 0 \\
+		0 & -0.0485 & 0.839 & 0 \\
+		0 & 0 & 0 & 1
+	        \end{bmatrix}
+
+and
+
+.. math::    T_{+} = \begin{bmatrix}
+		0.99 & 0 & 0 & 0 \\
+		0 & 0.9688 & 0.046 & 0 \\
+		0 & -0.0485 & 0.9189 & 0 \\
+		0 & 0 & 0 & 1
+	        \end{bmatrix}
+
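+As a worked example (values rounded to one decimal place and assuming,
+as described above, that these transformations map MNI Talairach
+coordinates to FreeSurfer Talairach coordinates), a point at
+:math:`(10, -20, -30)` mm has :math:`z < 0` and is therefore mapped
+with :math:`T_-`:
+
+.. math::    \begin{bmatrix}
+		0.99 & 0 & 0 & 0 \\
+		0 & 0.9688 & 0.042 & 0 \\
+		0 & -0.0485 & 0.839 & 0 \\
+		0 & 0 & 0 & 1
+	        \end{bmatrix} \begin{bmatrix}
+		10 \\
+		-20 \\
+		-30 \\
+		1
+	        \end{bmatrix} = \begin{bmatrix}
+		9.9 \\
+		-20.6 \\
+		-24.2 \\
+		1
+	        \end{bmatrix}\ .
+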
+.. note:: This section does not discuss the transformation between the MRI voxel indices and the different MRI coordinates. However, it is important to note that in FreeSurfer, MNE, as well as in Neuromag software an integer voxel coordinate corresponds to the location of the center of a voxel. Detailed information on the FreeSurfer MRI systems can be found at https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems.
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.3\linewidth}|p{0.5\linewidth}|
+.. _CHDJDEDJ:
+.. table:: Coordinate transformations in FreeSurfer and MNE software packages. The symbols :math:`T_x` are defined in :ref:`CHDFFJIJ`. Note: mne_make_cor_set/mne_setup_mri prior to release 2.6 did not include transformations :math:`T_3`, :math:`T_4`, :math:`T_-`, and :math:`T_+` in the fif files produced.
+
+    +------------------------------+-------------------------------+--------------------------------------+
+    | Transformation               | FreeSurfer                    | MNE                                  |
+    +------------------------------+-------------------------------+--------------------------------------+
+    | :math:`T_1`                  | Not present                   | | Measurement data files             |
+    |                              |                               | | Forward solution files (`*fwd.fif`)|
+    |                              |                               | | Inverse operator files (`*inv.fif`)|
+    +------------------------------+-------------------------------+--------------------------------------+
+    | :math:`T_{s_1}\dots T_{s_n}` | Not present                   | Channel information in files         |
+    |                              |                               | containing :math:`T_1`.              |
+    +------------------------------+-------------------------------+--------------------------------------+
+    | :math:`T_2`                  | Not present                   | | MRI description files              |
+    |                              |                               | | Separate coordinate transformation |
+    |                              |                               | | files saved from mne_analyze       |
+    |                              |                               | | Forward solution files             |
+    |                              |                               | | Inverse operator files             |
+    +------------------------------+-------------------------------+--------------------------------------+
+    | :math:`T_3`                  | `mri/*mgz` files              | MRI description files saved with     |
+    |                              |                               | mne_make_cor_set if the input is in  |
+    |                              |                               | mgz or mgh format.                   |
+    +------------------------------+-------------------------------+--------------------------------------+
+    | :math:`T_4`                  | mri/transforms/talairach.xfm  | MRI description files saved with     |
+    |                              |                               | mne_make_cor_set if the input is in  |
+    |                              |                               | mgz or mgh format.                   |
+    +------------------------------+-------------------------------+--------------------------------------+
+    | :math:`T_-`                  | Hardcoded in software         | MRI description files saved with     |
+    |                              |                               | mne_make_cor_set if the input is in  |
+    |                              |                               | mgz or mgh format.                   |
+    +------------------------------+-------------------------------+--------------------------------------+
+    | :math:`T_+`                  | Hardcoded in software         | MRI description files saved with     |
+    |                              |                               | mne_make_cor_set if the input is in  |
+    |                              |                               | mgz or mgh format.                   |
+    +------------------------------+-------------------------------+--------------------------------------+
+
+.. _BJEBIBAI:
+
+The head and device coordinate systems
+######################################
+
+.. figure:: pics/HeadCS.png
+    :alt: Head coordinate system
+
+    The head coordinate system
+
+The MEG/EEG head coordinate system employed in the MNE software
+is a right-handed Cartesian coordinate system. The direction of the :math:`x` axis
+is from left to right, that of the :math:`y` axis
+to the front, and the :math:`z` axis thus
+points up.
+
+The :math:`x` axis of the head coordinate
+system passes through the two periauricular or preauricular points
+digitized before acquiring the data with positive direction to the
+right. The :math:`y` axis passes through
+the nasion and is normal to the :math:`x` axis.
+The :math:`z` axis points up according to
+the right-hand rule and is normal to the :math:`xy` plane.
+
+The origin of the MEG device coordinate system is device
+dependent. Its origin is located approximately at the center of
+a sphere which fits the occipital section of the MEG helmet best
+with the :math:`x` axis going from left to right
+and the :math:`y` axis pointing to the front. The :math:`z` axis
+is, again, normal to the :math:`xy` plane
+with positive direction up.
+
+.. note:: The above definition is identical to that of the Neuromag MEG/EEG (head) coordinate system. However, in 4-D Neuroimaging and CTF MEG systems the head coordinate frame definition is different. The origin of the coordinate system is at the midpoint of the left and right auricular points. The :math:`x` axis passes through the nasion and the origin with positive direction to the front. The :math:`y` axis is perpendicular to the :math:`x` axis and lies in [...]
+
+.. _BEHCGJDD:
+
+Creating a surface-based source space
+#####################################
+
+The fif format source space files containing the dipole locations
+and orientations are created with the utility mne_make_source_space.
+This utility is usually invoked by the convenience script mne_setup_source_space,
+see :ref:`CIHCHDAE`.
+
+The command-line options are (an example invocation is given at the
+end of this section):
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---subject <*name*>**
+
+    Name of the subject in SUBJECTS_DIR. In the absence of this option,
+    the SUBJECT environment variable will be consulted. If it is not
+    defined, mne_setup_source_space exits
+    with an error.
+
+**\---morph <*name*>**
+
+    Name of a subject in SUBJECTS_DIR. If this option is present, the source
+    space will be first constructed for the subject defined by the --subject
+    option or the SUBJECT environment variable and then morphed to this
+    subject. This option is useful if you want to create source spaces
+    for several subjects and want to directly compare the data across
+    subjects at the source space vertices without any morphing procedure
+    afterwards. The drawback of this approach is that the spacing between
+    source locations in the "morph" subject is not going
+    to be as uniform as it would be without morphing.
+
+**\---surf <*name1*>: <*name2*>:...**
+
+    FreeSurfer surface file names specifying the source surfaces, separated
+    by colons.
+
+**\---spacing <*spacing/mm*>**
+
+    Specifies the approximate grid spacing of the source space in mm.
+
+**\---ico <*number*>**
+
+    Instead of using the traditional method for cortical surface decimation
+    it is possible to create the source space using the topology of
+    a recursively subdivided icosahedron (<*number*> > 0)
+    or an octahedron (<*number*> < 0).
+    This method uses the cortical surface inflated to a sphere as a
+    tool to find the appropriate vertices for the source space. The
+    benefit of the ``--ico`` option is that the source space will have triangulation
+    information between the decimated vertices included, which some
+    future versions of MNE software may be able to utilize. The number
+    of triangles increases by a factor of four in each subdivision,
+    starting from 20 triangles in an icosahedron and 8 triangles in
+    an octahedron. Since the number of vertices on a closed surface
+    is :math:`n_{vert} = (n_{tri} + 4) / 2`, the numbers of vertices in
+    the *k* th subdivision of an icosahedron and an
+    octahedron are :math:`10 \cdot 4^k + 2` and :math:`4^{k + 1} + 2`,
+    respectively. The recommended values for <*number*> and
+    the corresponding number of source space locations are listed in Table 3.1.
+
+**\---all**
+
+    Include all nodes in the output. The active dipole nodes are identified
+    in the fif file by a separate tag. If tri files were used as input,
+    the output file will also contain information about the surface
+    triangulation. Using this option is always recommended because it
+    includes complete information.
+
+**\---src <*name*>**
+
+    Output file name. Use a name of the form <*dir*>/<*name*>-src.fif.
+
+.. note:: If both ``--ico`` and ``--spacing`` options are present, the one occurring later on the command line takes precedence.
+
+.. note:: Due to the differences between the FreeSurfer and MNE libraries, the number of source space points generated with the ``--spacing`` option may be different between the current version of MNE and versions 2.5 or earlier (using the ``--spacing`` option to mne_setup_source_space) if the FreeSurfer surfaces employ the (old) quadrangle format or if there are topological defects on the surfaces. All new FreeSurfer surfaces are specified as triangular tessellations a [...]
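+
+As an example, the following command creates a source space for a
+subject named ``sample`` (a hypothetical name) using the sixth
+subdivision of an octahedron::
+
+    mne_setup_source_space --subject sample --ico -6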
+
+.. _BJEFEHJI:
+
+Creating a volumetric or discrete source space
+##############################################
+
+In addition to source spaces confined to a surface, the MNE
+software provides some support for three-dimensional source spaces
+bounded by a surface as well as source spaces comprised of discrete,
+arbitrarily located source points. The mne_volume_source_space utility
+assists in generating such source spaces.
+
+The command-line options are (an example invocation is given at the
+end of this section):
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---surf <*name*>**
+
+    Specifies a FreeSurfer surface file containing the surface which
+    will be used as the boundary for the source space.
+
+**\---bem <*name*>**
+
+    Specifies a BEM file (ending in ``-bem.fif`` ). The inner
+    skull surface will be used as the boundary for the source space.
+
+**\---origin <*x/mm*> : <*y/mm*> : <*z/mm*>**
+
+    If neither of the two surface options described above is present,
+    the source space will be spherical with the origin at this location,
+    given in MRI (RAS) coordinates.
+
+**\---rad <*radius/mm*>**
+
+    Specifies the radius of a spherical source space. Default value
+    = 90 mm.
+
+**\---grid <*spacing/mm*>**
+
+    Specifies the grid spacing in the source space.
+
+**\---mindist <*distance/mm*>**
+
+    Only points which are further than this distance from the bounding surface
+    are included. Default value = 5 mm.
+
+**\---exclude <*distance/mm*>**
+
+    Exclude points that are closer than this distance to the center
+    of mass of the bounding surface. By default, there will be no exclusion.
+
+**\---mri <*name*>**
+
+    Specifies an MRI volume (in mgz or mgh format).
+    If this argument is present the output source space file will contain
+    a (sparse) interpolation matrix which allows mne_volume_data2mri to
+    create an MRI overlay file, see :ref:`BEHDEJEC`.
+
+**\---pos <*name*>**
+
+    Specifies a name of a text file containing the source locations
+    and, optionally, orientations. Each line of the file should contain
+    3 or 6 values. If the number of values is 3, they indicate the source
+    location, in millimeters. The orientation of the sources will be
+    set to the z-direction. If the number of values is 6, the source
+    orientation will be parallel to the vector defined by the remaining
+    3 numbers on each line. With ``--pos``, all of the options
+    defined above will be ignored. By default, the source position and
+    orientation data are assumed to be given in MRI coordinates.
+
+**\---head**
+
+    If this option is present, the source locations and orientations
+    in the file specified with the ``--pos`` option are assumed
+    to be given in the MEG head coordinates.
+
+**\---meters**
+
+    Indicates that the source locations in the file defined with the ``--pos`` option
+    are given in meters instead of millimeters.
+
+**\---src <*name*>**
+
+    Specifies the output file name. Use a name of the form <*dir*>/<*name*>-src.fif.
+
+**\---all**
+
+    Include all vertices in the output file, not just those in use.
+    This option is implied when the ``--mri`` option is present.
+    Even with the ``--all`` option, only those vertices actually
+    selected will be marked to be "in use" in the
+    output source space file.
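+
+As an example, the following command (file names are hypothetical)
+creates a volumetric source space with a 10-mm grid, bounded by the
+inner skull surface of a BEM::
+
+    mne_volume_source_space --bem sample-bem.fif --grid 10 --mindist 5 --src sample-vol-src.fif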
+
+.. _BEHCACCJ:
+
+Creating the BEM meshes
+#######################
+
+The mne_surf2bem utility
+converts surface triangle meshes from ASCII and FreeSurfer binary
+file formats to the fif format. The resulting fif file also contains
+conductivity information so that it can be employed in the BEM calculations.
+
+.. note:: The utility mne_tri2fiff previously used for this task has been replaced by mne_surf2bem.
+
+.. note:: The convenience script mne_setup_forward_model described in :ref:`CIHDBFEG` calls mne_surf2bem with the appropriate options.
+
+.. note:: The vertices of all surfaces should be given in the MRI coordinate system.
+
+Command-line options
+====================
+
+This program has the following
+command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---surf <*name*>**
+
+    Specifies a FreeSurfer binary format surface file. Before specifying the
+    next surface (``--surf`` or ``--tri`` options)
+    details of the surface specification can be given with the options
+    listed in :ref:`BEHCDICC`.
+
+**\---tri <*name*>**
+
+    Specifies a text format surface file. Before specifying the next
+    surface (``--surf`` or ``--tri`` options) details
+    of the surface specification can be given with the options listed
+    in :ref:`BEHCDICC`. The format of these files is described
+    in :ref:`BEHDEFCD`.
+
+**\---check**
+
+    Check that the surfaces are complete and that they do not intersect. This
+    is a recommended option. For more information, see :ref:`BEHCBDDE`.
+
+**\---checkmore**
+
+    In addition to the checks implied by the ``--check`` option,
+    check skull and skull thicknesses. For more information, see :ref:`BEHCBDDE`.
+
+**\---fif <*name*>**
+
+    The output fif file containing the BEM. These files normally reside in
+    the bem subdirectory under the subject's mri data. A name
+    ending with ``-bem.fif`` is recommended.
+
+.. _BEHCDICC:
+
+Surface options
+===============
+
+These options can be specified after each ``--surf`` or ``--tri`` option
+to define details for the corresponding surface. A complete invocation
+example is given at the end of this list.
+
+**\---swap**
+
+    Swap the ordering of the triangle vertices. The standard convention in
+    the MNE software is to have the vertices ordered so that the vector
+    cross product of the vectors from vertex 1 to 2 and 1 to 3 gives the
+    direction of the outward surface normal. Text format triangle files
+    produced by some software packages have an opposite order. For
+    these files, the ``--swap`` option is required. This option does
+    not have any effect on the interpretation of the FreeSurfer surface
+    files specified with the ``--surf`` option.
+
+**\---sigma <*value*>**
+
+    The conductivity of the compartment inside this surface in S/m.
+
+**\---shift <*value/mm*>**
+
+    Shift the vertices of this surface by this amount, given in mm,
+    in the outward direction, *i.e.*, in the positive
+    vertex normal direction.
+
+**\---meters**
+
+    The vertex coordinates of this surface are given in meters instead
+    of millimeters. This option applies to text format files only. This
+    definition does not affect the units of the shift option.
+
+**\---id <*number*>**
+
+    Identification number to assign to this surface. (1 = inner skull, 3
+    = outer skull, 4 = scalp).
+
+**\---ico <*number*>**
+
+    Downsample the surface to the designated subdivision of an icosahedron.
+    This option is relevant (and required) only if the triangulation
+    is isomorphic with a recursively subdivided icosahedron. For example,
+    the surfaces produced by mri_watershed are
+    isomorphic with the 5th subdivision of an icosahedron, thus containing 20480
+    triangles. However, this number of triangles is too large for present
+    computers. Therefore, the triangulations have to be decimated. Specifying ``--ico 4`` yields 5120 triangles per surface while ``--ico 3`` results
+    in 1280 triangles. The recommended choice is ``--ico 4``.
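+
+As an example, the following command (file names are hypothetical, and
+the conductivities are the values conventionally used for the three
+compartments) creates a three-layer BEM file::
+
+    mne_surf2bem --surf inner_skull.surf --id 1 --sigma 0.3 \
+                 --surf outer_skull.surf --id 3 --sigma 0.006 \
+                 --surf outer_skin.surf --id 4 --sigma 0.3 \
+                 --check --fif sample-bem.fif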
+
+.. _BEHDEFCD:
+
+Tessellation file format
+========================
+
+The text format surface files have the following structure (a complete
+minimal example is given at the end of this section):
+
+  | <*nvert*>
+  | <*vertex 1*>
+  | <*vertex 2*>
+  | ...
+  | <*vertex nvert*>
+  | <*ntri*>
+  | <*triangle 1*>
+  | <*triangle 2*>
+  | ...
+  | <*triangle ntri*>,
+
+where <*nvert*> and <*ntri*> are
+the number of vertices and number of triangles in the tessellation,
+respectively.
+
+The format of a vertex entry is
+one of the following:
+
+**x y z**
+
+    The x, y, and z coordinates of the vertex location are given in
+    mm.
+
+**number x y z**
+
+    A running number and the x, y, and z coordinates are given. The running
+    number is ignored by mne_surf2bem. The nodes must thus be
+    listed in the correct consecutive order.
+
+**x y z nx ny nz**
+
+    The x, y, and z coordinates as well as the approximate vertex normal direction
+    cosines are given.
+
+**number x y z nx ny nz**
+
+    A running number is given in addition to the vertex location and vertex
+    normal.
+
+Each triangle entry consists of the numbers of the vertices
+belonging to a triangle. The vertex numbering starts from one. The
+triangle list may also contain running numbers on each line describing
+a triangle.
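+
+For illustration, the following is a complete (hypothetical) tessellation
+file describing a tetrahedron which spans 100 mm along each coordinate
+axis, with the vertices of each triangle ordered to yield outward-pointing
+surface normals::
+
+    4
+    0.0 0.0 0.0
+    100.0 0.0 0.0
+    0.0 100.0 0.0
+    0.0 0.0 100.0
+    4
+    2 3 4
+    1 3 2
+    1 2 4
+    1 4 3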
+
+.. _BEHCBDDE:
+
+Topology checks
+===============
+
+If the ``--check`` option is specified, the following
+topology checks are performed:
+
+- The completeness of each surface is
+  confirmed by calculating the total solid angle subtended by all
+  triangles from a point inside the triangulation. The result should
+  be very close to :math:`4 \pi`. If the result
+  is :math:`-4 \pi` instead, it is conceivable
+  that the ordering of the triangle vertices is incorrect and the
+  ``--swap`` option should be specified.
+
+- The correct ordering of the surfaces is verified by checking
+  that the surfaces are inside each other as expected. This is accomplished
+  by checking that the sum of the solid angles subtended by triangles of
+  a surface :math:`S_k` at all vertices of another
+  surface :math:`S_p` which is supposed to be
+  inside it equals :math:`4 \pi`. Naturally, this
+  check is applied only if the model has more than one surface. Since
+  the surface relations are transitive, it is enough to check that
+  the outer skull surface is inside the skin surface and that the
+  inner skull surface is inside the outer skull one.
+
+- The extent of each of the triangulated volumes is checked.
+  If the extent is smaller than 50 mm, an error is reported. This
+  may indicate that the vertex coordinates have been specified in
+  meters instead of millimeters.
+
+.. _CHDJFHEB:
+
+Computing the BEM geometry data
+###############################
+
+The utility mne_prepare_bem_model computes
+the geometry information for the BEM. This utility is usually invoked
+by the convenience script mne_setup_forward_model,
+see :ref:`CIHDBFEG`. The command-line options are (an example
+invocation is given at the end of this section):
+
+**\---bem <*name*>**
+
+    Specify the name of the file containing the triangulations of the BEM
+    surfaces and the conductivities of the compartments. The standard
+    ending for this file is ``-bem.fif`` and it is produced
+    either with the utility mne_surf2bem (:ref:`BEHCACCJ`) or the convenience script mne_setup_forward_model,
+    see :ref:`CIHDBFEG`.
+
+**\---sol <*name*>**
+
+    Specify the name of the file containing the triangulation and conductivity
+    information together with the BEM geometry matrix computed by mne_prepare_bem_model.
+    The standard ending for this file is ``-bem-sol.fif``.
+
+**\---method <*approximation method*>**
+
+    Select the BEM approach. If <*approximation method*> is ``constant``,
+    the BEM basis functions are constant functions on each triangle
+    and the collocation points are the midpoints of the triangles. With ``linear``,
+    the BEM basis functions are linear functions on each triangle and
+    the collocation points are the vertices of the triangulation. This
+    is the preferred method to use. The accuracy will be the same or
+    better than in the constant collocation approach with about half
+    the number of unknowns in the BEM equations.
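+
+As an example, the following command (file names are hypothetical)
+computes the linear collocation solution for an existing BEM file::
+
+    mne_prepare_bem_model --bem sample-bem.fif --sol sample-bem-sol.fif --method linear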
+
+.. _BJEIAEIE:
+
+Coil geometry information
+#########################
+
+This section explains the presentation of MEG detection coil
+geometry information and the approximations used for different detection
+coils in the MNE software. Two pieces of information are needed to characterize
+the detectors:
+
+- The location and orientation of a local
+  coordinate system for each detector.
+
+- A unique identifier, which has a one-to-one correspondence
+  to the geometrical description of the coil.
+
+The sensor coordinate system
+============================
+
+The sensor coordinate system is completely characterized
+by the location of its origin and the direction cosines of three
+orthogonal unit vectors pointing to the directions of the x, y,
+and z axes. In fact, the unit vectors contain redundant information
+because the orientation can be uniquely defined with three angles.
+The measurement fif files list these data in MEG device coordinates.
+Transformation to the MEG head coordinate frame can be easily accomplished
+by applying the device-to-head coordinate transformation matrix
+available in the data files provided that the head-position indicator
+was used. Optionally, the MNE software forward calculation applies
+another coordinate transformation to the head-coordinate data to
+bring the coil locations and orientations to the MRI coordinate system.
+
+If :math:`r_{0D}` is a row vector for
+the origin of the local sensor coordinate system and :math:`e_x`, :math:`e_y`, and :math:`e_z` are the row vectors for the
+three orthogonal unit vectors, all given in device coordinates,
+the location of a point :math:`r_C` in sensor coordinates
+is transformed to device coordinates (:math:`r_D`)
+by
+
+.. math::    [r_D 1] = [r_C 1] T_{CD}\ ,
+
+where
+
+.. math::    T_{CD} = \begin{bmatrix}
+		e_x & 0 \\
+		e_y & 0 \\
+		e_z & 0 \\
+		r_{0D} & 1
+	        \end{bmatrix}\ .
+
+Calculation of the magnetic field
+=================================
+
+The forward calculation in the MNE software computes the
+signals detected by each MEG sensor for three orthogonal dipoles
+at each source space location. This requires specification of the
+conductor model, the location and orientation of the dipoles, and
+the location and orientation of each MEG sensor as well as its coil
+geometry.
+
+The output of each SQUID sensor is a weighted sum of the
+magnetic fluxes threading the loops comprising the detection coil.
+Since the flux threading a coil loop is an integral of the magnetic
+field component normal to the coil plane, the output of the :math:`k^{th}`
+MEG channel, :math:`b_k`, can be approximated by:
+
+.. math::    b_k = \sum_{p = 1}^{N_k} {w_{kp} B(r_{kp}) \cdot n_{kp}}
+
+where :math:`r_{kp}` are a set of :math:`N_k` integration
+points covering the pickup coil loops of the sensor, :math:`B(r_{kp})` is
+the magnetic field due to the current sources calculated at :math:`r_{kp}`, :math:`n_{kp}` are
+the coil normal directions at these points, and :math:`w_{kp}` are
+the weights associated with the integration points. This formula essentially
+presents a numerical integration of the magnetic field over the pickup
+loops of sensor :math:`k`.
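+
+For example, with the *normal* description of the Neuromag-122 planar
+gradiometer (coil id 2 in :ref:`BGBBHGEC`: two integration points at
+:math:`(\pm 8.1, 0, 0)` mm with weights :math:`\pm 1/16.2 \textrm{mm}`),
+the sum reduces to a two-point difference
+
+.. math::    b_k = \frac{B(r_{k1}) \cdot n_{k1} - B(r_{k2}) \cdot n_{k2}}{16.2 \textrm{mm}}\ ,
+
+which directly approximates the gradient of the normal field component
+along the :math:`x` axis of the sensor, expressed in T/m.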
+
+There are three accuracy levels for the numerical integration
+expressed above. The *simple* accuracy means
+the simplest description of the coil. This accuracy is not used
+in the MNE forward calculations. The *normal* or *recommended* accuracy typically uses
+two integration points for planar gradiometers, one in each half
+of the pickup coil and four evenly distributed integration points
+for magnetometers. This is the default accuracy used by MNE. If
+the ``--accurate`` option is specified, the forward calculation typically employs
+a total of eight integration points for planar gradiometers and
+sixteen for magnetometers. Detailed information about the integration
+points is given in the next section.
+
+Implemented coil geometries
+===========================
+
+This section describes the coil geometries currently implemented
+in the MNE software. The coil types fall in two general categories:
+
+- Axial gradiometers and magnetometers,
+  and
+
+- Planar gradiometers.
+
+For axial sensors, the *z* axis of the
+local coordinate system is parallel to the field component detected, *i.e.*,
+normal to the coil plane. For circular coils, the orientation of
+the *x* and *y* axes on the
+plane normal to the z axis is irrelevant. In the square coils employed
+in the Vectorview (TM) system the *x* axis
+is chosen to be parallel to one of the sides of the magnetometer
+coil. For planar sensors, the *z* axis is likewise
+normal to the coil plane and the x axis passes through the centerpoints
+of the two coil loops so that the detector gives a positive signal
+when the normal field component increases along the *x* axis.
+
+:ref:`BGBBHGEC` lists the parameters of the *normal* coil
+geometry descriptions while :ref:`CHDBDFJE` lists the *accurate* descriptions. For simple accuracy,
+please consult the coil definition file, see :ref:`BJECIGEB`.
+The columns of the tables contain the following data:
+
+- The number identifying the coil (coil id).
+  This number is used in the coil descriptions found in the FIF files.
+
+- Description of the coil.
+
+- Number of integration points used.
+
+- The locations of the integration points in sensor coordinates.
+
+- Weights assigned to the field values at the integration points.
+  Some formulas are listed instead of the numerical values to demonstrate
+  the principle of the calculation. For example, in the normal coil
+  descriptions of the planar gradiometers the weights are inverses
+  of the baseline of the gradiometer to show that the output is in
+  T/m.
+
+.. note:: The coil geometry information is stored in the file $MNE_ROOT/share/mne/coil_def.dat, which is automatically created by the utility mne_list_coil_def, see :ref:`BJEHHJIJ`.
+
+
+.. tabularcolumns:: |p{0.1\linewidth}|p{0.3\linewidth}|p{0.1\linewidth}|p{0.25\linewidth}|p{0.2\linewidth}|
+.. _BGBBHGEC:
+.. table:: Normal coil descriptions. Note: If a plus-minus sign occurs in several coordinates, all possible combinations have to be included.
+
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | Id   | Description             | n  | r/mm                             | w                    |
+    +======+=========================+====+==================================+======================+
+    | 2    | Neuromag-122            | 2  | (+/-8.1, 0, 0) mm                | +/-1 ⁄ 16.2mm        | 
+    |      | planar gradiometer      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 2000 | A point magnetometer    | 1  | (0, 0, 0)mm                      | 1                    |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3012 | Vectorview type 1       | 2  | (+/-8.4, 0, 0.3) mm              | +/-1 ⁄ 16.8mm        |
+    |      | planar gradiometer      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3013 | Vectorview type 2       | 2  | (+/-8.4, 0, 0.3) mm              | +/-1 ⁄ 16.8mm        |
+    |      | planar gradiometer      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3022 | Vectorview type 1       | 4  | (+/-6.45, +/-6.45, 0.3)mm        | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3023 | Vectorview type 2       | 4  | (+/-6.45, +/-6.45, 0.3)mm        | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3024 | Vectorview type 3       | 4  | (+/-5.25, +/-5.25, 0.3)mm        | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4001 | Magnes WH               | 4  | (+/-5.75, +/-5.75, 0.0)mm        | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4002 | Magnes WH 3600          | 8  | (+/-4.5, +/-4.5, 0.0)mm          | 1/4                  |
+    |      | axial gradiometer       |    | (+/-4.5, +/-4.5, 50.0)mm         | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4003 | Magnes reference        | 4  | (+/-7.5, +/-7.5, 0.0)mm          | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4004 | Magnes reference        | 8  | (+/-20, +/-20, 0.0)mm            | 1/4                  |
+    |      | gradiometer measuring   |    | (+/-20, +/-20, 135)mm            | -1/4                 |
+    |      | diagonal gradients      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4005 | Magnes reference        | 8  | (87.5, +/-20, 0.0)mm             | 1/4                  |
+    |      | gradiometer measuring   |    | (47.5, +/-20, 0.0)mm             | -1/4                 |
+    |      | off-diagonal gradients  |    | (-87.5, +/-20, 0.0)mm            | 1/4                  |
+    |      |                         |    | (-47.5, +/-20, 0.0)mm            | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 5001 | CTF 275 axial           | 8  | (+/-4.5, +/-4.5, 0.0)mm          | 1/4                  |
+    |      | gradiometer             |    | (+/-4.5, +/-4.5, 50.0)mm         | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 5002 | CTF reference           | 4  | (+/-4, +/-4, 0.0)mm              | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 5003 | CTF reference           | 8  | (+/-8.6, +/-8.6, 0.0)mm          | 1/4                  |
+    |      | gradiometer measuring   |    | (+/-8.6, +/-8.6, 78.6)mm         | -1/4                 |
+    |      | diagonal gradients      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+
+.. tabularcolumns:: |p{0.1\linewidth}|p{0.3\linewidth}|p{0.05\linewidth}|p{0.25\linewidth}|p{0.15\linewidth}|
+.. _CHDBDFJE:
+.. table:: Accurate coil descriptions
+
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | Id   | Description             | n  | r/mm                             | w                    |
+    +======+=========================+====+==================================+======================+
+    | 2    | Neuromag-122 planar     | 8  | +/-(8.1, 0, 0) mm                | +/-1 ⁄ 16.2mm        |
+    |      | gradiometer             |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 2000 | A point magnetometer    | 1  | (0, 0, 0) mm                     | 1                    |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3012 | Vectorview type 1       | 2  | (+/-8.4, 0, 0.3) mm              | +/-1 ⁄ 16.8mm        |
+    |      | planar gradiometer      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3013 | Vectorview type 2       | 2  | (+/-8.4, 0, 0.3) mm              | +/-1 ⁄ 16.8mm        |
+    |      | planar gradiometer      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3022 | Vectorview type 1       | 4  | (+/-6.45, +/-6.45, 0.3)mm        | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3023 | Vectorview type 2       | 4  | (+/-6.45, +/-6.45, 0.3)mm        | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 3024 | Vectorview type 3       | 4  | (+/-5.25, +/-5.25, 0.3)mm        | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4001 | Magnes WH magnetometer  | 4  | (+/-5.75, +/-5.75, 0.0)mm        | 1/4                  |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4002 | Magnes WH 3600          | 8  | (+/-4.5, +/-4.5, 0.0)mm          | 1/4                  |
+    |      | axial gradiometer       |    | (+/-4.5, +/-4.5, 50.0)mm         | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4004 | Magnes reference        | 8  | (+/-20, +/-20, 0.0)mm            | 1/4                  |
+    |      | gradiometer measuring   |    | (+/-20, +/-20, 135)mm            | -1/4                 |
+    |      | diagonal gradients      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 4005 | Magnes reference        | 8  | (87.5, +/-20, 0.0)mm             | 1/4                  |
+    |      | gradiometer measuring   |    | (47.5, +/-20, 0.0)mm             | -1/4                 |
+    |      | off-diagonal gradients  |    | (-87.5, +/-20, 0.0)mm            | 1/4                  |
+    |      |                         |    | (-47.5, +/-20, 0.0)mm            | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 5001 | CTF 275 axial           | 8  | (+/-4.5, +/-4.5, 0.0)mm          | 1/4                  |
+    |      | gradiometer             |    | (+/-4.5, +/-4.5, 50.0)mm         | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 5002 | CTF reference           | 4  | (+/-4, +/-4, 0.0)mm              | 1/4                  |
+    |      | magnetometer            |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 5003 | CTF 275 reference       | 8  | (+/-8.6, +/-8.6, 0.0)mm          | 1/4                  |
+    |      | gradiometer measuring   |    | (+/-8.6, +/-8.6, 78.6)mm         | -1/4                 |
+    |      | diagonal gradients      |    |                                  |                      |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 5004 | CTF 275 reference       | 8  | (47.8, +/-8.5, 0.0)mm            | 1/4                  |
+    |      | gradiometer measuring   |    | (30.8, +/-8.5, 0.0)mm            | -1/4                 |
+    |      | off-diagonal gradients  |    | (-47.8, +/-8.5, 0.0)mm           | 1/4                  |
+    |      |                         |    | (-30.8, +/-8.5, 0.0)mm           | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+    | 6001 | MIT KIT system axial    | 8  | (+/-3.875, +/-3.875, 0.0)mm      | 1/4                  |
+    |      | gradiometer             |    | (+/-3.875, +/-3.875, 50.0)mm     | -1/4                 |
+    +------+-------------------------+----+----------------------------------+----------------------+
+
+
+.. _BJECIGEB:
+
+The coil definition file
+========================
+
+The coil geometry information is stored in the text file
+$MNE_ROOT/share/mne/coil_def.dat. In this file, any lines starting
+with the pound sign (#) are comments. A coil definition starts with
+a description line containing the following fields:
+
+** <*class*>**
+
+    This is a number indicating the class of this coil. Possible values
+    are listed in :ref:`BJEFABHA`.
+
+** <*id*>**
+
+    Coil id value. This value is listed in the first column of Tables :ref:`BGBBHGEC` and :ref:`CHDBDFJE`.
+
+** <*accuracy*>**
+
+    The coil representation accuracy. Possible values and their meanings
+    are listed in :ref:`BJEHIBJC`.
+
+** <*np*>**
+
+    Number of integration points in this representation.
+
+** <*size/m*>**
+
+    The size of the coil. For circular coils this is the diameter of
+    the coil and for square ones the side length of the square. This
+    information is mainly included to facilitate drawing of the coil
+    geometry. It should not be employed to infer a coil approximation
+    for the forward calculations.
+
+** <*baseline/m*>**
+
+    The baseline of this kind of coil. This will be zero for magnetometer
+    coils. This information is mainly included to facilitate drawing
+    of the coil geometry. It should not be employed to infer a coil
+    approximation for the forward calculations.
+
+** <*description*>**
+
+    Short description of this kind of coil. If the description contains several
+    words, it is enclosed in quotes.
+
+.. _BJEFABHA:
+
+.. table:: Coil class values
+
+    =======  =======================================================
+    Value    Meaning
+    =======  =======================================================
+    1        magnetometer
+    2        first-order axial gradiometer
+    3        planar gradiometer
+    4        second-order axial gradiometer
+    1000     an EEG electrode (used internally in software only).
+    =======  =======================================================
+
+
+.. tabularcolumns:: |p{0.1\linewidth}|p{0.5\linewidth}|
+.. _BJEHIBJC:
+.. table:: Coil representation accuracies.
+
+    =======  =====================================================================
+    Value    Meaning
+    =======  =====================================================================
+    1        The simplest representation available
+    2        The standard or *normal* representation (see :ref:`BGBBHGEC`)
+    3        The most *accurate* representation available (see :ref:`CHDBDFJE`)
+    =======  =====================================================================
+
+Each coil description line is followed by one or more integration
+point lines, consisting of seven numbers (an illustrative entry is
+given at the end of this list):
+
+** <*weight*>**
+
+    Gives the weight for this integration point (last column in Tables :ref:`BGBBHGEC` and :ref:`CHDBDFJE`).
+
+** <*x/m*> <*y/m*> <*z/m*>**
+
+    Indicates the location of the integration point (fourth column in Tables :ref:`BGBBHGEC` and :ref:`CHDBDFJE`).
+
+** <*nx*> <*ny*> <*nz*>**
+
+    Components of a unit vector indicating the field component to be selected.
+    Note that listing a separate unit vector for each integration point
+    allows the implementation of curved coils and coils with the gradiometer
+    loops tilted with respect to each other.
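+
+For illustration, a hypothetical coil_def.dat entry for the point
+magnetometer (class 1, id 2000, simplest accuracy, one integration
+point; the size and baseline values shown are placeholders), constructed
+from the field descriptions above, could read::
+
+    1 2000 1 1 0.000 0.000 "Point magnetometer"
+    1.000 0.000 0.000 0.000 0.000 0.000 1.000
+
+The single integration point has weight one, lies at the origin of the
+sensor coordinate system, and selects the field component along the
+local :math:`z` axis.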
+
+.. _BJEHHJIJ:
+
+Creating the coil definition file
+=================================
+
+The standard coil definition file $MNE_ROOT/share/mne/coil_def.dat
+is included with the MNE software package. The coil definition file
+can be recreated with the utility mne_list_coil_def
+as follows::
+
+    mne_list_coil_def --out $MNE_ROOT/share/mne/coil_def.dat
+
+.. _CHDDIBAH:
+
+Computing the forward solution
+##############################
+
+Purpose
+=======
+
+Instead of using the convenience script mne_do_forward_solution, it
+is also possible to invoke the forward solution computation program mne_forward_solution directly.
+In this approach, the convenience of the automatic file naming conventions
+present in mne_do_forward_solution is
+lost. However, there are some special-purpose options available
+in mne_forward_solution only.
+Please refer to :ref:`BABCHEJD` for information on mne_do_forward_solution.
+
+.. _BJEIGFAE:
+
+Command line options
+====================
+
+mne_forward_solution accepts
+the following command-line options (an example invocation is given
+after this list):
+
+**\---src <*name*>**
+
+    Source space name to use. The name of the file must be specified exactly,
+    including the directory. Typically, the source space files reside
+    in $SUBJECTS_DIR/$SUBJECT/bem.
+
+**\---bem <*name*>**
+
+    Specifies the BEM to be used. These files end with bem.fif or bem-sol.fif and
+    reside in $SUBJECTS_DIR/$SUBJECT/bem. The former file contains only
+    the BEM surface information while the latter file also contains the geometry
+    information precomputed with mne_prepare_bem_model,
+    see :ref:`CHDJFHEB`. If precomputed geometry is not available,
+    the linear collocation solution will be computed by mne_forward_solution.
+
+**\---origin <*x/mm*> : <*y/mm*> : <*z/mm*>**
+
+    Indicates that the sphere model should be used in the forward calculations.
+    The origin is specified in MEG head coordinates unless the ``--mricoord`` option
+    is present. The MEG sphere model solution is computed using the analytical
+    Sarvas formula. For EEG, an approximative solution is used (see
+    *EEG forward solution in the sphere model*, below).
+
+**\---eegmodels <*name*>**
+
+    This option is significant only if the sphere model is used and
+    EEG channels are present. The specified file contains specifications
+    of the EEG sphere model layer structures as detailed in :ref:`CHDIAFIG`. If this option is absent the file ``$HOME/.mne/EEG_models`` will
+    be consulted if it exists.
+
+**\---eegmodel <*model name*>**
+
+    Specifies the name of the sphere model to be used for EEG. If this option
+    is missing, the model Default will
+    be employed, see :ref:`CHDIAFIG`.
+
+**\---eegrad <*radius/mm*>**
+
+    Specifies the radius of the outermost surface (scalp) of the EEG sphere
+    model, see :ref:`CHDIAFIG`. The default value is 90 mm.
+
+**\---eegscalp**
+
+    Scale the EEG electrode locations to the surface of the outermost sphere
+    when using the sphere model.
+
+**\---accurate**
+
+    Use accurate MEG sensor coil descriptions. This is the recommended
+    choice. More information is given in :ref:`BJEIAEIE`.
+
+**\---fixed**
+
+    Compute the solution for sources normal to the cortical mantle only. This
+    option should be used only for surface-based and discrete source
+    spaces.
+
+**\---all**
+
+    Compute the forward solution for all vertices on the source space.
+
+**\---label <*name*>**
+
+    Compute the solution only for points within the specified label. Multiple
+    labels can be present. The label files should end with ``-lh.label`` or ``-rh.label`` for
+    left and right hemisphere label files, respectively. If the ``--all`` flag
+    is present, all surface points falling within the labels are included.
+    Otherwise, only decimated points within the label are selected.
+
+**\---mindist <*dist/mm*>**
+
+    Omit source space points closer than this value to the inner skull surface.
+    Any source space points outside the inner skull surface are automatically
+    omitted. The use of this option ensures that numerical inaccuracies
+    for very superficial sources do not cause unexpected effects in
+    the final current estimates. A suitable value for this parameter is
+    of the order of the size of the triangles on the inner skull surface.
+    If you employ the seglab software to create the triangulations, this
+    value should be about equal to the requested side length of the
+    triangles.
+
+**\---mindistout <*name*>**
+
+    Specifies a file name to contain the coordinates of source space points
+    omitted due to the ``--mindist`` option.
+
+**\---mri <*name*>**
+
+    The name of the MRI description file containing the MEG/MRI coordinate
+    transformation. This file was saved as part of the alignment procedure
+    outlined in :ref:`CHDBEHDC`. These files typically reside in ``$SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets`` .
+
+**\---trans <*name*>**
+
+    The name of a text file containing the 4 x 4 matrix for the coordinate transformation
+    from head to mri coordinates. With ``--trans``, the ``--mri`` option is not
+    required.
+
+**\---notrans**
+
+    The MEG/MRI coordinate transformation is taken as the identity transformation, *i.e.*,
+    the two coordinate systems are the same. This option is useful only
+    in special circumstances. If more than one of the ``--mri``, ``--trans``,
+    and ``--notrans`` options are specified, the last one remains
+    in effect.
+
+**\---mricoord**
+
+    Do all computations in the MRI coordinate system. The forward solution
+    matrix is not affected by this option if the source orientations
+    are fixed to be normal to the cortical mantle. If all three source components
+    are included, the forward solution for three source orientations parallel to
+    the coordinate axes is computed. If ``--mricoord`` is present, these
+    axes correspond to the MRI coordinate system rather than the default
+    MEG head coordinate system. This option is useful only in special
+    circumstances.
+
+**\---meas <*name*>**
+
+    This file is the measurement fif file or an off-line average file
+    produced thereof. It is recommended that the average file is employed for
+    evoked-response data and the original raw data file otherwise. This
+    file provides the MEG sensor locations and orientations, the
+    EEG electrode locations, and the coordinate transformation between
+    the MEG device coordinates and MEG head-based coordinates.
+
+**\---fwd <*name*>**
+
+    This file will contain the forward solution as well as the coordinate transformations,
+    sensor and electrode location information, and the source space
+    data. A name of the form <*name*>-fwd.fif is
+    recommended.
+
+**\---meg**
+
+    Compute the MEG forward solution.
+
+**\---eeg**
+
+    Compute the EEG forward solution.
+
+**\---grad**
+
+    Include the derivatives of the fields with respect to the dipole
+    position coordinates to the output, see :ref:`BJEFEJJG`.
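+
+As an example, the following command (all file names are hypothetical)
+computes a combined MEG and EEG forward solution with accurate coil
+descriptions::
+
+    mne_forward_solution --src sample-oct-6-src.fif --bem sample-bem-sol.fif \
+        --mri sample-trans.fif --meas sample_raw.fif \
+        --accurate --meg --eeg --fwd sample-fwd.fif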
+
+Implementation of software gradient compensation
+================================================
+
+As described in :ref:`BEHDDFBI` the CTF and 4D Neuroimaging
+data may have been subjected to noise cancellation employing the
+data from the reference sensor array. Even though these sensors are
+rather far away from the brain sources, mne_forward_solution takes
+them into account in the computations. If the data file specified
+with the ``--meas`` option has software gradient compensation
+activated, mne_forward_solution computes
+the field at the reference sensors in addition to the main MEG
+sensor array and computes a compensated forward solution using the
+methods described in :ref:`BEHDDFBI`.
+
+.. warning:: If the data file specified with the ``--meas`` option and the one used in the actual inverse computations with mne_analyze and mne_make_movie have different software gradient compensation states, the forward solution will not match the data to be analyzed and the current estimates will be slightly erroneous.
+
+.. _CHDIAFIG:
+
+The EEG sphere model definition file
+====================================
+
+For the computation of the electric potential distribution
+on the surface of the head (EEG) it is necessary to define the conductivities
+(:math:`\sigma`) and radii of the spherically
+symmetric layers. Different sphere models can be specified with
+the ``--eegmodels`` option.
+
+The EEG sphere model definition files may contain comment
+lines starting with a # and model
+definition lines in the following format:
+
+ <*name*>: <*radius1*>: <*conductivity1*>: <*radius2*>: <*conductivity2*>:...
+
+When the file is loaded the layers are sorted so that the
+radii will be in ascending order and the radius of the outermost
+layer is scaled to 1.0. The scalp radius specified with the ``--eegrad`` option
+is then consulted to scale the model to the correct dimensions.
+Even if the model setup file is not present, a model called Default is
+always provided. This model has the structure given in :ref:`BABEBGDA`.
+
+
+.. tabularcolumns:: |p{0.1\linewidth}|p{0.25\linewidth}|p{0.2\linewidth}|
+.. _BABEBGDA:
+.. table:: Structure of the default EEG model
+
+    ========  =======================  =======================
+    Layer     Relative outer radius    :math:`\sigma` (S/m)
+    ========  =======================  =======================
+    Head      1.0                      0.33
+    Skull     0.97                     0.04
+    CSF       0.92                     1.0
+    Brain     0.90                     0.33
+    ========  =======================  =======================
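+
+For illustration, a definition line for a model named ``example`` with
+the same layer structure as the default model above would read::
+
+    example:0.90:0.33:0.92:1.0:0.97:0.04:1.0:0.33
+
+The layer radii are given here in relative form; they are sorted and
+rescaled as described above when the file is loaded.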
+
+EEG forward solution in the sphere model
+========================================
+
+When the sphere model is employed, the computation of the
+EEG solution can be substantially accelerated by using approximation
+methods described by Mosher, Zhang, and Berg, see :ref:`CEGEGDEI` (Mosher *et
+al.* and references therein). mne_forward_solution approximates
+the solution with three dipoles in a homogeneous sphere whose locations
+and amplitudes are determined by minimizing the cost function:
+
+.. math::    S(r_1,\dotsc,r_m\ ,\ \mu_1,\dotsc,\mu_m) = \int_{scalp} {(V_{true} - V_{approx})}^2\,dS
+
+where :math:`r_1,\dotsc,r_m` and :math:`\mu_1,\dotsc,\mu_m` are
+the locations and amplitudes of the approximating dipoles and :math:`V_{true}` and :math:`V_{approx}` are
+the potential distributions given by the true and approximative
+formulas, respectively. It can be shown that this integral can be
+expressed in closed form using an expansion of the potentials in
+spherical harmonics. The formula is evaluated for the most superficial
+dipoles, *i.e.*, those lying just inside the
+inner skull surface.
+
+.. _BJEFEJJG:
+
+Field derivatives
+=================
+
+If the ``--grad`` option is specified, mne_forward_solution includes
+the derivatives of the forward solution with respect to the dipole
+location coordinates in the output file. Let
+
+.. math::    G_k = [g_{xk} g_{yk} g_{zk}]
+
+be the :math:`N_{chan} \times 3` matrix containing
+the signals produced by three orthogonal dipoles at location :math:`r_k`, making
+up the :math:`N_{chan} \times 3N_{source}` gain matrix
+
+.. math::    G = [G_1 \dotso G_{N_{source}}]\ .
+
+With the ``--grad`` option, the output from mne_forward_solution also
+contains the :math:`N_{chan} \times 9N_{source}` derivative matrix
+
+.. math::    D = [D_1 \dotso D_{N_{source}}]\ ,
+
+where
+
+.. math::    D_k = [\frac{\partial g_{xk}}{\partial x_k} \frac{\partial g_{xk}}{\partial y_k} \frac{\partial g_{xk}}{\partial z_k} \frac{\partial g_{yk}}{\partial x_k} \frac{\partial g_{yk}}{\partial y_k} \frac{\partial g_{yk}}{\partial z_k} \frac{\partial g_{zk}}{\partial x_k} \frac{\partial g_{zk}}{\partial y_k} \frac{\partial g_{zk}}{\partial z_k}]\ ,
+
+where :math:`x_k`, :math:`y_k`, and :math:`z_k` are the location
+coordinates of the :math:`k^{th}` dipole. If
+the dipole orientations are fixed to the cortical normal with the ``--fixed``
+option, the dimensions of :math:`G` and :math:`D` are :math:`N_{chan} \times N_{source}` and :math:`N_{chan} \times 3N_{source}`,
+respectively. Both :math:`G` and :math:`D` can
+be read with the mne_read_forward_solution Matlab
+function, see Table 10.1.
+
+.. _CHDBBFCA:
+
+Averaging forward solutions
+###########################
+
+Purpose
+=======
+
+One possibility to make a grand average over several runs
+of an experiment is to average the data across runs and average the
+forward solutions accordingly. For this purpose, mne_average_forward_solutions computes a
+weighted average of several forward solutions. The program averages both
+MEG and EEG forward solutions. Usually the EEG forward solution is
+identical across runs because the electrode locations do not change.
+
+Command line options
+====================
+
+mne_average_forward_solutions accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---fwd <*name*> [: <*weight*> ]**
+
+    Specifies a forward solution to include. If no weight is specified,
+    1.0 is assumed. In the averaging process the weights are divided
+    by their sum. For example, if two forward solutions are averaged
+    and their specified weights are 2 and 3, the average is formed with
+    a weight of 2/5 for the first solution and 3/5 for the second one.
+
+**\---out <*name*>**
+
+    Specifies the output file which will contain the averaged forward solution.
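+
+For example, to average two forward solutions with the weights 2 and 3
+discussed above, assuming the colon syntax shown for ``--fwd`` and purely
+illustrative file names, one could say:
+
+``mne_average_forward_solutions --fwd run1-fwd.fif:2 --fwd run2-fwd.fif:3 --out avg-fwd.fif``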
diff --git a/doc/source/manual/intro.rst b/doc/source/manual/intro.rst
new file mode 100644
index 0000000..e0ea63d
--- /dev/null
+++ b/doc/source/manual/intro.rst
@@ -0,0 +1,45 @@
+
+
+.. _CHDDEFAB:
+
+============
+Introduction
+============
+
+This document describes a set of programs for preprocessing
+and averaging of MEG and EEG data and for constructing cortically-constrained minimum-norm
+estimates. This software package will hereafter be referred to
+as *MNE software*. The software is based on anatomical
+MRI processing, forward modeling, and source estimation methods published by
+Dale, Fischl, Hämäläinen, and others.
+The software depends on anatomical MRI processing tools provided
+by the FreeSurfer software.
+
+:ref:`CHDBAFGJ` gives an overview of the software
+modules included with MNE software. :ref:`ch_cookbook` is a concise cookbook
+describing a typical workflow for a novice user employing the convenience
+scripts as far as possible. :ref:`ch_browse` to :ref:`ch_misc` give more detailed
+information about the software modules. :ref:`ch_sample_data` discusses
+processing of the sample data set included with the MNE software. :ref:`ch_reading` lists
+some useful background material for the methods employed in the
+MNE software.
+
+:ref:`create_bem_model` is an overview of the BEM model mesh
+generation methods, :ref:`setup_martinos` contains information specific
+to the setup at the Martinos Center for Biomedical Imaging, :ref:`install_config` is
+a software installation and configuration guide, :ref:`release_notes` summarizes
+the software history, and :ref:`licence` contains the End-User
+License Agreement.
+
+.. note:: The most recent version of this manual is available at ``$MNE_ROOT/share/doc/MNE-manual-`` <*version*> ``.pdf``. For the present manual, <*version*> = ``2.7``. For the definition of the ``MNE_ROOT`` environment variable, see :ref:`user_environment`.
+
+We want to thank all MNE Software users at the Martinos Center and
+in other institutions for their collaboration during the creation
+of this software as well as for useful comments on the software
+and its documentation.
+
+The development of this software has been supported by the
+NCRR *Center for Functional Neuroimaging Technologies* P41RR14075-06, the
+NIH grants 1R01EB009048-01, R01 EB006385-A101, 1R01 HD40712-A1, 1R01
+NS44319-01, and 2R01 NS37462-05, as well as by the Department of Energy
+under Award Number DE-FG02-99ER62764 to The MIND Institute.
diff --git a/doc/source/manual/list.rst b/doc/source/manual/list.rst
new file mode 100644
index 0000000..ee5f9aa
--- /dev/null
+++ b/doc/source/manual/list.rst
@@ -0,0 +1,439 @@
+
+
+.. _CHDBAFGJ:
+
+========
+Overview
+========
+
+List of components
+##################
+
+The principal components of the MNE Software and their functions
+are listed in :ref:`CHDDJIDB`. Documented software is listed
+in italics. :ref:`BABDJHGH` lists various supplementary utilities.
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.65\linewidth}|
+.. _CHDDJIDB:
+.. table:: The software components.
+
+    +----------------------------+--------------------------------------------+
+    | Name                       |   Purpose                                  |
+    +============================+============================================+
+    | *mne_analyze*              | An interactive analysis tool for computing |
+    |                            | source estimates, see                      |
+    |                            | :ref:`ch_interactive_analysis`.            |
+    +----------------------------+--------------------------------------------+
+    | *mne_average_estimates*    | Average data across subjects,              |
+    |                            | see :ref:`CHDEHFGD`.                       |
+    +----------------------------+--------------------------------------------+
+    | *mne_browse_raw*           | Interactive raw data browser. Includes     |
+    |                            | filtering, offline averaging, and          |
+    |                            | computation of covariance matrices,        |
+    |                            | see :ref:`ch_browse`.                      |
+    +----------------------------+--------------------------------------------+
+    | *mne_compute_mne*          | Computes the minimum-norm estimates,       |
+    |                            | see :ref:`BABDABHI`. Most of the           |
+    |                            | functionality of mne_compute_mne is        |
+    |                            | included in mne_make_movie.                |
+    +----------------------------+--------------------------------------------+
+    | *mne_compute_raw_inverse*  | Compute the inverse solution from raw data,|
+    |                            | see :ref:`CBBCGHAH`.                       |
+    +----------------------------+--------------------------------------------+
+    | *mne_convert_mne_data*     | Convert MNE data files to other file       |
+    |                            | formats, see :ref:`BEHCCEBJ`.              |
+    +----------------------------+--------------------------------------------+
+    | *mne_do_forward_solution*  | Convenience script to calculate the forward|
+    |                            | solution matrix, see :ref:`BABCHEJD`.      |
+    +----------------------------+--------------------------------------------+
+    | *mne_do_inverse_operator*  | Convenience script for inverse operator    |
+    |                            | decomposition, see :ref:`CIHCFJEI`.        |
+    +----------------------------+--------------------------------------------+
+    | *mne_forward_solution*     | Calculate the forward solution matrix, see |
+    |                            | :ref:`CHDDIBAH`.                           |
+    +----------------------------+--------------------------------------------+
+    | *mne_inverse_operator*     | Compute the inverse operator decomposition,|
+    |                            | see :ref:`CBBDDBGF`.                       |
+    +----------------------------+--------------------------------------------+
+    | *mne_make_movie*           | Make movies in batch mode, see             |
+    |                            | :ref:`CBBECEDE`.                           |
+    +----------------------------+--------------------------------------------+
+    | *mne_make_source_space*    | Create a *fif* source space description    |
+    |                            | file, see :ref:`BEHCGJDD`.                 |
+    +----------------------------+--------------------------------------------+
+    | *mne_process_raw*          | A batch-mode version of mne_browse_raw,    |
+    |                            | see :ref:`ch_browse`.                      |
+    +----------------------------+--------------------------------------------+
+    | mne_redo_file              | Many intermediate result files contain a   |
+    |                            | description of their                       |
+    |                            | 'production environment'. Such files can   |
+    |                            | be recreated easily with this utility.     |
+    |                            | This is convenient if, for example,        |
+    |                            | the selection of bad channels is changed   |
+    |                            | and the inverse operator decomposition has |
+    |                            | to be recalculated.                        |
+    +----------------------------+--------------------------------------------+
+    | mne_redo_file_nocwd        | Works like mne_redo_file but does not try  |
+    |                            | to change into the working directory       |
+    |                            | specified in the 'production environment'. |
+    +----------------------------+--------------------------------------------+
+    | *mne_setup_forward_model*  | Set up the BEM-related fif files,          |
+    |                            | see :ref:`CIHDBFEG`.                       |
+    +----------------------------+--------------------------------------------+
+    | *mne_setup_mri*            | A convenience script to create the fif     |
+    |                            | files describing the anatomical MRI data,  |
+    |                            | see :ref:`BABCCEHF`.                       |
+    +----------------------------+--------------------------------------------+
+    | *mne_setup_source_space*   | A convenience script to create a source    |
+    |                            | space description file, see                |
+    |                            | :ref:`CIHCHDAE`.                           |
+    +----------------------------+--------------------------------------------+
+    | mne_show_environment       | Show information about the production      |
+    |                            | environment of a file.                     |
+    +----------------------------+--------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.65\linewidth}|
+.. _BABDJHGH:
+.. table:: Utility programs.
+
+    +---------------------------------+--------------------------------------------+
+    | Name                            |   Purpose                                  |
+    +=================================+============================================+
+    | *mne_add_patch_info*            | Add neighborhood information to a source   |
+    |                                 | space file, see :ref:`BEHCBCGG`.           |
+    +---------------------------------+--------------------------------------------+
+    | *mne_add_to_meas_info*          | Utility to add new information to the      |
+    |                                 | measurement info block of a fif file. The  |
+    |                                 | source of information is another fif file. |
+    +---------------------------------+--------------------------------------------+
+    | *mne_add_triggers*              | Modify the trigger channel STI 014 in a raw|
+    |                                 | data file, see :ref:`CHDBDDDF`. The same   |
+    |                                 | effect can be reached by using an event    |
+    |                                 | file for averaging in mne_process_raw and  |
+    |                                 | mne_browse_raw.                            |
+    +---------------------------------+--------------------------------------------+
+    | *mne_annot2labels*              | Convert parcellation data into label files,|
+    |                                 | see :ref:`CHDEDHCG`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_anonymize*                 | Remove subject-specific information from a |
+    |                                 | fif data file, see :ref:`CHDIJHIC`.        |
+    +---------------------------------+--------------------------------------------+
+    | *mne_average_forward_solutions* | Calculate an average of forward solutions, |
+    |                                 | see :ref:`CHDBBFCA`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_brain_vision2fiff*         | Convert EEG data from BrainVision format   |
+    |                                 | to fif format, see :ref:`BEHCCCDC`.        |
+    +---------------------------------+--------------------------------------------+
+    | *mne_change_baselines*          | Change the DC offsets according to         |
+    |                                 | specifications given in a text file,       |
+    |                                 | see :ref:`CHDDIDCC`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_change_nave*               | Change the number of averages in an        |
+    |                                 | evoked-response data file. This is often   |
+    |                                 | necessary if the file was derived from     |
+    |                                 | several files.                             |
+    +---------------------------------+--------------------------------------------+
+    | *mne_check_eeg_locations*       | Check that the EEG electrode locations     |
+    |                                 | have been correctly transferred from the   |
+    |                                 | Polhemus data block to the channel         |
+    |                                 | information tags, see :ref:`CHDJGGGC`.     |
+    +---------------------------------+--------------------------------------------+
+    | *mne_check_surface*             | Check the validity of a FreeSurfer surface |
+    |                                 | file or one of the surfaces within a BEM   |
+    |                                 | file. This program simply checks for       |
+    |                                 | topological errors in surface files.       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_collect_transforms*        | Collect coordinate transformations from    |
+    |                                 | several sources into a single fif file,    |
+    |                                 | see :ref:`BABBIFIJ`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_compensate_data*           | Change the applied software gradient       |
+    |                                 | compensation in an evoked-response data    |
+    |                                 | file, see :ref:`BEHDDFBI`.                 |
+    +---------------------------------+--------------------------------------------+
+    | *mne_convert_lspcov*            | Convert the LISP format noise covariance   |
+    |                                 | matrix output by graph into fif,           |
+    |                                 | see :ref:`BEHCDBHG`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_convert_ncov*              | Convert the ncov format noise covariance   |
+    |                                 | file to fif, see :ref:`BEHCHGHD`.          |
+    +---------------------------------+--------------------------------------------+
+    | *mne_convert_surface*           | Convert FreeSurfer and text format surface |
+    |                                 | files into Matlab mat files,               |
+    |                                 | see :ref:`BEHDIAJG`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_cov2proj*                  | Pick eigenvectors from a covariance matrix |
+    |                                 | and create a signal-space projection (SSP) |
+    |                                 | file out of them, see :ref:`CHDECHBF`.     |
+    +---------------------------------+--------------------------------------------+
+    | *mne_create_comp_data*          | Create a fif file containing software      |
+    |                                 | gradient compensation information from a   |
+    |                                 | text file, see :ref:`BEHBIIFF`.            |
+    +---------------------------------+--------------------------------------------+
+    | *mne_ctf2fiff*                  | Convert a CTF ds folder into a fif file,   |
+    |                                 | see :ref:`BEHDEBCH`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_ctf_dig2fiff*              | Convert text format digitization data to   |
+    |                                 | fif format, see :ref:`BEHBABFA`.           |
+    +---------------------------------+--------------------------------------------+
+    | *mne_dicom_essentials*          | List essential information from a          |
+    |                                 | DICOM file.                                |
+    |                                 | This utility is used by the script         |
+    |                                 | mne_organize_dicom, see :ref:`BABEBJHI`.   |
+    +---------------------------------+--------------------------------------------+
+    | *mne_edf2fiff*                  | Convert EEG data from the EDF/EDF+/BDF     |
+    |                                 | formats to the fif format,                 |
+    |                                 | see :ref:`BEHIAADG`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_epochs2mat*                | Apply bandpass filter to raw data and      |
+    |                                 | extract epochs for subsequent processing   |
+    |                                 | in Matlab, see :ref:`BEHFIDCB`.            |
+    +---------------------------------+--------------------------------------------+
+    | *mne_evoked_data_summary*       | List summary of averaged data from a fif   |
+    |                                 | file to the standard output.               |
+    +---------------------------------+--------------------------------------------+
+    | *mne_eximia2fiff*               | Convert EEG data from the Nexstim eXimia   |
+    |                                 | system to fif format, see :ref:`BEHGCEHH`. |
+    +---------------------------------+--------------------------------------------+
+    | *mne_fit_sphere_to_surf*        | Fit a sphere to a surface given in fif     |
+    |                                 | or FreeSurfer format, see :ref:`CHDECHBF`. |
+    +---------------------------------+--------------------------------------------+
+    | *mne_fix_mag_coil_types*        | Update the coil types for magnetometers    |
+    |                                 | in a fif file, see :ref:`CHDGAAJC`.        |
+    +---------------------------------+--------------------------------------------+
+    | *mne_fix_stim14*                | Fix coding errors of trigger channel       |
+    |                                 | STI 014, see :ref:`BABCDBDI`.              |
+    +---------------------------------+--------------------------------------------+
+    | *mne_flash_bem*                 | Create BEM tessellation using multi-echo   |
+    |                                 | FLASH MRI data, see :ref:`BABFCDJH`.       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_insert_4D_comp*            | Read Magnes compensation channel data from |
+    |                                 | a text file and merge it with raw data     |
+    |                                 | from other channels in a fif file, see     |
+    |                                 | :ref:`BEHGDDBH`.                           |
+    +---------------------------------+--------------------------------------------+
+    | *mne_list_bem*                  | List BEM information in text format,       |
+    |                                 | see :ref:`BEHBBEHJ`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_list_coil_def*             | Create the coil description file. This     |
+    |                                 | is run automatically when the software     |
+    |                                 | is set up, see :ref:`BJEHHJIJ`.            |
+    +---------------------------------+--------------------------------------------+
+    | *mne_list_proj*                 | List signal-space projection data from a   |
+    |                                 | fif file.                                  |
+    +---------------------------------+--------------------------------------------+
+    | *mne_list_source_space*         | List source space information in text      |
+    |                                 | format suitable for importing into         |
+    |                                 | Neuromag MRIlab, see :ref:`BEHBHIDH`.      |
+    +---------------------------------+--------------------------------------------+
+    | *mne_list_versions*             | List versions and compilation dates of MNE |
+    |                                 | software modules, see :ref:`CHDFIGBG`.     |
+    +---------------------------------+--------------------------------------------+
+    | *mne_make_cor_set*              | Used by mne_setup_mri to create fif format |
+    |                                 | MRI description files from COR or mgh/mgz  |
+    |                                 | format MRI data, see :ref:`BABCCEHF`. The  |
+    |                                 | mne_make_cor_set utility is described      |
+    |                                 | in :ref:`BABBHHHE`.                        |
+    +---------------------------------+--------------------------------------------+
+    | *mne_make_derivations*          | Create a channel derivation data file, see |
+    |                                 | :ref:`CHDHJABJ`.                           |
+    +---------------------------------+--------------------------------------------+
+    | *mne_make_eeg_layout*           | Make a topographical trace layout file     |
+    |                                 | using the EEG electrode locations from     |
+    |                                 | an actual measurement, see :ref:`CHDDGDJA`.|
+    +---------------------------------+--------------------------------------------+
+    | *mne_make_morph_maps*           | Precompute the mapping data needed for     |
+    |                                 | morphing between subjects, see             |
+    |                                 | :ref:`CHDBBHDH`.                           |
+    +---------------------------------+--------------------------------------------+
+    | *mne_make_uniform_stc*          | Create a spatially uniform stc file for    |
+    |                                 | testing purposes.                          |
+    +---------------------------------+--------------------------------------------+
+    | *mne_mark_bad_channels*         | Update the list of unusable channels in    |
+    |                                 | a data file, see :ref:`CHDDHBEE`.          |
+    +---------------------------------+--------------------------------------------+
+    | *mne_morph_labels*              | Morph label file definitions between       |
+    |                                 | subjects, see :ref:`CHDCEAFC`.             |
+    +---------------------------------+--------------------------------------------+
+    | *mne_organize_dicom*            | Organize DICOM MRI image files into        |
+    |                                 | directories, see :ref:`BABEBJHI`.          |
+    +---------------------------------+--------------------------------------------+
+    | *mne_prepare_bem_model*         | Perform the geometry calculations for      |
+    |                                 | BEM forward solutions, see :ref:`CHDJFHEB`.|
+    +---------------------------------+--------------------------------------------+
+    | mne_process_stc                 | Manipulate stc files.                      |
+    +---------------------------------+--------------------------------------------+
+    | *mne_raw2mat*                   | Convert raw data into a Matlab file,       |
+    |                                 | see :ref:`convert_to_matlab`.              |
+    +---------------------------------+--------------------------------------------+
+    | *mne_rename_channels*           | Change the names and types of channels     |
+    |                                 | in a fif file, see :ref:`CHDCFEAJ`.        |
+    +---------------------------------+--------------------------------------------+
+    | *mne_sensitivity_map*           | Compute a sensitivity map and output       |
+    |                                 | the result in a w-file,                    |
+    |                                 | see :ref:`CHDDCBGI`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_sensor_locations*          | Create a file containing the sensor        |
+    |                                 | locations in text format.                  |
+    +---------------------------------+--------------------------------------------+
+    | *mne_show_fiff*                 | List contents of a fif file,               |
+    |                                 | see :ref:`CHDHEDEF`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_simu*                      | Simulate MEG and EEG data,                 |
+    |                                 | see :ref:`CHDECAFD`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_smooth*                    | Smooth a w or stc file.                    |
+    +---------------------------------+--------------------------------------------+
+    | *mne_surf2bem*                  | Create a *fif* file describing the         |
+    |                                 | triangulated compartment boundaries for    |
+    |                                 | the boundary-element model (BEM),          |
+    |                                 | see :ref:`BEHCACCJ`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_toggle_skips*              | Change data skip tags in a raw file into   |
+    |                                 | ignored skips or vice versa.               |
+    +---------------------------------+--------------------------------------------+
+    | *mne_transform_points*          | Transform between MRI and MEG head         |
+    |                                 | coordinate frames, see :ref:`CHDDDJCA`.    |
+    +---------------------------------+--------------------------------------------+
+    | *mne_tufts2fiff*                | Convert EEG data from the Tufts            |
+    |                                 | University format to fif format,           |
+    |                                 | see :ref:`BEHDGAIJ`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_view_manual*               | Start a PDF reader to show this manual     |
+    |                                 | from its standard location.                |
+    +---------------------------------+--------------------------------------------+
+    | *mne_volume_data2mri*           | Convert volumetric data defined in a       |
+    |                                 | source space created with                  |
+    |                                 | mne_volume_source_space into an MRI        |
+    |                                 | overlay, see :ref:`BEHDEJEC`.              |
+    +---------------------------------+--------------------------------------------+
+    | *mne_volume_source_space*       | Make a volumetric source space,            |
+    |                                 | see :ref:`BJEFEHJI`.                       |
+    +---------------------------------+--------------------------------------------+
+    | *mne_watershed_bem*             | Do the segmentation for BEM using the      |
+    |                                 | watershed algorithm, see :ref:`BABBDHAG`.  |
+    +---------------------------------+--------------------------------------------+
+
+
+File formats
+############
+
+The MNE software employs the fif file format whenever possible.
+New tags have been added to incorporate information specific to
+the calculation of cortically constrained source estimates. FreeSurfer
+file formats are also employed when needed to represent cortical
+surface geometry data as well as the spatiotemporal distribution of
+quantities on the surfaces. Of particular interest are the w files,
+which contain static overlay data on the cortical surface, and the stc files,
+which contain dynamic overlays (movies).
+
+Conventions
+###########
+
+When command line examples are shown, the backslash character
+(\\) indicates a continuation line; this is also valid shell syntax.
+In most cases, however, you can easily fit the commands listed in
+this manual on one line and thus omit the backslashes. The order
+of options is irrelevant. Entries to be typed literally are shown
+like ``this``. *Italicized* text indicates
+conceptual entries. For example, <*dir*> indicates a directory
+name.
+
+In the descriptions of interactive software modules the notation <*menu*>/<*item*> is
+often used to denote menu selections. For example, File/Quit stands
+for the Quit button in the File menu.
+
+All software modules employ the double-dash (``--``) option convention, *i.e.*, the
+option names are preceded by two dashes.
+
+Most of the programs have two common options to obtain general
+information:
+
+**\---help**
+
+    Prints concise usage information.
+
+**\---version**
+
+    Prints the program module name, version number, and compilation date.
+
+.. _user_environment:
+
+User environment
+################
+
+The system-dependent location of the MNE Software will be
+referred to here by the environment variable MNE_ROOT. There are
+two scripts for setting up the user environment so that the software
+can be used conveniently:
+
+``$MNE_ROOT/bin/mne_setup_sh``
+
+and
+
+``$MNE_ROOT/bin/mne_setup``
+
+compatible with the POSIX and csh/tcsh shells, respectively. Since
+the scripts set environment variables, they should be 'sourced' in
+the current shell. You can find out which type of shell you are using
+by saying
+
+``echo $SHELL``
+
+If the output indicates a POSIX shell (bash or sh), you should issue
+the three commands:
+
+``export MNE_ROOT=`` <*MNE*>
+
+``export MATLAB_ROOT=`` <*Matlab*>
+
+``. $MNE_ROOT/bin/mne_setup_sh``
+
+with <*MNE*> replaced
+by the directory where you have installed the MNE software and <*Matlab*> replaced by
+the directory where Matlab is installed. If you do not have Matlab,
+leave MATLAB_ROOT undefined. If Matlab is not available, the utilities
+mne_convert_mne_data, mne_epochs2mat, mne_raw2mat,
+and mne_simu will not work.
+
+For csh/tcsh the corresponding commands are:
+
+``setenv MNE_ROOT`` <*MNE*>
+
+``setenv MATLAB_ROOT`` <*Matlab*>
+
+``source $MNE_ROOT/bin/mne_setup``
+
+For BEM mesh generation using the watershed algorithm or
+multi-echo FLASH MRI data (see :ref:`create_bem_model`), and
+for accessing the tkmedit program
+from mne_analyze (see :ref:`CACCHCBF`),
+the MNE software needs access to a FreeSurfer license
+and software. Therefore, to use these features you must
+set up the FreeSurfer environment
+as described in the FreeSurfer documentation.
+
+The environment variables relevant to the MNE software are
+listed in :ref:`CIHDGFAA`.
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.55\linewidth}|
+.. _CIHDGFAA:
+.. table:: Environment variables
+
+    +-------------------------+--------------------------------------------+
+    | Name of the variable    |   Description                              |
+    +=========================+============================================+
+    | MNE_ROOT                | Location of the MNE software, see above.   |
+    +-------------------------+--------------------------------------------+
+    | FREESURFER_HOME         | Location of the FreeSurfer software.       |
+    |                         | Needed during FreeSurfer reconstruction    |
+    |                         | and if the FreeSurfer MRI viewer is used   |
+    |                         | with mne_analyze, see :ref:`CACCHCBF`.     |
+    +-------------------------+--------------------------------------------+
+    | SUBJECTS_DIR            | Location of the MRI data.                  |
+    +-------------------------+--------------------------------------------+
+    | SUBJECT                 | Name of the current subject.               |
+    +-------------------------+--------------------------------------------+
+    | MNE_TRIGGER_CH_NAME     | Name of the trigger channel in raw data,   |
+    |                         | see :ref:`BABBGJEA`.                       |
+    +-------------------------+--------------------------------------------+
+    | MNE_TRIGGER_CH_MASK     | Mask to be applied to the trigger channel  |
+    |                         | values, see :ref:`BABBGJEA`.               |
+    +-------------------------+--------------------------------------------+
+
+.. note::
+
+    Section :ref:`setup_martinos` contains information specific to the setup at
+    the Martinos Center including instructions to access the Neuromag software.
diff --git a/doc/source/manual/matlab.rst b/doc/source/manual/matlab.rst
new file mode 100644
index 0000000..c137345
--- /dev/null
+++ b/doc/source/manual/matlab.rst
@@ -0,0 +1,1210 @@
+
+
+.. _ch_matlab:
+
+==================
+The Matlab toolbox
+==================
+
+Overview
+########
+
+The MNE software contains a collection of Matlab m-files to
+facilitate interfacing with the binary file formats of the MNE software.
+The toolbox is located at ``$MNE_ROOT/share/matlab``. The
+names of the MNE Matlab toolbox functions begin either with ``mne_`` or
+with ``fiff_``. When you source the mne_setup script
+as described in :ref:`user_environment`, one of the following actions
+takes place:
+
+- If you do not have the Matlab startup.m
+  file, it will be created and lines allowing access to the MNE Matlab
+  toolbox are added.
+
+- If you have startup.m and it does not have the standard MNE
+  Matlab toolbox setup lines, you will be instructed to add them manually.
+
+- If you have startup.m and the standard MNE Matlab toolbox
+  setup lines are there, nothing happens.
+
+A summary of the available routines is provided in Tables :ref:`BGBCGHAG` - :ref:`BGBEFADJ`. The toolbox
+also contains a set of examples which may be useful starting points
+for your own development. The names of these functions start with mne_ex and
+they are listed in :ref:`BGBEFADJ`.
+
+.. note:: The MNE Matlab Toolbox is compatible with Matlab version 7.0 or later.
+
+.. note::
+
+    The Matlab function fiff_setup_read_raw has a significant change: the
+    sample numbers now take into account a possible initial skip in the file,
+    *i.e.*, the time between the start of the data acquisition and the start
+    of saving the data to disk. The first_samp member of the returned
+    structure indicates the initial skip in samples. If you want your own
+    routines, which assume that the initial skip has been removed, to perform
+    identically with the previous version, subtract first_samp from the
+    sample numbers you specify.
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. _BGBCGHAG:
+.. table:: High-level reading routines.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | fiff_find_evoked               | Find all evoked data sets from a file.                       |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_bad_channels         | Read the bad channel list.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_ctf_comp             | Read CTF software gradient compensation data.                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_evoked               | Read evoked-response data.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_evoked_all           | Read all evoked-response data from a file.                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_meas_info            | Read measurement information.                                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_mri                  | Read an MRI description file.                                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_proj                 | Read signal-space projection data.                           |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_raw_segment          | Read a segment of raw data with time limits specified        |
+    |                                | in samples.                                                  |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_raw_segment_times    | Read a segment of raw data with time limits specified        |
+    |                                | in seconds.                                                  |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_setup_read_raw            | Set up data structures before using fiff_read_raw_segment    |
+    |                                | or fiff_read_raw_segment_times.                              |
+    +--------------------------------+--------------------------------------------------------------+
+
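+As a minimal sketch of how these reading routines fit together (the file
+name is illustrative and the argument conventions should be checked against
+the m-file help texts)::
+
+    % set up the data structures, then read ten seconds of data
+    % starting at the beginning of the saved data
+    raw = fiff_setup_read_raw('sample_raw.fif');
+    from_t = double(raw.first_samp) / raw.info.sfreq;    % first_samp may be an integer type
+    [data, times] = fiff_read_raw_segment_times(raw, from_t, from_t + 10.0);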
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: Channel selection utilities.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | fiff_pick_channels             | Create a selector to pick desired channels from data         |
+    |                                | according to include and exclude lists.                      |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_pick_channels_evoked      | Pick desired channels from evoked-response data according    |
+    |                                | to include and exclude lists.                                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_pick_info                 | Modify measurement info to include only selected channels.   |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_pick_types                | Create a selector to pick desired channels from data         |
+    |                                | according to channel types (MEG, EEG, STIM) in combination   |
+    |                                | with include and exclude lists.                              |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_pick_types_evoked         | Pick desired channels from evoked-response data according    |
+    |                                | to channel types (MEG, EEG, STIM) in combination with        |
+    |                                | include and exclude lists.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+
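+Continuing the sketch above, the segment can be restricted to MEG channels
+only; the order of the logical flags (MEG first, then EEG) is an assumption
+to be checked against the m-file help::
+
+    % create a selector for MEG channels and re-read the segment
+    picks = fiff_pick_types(raw.info, true, false);
+    [meg_data, times] = fiff_read_raw_segment_times(raw, from_t, from_t + 10.0, picks);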
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: Coordinate transformation utilities.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | fiff_invert_transform          | Invert a coordinate transformation structure.                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_reset_ch_pos              | Reset channel position transformation to the default values  |
+    |                                | present in the file.                                         |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_transform_eeg_chs         | Transform electrode positions to another coordinate frame.   |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_transform_meg_chs         | Apply a coordinate transformation to the sensor location     |
+    |                                | data to bring the integration points to another coordinate   |
+    |                                | frame.                                                       |
+    +--------------------------------+--------------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: Basic reading routines.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | fiff_define_constants          | Define a structure which contains the constants relevant     |
+    |                                | to fif files.                                                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_dir_tree_find             | Find nodes of a given type in a directory tree structure.    |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_list_dir_tree             | List a directory tree structure.                             |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_make_dir_tree             | Create a directory tree structure.                           |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_open                      | Open a fif file and create the directory tree structure.     |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_named_matrix         | Read a named matrix from a fif file.                         |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_tag                  | Read one tag from a fif file.                                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_read_tag_info             | Read the info of one tag from a fif file.                    |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_split_name_list           | Split a colon-separated list of names into a cell array      |
+    |                                | of strings.                                                  |
+    +--------------------------------+--------------------------------------------------------------+
+
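+A short sketch of the low-level access pattern follows; the block-type
+constant is assumed to be among those defined by fiff_define_constants::
+
+    % open the file and build the directory tree, then locate
+    % the measurement block(s)
+    FIFF = fiff_define_constants;
+    [fid, tree] = fiff_open('sample_raw.fif');
+    meas = fiff_dir_tree_find(tree, FIFF.FIFFB_MEAS);
+    fclose(fid);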
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: Writing routines.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | fiff_end_block                 | Write a FIFF_END_BLOCK tag.                                  |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_end_file                  | Write the standard closing.                                  |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_start_block               | Write a FIFF_START_BLOCK tag.                                |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_start_file                | Write the appropriate beginning of a file.                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_ch_info             | Write a channel information structure.                       |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_coord_trans         | Write a coordinate transformation structure.                 |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_ctf_comp            | Write CTF compensation data.                                 |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_dig_point           | Write one digitizer data point.                              |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_complex             | Write single-precision complex numbers.                      |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_complex_matrix      | Write a single-precision complex matrix.                     |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_double              | Write double-precision floats.                               |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_double_complex      | Write double-precision complex numbers.                      |
+    +--------------------------------+--------------------------------------------------------------+
+    |fiff_write_double_complex_matrix| Write a double-precision complex matrix.                     |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_double_matrix       | Write a double-precision matrix.                             |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_evoked              | Write an evoked-response data file.                          |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_float               | Write single-precision floats.                               |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_float_matrix        | Write a single-precision matrix.                             |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_id                  | Write an id tag.                                             |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_int                 | Write 32-bit integers.                                       |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_int_matrix          | Write a matrix of 32-bit integers.                           |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_name_list           | Write a name list.                                           |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_named_matrix        | Write a named matrix.                                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_proj                | Write SSP data.                                              |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_short               | Write 16-bit integers.                                       |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_string              | Write a string.                                              |
+    +--------------------------------+--------------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: High-level data writing routines.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | fiff_write_evoked              | Write an evoked-response data file.                          |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_finish_writing_raw        | Write the closing tags to a raw data file.                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_start_writing_raw         | Start writing raw data file, *i.e.*, write the measurement   |
+    |                                | information.                                                 |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_dig_file            | Write a fif file containing digitization data.               |
+    +--------------------------------+--------------------------------------------------------------+
+    | fiff_write_raw_buffer          | Write one raw data buffer. This is used after a call to      |
+    |                                | fiff_start_writing_raw.                                      |
+    +--------------------------------+--------------------------------------------------------------+
+
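+A sketch of the writing workflow, copying raw data from one file to
+another; the argument conventions are assumptions modeled on the mne_ex
+examples shipped with the toolbox::
+
+    % read everything from the input file and write it to a new file
+    raw = fiff_setup_read_raw('input_raw.fif');
+    [outfid, cals] = fiff_start_writing_raw('output_raw.fif', raw.info);
+    [data, times] = fiff_read_raw_segment(raw, raw.first_samp, raw.last_samp);
+    fiff_write_raw_buffer(outfid, data, cals);
+    fiff_finish_writing_raw(outfid);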
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: Coil definition utilities.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_add_coil_defs              | Add coil definitions to an array of channel information      |
+    |                                | structures.                                                  |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_load_coil_def              | Load a coil definition file.                                 |
+    +--------------------------------+--------------------------------------------------------------+
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: Routines for software gradient compensation and signal-space projection.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_compensate_to              | Apply or remove CTF software gradient compensation from      |
+    |                                | evoked-response data.                                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_get_current_comp           | Get the state of software gradient compensation from         |
+    |                                | measurement info.                                            |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_make_compensator           | Make a compensation matrix which switches the status of      |
+    |                                | CTF software gradient compensation from one state to another.|
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_make_projector             | Create a signal-space projection operator with the           |
+    |                                | projection item definitions and cell arrays of channel names |
+    |                                | and bad channel names as input.                              |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_make_projector_info        | Like mne_make_projector but uses the measurement info        |
+    |                                | structure as input.                                          |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_set_current_comp           | Change the information about the compensation status in      |
+    |                                | measurement info.                                            |
+    +--------------------------------+--------------------------------------------------------------+
+
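+For example, evoked-response data can be converted to third-order gradient
+compensation as sketched below; the numeric grade value follows CTF
+conventions and is an assumption::
+
+    % load evoked data and switch the compensation grade to 3
+    evoked = fiff_read_evoked('ctf-ave.fif');
+    evoked = mne_compensate_to(evoked, 3);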
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: High-level routines for reading MNE data files.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_pick_channels_cov          | Pick desired channels from a sensor covariance matrix.       |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_pick_channels_forward      | Pick desired channels (rows) from a forward solution.        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_bem_surfaces          | Read triangular tessellations of surfaces for                |
+    |                                | boundary-element models.                                     |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_cov                   | Read a covariance matrix.                                    |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_epoch                 | Read an epoch of data from the output file of mne_epochs2mat.|
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_events                | Read an event list from a fif file produced by               |
+    |                                | mne_browse_raw or mne_process_raw.                           |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_forward_solution      | Read a forward solution from a fif file.                     |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_inverse_operator      | Read an inverse operator from a fif file.                    |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_morph_map             | Read a morphing map produced with mne_make_morph_maps, see   |
+    |                                | :ref:`CHDBBHDH`.                                             |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_noise_cov             | Read a noise-covariance matrix from a fif file.              |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_source_spaces         | Read source space information from a fif file.               |
+    +--------------------------------+--------------------------------------------------------------+
+
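+As a brief illustration of the readers above, the following sketch loads a
+forward solution, a noise-covariance matrix, and an event list; the file
+names are hypothetical::
+
+    % Read a few MNE data files into Matlab structures
+    fwd    = mne_read_forward_solution('sample-fwd.fif');
+    cov    = mne_read_noise_cov('sample-cov.fif');
+    events = mne_read_events('sample-eve.fif');
+
+The fields of the returned structures are documented in the data structure
+tables below.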
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: High-level routines for writing MNE data files.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_write_cov                  | Write a covariance matrix to an open file.                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_cov_file             | Write a complete file containing just a covariance matrix.   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_events               | Write a fif format event file compatible with mne_browse_raw |
+    |                                | and mne_process_raw.                                         |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_inverse_sol_stc      | Write stc files containing an inverse solution or other      |
+    |                                | dynamic data on the cortical surface.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_inverse_sol_w        | Write w files containing an inverse solution or other static |
+    |                                | data on the cortical surface.                                |
+    +--------------------------------+--------------------------------------------------------------+
+
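+For example, an event list read with mne_read_events can be filtered and
+written back out with mne_write_events. A minimal sketch, assuming the
+event list is a matrix whose third column holds the trigger values and
+using hypothetical file names::
+
+    % Keep only the events with trigger value 5
+    events = mne_read_events('sample-eve.fif');
+    events = events(events(:, 3) == 5, :);
+    mne_write_events('sample-5-eve.fif', events);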
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. _BABBDDAI:
+.. table:: Routines related to stc, w, and label files.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_read_stc_file              | Read data from one stc file. The vertex numbering in the     |
+    |                                | returned structure will start from 0.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_stc_file1             | Read data from one stc file. The vertex numbering in the     |
+    |                                | returned structure will start from 1.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_w_file                | Read data from one w file. The vertex numbering in the       |
+    |                                | returned structure will start from 0.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_w_file1               | Read data from one w file. The vertex numbering in the       |
+    |                                | returned structure will start from 1.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_stc_file             | Write a new stc file. It is assumed that the vertex          |
+    |                                | numbering in the input data structure containing the stc     |
+    |                                | information starts from 0.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_stc_file1            | Write a new stc file. It is assumed that the vertex          |
+    |                                | numbering in the input data structure containing the stc     |
+    |                                | information starts from 1.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_w_file               | Write a new w file. It is assumed that the vertex numbering  |
+    |                                | in the input data structure containing the w file            |
+    |                                | information starts from 0.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_w_file1              | Write a new w file. It is assumed that the vertex numbering  |
+    |                                | in the input data structure containing the w file            |
+    |                                | information starts from 1.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_label_file            | Read a label file (ROI).                                     |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_label_file           | Write a label file (ROI).                                    |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_label_time_courses         | Extract time courses corresponding to a label from an        |
+    |                                | stc file.                                                    |
+    +--------------------------------+--------------------------------------------------------------+
+
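+The only difference between the plain and the '1' variants is the vertex
+numbering convention, which the following sketch makes explicit (the stc
+file name is hypothetical, and the returned structure is assumed to store
+the vertex numbers in a field called vertices)::
+
+    % Contrast the zero- and one-based reading routines
+    stc0 = mne_read_stc_file('sample-lh.stc');   % vertices start from 0
+    stc1 = mne_read_stc_file1('sample-lh.stc');  % vertices start from 1
+    assert(all(stc1.vertices == stc0.vertices + 1));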
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. table:: Routines for reading FreeSurfer surfaces.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_read_curvature             | Read a curvature file.                                       |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_surface               | Read one surface, return the vertex locations and            |
+    |                                | triangulation info.                                          |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_read_surfaces              | Read surfaces corresponding to one or both hemispheres.      |
+    |                                | Optionally read curvature information and add derived        |
+    |                                | surface data.                                                |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_reduce_surface             | Reduce the number of triangles on a surface using the        |
+    |                                | reducepatch Matlab function.                                 |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_write_surface              | Write a FreeSurfer surface file.                             |
+    +--------------------------------+--------------------------------------------------------------+
+
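+For instance, a single surface can be read and displayed with standard
+Matlab graphics. This is a minimal sketch, assuming mne_read_surface
+returns the vertex locations and the one-based triangulation as two arrays
+and using a hypothetical FreeSurfer subject path::
+
+    % Read a FreeSurfer surface and draw it
+    [verts, faces] = mne_read_surface('subjects/sample/surf/lh.white');
+    trisurf(faces, verts(:, 1), verts(:, 2), verts(:, 3), ...
+            'FaceColor', [0.8 0.8 0.8], 'EdgeColor', 'none');
+    axis equal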
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. _BGBEGFBD:
+.. table:: Utility functions.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_block_diag                 | Create a sparse block-diagonal matrix out of a vector.       |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_combine_xyz                | Calculate the square sum of the three Cartesian components   |
+    |                                | of several vectors listed in one row or column vector.       |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_file_name                  | Compose a file name relative to $MNE_ROOT.                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_find_channel               | Find a channel by name from measurement info.                |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_find_source_space_hemi     | Determine whether a given source space belongs to the left   |
+    |                                | or right hemisphere.                                         |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_fread3                     | Read a three-byte integer.                                   |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_fwrite3                    | Write a three-byte integer.                                  |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_make_combined_event_file   | Combine data from several trigger channels into one event    |
+    |                                | file.                                                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_omit_first_line            | Omit first line from a multi-line message. This routine is   |
+    |                                | useful for formatting error messages.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_prepare_inverse_operator   | Prepare inverse operator data for calculating L2             |
+    |                                | minimum-norm solutions and dSPM.                             |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_setup_toolbox              | Set up the MNE Matlab toolbox.                               |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_transform_coordinates      | Transform locations between different coordinate systems.    |
+    |                                | This function uses the output file from                      |
+    |                                | mne_collect_transforms described in :ref:`BABBIFIJ` as input.|
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_transpose_named_matrix     | Create a transpose of a named matrix.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_transform_source_space_to  | Transform source space data to another coordinate frame.     |
+    +--------------------------------+--------------------------------------------------------------+
+
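+Two of the small numerical helpers are perhaps easiest to understand from
+a sketch. The shapes below follow the descriptions above; the outputs
+stated in the comments are what those descriptions imply and should be
+checked against the help texts::
+
+    % mne_block_diag: place blocks of n columns of A on the
+    % diagonal of a sparse matrix
+    A  = rand(3, 6);
+    bd = mne_block_diag(A, 3);   % 6 x 6 sparse with two 3 x 3 blocks
+
+    % mne_combine_xyz: square sum over consecutive x, y, z triplets
+    g   = [1 2 2 0 3 4];         % two three-component vectors
+    amp = mne_combine_xyz(g);    % expected: [9 25]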
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
+.. _BGBEFADJ:
+.. table:: Examples demonstrating the use of the toolbox.
+
+    +--------------------------------+--------------------------------------------------------------+
+    | Function                       | Purpose                                                      |
+    +================================+==============================================================+
+    | mne_ex_average_epochs          | Example of averaging epoch data produced by mne_epochs2mat,  |
+    |                                | see :ref:`BEHFIDCB`.                                         |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_cancel_noise            | Example of noise cancellation procedures.                    |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_compute_inverse         | Example of computing an L2 minimum-norm estimate or a dSPM   |
+    |                                | solution.                                                    |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_data_sets               | Example of listing evoked-response data sets.                |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_evoked_grad_amp         | Compute tangential gradient amplitudes from planar           |
+    |                                | gradiometer data.                                            |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_read_epochs             | Read epoch data from a raw data file.                        |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_read_evoked             | Example of reading evoked-response data.                     |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_read_raw                | Example of reading raw data.                                 |
+    +--------------------------------+--------------------------------------------------------------+
+    | mne_ex_read_write_raw          | Example of processing raw data (read and write).             |
+    +--------------------------------+--------------------------------------------------------------+
+
+
+.. note:: In order for the inverse operator calculation to work correctly with data processed with the Elekta-Neuromag Maxfilter (TM) software, the so-called *processing history* block must be included in data files. Previous versions of the MNE Matlab functions did not copy the processing history to the files they saved. As of March 30, 2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have been enhanced to include these data in the output file as appropriate. If you hav [...]
+
+Some data structures
+####################
+
+The MNE Matlab toolbox relies heavily on structures to organize
+the data. This section gives detailed information about fields in
+the essential data structures employed in the MNE Matlab toolbox.
+In the structure definitions, data types referring to other MNE
+Matlab toolbox structures are shown in italics. In addition, :ref:`BGBJHCGD`
+lists the values of various FIFF constants defined by fiff_define_constants.m.
+The documented structures are:
+
+**tag**
+
+    Contains one tag from the fif file, see :ref:`BGBGIIGD`.
+
+**taginfo**
+
+    Contains the information about one tag, see :ref:`BGBBJBJJ`.
+
+**directory**
+
+    Contains the tag directory as a tree structure, see :ref:`BGBEDHBG`.
+
+**id**
+
+    A fif ID, see :ref:`BGBDAHHJ`.
+
+**named matrix**
+
+    Contains a matrix with names for rows and/or columns, see :ref:`BGBBEDID`.
+    A named matrix is used to store, *e.g.*, SSP vectors and forward solutions.
+
+**trans**
+
+    A 4 x 4 coordinate-transformation matrix operating on augmented column
+    vectors. Indication of the coordinate frames to which this transformation
+    relates is included, see :ref:`BGBDHBIF`.
+
+**dig**
+
+    A Polhemus digitizer data point, see :ref:`BGBHDEDG`.
+
+**coildef**
+
+    The coil definition structure useful for forward calculations and array
+    visualization, see :ref:`BGBGBEBH`. For more detailed information on
+    coil definitions, see :ref:`BJEIAEIE`.
+
+**ch**
+
+    Channel information structure, see :ref:`BGBIABGD`.
+
+**proj**
+
+    Signal-space projection data, see :ref:`BGBCJHJB`.
+
+**comp**
+
+    Software gradiometer compensation data, see :ref:`BGBJDIFD`.
+
+**measurement info**
+
+    Translation of the FIFFB_MEAS_INFO entity, see :ref:`BGBFHDIJ`. This
+    data structure is returned by fiff_read_meas_info.
+
+**surf**
+
+    Used to represent triangulated surfaces and cortical source spaces, see :ref:`BGBEFJCB`.
+
+**cov**
+
+    Used for storing covariance matrices, see :ref:`BGBJJIED`.
+
+**fwd**
+
+    Forward solution data returned by mne_read_forward_solution,
+    see :ref:`BGBFJIBJ`.
+
+**inv**
+
+    Inverse operator decomposition data returned by mne_read_inverse_operator,
+    see :ref:`BGBIEIJE`. For more information on inverse operator
+    decomposition, see :ref:`CBBDJFBJ`. For an example of how to compute
+    an inverse solution using this data, see the sample routine
+    mne_ex_compute_inverse.
+
+.. note:: The MNE Matlab toolbox tries its best to employ vertex numbering starting from 1 as opposed to 0 as recorded in the data files. There are, however, two exceptions where explicit attention to the vertex numbering convention is needed. First, the standard stc and w file reading and writing routines return and assume zero-based vertex numbering. There are now versions with names ending with '1', which return and assume one-based vertex numbering, see :ref:`BABBDDAI`. Second, the [...]
+
+
+.. tabularcolumns:: |p{0.38\linewidth}|p{0.06\linewidth}|p{0.46\linewidth}|
+.. _BGBJHCGD:
+.. table:: FIFF constants.
+
+    +-------------------------------+-------+----------------------------------------------------------+
+    | Name                          | Value | Purpose                                                  |
+    +===============================+=======+==========================================================+
+    | FIFFV_MEG_CH                  | 1     | This is a MEG channel.                                   |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_REF_MEG_CH              | 301   | This is a reference MEG channel, located far away from   |
+    |                               |       | the head.                                                |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_EEG_CH                  | 2     | This is an EEG channel.                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MCG_CH                  | 201   | This is an MCG channel.                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_STIM_CH                 | 3     | This is a digital trigger channel.                       |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_EOG_CH                  | 202   | This is an EOG channel.                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_EMG_CH                  | 302   | This is an EMG channel.                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ECG_CH                  | 402   | This is an ECG channel.                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MISC_CH                 | 502   | This is a miscellaneous analog channel.                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_RESP_CH                 | 602   | This channel contains respiration monitor output.        |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_UNKNOWN           | 0     | Unknown coordinate frame.                                |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_DEVICE            | 1     | The MEG device coordinate frame.                         |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_ISOTRAK           | 2     | The Polhemus digitizer coordinate frame (does not appear |
+    |                               |       | in data files).                                          |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_HPI               | 3     | HPI coil coordinate frame (does not appear in data       |
+    |                               |       | files).                                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_HEAD              | 4     | The MEG head coordinate frame (Neuromag convention).     |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_MRI               | 5     | The MRI coordinate frame.                                |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_MRI_SLICE         | 6     | The coordinate frame of a single MRI slice.              |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_MRI_DISPLAY       | 7     | The preferred coordinate frame for displaying the MRIs   |
+    |                               |       | (used by MRIlab).                                        |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_DICOM_DEVICE      | 8     | The DICOM coordinate frame (does not appear in files).   |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_COORD_IMAGING_DEVICE    | 9     | A generic imaging device coordinate frame (does not      |
+    |                               |       | appear in files).                                        |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_COORD_TUFTS_EEG     | 300   | The Tufts EEG data coordinate frame.                     |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_COORD_CTF_DEVICE    | 1001  | The CTF device coordinate frame (does not appear in      |
+    |                               |       | files).                                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_COORD_CTF_HEAD      | 1004  | The CTF/4D head coordinate frame.                        |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_AVERAGE          | 100   | Data aspect: average.                                    |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_STD_ERR          | 101   | Data aspect: standard error of mean.                     |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_SINGLE           | 102   | Single epoch.                                            |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_SUBAVERAGE       | 103   | One subaverage.                                          |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_ALTAVERAGE       | 104   | One alternating (plus-minus) subaverage.                 |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_SAMPLE           | 105   | A sample cut from raw data.                              |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_POWER_DENSITY    | 106   | Power density spectrum.                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_ASPECT_DIPOLE_WAVE      | 200   | The time course of an equivalent current dipole.         |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_BEM_SURF_ID_UNKNOWN     | -1    | Unknown BEM surface.                                     |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_BEM_SURF_ID_BRAIN       | 1     | The inner skull surface.                                 |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_BEM_SURF_ID_SKULL       | 3     | The outer skull surface.                                 |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_BEM_SURF_ID_HEAD        | 4     | The scalp surface.                                       |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_SURF_LEFT_HEMI      | 101   | Left hemisphere cortical surface.                        |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_SURF_RIGHT_HEMI     | 102   | Right hemisphere cortical surface.                       |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_CARDINAL          | 1     | Digitization point which is a cardinal landmark, also    |
+    |                               |       | known as a fiducial point.                               |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_HPI               | 2     | Digitized HPI coil location.                             |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_EEG               | 3     | Digitized EEG electrode location.                        |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_ECG               | 3     | Digitized ECG electrode location.                        |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_EXTRA             | 4     | Additional head surface point.                           |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_LPA               | 1     | Identifier for the left auricular landmark.              |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_NASION            | 2     | Identifier for the nasion.                               |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_POINT_RPA               | 3     | Identifier for the right auricular landmark.             |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_FIXED_ORI           | 1     | Fixed orientation constraint used in the computation of  |
+    |                               |       | a forward solution.                                      |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_FREE_ORI            | 2     | No orientation constraint used in the computation of     |
+    |                               |       | a forward solution.                                      |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_MEG                 | 1     | Indicates an inverse operator based on MEG only.         |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_EEG                 | 2     | Indicates an inverse operator based on EEG only.         |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_MEG_EEG             | 3     | Indicates an inverse operator based on both MEG and EEG. |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_UNKNOWN_COV         | 0     | An unknown covariance matrix.                            |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_NOISE_COV           | 1     | Indicates a noise covariance matrix.                     |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_SENSOR_COV          | 1     | Synonym for FIFFV_MNE_NOISE_COV.                         |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_SOURCE_COV          | 2     | Indicates a source covariance matrix.                    |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_FMRI_PRIOR_COV      | 3     | Indicates a covariance matrix associated with fMRI       |
+    |                               |       | priors.                                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_SIGNAL_COV          | 4     | Indicates the data (signal + noise) covariance matrix.   |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_DEPTH_PRIOR_COV     | 5     | Indicates the depth prior (depth weighting) covariance   |
+    |                               |       | matrix.                                                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_ORIENT_PRIOR_COV    | 6     | Indicates the orientation (loose orientation constraint) |
+    |                               |       | prior covariance matrix.                                 |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_PROJ_ITEM_NONE          | 0     | The nature of this projection item is unknown.           |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_PROJ_ITEM_FIELD         | 1     | This projection item is a generic field pattern or set   |
+    |                               |       | of field patterns.                                       |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_PROJ_ITEM_DIP_FIX       | 2     | This projection item is the field of one dipole.         |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_PROJ_ITEM_DIP_ROT       | 3     | This projection item corresponds to the fields of three  |
+    |                               |       | or two orthogonal dipoles at some location.              |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_PROJ_ITEM_HOMOG_GRAD    | 4     | This projection item contains the homogeneous gradient   |
+    |                               |       | fields as seen by the sensor array.                      |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_PROJ_ITEM_HOMOG_FIELD   | 5     | This projection item contains the three homogeneous field|
+    |                               |       | components as seen by the sensor array.                  |
+    +-------------------------------+-------+----------------------------------------------------------+
+    | FIFFV_MNE_PROJ_ITEM_EEG_AVREF | 10    | This projection item corresponds to the average EEG      |
+    |                               |       | reference.                                               |
+    +-------------------------------+-------+----------------------------------------------------------+
+
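+Within the toolbox these constants are available from the structure
+returned by fiff_define_constants. As a small sketch, assuming a
+measurement info structure info has already been read, the MEG channels
+can be selected by comparing the channel kinds against FIFFV_MEG_CH::
+
+    % Pick the indices of all MEG channels from measurement info
+    FIFF  = fiff_define_constants();
+    kinds = zeros(1, info.nchan);
+    for k = 1:info.nchan
+        kinds(k) = info.chs(k).kind;
+    end
+    meg_sel = find(kinds == FIFF.FIFFV_MEG_CH);
+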
+.. _BGBGIIGD:
+
+.. table:: The tag structure.
+
+    =======  ===========  ============================================
+    Field    Data type    Description
+    =======  ===========  ============================================
+    kind     int32        The kind of the data item.
+    type     uint32       The data type used to represent the data.
+    size     int32        Size of the data in bytes.
+    next     int32        Byte offset of the next tag in the file.
+    data     various      The data itself.
+    =======  ===========  ============================================
+
+.. _BGBBJBJJ:
+
+.. table:: The taginfo structure.
+
+    =======  ===========  ============================================
+    Field    Data type    Description
+    =======  ===========  ============================================
+    kind     double       The kind of the data item.
+    type     double       The data type used to represent the data.
+    size     double       Size of the data in bytes.
+    pos      double       Byte offset to this tag in the file.
+    =======  ===========  ============================================
+
+.. _BGBEDHBG:
+
+.. table:: The directory structure.
+
+    ============  ============  ================================================================
+    Field         Data type     Description
+    ============  ============  ================================================================
+    block         double        The block id of this directory node.
+    id            id            The unique identifier of this node.
+    parent_id     id            The unique identifier of the node this node was derived from.
+    nent          double        Number of entries in this node.
+    nchild        double        Number of children to this node.
+    dir           taginfo       Information about tags in this node.
+    children      directory     The children of this node.
+    ============  ============  ================================================================
+
+.. _BGBDAHHJ:
+
+.. table:: The id structure.
+
+    ==========  ===========  ============================================================
+    Field       Data type    Description
+    ==========  ===========  ============================================================
+    version     int32        The fif file version (major << 16 | minor).
+    machid      int32(2)     Unique identifier of the computer this id was created on.
+    secs        int32        Time since January 1, 1970 (seconds).
+    usecs       int32        Time since January 1, 1970 (microseconds past secs).
+    ==========  ===========  ============================================================
+
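+The packed version field can be split back into its major and minor parts
+with elementary bit operations, for example::
+
+    % Decode the fif version number of an id structure
+    major = bitshift(id.version, -16);    % high 16 bits
+    minor = bitand(id.version, 65535);    % low 16 bits
+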
+.. _BGBBEDID:
+
+.. table:: The named matrix structure.
+
+    ============  ===========  ======================================================================
+    Field         Data type    Description
+    ============  ===========  ======================================================================
+    nrow          int32        Number of rows.
+    ncol          int32        Number of columns.
+    row_names     cell(*)      The names associated with the rows. This member may be empty.
+    col_names     cell(*)      The names associated with the columns. This member may be empty.
+    data          various      The matrix data, usually of type single or double.
+    ============  ===========  ======================================================================
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBDHBIF:
+.. table:: The trans structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | from                      | int32     | The source coordinate frame, see :ref:`BGBJHCGD`. Look   |
+    |                           |           | for entries starting with FIFFV_COORD or FIFFV_MNE_COORD.|
+    +---------------------------+-----------+----------------------------------------------------------+
+    | to                        | int32     | The destination coordinate frame.                        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | trans                     |double(4,4)| The 4-by-4 coordinate transformation matrix. This        |
+    |                           |           | operates from augmented position column vectors given in |
+    |                           |           | *from* coordinates to give results in *to* coordinates.  |
+    +---------------------------+-----------+----------------------------------------------------------+
+
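+Applying such a transformation to points amounts to multiplying augmented
+column vectors. A minimal sketch, assuming rr is an N x 3 matrix of
+locations given in the *from* coordinate frame of a trans structure t::
+
+    % Transform N x 3 point locations from the 'from' to the 'to' frame
+    res   = t.trans * [rr'; ones(1, size(rr, 1))];
+    rr_to = res(1:3, :)';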
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBHDEDG:
+.. table:: The dig structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | kind                      | int32     | The type of digitizing point. Possible values are listed |
+    |                           |           | in :ref:`BGBJHCGD`. Look for entries starting with       |
+    |                           |           | FIFFV_POINT.                                             |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | ident                     | int32     | Identifier for this point.                               |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | r                         | single(3) | The location of this point.                              |
+    +---------------------------+-----------+----------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBGBEBH:
+.. table:: The coildef structure. For more detailed information, see :ref:`BJEIAEIE`.
+
+    +-------------------+-------------------+----------------------------------------------------------+
+    | Field             | Data Type         | Description                                              |
+    +===================+===================+==========================================================+
+    | class             | double            | The coil (or electrode) class.                           |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | id                | double            | The coil (or electrode) id.                              |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | accuracy          | double            | Representation accuracy.                                 |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | num_points        | double            | Number of integration points.                            |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | size              | double            | Coil size.                                               |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | baseline          | double            | Coil baseline.                                           |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | description       | char(*)           | Coil description.                                        |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | coildefs          | double            | Each row contains the integration point weight, followed |
+    |                   | (num_points,7)    | by location [m] and normal.                              |
+    +-------------------+-------------------+----------------------------------------------------------+
+    | FV                | struct            | Contains the faces and vertices which can be used to     |
+    |                   |                   | draw the coil for visualization.                         |
+    +-------------------+-------------------+----------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBIABGD:
+.. table:: The ch structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | scanno                    | int32     | Scanning order number, starting from 1.                  |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | logno                     | int32     | Logical channel number. Conventions in the usage of this |
+    |                           |           | number vary.                                             |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | kind                      | int32     | The channel type (FIFFV_MEG_CH, FIFFV_EEG_CH, etc., see  |
+    |                           |           | :ref:`BGBJHCGD`).                                        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | range                     | double    | The hardware-oriented part of the calibration factor.    |
+    |                           |           | This should be only applied to the continuous raw data.  |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | cal                       | double    | The calibration factor to bring the channels to physical |
+    |                           |           | units.                                                   |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | loc                       | double(12)| The channel location. The first three numbers indicate   |
+    |                           |           | the location [m], followed by the three unit vectors of  |
+    |                           |           | the channel-specific coordinate frame. These data contain|
+    |                           |           | the values saved in the fif file and should not be       |
+    |                           |           | changed. The values are specified in device coordinates  |
+    |                           |           | for MEG and in head coordinates for EEG channels,        |
+    |                           |           | respectively.                                            |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | coil_trans                |double(4,4)| Initially, transformation from the channel coordinates   |
+    |                           |           | to device coordinates. This transformation is updated by |
+    |                           |           | calls to fiff_transform_meg_chs and                      |
+    |                           |           | fiff_transform_eeg_chs.                                  |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | eeg_loc                   | double(6) | The location of the EEG electrode in coord_frame         |
+    |                           |           | coordinates. The first three values contain the location |
+    |                           |           | of the electrode [m]. If six values are present, the     |
+    |                           |           | remaining ones indicate the location of the reference    |
+    |                           |           | electrode for this channel.                              |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | coord_frame               | int32     | Initially, the coordinate frame is FIFFV_COORD_DEVICE    |
+    |                           |           | for MEG channels and FIFFV_COORD_HEAD for EEG channels.  |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | unit                      | int32     | Unit of measurement. Relevant values are: 201 = T/m,     |
+    |                           |           | 112 = T, 107 = V, and 202 = Am.                          |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | unit_mul                  | int32     | The data are given in units multiplied by 10^unit_mul.   |
+    |                           |           | Presently, unit_mul is always zero.                      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | ch_name                   | char(*)   | Name of the channel.                                     |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | coil_def                  | coildef   | The coil definition structure. This is present only if   |
+    |                           |           | mne_add_coil_defs has been successfully called.          |
+    +---------------------------+-----------+----------------------------------------------------------+
+
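+The two calibration fields combine multiplicatively: for continuous raw
+data, a sample in physical units is obtained as range * cal * raw value.
+A minimal sketch for one channel k of a hypothetical raw data matrix
+rawdata, given a measurement info structure info::
+
+    % Bring one raw channel to physical units
+    c = info.chs(k);
+    calibrated = c.range * c.cal * double(rawdata(k, :));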
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBCJHJB:
+.. table:: The proj structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | kind                      | int32     | The type of the projection item. Possible values are     |
+    |                           |           | listed in :ref:`BGBJHCGD`. Look for entries starting     |
+    |                           |           | with FIFFV_PROJ_ITEM or FIFFV_MNE_PROJ_ITEM.             |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | active                    | int32     | Whether this item is active, i.e., applied or about to   |
+    |                           |           | be applied to the data.                                  |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | data                      | named     | The projection vectors. The column names indicate the    |
+    |                           | matrix    | names of the channels associated with the elements of    |
+    |                           |           | the vectors.                                             |
+    +---------------------------+-----------+----------------------------------------------------------+
+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBJDIFD:
+.. table:: The comp structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | ctfkind                   | int32     | The kind of the compensation as stored in the file.      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | kind                      | int32     | ctfkind mapped into small integer numbers.               |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | save_calibrated           | logical   | Whether the compensation data were saved in calibrated   |
+    |                           |           | form. If this field is false, the matrix will be         |
+    |                           |           | decalibrated using the fields row_cals and col_cals when |
+    |                           |           | the compensation data are saved by the toolbox.          |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | row_cals                  | double(*) | Calibration factors applied to the rows of the           |
+    |                           |           | compensation data matrix when the data were read.        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | col_cals                  | double(*) | Calibration factors applied to the columns of the        |
+    |                           |           | compensation data matrix when the data were read.        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | data                      | named     | The compensation data matrix. The row_names list the     |
+    |                           | matrix    | names of the channels to which this compensation applies |
+    |                           |           | and the col_names the compensation channels. For more    |
+    |                           |           | information, see :ref:`BEHDDFBI`.                        |
+    +---------------------------+-----------+----------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBFHDIJ:
+.. table:: The meas info structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | file_id                   | id        | The fif ID of the measurement file.                      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | meas_id                   | id        | The ID assigned to this measurement by the acquisition   |
+    |                           |           | system or during file conversion.                        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | nchan                     | int32     | Number of channels.                                      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | sfreq                     | double    | Sampling frequency.                                      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | highpass                  | double    | Highpass corner frequency [Hz]. Zero indicates a DC      |
+    |                           |           | recording.                                               |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | lowpass                   | double    | Lowpass corner frequency [Hz].                           |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | chs                       | ch(nchan) | An array of channel information structures.              |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | ch_names                  |cell(nchan)| Cell array of channel names.                             |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | dev_head_t                | trans     | The device to head transformation.                       |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | ctf_head_t                | trans     | The transformation from 4D/CTF head coordinates to       |
+    |                           |           | Neuromag head coordinates. This is only present in       |
+    |                           |           | 4D/CTF data.                                             |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | dev_ctf_t                 | trans     | The transformation from device coordinates to 4D/CTF     |
+    |                           |           | head coordinates. This is only present in 4D/CTF data.   |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | dig                       | dig(*)    | The Polhemus digitization data in head coordinates.      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | bads                      | cell(*)   | Bad channel list.                                        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | projs                     | proj(*)   | SSP operator data.                                       |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | comps                     | comp(*)   | Software gradient compensation data.                     |
+    +---------------------------+-----------+----------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBEFJCB:
+
+.. table:: The surf structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | id                        | int32     | The surface ID.                                          |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | sigma                     | double    | The electrical conductivity of the compartment bounded by|
+    |                           |           | this surface. This field is present in BEM surfaces only.|
+    +---------------------------+-----------+----------------------------------------------------------+
+    | np                        | int32     | Number of vertices on the surface.                       |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | ntri                      | int32     | Number of triangles on the surface.                      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | coord_frame               | int32     | Coordinate frame in which the locations and orientations |
+    |                           |           | are expressed.                                           |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | rr                        | double    | The vertex locations.                                    |
+    |                           | (np,3)    |                                                          |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | nn                        | double    | The vertex normals. If derived surface data was not      |
+    |                           | (np,3)    | requested, this is empty.                                |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | tris                      | int32     | Vertex numbers of the triangles in counterclockwise      |
+    |                           | (ntri,3)  | order as seen from the outside.                          |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | nuse                      | int32     | Number of active vertices, *i.e.*, vertices included in  |
+    |                           |           | a decimated source space.                                |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | inuse                     | int32(np) | Which vertices are in use.                               |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | vertno                    |int32(nuse)| Indices of the vertices in use.                          |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | curv                      | double(np)| Curvature values at the vertices. If curvature           |
+    |                           |           | information was not requested, this field is empty or    |
+    |                           |           | absent.                                                  |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | tri_area                  | double    | The triangle areas in m². If derived surface data was    |
+    |                           | (ntri)    | not requested, this field will be missing.               |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | tri_cent                  | double    | The triangle centroids. If derived surface data was not  |
+    |                           | (ntri,3)  | requested, this field will be missing.                   |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | tri_nn                    | double    | The triangle normals. If derived surface data was not    |
+    |                           | (ntri,3)  | requested, this field will be missing.                   |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | nuse_tri                  | int32     | Number of triangles in use. This is present only if the  |
+    |                           |           | surface corresponds to a source space created with the   |
+    |                           |           | ``--ico`` option.                                        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | use_tris                  | int32     | The vertices of the triangles in use in the complete     |
+    |                           | (nuse_tri)| triangulation. This is present only if the surface       |
+    |                           |           | corresponds to a source space created with the           |
+    |                           |           | ``--ico`` option.                                        |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | nearest                   | int32(np) | This field is present only if patch information has been |
+    |                           |           | computed for a source space. For each vertex in the      |
+    |                           |           | triangulation, these values indicate the nearest active  |
+    |                           |           | source space vertex.                                     |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | nearest_dist              | double(np)| This field is present only if patch information has been |
+    |                           |           | computed for a source space. For each vertex in the      |
+    |                           |           | triangulation, these values indicate the distance to the |
+    |                           |           | nearest active source space vertex.                      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | dist                      | double    | Distances between vertices on this surface given as a    |
+    |                           | (np,np)   | sparse matrix. A zero off-diagonal entry in this matrix  |
+    |                           |           | indicates that the corresponding distance has not been   |
+    |                           |           | calculated.                                              |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | dist_limit                | double    | The value given to mne_add_patch_info with the ``--dist``|
+    |                           |           | option, see :ref:`CJAGCDCC`. This value is presently     |
+    |                           |           | always negative, indicating that only distances between  |
+    |                           |           | active source space vertices, as indicated by the vertno |
+    |                           |           | field of this structure, have been calculated.           |
+    +---------------------------+-----------+----------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBJJIED:
+
+.. table:: The cov structure.
+
+    +---------------------------+-----------+----------------------------------------------------------+
+    | Field                     | Data Type | Description                                              |
+    +===========================+===========+==========================================================+
+    | kind                      | double    | What kind of a covariance matrix (1 = noise covariance,  |
+    |                           |           | 2 = source covariance).                                  |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | diag                      | double    | Is this a diagonal matrix.                               |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | dim                       | int32     | Dimension of the covariance matrix.                      |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | names                     | cell(*)   | Names of the channels associated with the entries        |
+    |                           |           | (may be empty).                                          |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | data                      | double    | The covariance matrix. This is a double(dim) vector for  |
+    |                           | (dim,dim) | a diagonal covariance matrix.                            |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | projs                     | proj(*)   | The SSP vectors applied to these data.                   |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | bads                      | cell(*)   | Bad channel names.                                       |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | nfree                     | int32     | Number of data points used to compute this matrix.       |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | eig                       |double(dim)| The eigenvalues of the covariance matrix. This field may |
+    |                           |           | be empty for a diagonal covariance matrix.               |
+    +---------------------------+-----------+----------------------------------------------------------+
+    | eigvec                    | double    | The eigenvectors of the covariance matrix.               |
+    |                           | (dim,dim) |                                                          |
+    +---------------------------+-----------+----------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBFJIBJ:
+
+.. table:: The fwd structure.
+
+    +-------------------------+-------------+----------------------------------------------------------+
+    | Field                   | Data Type   | Description                                              |
+    +=========================+=============+==========================================================+
+    | source_ori              | int32       | Has the solution been computed for the current component |
+    |                         |             | normal to the cortex only (1) or all three source        |
+    |                         |             | orientations (2).                                        |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | coord_frame             | int32       | Coordinate frame in which the locations and orientations |
+    |                         |             | are expressed.                                           |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | nsource                 | int32       | Total number of source space points.                     |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | nchan                   | int32       | Number of channels.                                      |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | sol                     | named       | The forward solution matrix.                             |
+    |                         | matrix      |                                                          |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | sol_grad                | named       | The derivatives of the forward solution with respect to  |
+    |                         | matrix      | the dipole location coordinates, see :ref:`BJEFEJJG`.    |
+    |                         |             | This field is present only if the forward solution was   |
+    |                         |             | computed with the ``--grad`` option, see :ref:`BJEIGFAE`.|
+    +-------------------------+-------------+----------------------------------------------------------+
+    | mri_head_t              | trans       | Transformation from the MRI coordinate frame to the      |
+    |                         |             | (Neuromag) head coordinate frame.                        |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | src                     | surf(:)     | The description of the source spaces.                    |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | source_rr               | double      | The source locations.                                    |
+    |                         | (nsource,3) |                                                          |
+    +-------------------------+-------------+----------------------------------------------------------+
+    | source_nn               | double(:,3) | The source orientations. Number of rows is either        |
+    |                         |             | nsource (fixed source orientations) or 3*nsource         |
+    |                         |             | (all source orientations).                               |
+    +-------------------------+-------------+----------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
+.. _BGBIEIJE:
+
+.. table:: The inv structure. Note: The fields proj, whitener, reginv, and noisenorm are filled in by the routine mne_prepare_inverse_operator.
+
+    +---------------------+-------------+----------------------------------------------------------+
+    | Field               | Data Type   | Description                                              |
+    +=====================+=============+==========================================================+
+    | methods             | int32       | Has the solution been computed using MEG data (1), EEG   |
+    |                     |             | data (2), or both (3).                                   |
+    +---------------------+-------------+----------------------------------------------------------+
+    | source_ori          | int32       | Has the solution been computed for the current component |
+    |                     |             | normal to the cortex only (1) or all three source        |
+    |                     |             | orientations (2).                                        |
+    +---------------------+-------------+----------------------------------------------------------+
+    | nsource             | int32       | Total number of source space points.                     |
+    +---------------------+-------------+----------------------------------------------------------+
+    | nchan               | int32       | Number of channels.                                      |
+    +---------------------+-------------+----------------------------------------------------------+
+    | coord_frame         | int32       | Coordinate frame in which the locations and orientations |
+    |                     |             | are expressed.                                           |
+    +---------------------+-------------+----------------------------------------------------------+
+    | source_nn           | double(:,3) | The source orientations. Number of rows is either        |
+    |                     |             | nsource (fixed source orientations) or 3*nsource (all    |
+    |                     |             | source orientations).                                    |
+    +---------------------+-------------+----------------------------------------------------------+
+    | sing                | double      | The singular values, *i.e.*, the diagonal values of      |
+    |                     | (nchan)     | :math:`\Lambda`, see :ref:`CHDBEHBC`.                    |
+    +---------------------+-------------+----------------------------------------------------------+
+    | eigen_leads         | double      | The matrix :math:`V`, see :ref:`CHDBEHBC`.               |
+    |                     | (:,nchan)   |                                                          |
+    +---------------------+-------------+----------------------------------------------------------+
+    | eigen_fields        | double      | The matrix :math:`U^T`, see :ref:`CHDBEHBC`.             |
+    |                     | (nchan,     |                                                          |
+    |                     | nchan)      |                                                          |
+    +---------------------+-------------+----------------------------------------------------------+
+    | noise_cov           | cov         | The noise covariance matrix :math:`C`.                   |
+    +---------------------+-------------+----------------------------------------------------------+
+    | source_cov          | cov         | The source covariance matrix :math:`R`.                  |
+    +---------------------+-------------+----------------------------------------------------------+
+    | src                 | surf(:)     | The description of the source spaces.                    |
+    +---------------------+-------------+----------------------------------------------------------+
+    | mri_head_t          | trans       | Transformation from the MRI coordinate frame to the      |
+    |                     |             | (Neuromag) head coordinate frame.                        |
+    +---------------------+-------------+----------------------------------------------------------+
+    | nave                | double      | The number of averages.                                  |
+    +---------------------+-------------+----------------------------------------------------------+
+    | projs               | proj(:)     | The SSP vectors which were active when the decomposition |
+    |                     |             | was computed.                                            |
+    +---------------------+-------------+----------------------------------------------------------+
+    | proj                | double      | The projection operator computed using projs.            |
+    |                     | (nchan,     |                                                          |
+    |                     | nchan)      |                                                          |
+    +---------------------+-------------+----------------------------------------------------------+
+    | whitener            |             | The spatial whitening operator computed from the         |
+    |                     |             | noise-covariance matrix, see :ref:`CHDDHAGE`.            |
+    +---------------------+-------------+----------------------------------------------------------+
+    | reginv              | double      | The diagonal matrix :math:`\Gamma`, see :ref:`CHDBEHBC`. |
+    |                     | (nchan)     |                                                          |
+    +---------------------+-------------+----------------------------------------------------------+
+    | noisenorm           | double(:)   | A sparse matrix containing the noise normalization       |
+    |                     |             | factors. Dimension is either nsource (fixed source       |
+    |                     |             | orientations) or 3*nsource (all source orientations).    |
+    +---------------------+-------------+----------------------------------------------------------+
+
+
+On-line documentation for individual routines
+#############################################
+
+Each of the routines listed in Tables :ref:`BGBCGHAG` - :ref:`BGBEFADJ` has on-line documentation accessible by saying ``help`` <*routine name*> in Matlab.
diff --git a/doc/source/manual/mne.rst b/doc/source/manual/mne.rst
new file mode 100644
index 0000000..3e50588
--- /dev/null
+++ b/doc/source/manual/mne.rst
@@ -0,0 +1,1323 @@
+
+
+.. _ch_mne:
+
+=====================
+The current estimates
+=====================
+
+Overview
+########
+
+This chapter describes the computation of the minimum-norm
+estimates. This is accomplished with two programs: *mne_inverse_operator* and *mne_make_movie*.
+The chapter starts with a mathematical description of the method,
+followed by descriptions of the two software modules. The interactive
+program for inspecting data and inverse solutions, mne_analyze,
+is covered in :ref:`ch_interactive_analysis`.
+
+.. _CBBDJFBJ:
+
+Minimum-norm estimates
+######################
+
+This section describes the mathematical details of the calculation
+of minimum-norm estimates. In the Bayesian sense, the ensuing current
+distribution is the maximum a posteriori (MAP) estimate under the
+following assumptions:
+
+- The viable locations of the currents
+  are constrained to the cortex. Optionally, the current orientations
+  can be fixed to be normal to the cortical mantle.
+
+- The amplitudes of the currents have a Gaussian prior distribution
+  with a known source covariance matrix.
+
+- The measured data contain additive noise with a Gaussian distribution with
+  a known covariance matrix. The noise is not correlated over time.
+
+The linear inverse operator
+===========================
+
+The measured data in the source estimation procedure consists
+of MEG and EEG data, recorded on a total of N channels. The task
+is to estimate a total of M strengths of sources located on the
+cortical mantle. If the number of source locations is P, M = P for
+fixed-orientation sources and M = 3P if the source orientations
+are unconstrained. The regularized linear inverse operator following
+from the Bayesian approach is given by the :math:`M \times N` matrix
+
+.. math::    M = R' G^T (G R' G^T + C)^{-1}\ ,
+
+where G is the gain matrix relating the source strengths
+to the measured MEG/EEG data, :math:`C` is the data noise-covariance matrix
+and :math:`R'` is the source covariance matrix.
+The dimensions of these matrices are :math:`N \times M`, :math:`N \times N`,
+and :math:`M \times M`, respectively. The :math:`M \times 1` source-strength
+vector is obtained by multiplying the :math:`N \times 1` data
+vector by :math:`M`.
+
+The expected value of the current amplitudes at time *t* is
+then given by :math:`\hat{j}(t) = Mx(t)`, where :math:`x(t)` is
+a vector containing the measured MEG and EEG data values at time *t*.
+
+.. _CBBHAAJJ:
+
+Regularization
+==============
+
+The a priori variance of the currents is, in practice, unknown.
+We can express this by writing :math:`R' = R/ \lambda^2`,
+which yields the inverse operator
+
+.. math::    M = R G^T (G R G^T + \lambda^2 C)^{-1}\ ,
+
+where the unknown current amplitude is now interpreted in
+terms of the regularization parameter :math:`\lambda^2`.
+A small :math:`\lambda^2` corresponds to large current amplitudes
+and complex estimated current patterns while a large :math:`\lambda^2` means the
+amplitude of the current is limited and a simpler, smoother current
+estimate is obtained.
+
+We can also arrive at the regularized linear inverse operator
+by minimizing the cost function
+
+.. math::    S = \tilde{e}^T \tilde{e} + \lambda^2 j^T R^{-1} j\ ,
+
+where the first term consists of the difference between the
+whitened measured data (see :ref:`CHDDHAGE`) and those predicted
+by the model while the second term is a weighted-norm of the current
+estimate. It is seen that, with increasing :math:`\lambda^2`,
+the source term receives more weight and a larger discrepancy between
+the measured and predicted data is tolerated.
+
+.. _CHDDHAGE:
+
+Whitening and scaling
+=====================
+
+The MNE software employs data whitening so that a 'whitened' inverse operator
+assumes the form
+
+.. math::    \tilde{M} = R \tilde{G}^T (\tilde{G} R \tilde{G}^T + I)^{-1}\ ,
+
+where :math:`\tilde{G} = C^{-^1/_2}G` is the spatially
+whitened gain matrix. The expected current values are :math:`\hat{j}(t) = \tilde{M} \tilde{x}(t)`,
+where :math:`\tilde{x}(t) = C^{-^1/_2}x(t)` is the whitened measurement
+vector at *t*. The spatial whitening operator
+is obtained with the help of the eigenvalue decomposition :math:`C = U_C \Lambda_C^2 U_C^T` as :math:`C^{-^1/_2} = \Lambda_C^{-1} U_C^T`.
+In the MNE software the noise-covariance matrix is stored as the
+one applying to raw data. To reflect the decrease of noise due to
+averaging, this matrix, :math:`C_0`, is scaled
+by the number of averages, :math:`L`, *i.e.*, :math:`C = C_0 / L`.
+
+As shown above, regularization of the inverse solution is
+equivalent to a change in the variance of the current amplitudes
+in the Bayesian *a priori* distribution.
+
+A convenient choice for the source-covariance matrix :math:`R` is
+such that :math:`\text{trace}(\tilde{G} R \tilde{G}^T) / \text{trace}(I) = 1`. With this choice we
+can approximate :math:`\lambda^2 \sim 1/SNR`, where SNR is
+the (power) signal-to-noise ratio of the whitened data.
+
+.. note:: The definition of the signal-to-noise ratio / :math:`\lambda^2` relationship given above works nicely for the whitened forward solution. In the un-whitened case scaling with the trace ratio :math:`\text{trace}(GRG^T) / \text{trace}(C)` does not make sense, since the diagonal elements summed have, in general, different units of measure. For example, the MEG data are expressed in T or T/m whereas the unit of EEG is Volts.
+
+.. _CBBHEGAB:
+
+Regularization of the noise-covariance matrix
+=============================================
+
+Since a finite amount of data is usually available to compute
+an estimate of the noise-covariance matrix :math:`C`,
+the smallest eigenvalues of its estimate are usually inaccurate
+and smaller than the true eigenvalues. Depending on the seriousness
+of this problem, the following quantities can be affected:
+
+- The model data predicted by the current
+  estimate,
+
+- Estimates of signal-to-noise ratios, which lead to estimates
+  of the required regularization, see :ref:`CBBHAAJJ`,
+
+- The estimated current values, and
+
+- The noise-normalized estimates, see :ref:`CBBEAICH`.
+
+Fortunately, the latter two are the least likely to be affected,
+due to regularization of the estimates. However, in some cases, especially
+the EEG part of the noise-covariance matrix estimate can be deficient, *i.e.*,
+it may possess very small eigenvalues, and thus regularization of
+the noise-covariance matrix is advisable.
+
+The MNE software accomplishes the regularization by replacing
+a noise-covariance matrix estimate :math:`C` with
+
+.. math::    C' = C + \sum_k {\varepsilon_k \bar{\sigma_k}^2 I^{(k)}}\ ,
+
+where the index :math:`k` goes across
+the different channel groups (MEG planar gradiometers, MEG axial
+gradiometers and magnetometers, and EEG), :math:`\varepsilon_k` are
+the corresponding regularization factors, :math:`\bar{\sigma_k}` are
+the average variances across the channel groups, and :math:`I^{(k)}` are
+diagonal matrices containing ones at the positions corresponding
+to the channels contained in each channel group. The values :math:`\varepsilon_k` can
+be adjusted with the regularization options ``--magreg``, ``--gradreg``,
+and ``--eegreg`` specified at the time of the inverse operator
+decomposition, see :ref:`CBBDDBGF`. The convenience script mne_do_inverse_operator combines
+the ``--magreg`` and ``--gradreg`` options into
+a single option, ``--megreg``, see :ref:`CIHCFJEI`.
+Suggested range of values for :math:`\varepsilon_k` is :math:`0.05 \dotso 0.2`.
+
+.. _CHDBEHBC:
+
+Computation of the solution
+===========================
+
+The most straightforward approach to calculating the MNE is
+to employ the expression for the original or whitened inverse operator
+directly. However, for computational convenience we prefer to take
+another route, which employs the singular-value decomposition (SVD)
+of the matrix
+
+.. math::    A = \tilde{G} R^{^1/_2} = U \Lambda V^T
+
+where the superscript :math:`^1/_2` indicates a
+square root of :math:`R`. For a diagonal matrix,
+one simply takes the square root of :math:`R` while
+in the more general case one can use the Cholesky factorization :math:`R = R_C R_C^T` and
+thus :math:`R^{^1/_2} = R_C`.
+
+With the above SVD it is easy to show that
+
+.. math::    \tilde{M} = R^{^1/_2} V \Gamma U^T
+
+where the elements of the diagonal matrix :math:`\Gamma` are
+
+.. math::    \gamma_k = \frac{1}{\lambda_k} \frac{\lambda_k^2}{\lambda_k^2 + \lambda^2}\ .
+
+With :math:`w(t) = U^T C^{-^1/_2} x(t)` the expression for
+the expected current is
+
+.. math::    \hat{j}(t) = R^{^1/_2} V \Gamma w(t) = \sum_k {\bar{v_k} \gamma_k w_k(t)}\ ,
+
+where :math:`\bar{v_k} = R^{^1/_2} v_k`, :math:`v_k` being
+the :math:`k` th column of :math:`V`. It is thus seen that the current estimate is
+a weighted sum of the 'modified' eigenleads :math:`\bar{v_k}`.
+
+It is easy to see that :math:`w(t) \propto \sqrt{L}`.
+To maintain the relation :math:`\text{trace}(\tilde{G} R \tilde{G}^T) / \text{trace}(I) = 1` when :math:`L` changes
+we must have :math:`R \propto 1/L`. With this approach, :math:`\lambda_k` is
+independent of :math:`L` and, for fixed :math:`\lambda`,
+we see directly that :math:`\hat{j}(t)` is independent
+of :math:`L`.
+
+.. _CBBEAICH:
+
+Noise normalization
+===================
+
+The noise-normalized linear estimates introduced by Dale
+et al. require division of the expected current amplitude by its
+variance. Noise normalization serves three purposes:
+
+- It converts the expected current value
+  into a dimensionless statistical test variable. Thus the resulting
+  time and location dependent values are often referred to as dynamic
+  statistical parameter maps (dSPM).
+
+- It reduces the location bias of the estimates. In particular,
+  the tendency of the MNE to prefer superficial currents is eliminated.
+
+- The width of the point-spread function becomes less dependent
+  on the source location on the cortical mantle. The point-spread
+  is defined as the MNE resulting from the signals coming from a point
+  current source (a current dipole) located at a certain point on
+  the cortex.
+
+In practice, noise normalization requires the computation
+of the diagonal elements of the matrix
+
+.. math::    M C M^T = \tilde{M} \tilde{M}^T\ .
+
+With the help of the singular-value decomposition approach we
+see directly that
+
+.. math::    \tilde{M} \tilde{M}^T\ = \bar{V} \Gamma^2 \bar{V}^T\ .
+
+Under the conditions expressed at the end of :ref:`CHDBEHBC`, it follows that the *t*-statistic values associated
+with fixed-orientation sources are proportional to :math:`\sqrt{L}` while
+the *F*-statistic employed with free-orientation sources is proportional
+to :math:`L`, correspondingly.
+
+.. note:: A section discussing statistical considerations related to the noise normalization procedure will be added to this manual in one of the subsequent releases.
+
+.. note:: The MNE software usually computes the square roots of the F-statistic to be displayed on the inflated cortical surfaces. These are also proportional to :math:`\sqrt{L}`.
+
+.. _CHDCACDC:
+
+Predicted data
+==============
+
+Under noiseless conditions the SNR is infinite and thus leads
+to :math:`\lambda^2 = 0` and the minimum-norm estimate
+explains the measured data perfectly. Under realistic conditions,
+however, :math:`\lambda^2 > 0` and there is a misfit
+between measured data and those predicted by the MNE. Comparison
+of the predicted data, here denoted by :math:`\hat{x}(t)`,
+and the measured data can give valuable insight into the correctness of
+the regularization applied.
+
+In the SVD approach we easily find
+
+.. math::    \hat{x}(t) = G \hat{j}(t) = C^{^1/_2} U \Pi w(t)\ ,
+
+where the diagonal matrix :math:`\Pi` has
+elements :math:`\pi_k = \lambda_k \gamma_k`. The predicted data is
+thus expressed as the weighted sum of the 'recolored eigenfields' in :math:`C^{^1/_2} U`.
+
+.. _CBBDBHDI:
+
+Cortical patch statistics
+=========================
+
+If the ``--cps`` option was used in source space
+creation (see :ref:`CIHCHDAE`) or if mne_add_patch_info, described
+in :ref:`BEHCBCGG`, was run manually, the source space file
+will contain for each vertex of the cortical surface the information
+about the source space point closest to it as well as the distance
+from the vertex to this source space point. The vertices for which
+a given source space point is the nearest one define the cortical
+patch associated with the source space point. Once these data
+are available, it is straightforward to compute the following cortical
+patch statistics (CPS) for each source location :math:`d` (a sketch
+follows the list):
+
+- The average over the normals at the
+  vertices in a patch, :math:`\bar{n_d}`,
+
+- The areas of the patches, :math:`A_d`,
+  and
+
+- The average deviation of the vertex normals in a patch from
+  their average, :math:`\sigma_d`, given in degrees.
+
+The orientation constraints
+===========================
+
+The principal sources of MEG and EEG signals are generally
+believed to be postsynaptic currents in the cortical pyramidal neurons.
+Since the net primary current associated with these microscopic
+events is oriented normal to the cortical mantle, it is reasonable
+to use the cortical normal orientation as a constraint in source
+estimation. In addition to allowing completely free source orientations,
+the MNE software implements three orientation constraints based
+on the surface normal data:
+
+- Source orientation can be rigidly fixed
+  to the surface normal direction (the ``--fixed`` option).
+  If cortical patch statistics are available the average normal over
+  each patch, :math:`\bar{n_d}`, is used to define
+  the source orientation. Otherwise, the vertex normal at the source
+  space location is employed.
+
+- A *location independent or fixed loose orientation
+  constraint* (fLOC) can be employed (the ``--loose`` option).
+  In this approach, a source coordinate system based on the local
+  surface orientation at the source location is employed. By default,
+  the three columns of the gain matrix G, associated with a given
+  source location, are the fields of unit dipoles pointing to the
+  directions of the x, y, and z axis of the coordinate system employed
+  in the forward calculation (usually the MEG head coordinate frame).
+  For fLOC the orientation is changed so that the first two source
+  components lie in the plane normal to the surface normal at the source
+  location and the third component is aligned with it. Thereafter, the
+  variance of the source components tangential to the cortical surface is
+  reduced by a factor defined by the ``--loose`` option, as illustrated
+  in the sketch after this list.
+
+- A *variable loose orientation constraint* (vLOC)
+  can be employed (the ``--loosevar`` option). This is similar
+  to fLOC except that the value given with the ``--loosevar`` option
+  will be multiplied by :math:`\sigma_d`, defined above.
+
+.. _CBBDFJIE:
+
+Depth weighting
+===============
+
+The minimum-norm estimates have a bias towards superficial
+currents. This tendency can be alleviated by adjusting the source
+covariance matrix :math:`R` to favor deeper source locations. In the depth
+weighting scheme employed in the MNE software, the elements of :math:`R` corresponding
+to the :math:`p` th source location are
+scaled by a factor
+
+.. math::    f_p = (g_{1p}^T g_{1p} + g_{2p}^T g_{2p} + g_{3p}^T g_{3p})^{-\gamma}\ ,
+
+where :math:`g_{1p}`, :math:`g_{2p}`, and :math:`g_{3p}` are the three columns
+of :math:`G` corresponding to source location :math:`p` and :math:`\gamma` is
+the order of the depth weighting, specified with the ``--weightexp`` option
+to mne_inverse_operator. The
+maximal amount of depth weighting can be adjusted with the ``--weightlimit`` option.
+
+.. _CBBDIJHI:
+
+fMRI-guided estimates
+=====================
+
+The fMRI weighting in the MNE software means that the source-covariance matrix
+is modified to favor areas of significant fMRI activation. For this purpose,
+the fMRI activation map is thresholded first at the value defined by
+the ``--fmrithresh`` option to mne_do_inverse_operator or mne_inverse_operator .
+Thereafter, the source-covariance matrix values corresponding to
+the sites under the threshold are multiplied by :math:`f_{off}`, set
+by the ``--fmrioff`` option.
+
+It turns out that the fMRI weighting has a strong influence
+on the MNE but the noise-normalized estimates are much less affected
+by it.
+
+.. _CBBDGIAE:
+
+Effective number of averages
+############################
+
+It is often the case that the epoch to be analyzed is a linear
+combination over conditions rather than one of the original averages
+computed. As stated above, the noise-covariance matrix computed
+is originally one corresponding to raw data. Therefore, it has to
+be scaled correctly to correspond to the actual or effective number
+of epochs in the condition to be analyzed. In general, we have
+
+.. math::    C = C_0 / L_{eff}
+
+where :math:`L_{eff}` is the effective
+number of averages. To calculate :math:`L_{eff}` for
+an arbitrary linear combination of conditions
+
+.. math::    y(t) = \sum_{i = 1}^n {w_i x_i(t)}
+
+we make use of the fact that the noise-covariance matrix
+
+.. math::    C_y = \sum_{i = 1}^n {w_i^2 C_{x_i}} = C_0 \sum_{i = 1}^n {w_i^2 / L_i}
+
+which leads to
+
+.. math::    1 / L_{eff} = \sum_{i = 1}^n {w_i^2 / L_i}
+
+An important special case of the above is a weighted average,
+where
+
+.. math::    w_i = L_i / \sum_{i = 1}^n {L_i}
+
+and, therefore
+
+.. math::    L_{eff} = \sum_{i = 1}^n {L_i}
+
+Instead of a weighted average, one often computes a weighted
+sum, the simplest case being a difference or sum of two categories.
+For a difference :math:`w_1 = 1` and :math:`w_2 = -1` and
+thus
+
+.. math::    1 / L_{eff} = 1 / L_1 + 1 / L_2
+
+or
+
+.. math::    L_{eff} = \frac{L_1 L_2}{L_1 + L_2}
+
+Interestingly, the same holds for a sum, where :math:`w_1 = w_2 = 1`.
+Generalizing, for any combination of sums and differences, where :math:`w_i = 1` or :math:`w_i = -1`, :math:`i = 1 \dotso n`,
+we have
+
+.. math::    1 / L_{eff} = \sum_{i = 1}^n {1/{L_i}}
+
+.. _CBBDDBGF:
+
+Inverse-operator decomposition
+##############################
+
+The program ``mne_inverse_operator`` calculates
+the decomposition :math:`A = \tilde{G} R^{^1/_2} = U \Lambda V^T`, described in :ref:`CHDBEHBC`. It is normally invoked from the convenience
+script ``mne_do_inverse_operator``. This section describes
+the options to ``mne_inverse_operator`` should a user need
+to invoke it directly for special-purpose processing.
+
+The command-line options of ``mne_inverse_operator`` are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---meg**
+
+    Employ MEG data in the calculation of the estimates.
+
+**\---eeg**
+
+    Employ EEG data in the calculation of the estimates. Note: The EEG
+    computations have not been thoroughly tested at this time.
+
+**\---fixed**
+
+    Use fixed source orientations normal to the cortical mantle. By default,
+    the source orientations are not constrained.
+
+**\---loose <*amount*>**
+
+    Employ a loose orientation constraint (LOC). This means that the source
+    covariance matrix entries corresponding to the current component
+    normal to the cortex are set equal to one and the transverse components
    are set to <*amount*>. The recommended
    value of <*amount*> is 0.2...0.6.
+
+**\---loosevar <*amount*>**
+
+    Use an adaptive loose orientation constraint. This option can
+    only be employed if the source spaces included in the forward solution
+    have the patch information computed, see :ref:`CIHCHDAE`.
+
+**\---fwd <*name*>**
+
+    Specifies the name of the forward solution to use.
+
+**\---noisecov <*name*>**
+
+    Specifies the name of the noise-covariance matrix to use. If this
+    file contains a projection operator, attached by mne_browse_raw and mne_process_raw,
+    no additional projection vectors can be added with the ``--proj`` option. For
+    backward compatibility, ``--senscov`` can be used as a synonym for ``--noisecov``.
+
+**\---noiserank <*value*>**
+
+    Specifies the rank of the noise covariance matrix explicitly rather than
+    trying to reduce it automatically. This option is seldom needed.
+
+**\---gradreg <*value*>**
+
+    Regularize the planar gradiometer section (channels for which the unit
+    of measurement is T/m) of the noise-covariance matrix by the given
+    amount. The value is restricted to the range 0...1. For details, see :ref:`CBBHEGAB`.
+
+**\---magreg <*value*>**
+
+    Regularize the magnetometer and axial gradiometer section (channels
+    for which the unit of measurement is T) of the noise-covariance matrix
+    by the given amount. The value is restricted to the range 0...1.
+    For details, see :ref:`CBBHEGAB`.
+
+**\---eegreg <*value*>**
+
+    Regularize the EEG section of the noise-covariance matrix by the given
+    amount. The value is restricted to the range 0...1. For details, see :ref:`CBBHEGAB`.
+
+**\---diagnoise**
+
+    Omit the off-diagonal terms from the noise-covariance matrix in
+    the computations. This may be useful if the amount of signal-free
+    data has been insufficient to calculate a reliable estimate of the
+    full noise-covariance matrix.
+
+**\---srccov <*name*>**
+
+    Specifies the name of the diagonal source-covariance matrix to use.
+    By default the source covariance matrix is a multiple of the identity matrix.
+    This option can be employed to incorporate the fMRI constraint.
+    The software to create a source-covariance matrix file from fMRI
+    data will be provided in a future release of this software package.
+
+**\---depth**
+
+    Employ depth weighting. For details, see :ref:`CBBDFJIE`.
+
+**\---weightexp <*value*>**
+
+    This parameter determines the steepness of the depth weighting function
+    (default = 0.8). For details, see :ref:`CBBDFJIE`.
+
+**\---weightlimit <*value*>**
+
+    Maximum relative strength of the depth weighting (default = 10). For
+    details, see :ref:`CBBDFJIE`.
+
+**\---fmri <*name*>**
+
+    With the help of this w file, an *a priori* weighting
+    can be applied to the source covariance matrix. The source of the
+    weighting is usually fMRI but may be also some other data, provided
+    that the weighting can be expressed as a scalar value on the cortical
+    surface, stored in a w file. It is recommended that this w file
+    is appropriately smoothed (see :ref:`CHDEBAHH`) in mne_analyze , tksurfer or
+    with mne_smooth_w to contain
+    nonzero values at all vertices of the triangular tessellation of
+    the cortical surface. The name of the file given is used as a stem of
+    the w files. The actual files should be called <*name*> ``-lh.pri`` and <*name*> ``-rh.pri`` for
+    the left and right hemisphere weight files, respectively. The application
+    of the weighting is discussed in :ref:`CBBDIJHI`.
+
+**\---fmrithresh <*value*>**
+
+    This option is mandatory and has an effect only if a weighting function
+    has been specified with the ``--fmri`` option. If the value
+    in the *a priori* files falls below this threshold
+    at a particular source space point, the source covariance matrix
+    values are multiplied by the value specified with the ``--fmrioff`` option
+    (default 0.1). Otherwise they are left unchanged.
+
+**\---fmrioff <*value*>**
+
+    The value by which the source covariance elements are multiplied
+    if the *a priori* weight falls below the threshold
+    set with ``--fmrithresh`` , see above.
+
+**\---bad <*name*>**
+
+    A text file to designate bad channels, listed one channel name on each
+    line of the file. If the noise-covariance matrix specified with the ``--noisecov`` option
+    contains projections, bad channel lists can be included only if
+    they specify all channels containing non-zero entries in a projection
+    vector. For example, bad channels can usually specify all magnetometers
+    or all gradiometers since the projection vectors for these channel
+    types are completely separate. Similarly, it is possible to include
+    MEG data only or EEG data only by using only one of ``--meg`` or ``--eeg`` options
+    since the projection vectors for MEG and EEG are always separate.
+
+**\---surfsrc**
+
+    Use a source coordinate system based on the local surface orientation
+    at the source location. By default, the three dipole components are
+    pointing to the directions of the x, y, and z axis of the coordinate system
+    employed in the forward calculation (usually the MEG head coordinate
+    frame). This option changes the orientation so that the first two
+    source components lie in the plane normal to the surface normal
+    at the source location and the third component is aligned with it.
+    If patch information is available in the source space, the normal
+    is the average patch normal, otherwise the vertex normal at the source
+    location is used. If the ``--loose`` or ``--loosevar`` option
+    is employed, ``--surfsrc`` is implied.
+
+**\---exclude <*name*>**
+
+    Exclude the source space points defined by the given FreeSurfer 'label' file
+    from the source reconstruction. This is accomplished by setting
+    the corresponding entries in the source-covariance matrix equal
+    to zero. The name of the file should end with ``-lh.label``
+    if it refers to the left hemisphere and with ``-rh.label`` if
+    it lists points in the right hemisphere, respectively.
+
+**\---proj <*name*>**
+
+    Include signal-space projection (SSP) information from this file. For information
+    on SSP, see :ref:`CACCHABI`. If the projections are present in
+    the noise-covariance matrix, the ``--proj`` option is
+    not allowed.
+
+**\---csd**
+
+    Compute the inverse operator for surface current densities instead
+    of the dipole source amplitudes. This requires the computation of patch
+    statistics for the source space. Since this computation is time consuming,
+    it is recommended that the patch statistics are precomputed and
+    the source space file containing the patch information is employed
+    already when the forward solution is computed, see :ref:`CIHCHDAE` and :ref:`BABCHEJD`.
+    For technical details of the patch information, please consult :ref:`CBBDBHDI`. This option is considered experimental at
+    the moment.
+
+**\---inv <*name*>**
+
+    Save the inverse operator decomposition here.
+
+.. _CBBECEDE:
+
+Producing movies and snapshots
+##############################
+
+mne_make_movie is a program
+for producing movies and snapshot graphics frames without any graphics
+output to the screen. In addition, mne_make_movie can
+produce stc or w files which contain the numerical current estimate
+data in a simple binary format for postprocessing. These files can
+be displayed in mne_analyze ,
+see :ref:`ch_interactive_analysis`, utilized in the cross-subject averaging
+process, see :ref:`ch_morph`, and read into Matlab using the MNE
+Matlab toolbox, see :ref:`ch_matlab`.
+
+The command-line options to mne_make_movie are
+explained in the following subsections.
+
+General options
+===============
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+Input files
+===========
+
+**\---inv <*name*>**
+
+    Load the inverse operator decomposition from here.
+
+**\---meas <*name*>**
+
+    Load the MEG or EEG data from this file.
+
+**\---set <*number*>**
+
+    The data set (condition) number to load. This is the sequential
+    number of the condition. You can easily see the association by looking
+    at the condition list in mne_analyze when
+    you load the file.
+
+**\---stcin <*name*>**
+
+    Specifies an stc file to read as input.
+
+Times and baseline
+==================
+
+**\---tmin <*time/ms*>**
+
+    Specifies the starting time employed in the analysis. If ``--tmin`` option
+    is missing the analysis starts from the beginning of the epoch.
+
+**\---tmax <*time/ms*>**
+
+    Specifies the finishing time employed in the analysis. If ``--tmax`` option
+    is missing the analysis extends to the end of the epoch.
+
+**\---tstep <*step/ms*>**
+
+    Time step between consecutive movie frames, specified in milliseconds.
+
+**\---integ <*:math:`\Delta`t/ms*>**
+
+    Integration time for each frame. Defaults to zero. The integration will
+    be performed on sensor data. If the time specified for a frame is :math:`t_0`,
+    the integration range will be :math:`t_0 - \Delta t/2 \leq t \leq t_0 + \Delta t/2`.
+
+**\---pick <*time/ms*>**
+
+    Pick a time for the production of rgb, tif, jpg, png, or w files.
+    Several pick options may be present. The time must be within the
+    analysis interval, indicated by the ``--tmin`` and ``--tmax`` options.
+    The ``--rgb`` , ``--tif`` , ``--jpg`` , ``--png`` , and ``--w`` options
+    control which file types are actually produced. When a ``--pick`` option
+    is encountered, the effect of any preceding ``--pickrange`` option
+    is ignored.
+
+**\---pickrange**
+
+    All previous ``--pick`` options will be ignored. Instead,
+    snapshots are produced as indicated by the ``--tmin`` , ``--tmax`` ,
+    and ``--tstep`` options. This is useful, *e.g.*,
+    for producing input for scripts merging the individual graphics
+    snapshots into a composite "filmstrip" representation.
+    However, such scripts are not yet part of the MNE software.
+
+**\---bmin <*time/ms*>**
+
+    Specifies the starting time of the baseline. In order to activate
+    baseline correction, both ``--bmin`` and ``--bmax`` options
+    must be present.
+
+**\---bmax <*time/ms*>**
+
+    Specifies the finishing time of the baseline.
+
+**\---baselines <*file_name*>**
+
+    Specifies a file which contains the baseline settings. Each line
+    of the file should contain a name of a channel, followed by the
+    baseline value, separated from the channel name by a colon. The
+    baseline values must be specified in basic units, i.e., Teslas/meter
+    for gradiometers, Teslas for magnetometers, and Volts for EEG channels.
+    If some channels are missing from the baseline file, warning messages are
+    issued: for these channels, the ``--bmin`` and ``--bmax`` settings will
+    be used.
+
+Options controlling the estimates
+=================================
+
+**\---nave <*value*>**
+
+    Specifies the effective number of averaged epochs in the input data, :math:`L_{eff}`,
+    as discussed in :ref:`CBBDGIAE`. If the input data file is
+    one produced by mne_browse_raw or mne_process_raw , the
+    number of averages is correct in the file. However, if subtractions
+    or some more complicated combinations of simple averages are produced,
+    e.g., by using the xplotter software,
+    the number of averages should be manually adjusted along the guidelines
+    given in :ref:`CBBDGIAE`. This is accomplished either by
+    employing this flag or by adjusting the number of averages in the
+    data file with help of the utility mne_change_nave .
+
+**\---snr <*value*>**
+
+    An estimate for the amplitude SNR. The regularization parameter will
+    be set as :math:`\lambda^2 = 1/SNR^2`. The default value is
+    SNR = 3. Automatic selection of the regularization parameter is
+    currently not supported.
+
+**\---spm**
+
+    Calculate the dSPM instead of the expected current value.
+
+**\---sLORETA**
+
+    Calculate the noise-normalized estimate using the sLORETA approach.
+    sLORETA solutions have in general a smaller location bias than either
+    the expected current (MNE) or the dSPM.
+
+**\---signed**
+
+    Indicate the current direction with respect to the cortex outer
+    normal by sign. Currents flowing out of the cortex are thus considered
+    positive (warm colors) and currents flowing into the cortex negative (cold
+    colors).
+
+**\---picknormalcomp**
+
+    The components of the estimates corresponding to directions tangential
+    with the cortical mantle are zeroed out.
+
+.. _CBBBBHIF:
+
+Visualization options
+=====================
+
+**\---subject <*subject*>**
+
+    Specifies the subject whose MRI data is employed in the visualization.
+    This must be the same subject that was used for computing the current
+    estimates. The environment variable SUBJECTS_DIR must be set to
+    point to the location where the subjects are to be found.
+
+**\---morph <*subject*>**
+
+    Morph the data to the cortical surface of another subject. The QuickTime
+    movie, stc-file, graphics snapshot, and w-file outputs are affected
+    by this option, *i.e.*, they will take the morphing
+    into account and will represent the data on the cortical surface
+    of the subject defined with this option. The stc files morphed to
+    a single subject's cortical surface are used by mne_average_estimates to
+    combine data from different subjects, see :ref:`CHDFDIFE`.
+    If morphing is selected appropriate smoothing must be specified
+    with the ``--smooth`` option. The morphing process can
+    be made faster by precomputing the necessary morphing maps with mne_make_morph_maps ,
+    see :ref:`CHDBBHDH`. More information about morphing and averaging
+    can be found in :ref:`ch_morph`.
+
+**\---morphgrade <*number*>**
+
+    Adjusts the number of vertices in the stc files produced when morphing
+    is in effect. By default the number of vertices is 10242 corresponding
+    to ``--morphgrade`` value 5. Allowed values are 3, 4, 5, and 6 corresponding
+    to 642, 2562, 10242, and 40962 vertices, respectively.
+
+**\---surface <*surface name*>**
+
+    Name of the surface employed in the visualization. The default is ``inflated``.
+
+**\---curv <*name*>**
+
+    Specify a nonstandard curvature file name. The default curvature files
+    are ``lh.curv`` and ``rh.curv`` . With this option,
+    the names become ``lh.`` <*name*> and ``rh.`` <*name*> .
+
+**\---patch <*name*> [: <*angle/deg*> ]**
+
+    Specify the name of a surface patch to be used for visualization instead
+    of the complete cortical surface. A complete name of a patch file
+    in the FreeSurfer surf directory must be given. The name should
+    begin with lh or rh to allow association of the patch with a hemisphere.
+    A maximum of two ``--patch`` options can be in effect, one patch for each
+    hemisphere. If the name refers to a flat patch, the name can be
+    optionally followed by a colon and a rotation angle in degrees.
+    The flat patch will then be rotated counterclockwise by this amount
+    before display. You can check a suitable value for the rotation
+    angle by loading the patch interactively in mne_analyze .
+
+**\---width <*value*>**
+
+    Width of the graphics output frames in pixels. The default width
+    is 600 pixels.
+
+**\---height <*value*>**
+
+    Height of the graphics output frames in pixels. The default height
+    is 400 pixels.
+
+**\---mag <*factor*>**
+
+    Magnify the visualized scene by this factor.
+
+**\---lh**
+
+    Select the left hemisphere for graphics output. By default, both hemispheres
+    are processed.
+
+**\---rh**
+
+    Select the right hemisphere for graphics output. By default, both hemispheres
+    are processed.
+
+**\---view <*name*>**
+
+    Select the name of the view for mov, rgb, and tif graphics output files.
+    The default viewnames, defined in ``$MNE_ROOT/share/mne/mne_analyze/eyes`` ,
+    are *lat* (lateral), *med* (medial), *ven* (ventral),
+    and *occ* (occipital). You can override these
+    defaults by creating the directory .mne under your home directory
+    and copying the eyes file there. Each line of the eyes file contains
+    the name of the view, the viewpoint for the left hemisphere, the
+    viewpoint for the right hemisphere, left hemisphere up vector, and
+    right hemisphere up vector. The entities are separated by semicolons.
+    Lines beginning with the pound sign (#) are considered to be comments.
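+
+    A minimal illustrative eyes file might look as follows; the numeric
+    viewpoints and up vectors here are made-up placeholders, not the
+    values shipped in the distributed eyes file::
+
+        # name ; lh viewpoint ; rh viewpoint ; lh up vector ; rh up vector
+        lat ; -1 0 0 ; 1 0 0 ; 0 0 1 ; 0 0 1
+        med ; 1 0 0 ; -1 0 0 ; 0 0 1 ; 0 0 1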
+
+**\---smooth <*nstep*>**
+
+    Number of smoothsteps to take when producing the output frames. Depending
+    on the source space decimation, an appropriate number is 4 - 7.
+    Smoothing does not have any effect for the original brain if stc
+    files are produced. However, if morphing is selected, smoothing is
+    mandatory even with stc output. For details of the smoothing procedure,
+    see :ref:`CHDEBAHH`.
+
+**\---nocomments**
+
+    Do not include the comments in the image output files or movies.
+
+**\---noscalebar**
+
+    Do not include the scalebar in the image output files or movies.
+
+**\---alpha <*value*>**
+
+    Adjust the opacity of maps shown on the cortical surface (0 = transparent,
+    1 = totally opaque). The default value is 1.
+
+Thresholding
+============
+
+**\---fthresh <*value*>**
+
+    Specifies the threshold for the displayed colormaps. At the threshold,
+    the overlaid color will be equal to the background surface color.
+    For currents, the value will be multiplied by :math:`10^{-10}`.
+    The default value is 8.
+
+**\---fmid <*value*>**
+
+    Specifies the midpoint for the displayed colormaps. At this value, the
+    overlaid color will be red (positive values) or blue (negative values).
+    For currents, the value will be multiplied by :math:`10^{-10}`.
+    The default value is 15.
+
+**\---fmax <*value*>**
+
+    Specifies the maximum point for the displayed colormaps. At this value,
+    the overlaid color will be bright yellow (positive values) or light
+    blue (negative values). For currents, the value will be multiplied
+    by :math:`10^{-10}`. The default value is 20.
+
+**\---fslope <*value*>**
+
+    Included for backwards compatibility. If this option is specified
+    and ``--fmax`` option is *not* specified, :math:`F_{max} = F_{mid} + 1/F_{slope}`.
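+
+    As a worked example of this rule, :math:`F_{mid} = 15` (the default)
+    combined with ``--fslope 0.2`` gives
+
+    .. math::    F_{max} = 15 + 1/0.2 = 20\ ,
+
+    which reproduces the default :math:`F_{max}` quoted above.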
+
+Output files
+============
+
+**\---mov <*name*>**
+
+    Produce QuickTime movie files. This is the 'stem' of
+    the output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.mov`` is added to indicate a QuickTime output
+    file. The movie is produced for all times as dictated by the ``--tmin`` , ``--tmax`` , ``--tstep`` ,
+    and ``--integ`` options.
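+
+    The same stem-stripping rule is used by the snapshot options below,
+    so a single sketch illustrates it. The following hypothetical Python
+    helper (not an MNE function; it assumes the rule simply drops any
+    extension) mimics the naming::
+
+        def quicktime_name(stem, hemi, view):
+            # Strip anything up to and including the last period, counted
+            # from the end of the name, i.e. drop any extension.
+            base = stem.rsplit('.', 1)[0]
+            return '{0}-{1}-{2}.mov'.format(base, hemi, view)
+
+        # quicktime_name('estimate.fif', 'lh', 'lat') -> 'estimate-lh-lat.mov'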
+
+**\---qual <*value*>**
+
+    Quality of the QuickTime movie output. The default quality is 80 and
+    the allowed range is 25 - 100. The size of the movie files is a monotonically
+    increasing function of the movie quality.
+
+**\---rate <*rate*>**
+
+    Specifies the frame rate of the QuickTime movies. The default value is :math:`1/(10t_{step})`,
+    where :math:`t_{step}` is the time in seconds between subsequent
+    movie frames.
+
+**\---rgb <*name*>**
+
+    Produce rgb snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.rgb`` is added to indicate an rgb output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+
+**\---tif <*name*>**
+
+    Produce tif snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.tif`` is added to indicate a tif output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+    The tif output files are *not* compressed. Pass
+    the files through an image processing program to compress them.
+
+**\---jpg <*name*>**
+
+    Produce jpg snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.jpg`` is added to indicate a jpg output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+
+**\---png <*name*>**
+
+    Produce png snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.png`` is added to indicate a png output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+
+**\---w <*name*>**
+
+    Produce w file snapshots. This is the 'stem' of
+    the output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh.w`` or ``-rh.w``
+    is then appended. Files are produced for all picked times as dictated
+    by the ``--pick`` and ``--integ`` options.
+
+**\---stc <*name*>**
+
+    Produce stc files for either the original subject or the one selected with
+    the ``--morph`` option. These files will contain data only
+    for the decimated locations. If morphing is selected, appropriate
+    smoothing is mandatory. The morphed maps will be decimated with the
+    help of a subdivided icosahedron so that the morphed stc files will
+    always contain 10242 vertices. These morphed stc files can be easily
+    averaged together, e.g., in Matlab since they always contain an
+    identical set of vertices.
+
+**\---norm <*name*>**
+
+    Indicates that a separate w file
+    containing the noise-normalization values will be produced. The
+    option ``--spm`` must also be present. Nevertheless, the
+    movie and stc file outputs will
+    contain MNE values. The noise normalization data files will be called <*name*>-<*SNR*> ``-lh.w`` and <*name*>-<*SNR*> ``-rh.w`` .
+
+.. _CBBHHCEF:
+
+Label processing
+================
+
+**\---label <*name*>**
+
+    Specifies a label file to process. For each label file, the values
+    of the computed estimates are listed in text files. The label files
+    are produced by tksurfer or mne_analyze and
+    specify regions of interest (ROIs). A label file name should end
+    with ``-lh.label`` for left-hemisphere ROIs and with ``-rh.label`` for
+    right-hemisphere ones. The corresponding output files are tagged
+    with ``-lh-`` <*data type*> ``.amp`` and ``-rh-`` <*data type*> ``.amp``, respectively. <*data type*> equals ``mne`` for
+    expected current data and ``spm`` for
+    dSPM data. Each line of the output file contains the waveform of
+    the output quantity at one of the source locations falling inside
+    the ROI. For more information about the label output formats, see :ref:`CACJJGFA`.
+
+**\---labelcoords**
+
+    Include coordinates of the vertices in the output. The coordinates will
+    be listed in millimeters in the coordinate system which was specified
+    for the forward model computations. This option cannot be used with
+    stc input files (``--stcin`` ) because the stc files do
+    not contain the coordinates of the vertices.
+
+**\---labelverts**
+
+    Include vertex numbers in the output. The numbers refer to the complete
+    triangulation of the corresponding surface and are zero based. The
+    vertex numbers are by default on the first row or first column of the
+    output file depending on whether or not the ``--labeltimebytime`` option
+    is present.
+
+**\---labeltimebytime**
+
+    Output the label data time by time instead of the default vertex-by-vertex
+    output.
+
+**\---labeltag <*tag*>**
+
+    End the output files with the specified tag. By default, the output files
+    will end with ``-mne.amp`` or ``-spm.amp`` depending
+    on whether MNE or one of the noise-normalized estimates (dSPM or sLORETA)
+    was selected.
+
+**\---labeloutdir <*directory*>**
+
+    Specifies the directory where the output files will be located.
+    By default, they will be in the current working directory.
+
+**\---labelcomments**
+
+    Include comments in the output files. The comment lines begin with the
+    percent sign to make the files compatible with Matlab.
+
+**\---scaleby <*factor*>**
+
+    By default, the current values output to the files will be in the
+    actual physical units (Am). This option allows scaling of the current
+    values to other units. mne_analyze typically
+    uses 1e10 to bring the numbers to a human-friendly scale.
+
+Using stc file input
+====================
+
+The ``--stcin`` option allows input of stc files.
+This feature has several uses:
+
+- QuickTime movies can be produced from
+  existing stc files without having to resort to EasyMeg.
+
+- Graphics snapshots can be produced from existing stc files.
+
+- Existing stc files can be temporally resampled with the help of
+  the ``--tmin`` , ``--tmax`` , ``--tstep`` ,
+  and ``--integ`` options.
+
+- Existing stc files can be morphed to another cortical surface
+  by specifying the ``--morph`` option.
+
+- Timecourses can be extracted and stored in text files with the
+  help of the ``--label`` options, see above.
+
+.. _CBBCGHAH:
+
+Computing inverse from raw and evoked data
+##########################################
+
+The purpose of the utility mne_compute_raw_inverse is
+to compute inverse solutions from either evoked-response or raw
+data at specified ROIs (labels) and to save the results in a fif
+file which can be viewed with mne_browse_raw ,
+read directly into Matlab using the MNE Matlab Toolbox, see :ref:`ch_matlab`,
+or converted to Matlab format using either mne_convert_mne_data , mne_raw2mat ,
+or mne_epochs2mat , see :ref:`ch_convert`.
+
+.. _CHDEIHFA:
+
+Command-line options
+====================
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in <*filename*>**
+
+    Specifies the input data file. This can be either an evoked data
+    file or a raw data file.
+
+**\---bmin <*time/ms*>**
+
+    Specifies the starting time of the baseline. In order to activate
+    baseline correction, both ``--bmin`` and ``--bmax`` options
+    must be present. This option applies to evoked data only.
+
+**\---bmax <*time/ms*>**
+
+    Specifies the finishing time of the baseline. This option applies
+    to evoked data only.
+
+**\---set <*number*>**
+
+    The data set (condition) number to load. This is the sequential
+    number of the condition. You can easily see the association by looking
+    at the condition list in mne_analyze when
+    you load the file.
+
+**\---inv <*name*>**
+
+    Load the inverse operator decomposition from here.
+
+**\---nave <*value*>**
+
+    Specifies the effective number of averaged epochs in the input data, :math:`L_{eff}`,
+    as discussed in :ref:`CBBDGIAE`. If the input data file is
+    one produced by mne_browse_raw or mne_process_raw ,
+    the number of averages is correct in the file. However, if subtractions
+    or some more complicated combinations of simple averages are produced,
+    e.g., by using the xplotter software,
+    the number of averages should be manually adjusted along the guidelines
+    given in :ref:`CBBDGIAE`. This is accomplished either by
+    employing this flag or by adjusting the number of averages in the
+    data file with the help of the utility mne_change_nave .
+
+**\---snr <*value*>**
+
+    An estimate for the amplitude SNR. The regularization parameter will
+    be set as :math:`\lambda^2 = 1/SNR^2`. The default value is
+    SNR = 1. Automatic selection of the regularization parameter is
+    currently not supported.
+
+**\---spm**
+
+    Calculate the dSPM instead of the expected current value.
+
+**\---picknormalcomp**
+
+    The components of the estimates corresponding to directions tangential
+    with the cortical mantle are zeroed out.
+
+**\---mricoord**
+
+    Provide source locations and orientations in the MRI coordinate frame
+    instead of the default head coordinate frame.
+
+**\---label <*name*>**
+
+    Specifies a label file to process. For each label file, the values
+    of the computed estimates are stored in a fif file. For more details,
+    see :ref:`CBBHJDAI`. The label files are produced by tksurfer
+    or mne_analyze and specify regions
+    of interest (ROIs). A label file name should end with ``-lh.label`` for
+    left-hemisphere ROIs and with ``-rh.label`` for right-hemisphere
+    ones. The corresponding output files are tagged with ``-lh-`` <*data type*> ``.fif`` and ``-rh-`` <*data type*> ``.fif`` , respectively. <*data type*> equals ``mne`` for expected
+    current data and ``spm`` for dSPM data.
+    For raw data, ``_raw.fif`` is employed instead of ``.fif`` .
+    The output files are stored in the same directory as the label files.
+
+**\---labelselout**
+
+    Produces additional label files for each label processed, containing only
+    those vertices within the input label which correspond to available
+    source space vertices in the inverse operator. These files have the
+    same name as the original label except that ``-lh`` and ``-rh`` are replaced
+    by ``-sel-lh`` and ``-sel-rh`` , respectively.
+
+**\---align_z**
+
+    Instructs the program to try to align the waveform signs within
+    the label. For more information, see :ref:`CBBHJDAI`. This
+    flag will not have any effect if the inverse operator has been computed
+    with the strict orientation constraint active.
+
+**\---labeldir <*directory*>**
+
+    All previous ``--label`` options will be ignored when this
+    option is encountered. For each label in the directory, the output
+    file defined with the ``--out`` option will contain a summarizing
+    waveform which is the average of the waveforms in the vertices of
+    the label. The ``--labeldir`` option implies ``--align_z`` and ``--picknormalcomp`` options.
+
+**\---orignames**
+
+    This option is used with the ``--labeldir`` option, above.
+    With this option, the output file channel names will be the names
+    of the label files, truncated to 15 characters, instead of names
+    containing the vertex numbers.
+
+**\---out <*name*>**
+
+    Required with ``--labeldir`` . This is the output file for
+    the data.
+
+**\---extra <*name*>**
+
+    By default, the output includes the current estimate signals and
+    the digital trigger channel, see ``--digtrig`` option,
+    below. With the ``--extra`` option, a custom set of additional
+    channels can be included. The extra channel text file should contain
+    the names of these channels, one channel name on each line. With
+    this option present, the digital trigger channel is not included
+    unless specified in the extra channel file.
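+
+    For example, an extra channel file requesting the trigger channel
+    and one additional channel could contain (names illustrative)::
+
+        STI 014
+        EEG 001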
+
+**\---noextra**
+
+    No additional channels will be included with this option present.
+
+**\---digtrig <*name*>**
+
+    Name of the composite digital trigger channel. The default value
+    is 'STI 014'. Underscores in the channel name
+    will be replaced by spaces.
+
+**\---split <*size/MB*>**
+
+    Specifies the maximum size of the raw data files saved. By default, the
+    output is split into files which are just below 2 GB so that the
+    fif file maximum size is not exceeded.
+
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_compute_raw_inverse . Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
+
+.. _CBBHJDAI:
+
+Implementation details
+======================
+
+The fif files output from mne_compute_raw_inverse have
+various fields of the channel information set to facilitate interpretation
+by postprocessing software as follows:
+
+**channel name**
+
+    Will be set to J[xyz] <*number*> ,
+    where the source component is indicated by the coordinate axis name
+    and <*number*> is the vertex number, starting from zero, in the complete
+    triangulation of the hemisphere in question.
+
+**logical channel number**
+
+    Will be set to the vertex number, starting from zero, in the
+    complete triangulation of the hemisphere in question.
+
+**sensor location**
+
+    The location of the vertex in head coordinates or in MRI coordinates,
+    determined by the ``--mricoord`` flag.
+
+**sensor orientation**
+
+    The *x*-direction unit vector will point to the
+    direction of the current. Other unit vectors are set to zero. Again,
+    the coordinate system in which the orientation is expressed depends
+    on the ``--mricoord`` flag.
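+
+As a small aside, the naming convention above is easy to parse. The
+following hypothetical Python helper (not an MNE routine; it assumes the
+component letter and vertex number are concatenated without a separator)
+recovers both fields::
+
+    import re
+
+    def parse_source_channel(name):
+        # Split a name like 'Jz1234' into its coordinate axis and vertex.
+        m = re.match(r'J([xyz])(\d+)$', name)
+        if m is None:
+            raise ValueError('not a source-estimate channel: %r' % name)
+        return m.group(1), int(m.group(2))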
+
+The ``--align_z`` flag tries to align the signs
+of the signals at different vertices of the label. For this purpose,
+the surface normals within the label are collected into a :math:`n_{vert} \times 3` matrix.
+The preferred orientation will be taken as the first right singular
+vector of this matrix, corresponding to its largest singular value.
+If the dot product of the surface normal of a vertex with this preferred
+orientation is negative, the signs of the estimates at that vertex are inverted. The inversion
+is reflected in the current direction vector listed in the channel
+information, see above.
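+
+The procedure can be summarized in a few lines of NumPy. The sketch below
+is our own illustration of the description above, not the MNE
+implementation::
+
+    import numpy as np
+
+    def align_signs(normals, estimates):
+        # normals: (n_vert, 3) surface normals within the label.
+        # estimates: (n_vert, n_times) waveforms at the label vertices.
+        # The preferred orientation is the first right singular vector,
+        # i.e. the one corresponding to the largest singular value.
+        _, _, vt = np.linalg.svd(normals, full_matrices=False)
+        preferred = vt[0]
+        flip = np.sign(normals.dot(preferred))
+        flip[flip == 0] = 1.0  # leave exactly orthogonal normals alone
+        return estimates * flip[:, np.newaxis]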
+
+.. note:: The raw data files output by mne_compute_raw_inverse can be converted to mat files with mne_raw2mat, see :ref:`convert_to_matlab`. Alternatively, the files can be read directly from Matlab using the routines in the MNE Matlab toolbox, see :ref:`ch_matlab`. The evoked data output can be easily read directly from Matlab using the fiff_load_evoked routine in the MNE Matlab toolbox. Both raw data and evoked output files can be loaded into mne_browse_raw, see :ref:`ch_browse`.
diff --git a/doc/source/manual/mne_analyze/MNE_preferences.png b/doc/source/manual/mne_analyze/MNE_preferences.png
new file mode 100644
index 0000000..cfb9891
Binary files /dev/null and b/doc/source/manual/mne_analyze/MNE_preferences.png differ
diff --git a/doc/source/manual/mne_analyze/adjust_alignment.png b/doc/source/manual/mne_analyze/adjust_alignment.png
new file mode 100644
index 0000000..00ee4b9
Binary files /dev/null and b/doc/source/manual/mne_analyze/adjust_alignment.png differ
diff --git a/doc/source/manual/mne_analyze/adjust_lights.png b/doc/source/manual/mne_analyze/adjust_lights.png
new file mode 100644
index 0000000..cd493c9
Binary files /dev/null and b/doc/source/manual/mne_analyze/adjust_lights.png differ
diff --git a/doc/source/manual/mne_analyze/adjust_menu.png b/doc/source/manual/mne_analyze/adjust_menu.png
new file mode 100644
index 0000000..26e08d1
Binary files /dev/null and b/doc/source/manual/mne_analyze/adjust_menu.png differ
diff --git a/doc/source/manual/mne_analyze/cont_hpi_data.png b/doc/source/manual/mne_analyze/cont_hpi_data.png
new file mode 100644
index 0000000..ae91150
Binary files /dev/null and b/doc/source/manual/mne_analyze/cont_hpi_data.png differ
diff --git a/doc/source/manual/mne_analyze/dipole_list.png b/doc/source/manual/mne_analyze/dipole_list.png
new file mode 100644
index 0000000..e7337cf
Binary files /dev/null and b/doc/source/manual/mne_analyze/dipole_list.png differ
diff --git a/doc/source/manual/mne_analyze/dipole_parameters.png b/doc/source/manual/mne_analyze/dipole_parameters.png
new file mode 100644
index 0000000..2b02b10
Binary files /dev/null and b/doc/source/manual/mne_analyze/dipole_parameters.png differ
diff --git a/doc/source/manual/mne_analyze/dipoles_menu.png b/doc/source/manual/mne_analyze/dipoles_menu.png
new file mode 100644
index 0000000..3b28490
Binary files /dev/null and b/doc/source/manual/mne_analyze/dipoles_menu.png differ
diff --git a/doc/source/manual/mne_analyze/epoch_selector.png b/doc/source/manual/mne_analyze/epoch_selector.png
new file mode 100644
index 0000000..2ae0c04
Binary files /dev/null and b/doc/source/manual/mne_analyze/epoch_selector.png differ
diff --git a/doc/source/manual/mne_analyze/field_mapping_pref.png b/doc/source/manual/mne_analyze/field_mapping_pref.png
new file mode 100644
index 0000000..63124cd
Binary files /dev/null and b/doc/source/manual/mne_analyze/field_mapping_pref.png differ
diff --git a/doc/source/manual/mne_analyze/file_menu.png b/doc/source/manual/mne_analyze/file_menu.png
new file mode 100644
index 0000000..943de88
Binary files /dev/null and b/doc/source/manual/mne_analyze/file_menu.png differ
diff --git a/doc/source/manual/mne_analyze/hardcopy_controls.png b/doc/source/manual/mne_analyze/hardcopy_controls.png
new file mode 100644
index 0000000..775aedb
Binary files /dev/null and b/doc/source/manual/mne_analyze/hardcopy_controls.png differ
diff --git a/doc/source/manual/mne_analyze/help_menu.png b/doc/source/manual/mne_analyze/help_menu.png
new file mode 100644
index 0000000..869667b
Binary files /dev/null and b/doc/source/manual/mne_analyze/help_menu.png differ
diff --git a/doc/source/manual/mne_analyze/image_dialog.png b/doc/source/manual/mne_analyze/image_dialog.png
new file mode 100644
index 0000000..a700c93
Binary files /dev/null and b/doc/source/manual/mne_analyze/image_dialog.png differ
diff --git a/doc/source/manual/mne_analyze/label_list.png b/doc/source/manual/mne_analyze/label_list.png
new file mode 100644
index 0000000..b5b8f26
Binary files /dev/null and b/doc/source/manual/mne_analyze/label_list.png differ
diff --git a/doc/source/manual/mne_analyze/labels_menu.png b/doc/source/manual/mne_analyze/labels_menu.png
new file mode 100644
index 0000000..f744f6e
Binary files /dev/null and b/doc/source/manual/mne_analyze/labels_menu.png differ
diff --git a/doc/source/manual/mne_analyze/main_window.png b/doc/source/manual/mne_analyze/main_window.png
new file mode 100644
index 0000000..b74ee9b
Binary files /dev/null and b/doc/source/manual/mne_analyze/main_window.png differ
diff --git a/doc/source/manual/mne_analyze/movie_dialog.png b/doc/source/manual/mne_analyze/movie_dialog.png
new file mode 100644
index 0000000..8dad35a
Binary files /dev/null and b/doc/source/manual/mne_analyze/movie_dialog.png differ
diff --git a/doc/source/manual/mne_analyze/mri_viewer.png b/doc/source/manual/mne_analyze/mri_viewer.png
new file mode 100644
index 0000000..d8c1df5
Binary files /dev/null and b/doc/source/manual/mne_analyze/mri_viewer.png differ
diff --git a/doc/source/manual/mne_analyze/open_dialog.png b/doc/source/manual/mne_analyze/open_dialog.png
new file mode 100644
index 0000000..90bed87
Binary files /dev/null and b/doc/source/manual/mne_analyze/open_dialog.png differ
diff --git a/doc/source/manual/mne_analyze/overlay_management.png b/doc/source/manual/mne_analyze/overlay_management.png
new file mode 100644
index 0000000..7709687
Binary files /dev/null and b/doc/source/manual/mne_analyze/overlay_management.png differ
diff --git a/doc/source/manual/mne_analyze/patch_selection_dialog.png b/doc/source/manual/mne_analyze/patch_selection_dialog.png
new file mode 100644
index 0000000..b30853d
Binary files /dev/null and b/doc/source/manual/mne_analyze/patch_selection_dialog.png differ
diff --git a/doc/source/manual/mne_analyze/save_label_timecourse.png b/doc/source/manual/mne_analyze/save_label_timecourse.png
new file mode 100644
index 0000000..459bfbc
Binary files /dev/null and b/doc/source/manual/mne_analyze/save_label_timecourse.png differ
diff --git a/doc/source/manual/mne_analyze/scales_dialog.png b/doc/source/manual/mne_analyze/scales_dialog.png
new file mode 100644
index 0000000..2929a21
Binary files /dev/null and b/doc/source/manual/mne_analyze/scales_dialog.png differ
diff --git a/doc/source/manual/mne_analyze/surface_controls.png b/doc/source/manual/mne_analyze/surface_controls.png
new file mode 100644
index 0000000..8200db0
Binary files /dev/null and b/doc/source/manual/mne_analyze/surface_controls.png differ
diff --git a/doc/source/manual/mne_analyze/surface_selection_dialog.png b/doc/source/manual/mne_analyze/surface_selection_dialog.png
new file mode 100644
index 0000000..947dd87
Binary files /dev/null and b/doc/source/manual/mne_analyze/surface_selection_dialog.png differ
diff --git a/doc/source/manual/mne_analyze/timecourse_manager.png b/doc/source/manual/mne_analyze/timecourse_manager.png
new file mode 100644
index 0000000..87789c5
Binary files /dev/null and b/doc/source/manual/mne_analyze/timecourse_manager.png differ
diff --git a/doc/source/manual/mne_analyze/view_menu.png b/doc/source/manual/mne_analyze/view_menu.png
new file mode 100644
index 0000000..d827754
Binary files /dev/null and b/doc/source/manual/mne_analyze/view_menu.png differ
diff --git a/doc/source/manual/mne_analyze/viewer.png b/doc/source/manual/mne_analyze/viewer.png
new file mode 100644
index 0000000..937441d
Binary files /dev/null and b/doc/source/manual/mne_analyze/viewer.png differ
diff --git a/doc/source/manual/mne_analyze/viewer_options.png b/doc/source/manual/mne_analyze/viewer_options.png
new file mode 100644
index 0000000..61bea5c
Binary files /dev/null and b/doc/source/manual/mne_analyze/viewer_options.png differ
diff --git a/doc/source/manual/mne_analyze/visualize_hpi.png b/doc/source/manual/mne_analyze/visualize_hpi.png
new file mode 100644
index 0000000..510218a
Binary files /dev/null and b/doc/source/manual/mne_analyze/visualize_hpi.png differ
diff --git a/doc/source/manual/mne_analyze/windows_menu.png b/doc/source/manual/mne_analyze/windows_menu.png
new file mode 100644
index 0000000..264af7a
Binary files /dev/null and b/doc/source/manual/mne_analyze/windows_menu.png differ
diff --git a/doc/source/manual/mne_browse_raw/adjust_menu.png b/doc/source/manual/mne_browse_raw/adjust_menu.png
new file mode 100644
index 0000000..68f4135
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/adjust_menu.png differ
diff --git a/doc/source/manual/mne_browse_raw/adust_menu.png b/doc/source/manual/mne_browse_raw/adust_menu.png
new file mode 100644
index 0000000..0ad24b5
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/adust_menu.png differ
diff --git a/doc/source/manual/mne_browse_raw/average_pref.png b/doc/source/manual/mne_browse_raw/average_pref.png
new file mode 100644
index 0000000..5b695f5
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/average_pref.png differ
diff --git a/doc/source/manual/mne_browse_raw/channel_selection.png b/doc/source/manual/mne_browse_raw/channel_selection.png
new file mode 100644
index 0000000..6a4719c
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/channel_selection.png differ
diff --git a/doc/source/manual/mne_browse_raw/file_menu.png b/doc/source/manual/mne_browse_raw/file_menu.png
new file mode 100644
index 0000000..70cce2a
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/file_menu.png differ
diff --git a/doc/source/manual/mne_browse_raw/filter_dialog.png b/doc/source/manual/mne_browse_raw/filter_dialog.png
new file mode 100644
index 0000000..8d727ee
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/filter_dialog.png differ
diff --git a/doc/source/manual/mne_browse_raw/help_menu.png b/doc/source/manual/mne_browse_raw/help_menu.png
new file mode 100644
index 0000000..b4c301d
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/help_menu.png differ
diff --git a/doc/source/manual/mne_browse_raw/main.png b/doc/source/manual/mne_browse_raw/main.png
new file mode 100644
index 0000000..817fc23
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/main.png differ
diff --git a/doc/source/manual/mne_browse_raw/manage_averages_dialog.png b/doc/source/manual/mne_browse_raw/manage_averages_dialog.png
new file mode 100644
index 0000000..715ea5e
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/manage_averages_dialog.png differ
diff --git a/doc/source/manual/mne_browse_raw/new_selection.png b/doc/source/manual/mne_browse_raw/new_selection.png
new file mode 100644
index 0000000..61cc536
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/new_selection.png differ
diff --git a/doc/source/manual/mne_browse_raw/new_ssp.png b/doc/source/manual/mne_browse_raw/new_ssp.png
new file mode 100644
index 0000000..b3f4cff
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/new_ssp.png differ
diff --git a/doc/source/manual/mne_browse_raw/open_dialog copy.png b/doc/source/manual/mne_browse_raw/open_dialog copy.png
new file mode 100644
index 0000000..35470da
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/open_dialog copy.png differ
diff --git a/doc/source/manual/mne_browse_raw/open_dialog.png b/doc/source/manual/mne_browse_raw/open_dialog.png
new file mode 100644
index 0000000..5817403
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/open_dialog.png differ
diff --git a/doc/source/manual/mne_browse_raw/process_menu.png b/doc/source/manual/mne_browse_raw/process_menu.png
new file mode 100644
index 0000000..ce2b75f
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/process_menu.png differ
diff --git a/doc/source/manual/mne_browse_raw/process_menu2.png b/doc/source/manual/mne_browse_raw/process_menu2.png
new file mode 100644
index 0000000..d4884ec
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/process_menu2.png differ
diff --git a/doc/source/manual/mne_browse_raw/scales_dialog.png b/doc/source/manual/mne_browse_raw/scales_dialog.png
new file mode 100644
index 0000000..4586079
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/scales_dialog.png differ
diff --git a/doc/source/manual/mne_browse_raw/scales_dialog2.png b/doc/source/manual/mne_browse_raw/scales_dialog2.png
new file mode 100644
index 0000000..bfa726d
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/scales_dialog2.png differ
diff --git a/doc/source/manual/mne_browse_raw/toolbar.png b/doc/source/manual/mne_browse_raw/toolbar.png
new file mode 100644
index 0000000..46c90a0
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/toolbar.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-0.png b/doc/source/manual/mne_browse_raw/windows_menu-0.png
new file mode 100644
index 0000000..03c2995
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-0.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-1.png b/doc/source/manual/mne_browse_raw/windows_menu-1.png
new file mode 100644
index 0000000..cda820c
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-1.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-10.png b/doc/source/manual/mne_browse_raw/windows_menu-10.png
new file mode 100644
index 0000000..8562afc
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-10.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-11.png b/doc/source/manual/mne_browse_raw/windows_menu-11.png
new file mode 100644
index 0000000..9fb11a3
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-11.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-12.png b/doc/source/manual/mne_browse_raw/windows_menu-12.png
new file mode 100644
index 0000000..3727729
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-12.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-13.png b/doc/source/manual/mne_browse_raw/windows_menu-13.png
new file mode 100644
index 0000000..cafdb8b
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-13.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-14.png b/doc/source/manual/mne_browse_raw/windows_menu-14.png
new file mode 100644
index 0000000..f0644e2
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-14.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-15.png b/doc/source/manual/mne_browse_raw/windows_menu-15.png
new file mode 100644
index 0000000..92a658b
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-15.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-16.png b/doc/source/manual/mne_browse_raw/windows_menu-16.png
new file mode 100644
index 0000000..0e4d215
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-16.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-17.png b/doc/source/manual/mne_browse_raw/windows_menu-17.png
new file mode 100644
index 0000000..fc2662e
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-17.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-2.png b/doc/source/manual/mne_browse_raw/windows_menu-2.png
new file mode 100644
index 0000000..b774d61
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-2.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-3.png b/doc/source/manual/mne_browse_raw/windows_menu-3.png
new file mode 100644
index 0000000..57528c2
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-3.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-4.png b/doc/source/manual/mne_browse_raw/windows_menu-4.png
new file mode 100644
index 0000000..a3d9b20
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-4.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-5.png b/doc/source/manual/mne_browse_raw/windows_menu-5.png
new file mode 100644
index 0000000..894d61c
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-5.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-6.png b/doc/source/manual/mne_browse_raw/windows_menu-6.png
new file mode 100644
index 0000000..d396931
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-6.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-7.png b/doc/source/manual/mne_browse_raw/windows_menu-7.png
new file mode 100644
index 0000000..0c59ffd
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-7.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-8.png b/doc/source/manual/mne_browse_raw/windows_menu-8.png
new file mode 100644
index 0000000..b5f149a
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-8.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-9.png b/doc/source/manual/mne_browse_raw/windows_menu-9.png
new file mode 100644
index 0000000..9865ae3
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu-9.png differ
diff --git a/doc/source/manual/mne_browse_raw/windows_menu.png b/doc/source/manual/mne_browse_raw/windows_menu.png
new file mode 100644
index 0000000..81f1ea5
Binary files /dev/null and b/doc/source/manual/mne_browse_raw/windows_menu.png differ
diff --git a/doc/source/manual/morph.rst b/doc/source/manual/morph.rst
new file mode 100644
index 0000000..25b3c6c
--- /dev/null
+++ b/doc/source/manual/morph.rst
@@ -0,0 +1,409 @@
+
+
+.. _ch_morph:
+
+======================
+Morphing and averaging
+======================
+
+Overview
+########
+
+The spherical morphing of the surfaces accomplished by FreeSurfer can be
+employed to bring data from different subjects into a common anatomical
+frame. This chapter describes utilities which make use of the spherical morphing
+procedure. mne_morph_labels morphs
+label files between subjects, allowing the definition of labels in
+one brain and transforming them to anatomically analogous labels
+in another. mne_average_estimates offers
+the capability to compute averages of data computed with the MNE software
+across subjects.
+
+.. _CHDJDHII:
+
+The morphing maps
+#################
+
+The MNE software accomplishes morphing with the help of morphing
+maps which can be either computed on demand or precomputed using mne_make_morph_maps ,
+see :ref:`CHDBBHDH`. The morphing is performed with the help
+of the registered spherical surfaces (``lh.sphere.reg`` and ``rh.sphere.reg`` )
+which must be produced in FreeSurfer .
+A morphing map is a linear mapping from cortical surface values
+in subject A (:math:`x^{(A)}`) to those in another
+subject B (:math:`x^{(B)}`)
+
+.. math::    x^{(B)} = M^{(AB)} x^{(A)}\ ,
+
+where :math:`M^{(AB)}` is a sparse matrix
+with at most three nonzero elements on each row. These elements
+are determined as follows. First, using the aligned spherical surfaces,
+for each vertex :math:`x_j^{(B)}`, find the triangle :math:`T_j^{(A)}` on the
+spherical surface of subject A which contains the location :math:`x_j^{(B)}`.
+Next, find the numbers of the vertices of this triangle and set
+the corresponding elements on the *j*-th row of :math:`M^{(AB)}` so that :math:`x_j^{(B)}` will
+be a linear interpolation between the triangle vertex values reflecting
+the location :math:`x_j^{(B)}` within the triangle :math:`T_j^{(A)}`.
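+
+In schematic terms, one row of :math:`M^{(AB)}` could be filled as in the
+following NumPy/SciPy sketch (our own illustration of the definition, not
+MNE code; locating the triangle and computing the barycentric weights are
+assumed to be done already)::
+
+    from scipy.sparse import lil_matrix
+
+    def fill_morph_row(M, j, tri_vertices, bary_weights):
+        # tri_vertices: the three vertices of the triangle on subject A's
+        # spherical surface that contains vertex j of subject B.
+        # bary_weights: barycentric coordinates of that location; they sum
+        # to one, so row j linearly interpolates the three vertex values.
+        for v, w in zip(tri_vertices, bary_weights):
+            M[j, v] = w
+
+    M = lil_matrix((10242, 10242))
+    fill_morph_row(M, 0, (12, 40, 41), (0.2, 0.3, 0.5))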
+
+It follows from the above definition that in general
+
+.. math::    M^{(AB)} \neq (M^{(BA)})^{-1}\ ,
+
+*i.e.*,
+
+.. math::    x^{(A)} \neq M^{(BA)} M^{(AB)} x^{(A)}\ ,
+
+even if
+
+.. math::    x^{(A)} \approx M^{(BA)} M^{(AB)} x^{(A)}\ ,
+
+*i.e.*, the mapping is *almost* a
+bijection.
+
+.. _CHDEBAHH:
+
+About smoothing
+###############
+
+The current estimates are normally defined only in a decimated
+grid which is a sparse subset of the vertices in the triangular
+tessellation of the cortical surface. Therefore, any sparse set
+of values is distributed to neighboring vertices to make the visualized
+results easily understandable. This procedure has been traditionally
+called smoothing but a more appropriate name
+might be smudging or blurring in
+accordance with similar operations in image processing programs.
+
+In MNE software terms, smoothing of the vertex data is an
+iterative procedure, which produces a blurred image :math:`x^{(N)}` from
+the original sparse image :math:`x^{(0)}` by applying
+in each iteration step a sparse blurring matrix:
+
+.. math::    x^{(p)} = S^{(p)} x^{(p - 1)}\ .
+
+On each row :math:`j` of the matrix :math:`S^{(p)}` there
+are :math:`N_j^{(p - 1)}` nonzero entries whose values
+equal :math:`1/N_j^{(p - 1)}`. Here :math:`N_j^{(p - 1)}` is
+the number of immediate neighbors of vertex :math:`j` which
+had non-zero values at iteration step :math:`p - 1`.
+Matrix :math:`S^{(p)}` thus assigns the average
+of the non-zero neighbors as the new value for vertex :math:`j`.
+One important feature of this procedure is that it tends to preserve
+the amplitudes while blurring the surface image.
+
+Once the indices of the non-zero vertices in :math:`x^{(0)}` and
+the topology of the triangulation are fixed, the matrices :math:`S^{(p)}` are
+fixed and independent of the data. Therefore, it would be in principle
+possible to construct a composite blurring matrix
+
+.. math::    S^{(N)} = \prod_{p = 1}^N {S^{(p)}}\ .
+
+However, it turns out to be computationally more efficient
+to apply the blurring iteratively. The above formula for :math:`S^{(N)}` also
+shows that the smudging (smoothing) operation is linear.
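+
+A single iteration can be sketched in NumPy as follows (our own
+illustration, not the MNE implementation; vertices with no non-zero
+neighbors simply keep their previous value here)::
+
+    import numpy as np
+
+    def smooth_step(values, nonzero, neighbors):
+        # values: (n_vert,) image, zeros where undefined.
+        # nonzero: boolean mask of the currently defined vertices.
+        # neighbors: neighbors[j] lists the immediate neighbors of j.
+        new_values = values.copy()
+        new_nonzero = nonzero.copy()
+        for j, nbrs in enumerate(neighbors):
+            active = [v for v in nbrs if nonzero[v]]
+            if active:
+                # assign the average of the non-zero neighbors
+                new_values[j] = values[active].mean()
+                new_nonzero[j] = True
+        return new_values, new_nonzero
+
+    values = np.array([1.0, 0.0, 0.0])
+    neighbors = [[1], [0, 2], [1]]
+    values, nonzero = smooth_step(values, values != 0, neighbors)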
+
+.. _CHDBBHDH:
+
+Precomputing the morphing maps
+##############################
+
+The utility mne_make_morph_maps was
+created to assist mne_analyze and mne_make_movie in
+morphing. Since the morphing maps described above take a while to
+compute, it is beneficial to construct all necessary maps in advance
+before using mne_make_movie .
+The precomputed morphing maps are located in ``$SUBJECTS_DIR/morph-maps`` . mne_make_morph_maps creates
+this directory automatically if it does not exist. If this directory
+exists when mne_analyze or mne_make_movie is run
+and morphing is requested, the software first looks for already
+existing morphing maps there. Also, if mne_analyze or mne_make_movie have
+to recompute any morphing maps, they will be saved to ``$SUBJECTS_DIR/morph-maps`` if
+this directory exists.
+
+The names of the files in ``$SUBJECTS_DIR/morph-maps`` are
+of the form:
+
+ <*A*>-<*B*>-``morph.fif`` ,
+
+where <*A*> and <*B*> are
+names of subjects. These files contain the maps for both hemispheres,
+and in both directions, *i.e.*, both :math:`M^{(AB)}` and :math:`M^{(BA)}`, as
+defined above. Thus the files <*A*>-<*B*>-``morph.fif`` and <*B*>-<*A*>-``morph.fif`` are
+functionally equivalent. The name of the file produced by mne_analyze or mne_make_movie depends
+on the role of <*A*> and <*B*> in
+the analysis.
+
+If you choose to compute the morphing maps in batch in advance,
+use mne_make_morph_maps , which
+accepts the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---redo**
+
+    Recompute the morphing maps even if they already exist.
+
+**\---from <*subject*>**
+
+    Compute morphing maps from this subject.
+
+**\---to <*subject*>**
+
+    Compute morphing maps to this subject.
+
+**\---all**
+
+    Do all combinations. If this is used without either ``--from`` or ``--to`` options,
+    morphing maps for all possible combinations are computed. If ``--from`` or ``--to`` is
+    present, only maps between the specified subject and all others
+    are computed.
+
+.. note:: Because all morphing map files contain maps in both directions, the choice of the ``--from`` and ``--to`` options only affects the naming of the morphing map files to be produced. mne_make_morph_maps creates the directory ``$SUBJECTS_DIR/morph-maps`` if necessary.
+
+.. _CHDCEAFC:
+
+Morphing label data
+###################
+
+In some instances it is desirable to use anatomically equivalent
+labels for all subjects in a study. This can be accomplished by
+creating a set of labels in one subject and morphing them to another
+subject's anatomy using the spherical morphing procedure. mne_morph_labels was
+created to facilitate this task. It has the following command-line
+options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---from <*subject*>**
+
+    Name of the subject for which the labels were originally defined.
+
+**\---to <*subject*>**
+
+    Name of the subject for which the morphed labels should be created.
+
+**\---labeldir <*directory*>**
+
+    A directory containing the labels to morph.
+
+**\---prefix <*prefix*>**
+
+    Adds <*prefix*> at the beginning
+    of the output label names. A dash will be inserted between <*prefix*> and
+    the rest of the name.
+
+**\---smooth <*number*>**
+
+    Apply smoothing with the indicated number of iteration steps (see :ref:`CHDEBAHH`) to the labels before morphing them. This is
+    advisable because otherwise the resulting labels may have little
+    holes in them since the morphing map is not a bijection. By default,
+    two smoothsteps are taken.
+
+As the labels are morphed, a directory with the name of the
+subject specified with the ``--to`` option is created under
+the directory specified with ``--labeldir`` to hold the
+morphed labels.
+
+.. _CHDFDIFE:
+
+Averaging
+#########
+
+Overview
+========
+
+As illustrated in :ref:`CHDDJBDH`, cross-subject averaging
+involves three straightforward steps:
+
+- Use mne_make_movie to
+  create stc files morphed to a single subject. This requires the
+  use of the ``--morph`` option, see :ref:`CBBECEDE`.
+  The resulting files will have identical selections of vertices on
+  the cortical surface of the subject used in averaging. This step
+  can be sped up by precomputing the morphing maps employed in
+  the process, see :ref:`CHDBBHDH`.
+
+- Employ mne_average_estimates or
+  a Matlab script to read the data from the stc files and to produce
+  an output stc file containing the averaged data. The MNE Matlab
+  toolbox routines for reading and writing stc files are documented
+  in :ref:`ch_matlab`.
+
+- Use mne_analyze or mne_make_movie to
+  visualize the result or use the stc files from the previous step
+  in your own Matlab routines in further processing.
+
+.. _CHDDJBDH:
+
+.. figure:: pics/Averaging-flowchart.png
+    :alt: Workflow of the cross-subject averaging process in MNE
+
+    Workflow of the cross-subject averaging process
+    
+    References in parentheses indicate sections and chapters of this manual.
+
+.. note:: The old utility mne_grand_average has been removed from the MNE software because of its inefficiency. All users should adopt the combination of mne_make_movie and mne_average_estimates instead.
+
+.. warning:: With the ``--ico`` option it is now possible to generate source spaces with an equal number of vertices in each subject. This may lead to the wrong conclusion that stc data could be averaged without doing the morphing step first. Even with identical numbers of vertices in the source spaces it is mandatory to process the data through mne_make_movie to create corresponding source locations before using mne_average_estimates .
+
+.. _CHDEHFGD:
+
+The averager
+============
+
+mne_average_estimates is
+the new utility for averaging data in stc files. It requires that
+all stc files represent data on one individual's cortical
+surface and contain identical sets of vertices. mne_average_estimates uses
+linear interpolation to resample data in time as necessary. The
+command line arguments are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---desc <*filename*>**
+
+    Specifies the description file for averaging. The format of this
+    file is described below.
+
+The description file
+--------------------
+
+The description file for mne_average_estimates consists
+of a sequence of tokens, separated by whitespace (space, tab, or
+newline). If a token consists of several words it has to be enclosed
+in quotes. One or more tokens constitute a phrase, which has a
+meaning for the averaging definition. Any line starting with the
+pound sign (#) is considered to be a comment line. There are two
+kinds of phrases in the description file: global and contextual.
+The global phrases have the same meaning independent of their location
+in the file while the contextual phrases have different effects depending
+on their location in the file.
+
+There are three types of contexts in the description file:
+the global context, an input context,
+and the output context. At the
+beginning of the file, the context is global for
+defining global parameters. The input context
+defines one of the input files (subjects) while the output context
+specifies the destination for the average.
+
+The global phrases are:
+
+**tmin <*value/ms*>**
+
+    The minimum time to be considered. The output stc file starts at
+    this time point if the time ranges of the stc files include this
+    time. Otherwise the output starts from the next later available
+    time point.
+
+**tstep <*step/ms*>**
+
+    Time step between consecutive movie frames, specified in milliseconds.
+
+**tmax <*value/ms*>**
+
+    The maximum time point to be considered. A multiple of tstep will be
+    added to the first time point selected until this value or the last time
+    point in one of the input stc files is reached.
+
+**integ <*Δt/ms*>**
+
+    Integration time for each frame. Defaults to zero. The integration will
+    be performed on sensor data. If the time specified for a frame is :math:`t_0`,
+    the integration range will be :math:`t_0 - \Delta t/2 \leq t \leq t_0 + \Delta t/2`.
+
+**stc <*filename*>**
+
+    Specifies an input stc file. The filename can be specified with
+    one of the ``-lh.stc`` and ``-rh.stc`` endings
+    or without them. This phrase ends the present context and starts
+    an input context.
+
+**deststc <*filename*>**
+
+    Specifies the output stc file. The filename can be specified with
+    one of the ``-lh.stc`` and ``-rh.stc`` endings
+    or without them. This phrase ends the present context and starts
+    the output context.
+
+**lh**
+
+    Process the left hemisphere. By default, both hemispheres are processed.
+
+**rh**
+
+    Process the right hemisphere. By default, both hemispheres are processed.
+
+The contextual phrases are:
+
+**weight <*value*>**
+
+    Specifies the weight of the current data set. This phrase is valid
+    in the input and output contexts.
+
+**abs**
+
+    Specifies that the absolute value of the data should be taken. Valid
+    in all contexts. If specified in the global context, applies to
+    all subsequent input and output contexts. If specified in the input
+    or output contexts, applies only to the data associated with that
+    context.
+
+**pow <*value*>**
+
+    Specifies that the data should be raised to the specified power. For
+    negative values, the absolute value of the data will be taken and
+    the negative sign will be transferred to the result, unless abs is
+    specified. Valid in all contexts. Rules of application are identical
+    to abs .
+
+**sqrt**
+
+    Equivalent to pow 0.5.
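+
+Putting the phrases together, a small description file that averages two
+subjects might look as follows (the file names and numeric values are
+illustrative only)::
+
+    # global context
+    tmin 0
+    tstep 5
+    tmax 300
+    # first input context
+    stc subj1-meg
+    weight 1
+    # second input context
+    stc subj2-meg
+    weight 1
+    # output context
+    deststc grand-average
+    abs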
+
+The effects of the options can be summarized as follows.
+Suppose that the description file includes :math:`P` input contexts
+and the temporally resampled data are organized in matrices :math:`S^{(p)}`,
+where :math:`p = 1 \dotso P` is the subject index, and
+the rows are the signals at different vertices of the cortical surface.
+The average computed by mne_average_estimates is
+then:
+
+.. math::    A_{jk} = w\,[\mathrm{sgn}(B_{jk})]^{\alpha}\,|B_{jk}|^{\beta}
+
+with
+
+.. math::    B_{jk} = \sum_{p = 1}^{P} {\bar{w_p}\,[\mathrm{sgn}(S_{jk}^{(p)})]^{\alpha_p}\,|S_{jk}^{(p)}|^{\beta_p}}
+
+and
+
+.. math::    \bar{w_p} = w_p / \sum_{p = 1}^{P} {|w_p|}\ .
+
+In the above, :math:`\beta_p` and :math:`w_p` are
+the powers and weights assigned to each of the subjects whereas :math:`\beta` and :math:`w` are
+the output power and weight, respectively. The sign is either
+included (:math:`\alpha_p = 1`, :math:`\alpha = 1`)
+or omitted (:math:`\alpha_p = 2`, :math:`\alpha = 2`)
+depending on the presence of abs phrases in the description file.
+
+.. note:: mne_average_estimates requires that the number of vertices in the stc files is the same and that the vertex numbers are identical. This will be the case if the files have been produced in mne_make_movie using the ``--morph`` option.
+
+.. note:: It is straightforward to read and write stc files using the MNE Matlab toolbox described in :ref:`ch_matlab` and thus write custom Matlab functions to realize more complicated group analysis tools.
diff --git a/doc/source/manual/pics/Averaging-flowchart.png b/doc/source/manual/pics/Averaging-flowchart.png
new file mode 100644
index 0000000..343ff02
Binary files /dev/null and b/doc/source/manual/pics/Averaging-flowchart.png differ
diff --git a/doc/source/manual/pics/CoordinateSystems.png b/doc/source/manual/pics/CoordinateSystems.png
new file mode 100644
index 0000000..f71441a
Binary files /dev/null and b/doc/source/manual/pics/CoordinateSystems.png differ
diff --git a/doc/source/manual/pics/Digitizer-example.png b/doc/source/manual/pics/Digitizer-example.png
new file mode 100644
index 0000000..35ca1aa
Binary files /dev/null and b/doc/source/manual/pics/Digitizer-example.png differ
diff --git a/doc/source/manual/pics/Flowchart.png b/doc/source/manual/pics/Flowchart.png
new file mode 100644
index 0000000..2b7edef
Binary files /dev/null and b/doc/source/manual/pics/Flowchart.png differ
diff --git a/doc/source/manual/pics/HeadCS.png b/doc/source/manual/pics/HeadCS.png
new file mode 100644
index 0000000..a55c2b3
Binary files /dev/null and b/doc/source/manual/pics/HeadCS.png differ
diff --git a/doc/source/manual/pics/cover.png b/doc/source/manual/pics/cover.png
new file mode 100644
index 0000000..305c526
Binary files /dev/null and b/doc/source/manual/pics/cover.png differ
diff --git a/doc/source/manual/pics/flat.png b/doc/source/manual/pics/flat.png
new file mode 100644
index 0000000..256ef7b
Binary files /dev/null and b/doc/source/manual/pics/flat.png differ
diff --git a/doc/source/manual/pics/morphed.png b/doc/source/manual/pics/morphed.png
new file mode 100644
index 0000000..489f4b7
Binary files /dev/null and b/doc/source/manual/pics/morphed.png differ
diff --git a/doc/source/manual/pics/neuromag.png b/doc/source/manual/pics/neuromag.png
new file mode 100644
index 0000000..6a890b0
Binary files /dev/null and b/doc/source/manual/pics/neuromag.png differ
diff --git a/doc/source/manual/pics/orig.png b/doc/source/manual/pics/orig.png
new file mode 100644
index 0000000..629b0b8
Binary files /dev/null and b/doc/source/manual/pics/orig.png differ
diff --git a/doc/source/manual/pics/proj-off-on.png b/doc/source/manual/pics/proj-off-on.png
new file mode 100644
index 0000000..54e50f5
Binary files /dev/null and b/doc/source/manual/pics/proj-off-on.png differ
diff --git a/doc/source/manual/pics/title_page.png b/doc/source/manual/pics/title_page.png
new file mode 100644
index 0000000..b357834
Binary files /dev/null and b/doc/source/manual/pics/title_page.png differ
diff --git a/doc/source/manual/reading.rst b/doc/source/manual/reading.rst
new file mode 100644
index 0000000..248972e
--- /dev/null
+++ b/doc/source/manual/reading.rst
@@ -0,0 +1,145 @@
+
+
+.. _ch_reading:
+
+====================
+Related publications
+====================
+
+General MEG reviews
+###################
+
+M. Hämäläinen, R. Hari, R. Ilmoniemi,
+J. Knuutila, and O. V. Lounasmaa, "Magnetoencephalography - theory,
+instrumentation, and applications to noninvasive studies of the
+working human brain," Reviews of Modern Physics, vol. 65, pp. 413-497,
+1993.
+
+S. Baillet, J. C. Mosher, and R. M. Leahy, "Electromagnetic
+Brain Mapping," IEEE Signal Processing Magazine, vol. 18, pp. 14
+- 30, 2001.
+
+M. Hämäläinen and R. Hari, "Magnetoencephalographic
+Characterization of Dynamic Brain Activation: Basic Principles and
+Methods of Data Collection and Source Analysis," in Brain Mapping:
+The Methods, A. W. Toga and J. C. Mazziotta, Eds. Amsterdam;
+Boston: Academic Press, 2002.
+
+Cortical surface reconstruction and morphing
+############################################
+
+A. M. Dale, B. Fischl, and M. I. Sereno, "Cortical surface-based
+analysis. I. Segmentation and surface reconstruction," Neuroimage,
+vol. 9, pp. 179-94, 1999.
+
+B. Fischl, M. I. Sereno, and A. M. Dale, "Cortical surface-based
+analysis. II: Inflation, flattening, and a surface-based coordinate
+system," Neuroimage, vol. 9, pp. 195-207., 1999.
+
+B. Fischl, M. I. Sereno, R. B. Tootell, and A. M. Dale, "High-resolution intersubject
+averaging and a coordinate system for the cortical surface," Hum
+Brain Mapp, vol. 8, pp. 272-84, 1999.
+
+.. _CEGEGDEI:
+
+Forward modeling
+################
+
+M. S. Hämäläinen and J. Sarvas,
+"Realistic conductivity geometry model of the human head for interpretation
+of neuromagnetic data," IEEE Trans. Biomed. Eng., vol. BME-36, pp.
+165-171, 1989.
+
+B. Fischl, D. H. Salat, A. J. van der Kouwe, N. Makris, F.
+Segonne, B. T. Quinn, and A. M. Dale, "Sequence-independent segmentation
+of magnetic resonance images," Neuroimage, vol. 23 Suppl 1, pp.
+S69-84, 2004.
+
+F. Segonne, A. M. Dale, E. Busa, M. Glessner, D. Salat, H.
+K. Hahn, and B. Fischl, "A hybrid approach to the skull stripping
+problem in MRI," Neuroimage, vol. 22, pp. 1060-75, Jul 2004.
+
+J. Jovicich, S. Czanner, D. Greve, E. Haley, A. van der Kouwe,
+R. Gollub, D. Kennedy, F. Schmitt, G. Brown, J. Macfall, B. Fischl,
+and A. Dale, "Reliability in multi-site structural MRI studies:
+effects of gradient non-linearity correction on phantom and human
+data," Neuroimage, vol. 30, pp. 436-43, 2006.
+
+J. C. Mosher, R. M. Leahy, and P. S. Lewis, "EEG and MEG:
+forward solutions for inverse methods," IEEE Trans Biomed Eng, vol.
+46, pp. 245-59, 1999.
+
+.. _CEGIEEBB:
+
+Signal-space projections
+########################
+
+C. D. Tesche, M. A. Uusitalo, R. J. Ilmoniemi, M. Huotilainen,
+M. Kajola, and O. Salonen, "Signal-space projections of MEG data
+characterize both distributed and well-localized neuronal sources,"
+Electroencephalogr Clin Neurophysiol, vol. 95, pp. 189-200, 1995.
+
+M. A. Uusitalo and R. J. Ilmoniemi, "Signal-space projection
+method for separating MEG or EEG into components," Med Biol Eng
+Comput, vol. 35, pp. 135-40, 1997.
+
+Minimum-norm estimates
+######################
+
+M. Hämäläinen and R. Ilmoniemi,
+"Interpreting  magnetic fields of the brain: minimum norm estimates,"
+Helsinki University of Technology, Espoo TKK-F-A559, 1984.
+
+A. Dale and M. Sereno, "Improved localization of cortical
+activity by combining EEG and MEG with MRI cortical surface reconstruction:
+A linear approach," J. Cog. Neurosci, vol. 5, pp. 162-176, 1993.
+
+M. S. Hämäläinen and R. J. Ilmoniemi,
+"Interpreting magnetic fields of the brain: minimum norm estimates,"
+Med Biol Eng Comput, vol. 32, pp. 35-42., 1994.
+
+A. M. Dale, A. K. Liu, B. R. Fischl, R. L. Buckner, J. W.
+Belliveau, J. D. Lewine, and E. Halgren, "Dynamic statistical parametric
+mapping: combining fMRI and MEG for high-resolution imaging of cortical
+activity," Neuron, vol. 26, pp. 55-67, 2000.
+
+A. K. Liu, A. M. Dale, and J. W. Belliveau, "Monte Carlo
+simulation studies of EEG and MEG localization accuracy," Hum Brain
+Mapp, vol. 16, pp. 47-62, 2002.
+
+F. H. Lin, J. W. Belliveau, A. M. Dale, and M. S. Hamalainen,
+"Distributed current estimates using cortical orientation constraints,"
+Hum Brain Mapp, vol. 27, pp. 1-13, 2006.
+
+T. F. Oostendorp, J. Delbeke, and D. F. Stegeman, "The conductivity
+of the human skull: results of in vivo and in vitro measurements,"
+IEEE Trans Biomed Eng, vol. 47, pp. 1487-92, Nov 2000.
+
+S. I. Gonçalves, J. C. de Munck, J. P. Verbunt,
+F. Bijma, R. M. Heethaar, and F. Lopes da Silva, "In vivo measurement
+of the brain and skull resistivities using an EIT-based method and
+realistic models for the head," IEEE Trans Biomed Eng, vol. 50,
+pp. 754-67, 2003.
+
+S. Lew, C. H. Wolters, A. Anwander, S. Makeig, and R. S.
+MacLeod, "Improved EEG source analysis using low-resolution conductivity
+estimation in a four-compartment finite element head model," Hum
+Brain Mapp, vol. 30, pp. 2862-78, 2009.
+
+fMRI-weighted estimates
+#######################
+
+A. M. Dale, A. K. Liu, B. R. Fischl, R. L. Buckner, J. W.
+Belliveau, J. D. Lewine, and E. Halgren, "Dynamic statistical parametric
+mapping: combining fMRI and MEG for high-resolution imaging of cortical
+activity," Neuron, vol. 26, pp. 55-67, 2000.
+
+A. K. Liu, J. W. Belliveau, and A. M. Dale, "Spatiotemporal
+imaging of human brain activity using functional MRI constrained
+magnetoencephalography data: Monte Carlo simulations," Proc Natl
+Acad Sci U S A, vol. 95, pp. 8945-50., 1998.
+
+F. H. Lin, T. Witzel, M. S. Hamalainen, A. M. Dale, J. W.
+Belliveau, and S. M. Stufflebeam, "Spectral spatiotemporal imaging
+of cortical oscillations and interactions in the human brain," Neuroimage,
+vol. 23, pp. 582-95, 2004.
diff --git a/doc/source/manual/sampledata.rst b/doc/source/manual/sampledata.rst
new file mode 100644
index 0000000..60c3707
--- /dev/null
+++ b/doc/source/manual/sampledata.rst
@@ -0,0 +1,768 @@
+
+
+.. _ch_sample_data:
+
+===================
+The sample data set
+===================
+
+Purpose
+#######
+
+This Chapter gives a detailed description of the processing
+of a sample data set, which you can use to familiarize yourself
+with the workflow described in :ref:`ch_cookbook`.
+
+.. note:: Going through the analysis exercise in    this chapter is not a substitute for reading other chapters of this    manual and understanding the concepts underlying MNE software.
+
+.. _sample_data_overview:
+
+Overview
+########
+
+The MNE software is accompanied by a sample data set which
+includes the MRI reconstructions created with FreeSurfer and
+an MEG/EEG data set. These data were acquired with the Neuromag
+Vectorview system at the MGH/HMS/MIT Athinoula A. Martinos Center
+for Biomedical Imaging. EEG data from a 60-channel electrode cap
+were acquired simultaneously with the MEG. The original MRI data
+set was acquired with a Siemens 1.5 T Sonata scanner using an
+MPRAGE sequence.
+
+.. note:: These data are provided solely for the    purpose of getting familiar with the MNE software. They should not    be redistributed to third parties. The data should not be used to    evaluate the performance of the MEG or MRI system employed.
+
+In the MEG/EEG experiment, checkerboard patterns were presented
+into the left and right visual field, interspersed by tones to the
+left or right ear. The interval between the stimuli was 750 ms. Occasionally
+a smiley face was presented at the center of the visual field.
+The subject was asked to press a key with the right index finger
+as soon as possible after the appearance of the face. A listing
+of the corresponding trigger codes is provided in :ref:`BABDHIFJ`
+
+.. _BABDHIFJ:
+
+.. table:: Trigger codes for the sample data set.
+
+    =========  =====  ==========================================
+    Name       #      Contents
+    =========  =====  ==========================================
+    LA         1      Response to left-ear auditory stimulus
+    RA         2      Response to right-ear auditory stimulus
+    LV         3      Response to left visual field stimulus
+    RV         4      Response to right visual field stimulus
+    smiley     5      Response to the smiley face
+    button     32     Response triggered by the button press
+    =========  =====  ==========================================
+
+Setting up
+##########
+
+The sample data set is distributed with the MNE software
+as a compressed tar archive located at ``$MNE_ROOT/sample-data/MNE-sample-data.tar.gz`` .
+To make a personal copy of the sample data set, follow these steps:
+
+- Set up for using the MNE software as
+  instructed in :ref:`user_environment` of this manual.
+
+- Create a directory for your personal copy: ``mkdir`` <*yourdir*> , where <*yourdir*> is
+  the location where you want your personal copy to reside. To store
+  the sample data set and to finish the tutorials in this Chapter, you
+  need approximately 600 MBytes of space on the disk where <*yourdir*> is
+  located.
+
+- Go to your newly created sample data directory: ``cd`` <*yourdir*> .
+
+- Extract the sample data: ``tar zxvf`` <*dir*> ``/MNE-sample-data.tar.gz`` ,
+  where <*dir*> is the location
+  of the tar archive, provided by your system administrator.
+
+To start the tutorials you need to:
+
+- Set up MNE software user environment, see :ref:`user_environment`.
+
+- Set the SUBJECTS_DIR environment variable: ``setenv SUBJECTS_DIR`` <*yourdir*> ``/subjects`` (csh
+  and tcsh) or ``export SUBJECTS_DIR=`` <*yourdir*> ``/subjects`` (POSIX-compatible
+  shell). Most users at the Martinos Center have tcsh as their login
+  shell. A complete sketch of this setup is shown after this list.
+
+- Assign the SUBJECT environment variable the value ``sample`` .
+
+- For convenience, you can also set the environment variable
+  SAMPLE to <*yourdir*> . The following
+  tutorial examples assume you have done this.
+
+- Set up the FreeSurfer environment
+  using the commands specific to your site. The FreeSurfer license
+  is needed for the source space creation covered in :ref:`CHDIGEJG`.
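+
+A minimal sketch of the complete environment setup in a
+POSIX-compatible shell follows; the path ``/home/you/mne-sample``
+stands in for <*yourdir*> and is only an illustration::
+
+    export SUBJECTS_DIR=/home/you/mne-sample/subjects
+    export SUBJECT=sample
+    export SAMPLE=/home/you/mne-sample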
+
+.. note:: From this point on, directories and files under    your personal copy of the sample data set under <*yourdir*> will    be referred to by relative pathnames. For example, the file <*yourdir*> ``/MEG/sample/audvis.ave`` will    be simply called ``MEG/sample/audvis.ave`` .
+
+.. note:: You can also proceed without FreeSurfer installed    if you choose to use source space creation using the recursively    subdivided octahedron or icosahedron method. For more information,    see the Note in :ref:`CHDIGEJG`.
+
+Contents of the data set
+########################
+
+The sample data set contains two main directories: ``MEG/sample`` (the MEG/EEG
+data) and ``subjects/sample`` (the MRI reconstructions).
+An overview of the data provided is given in Tables :ref:`CHDDDIHE` and :ref:`CHDDEGGC`. In addition to
+subject sample, the MRI surface reconstructions from another subject,
+morph, are provided to demonstrate the morphing capabilities of the
+MNE software.
+
+.. _CHDDDIHE:
+
+.. table:: Contents of the MEG/sample directory.
+
+    ========================  =====================================================================
+    File                      Contents
+    ========================  =====================================================================
+    sample_audvis_raw.fif     The raw MEG/EEG data
+    audvis.ave                A template script for off-line averaging
+    audvis.cov                A template script for the computation of a noise-covariance matrix
+    ========================  =====================================================================
+
+.. _CHDDEGGC:
+
+.. table:: Overview of the contents of the subjects/sample directory.
+
+    =======================  ======================================================================
+    File / directory         Contents
+    =======================  ======================================================================
+    bem                      Directory for the forward modelling data
+    bem/watershed            BEM surface segmentation data computed with the watershed algorithm
+    bem/inner_skull.surf     Inner skull surface for BEM
+    bem/outer_skull.surf     Outer skull surface for BEM
+    bem/outer_skin.surf      Skin surface for BEM
+    sample-head.fif          Skin surface in fif format for mne_analyze visualizations
+    surf                     Surface reconstructions
+    mri/T1                   The T1-weighted MRI data employed in visualizations
+    =======================  ======================================================================
+
+The following preprocessing steps have already been accomplished
+in the sample data set:
+
+- The MRI surface reconstructions have
+  been computed using the FreeSurfer software.
+
+- The BEM surfaces have been created with the watershed algorithm,
+  see :ref:`BABBDHAG`.
+
+- The MEG/EEG raw data file has been checked with the utilities described
+  in :ref:`BABCDBDI` and :ref:`BABCDFJH`.
+
+- Template scripts for averaging and computation of the noise-covariance
+  matrices have been written.
+
+Setting up subject-specific data
+################################
+
+.. _CHDBBAEJ:
+
+Structural MRIs
+===============
+
+To set up the structural MRIs for processing with the Neuromag
+MRI viewer, MRIlab, say
+
+``mne_setup_mri``
+
+This command sets up the directories ``subjects/sample/mri/T1-neuromag`` and ``subjects/sample/mri/brain-neuromag`` .
+For more information, see :ref:`BABCCEHF`.
+
+.. _CHDIGEJG:
+
+Source space
+============
+
+The source space with a 5-mm grid spacing is set up by saying:
+
+``mne_setup_source_space --ico -6``
+
+This command sets up the source-space related files in directory ``subjects/sample/bem`` as
+described in :ref:`CIHCHDAE`.
+
+.. _CHDJDGBD:
+
+Boundary-element models
+=======================
+
+The geometry calculations for the single-layer boundary-element
+model are accomplished with the command:
+
+``mne_setup_forward_model --homog --surf --ico 4``
+
+This command sets up the homogeneous BEM-model related files
+in directory ``subjects/sample/bem`` as described in :ref:`CIHDBFEG`. In
+addition to the homogeneous BEM, you also need the three-layer BEM model,
+which can be used for both EEG and MEG:
+
+``mne_setup_forward_model --surf --ico 4``
+
+The above commands employ the ``inner_skull.surf`` , ``outer_skull.surf`` ,
+and ``outer_skin.surf`` triangulation files located in ``subjects/sample/bem`` .
+The option ``--ico 4`` will create a model with 5120 triangles
+on each surface. Depending on the speed of your computer, the three-layer
+model may take quite a while to set up.
+
+Setting up a custom EEG layout
+##############################
+
+A data-specific EEG layout will facilitate viewing of the
+EEG data. The MNE programs mne_browse_raw and mne_analyze look
+for user-specific layouts in ``$HOME/.mne/lout`` . Thus,
+you can create an EEG layout for the sample data set with the following
+commands:
+
+``mkdir -p $HOME/.mne/lout``
+
+``cd $SAMPLE/MEG/sample``
+
+``mne_make_eeg_layout --fif sample_audvis_raw.fif --lout $HOME/.mne/lout/sample-EEG.lout``
+
+Please refer to :ref:`CHDDGDJA` for more information
+on mne_make_eeg_layout .
+
+.. note:: It is usually sufficient to create one EEG layout    for each electrode cap you are using in your experiment rather than    using a different layout file for each data file generated using    the same cap.
+
+Previewing the data
+###################
+
+Previewing your raw data before proceeding to averaging and
+computation of the current estimates is most important to avoid
+unintentional errors caused by noisy or dysfunctional channels,
+frequent eye blinks, inappropriate bandpass filtering, etc.
+
+One possible road map for the preview session is outlined
+below:
+
+- Go to the MEG/EEG data directory: ``cd $SAMPLE/MEG/sample`` .
+
+- Launch mne_browse_raw .
+
+- Open the raw data file ``sample_audvis_raw.fif`` from File/Open... .
+
+- Switch all SSP vectors off from Adjust/Projection... .
+
+- Set the lowpass filter corner to a high value, e.g., 150 Hz
+  from Adjust/Filter... .
+
+- Browse through all channels using the selections available
+  under Adjust/Selection... and
+  look for very noisy or flat channels. You should be able to readily
+  identify two such channels among all MEG and EEG channels. You may
+  need to click Remove DC to reliably
+  associate the noisy or flat waveform with the channel name on the
+  left. Also, experiment with switching the EEG average reference
+  projection on and off; you will notice that the bad EEG channel
+  cannot be discerned when the projection is on.
+
+- Mark the channels you identified in the previous step as bad for this viewing
+  session by clicking on their channel names on the left. You can
+  save the bad channel selection to a file from File/Apply bad channels . The bad channel markings can be removed
+  by clicking on the channel names again and selecting File/Apply bad channels . Alternatively, you can use the utility mne_mark_bad_channels to
+  set a bad channel selection, see :ref:`CHDDHBEE`.
+
+- Switch the projections back on and change filter to a 40-Hz
+  lowpass.
+
+- Compute a preliminary average for the left-ear auditory stimulus:
+
+  - Open the averaging preferences dialog
+    (Adjust/Averaging preferences... ).
+
+  - Set the time scale to -100...300 ms.
+
+  - Click on the text next to Average: in
+    the main window and press return. After a while, a topographical
+    display appears with the averaged responses. Notice that the bad
+    channels are not displayed.
+
+  - Change to different layouts from Adjust/Full view layout... . Inspect both the MEG and EEG waveforms.
+
+- Compute a preliminary average for the right-ear auditory stimulus:
+
+  - Open the averaging preferences.
+
+  - Click on the Trace color... button
+    and change the trace color to something different from the default
+    yellow.
+
+  - Change the text next to Average: to
+    2 and press return. The average to the right-ear tones will be computed.
+    Compare the two sets of averages and verify that all channels show
+    reasonable data.
+
+- Go to Windows/Manage averages... and
+  delete the preliminary averages just computed.
+
+After these steps, you are ready to proceed to the actual
+analysis.
+
+Off-line averaging
+##################
+
+Go to directory ``$SAMPLE/MEG/sample`` . With help
+of :ref:`CACBBDGC`, familiarize yourself with the averaging
+script ``audvis.ave`` .
+
+Using the averaging script interactively
+========================================
+
+You can invoke an averaging script in mne_browse_raw from Process/Average... .
+Select the ``audvis.ave`` script from the file selection
+box that appears. Once averaging is complete, you can inspect the
+details of the averaged responses in the Averages window,
+which appears automatically. You can redisplay it from Windows/Show averages... . The window, which appears when you
+select Adjust/Manage averages... allows
+you to:
+
+- Select which conditions (categories)
+  are displayed.
+
+- Change the trace colors.
+
+- Inspect the averaging log.
+
+- Save the averaged data.
+
+- Delete this set of averages.
+
+.. note:: If you decide to save the averages in the interactive    mode, use the name ``sample_audvis-ave.fif`` for the result.
+
+Using the averaging script in batch mode
+========================================
+
+The batch-mode version of mne_browse_raw , mne_process_raw , can
+be used for averaging as well. Batch mode averaging can be done
+with the command:
+
+``mne_process_raw --raw sample_audvis_raw.fif `` ``--lowpass 40 --projoff `` ``--saveavetag -ave --ave audvis.ave``
+
+The functions of the options are:
+
+**\---raw**
+
+    Specifies the raw file.
+
+**\---lowpass**
+
+    Specifies the lowpass filter corner frequency.
+
+**\---projoff**
+
+    Do not apply signal-space projection and average electrode reference
+    to the data. Regardless, the projection information is included with
+    the data file so that it can be applied later. It is also possible
+    to specify the ``--projon`` option but then there is no
+    possibility to view the original data in subsequent phases of the
+    analysis.
+
+**\---saveavetag**
+
+    Specifies how the averages are named. With this option, the ``_raw.fif`` ending
+    is stripped off the original raw data file name and the tag specified
+    with this option (here ``-ave`` ) is added. The average file
+    and the corresponding log file will have the extensions ``.fif`` and ``.log`` , respectively.
+
+**\---ave**
+
+    Specifies the averaging script.
+
+As a result of running the averaging script, a file called ``sample_audvis-ave.fif`` is
+created. It contains averages to the left- and right-ear auditory
+as well as to the left and right visual field stimuli.
+
+.. _CHDHBGGH:
+
+Viewing the off-line average
+############################
+
+The average file computed in the previous section can be
+viewed in  mne_browse_raw .
+
+To view the averaged signals, invoke mne_browse_raw :
+
+``cd $SAMPLE/MEG/sample``
+
+``mne_browse_raw &``
+
+This Section gives only very basic information about the
+use of mne_browse_raw for viewing
+evoked-response data. Please consult :ref:`ch_browse` for more
+comprehensive information.
+
+Loading the averages
+====================
+
+mne_browse_raw loads all
+the available data from an average file at once:
+
+- Select Open evoked... from
+  the File menu.
+
+- Select the average file ``sample_audvis-ave.fif``
+  from the list and click OK .
+
+- A topographical display of the waveforms with gradiometer
+  channels included appears.
+
+Inspecting the auditory data
+============================
+
+Select the left and right ear auditory stimulus responses
+for display:
+
+- Select Manage averages... from
+  the Adjust menu.
+
+- Click off all other conditions except the auditory ones.
+
+Set the time scale and baseline:
+
+- Select Scales... from
+  the Adjust menu.
+
+- Switch off Autoscale time range and
+  set the Average time range from -200
+  to 500 ms.
+
+- Switch on Use average display baseline and
+  set Average display baseline from
+  -200 to 0 ms.
+
+- Click OK .
+
+You can display a subset of responses from the topographical display
+by holding the shift key down and dragging with the mouse, left
+button down. When you drag on a response with just the left button
+down, the signal timing and channel name are displayed at the bottom. If
+the left mouse button is down and you then press shift, the time
+is given both in absolute units and relative to the point where shift
+was pressed down.
+
+Observe the following:
+
+- The main deflection occurs around 100 ms
+  over the left and right temporal areas.
+
+- The left-ear response (shown in yellow) is stronger on the
+  right than on the left. The opposite is true for the right-ear response,
+  shown in red.
+
+Inspecting the visual data
+==========================
+
+Go back to the Manage averages... dialog
+and switch off all other conditions except the visual ones.
+
+Observe the following:
+
+- The left and right visual field responses
+  are quite different in spatial distribution in the occipital area.
+
+- There is a later response in the right parietal area, almost
+  identical to both visual stimuli.
+
+.. note:: If you have the Neuromag software available,    the averaged data can be also viewed in the Neuromag data plotter    (xplotter ). See :ref:`BABGFDJG` for    instructions on how to use the Neuromag software at the MGH Martinos    Center.
+
+Computing the noise-covariance matrix
+#####################################
+
+Another piece of information derived from the raw data file
+is the estimate for the noise-covariance matrix, which can be computed
+with the command:
+
+``mne_process_raw --raw sample_audvis_raw.fif `` ``--lowpass 40 --projon `` ``--savecovtag -cov --cov audvis.cov``
+
+Using the definitions in ``audvis.cov`` , this command
+will create the noise-covariance matrix file ``sample_audvis-cov.fif`` .
+In this case the projections are set on. The projection information
+is then attached to the noise-covariance matrix and will be automatically
+loaded when the inverse-operator decomposition is computed.
+
+.. note:: You can study the contents of the covariance    matrix computation description file ``audvis.cov`` with the help of :ref:`CACEBACG`.
+
+.. _CHDIJBIG:
+
+MEG-MRI coordinate system alignment
+###################################
+
+The mne_analyze module
+of the MNE software is one option for the coordinate alignment. It uses a
+triangulated scalp surface to facilitate the alignment.
+
+.. _CHDEDCAE:
+
+Initial alignment
+=================
+
+Follow these steps to make an initial approximation for the
+coordinate alignment.
+
+- Go to directory ``MEG/sample`` .
+
+- Launch mne_analyze
+
+- Select File/Load digitizer data... and
+  load the digitizer data from ``sample_audvis_raw.fif`` .
+
+- Load an inflated surface for subject sample from File/Load surface...
+
+- Bring up the viewer window from View/Show viewer...
+
+- Click Options... in the
+  viewer window. Make the following selections:
+
+  - Switch left and right cortical surface
+    display off.
+
+  - Make the scalp transparent.
+
+  - Switch Digitizer data on.
+
+- After a while, the digitizer points will be shown. The color
+  of the circles indicates whether the point is inside (blue) or outside
+  (red) of the scalp. The HPI coils are shown in green and the landmark
+  locations in light blue or light red color. The initial alignment
+  is way off!
+
+- Switch the Digitizer data off
+  to get the big circles out of the way.
+
+- Bring up the coordinate alignment window from Adjust/Coordinate alignment...
+
+- Click on the RAP (Right
+  Auricular Point) button. It turns red, indicating that you should
+  select the point from the viewer window. Click at the approximate
+  location of this point in the viewer. The button jumps up, turns
+  to normal color, and the MRI coordinates of the point appear in
+  the text fields next to the button.
+
+- Proceed similarly for the other two landmark points: Nasion
+  and LAP (Left Auricular Point).
+
+- Press Align using fiducials .
+  Notice that the coordinate transformation changes from a unit transformation
+  (no rotation, no origin translation) to one determined by the
+  identified landmark locations. The rotation matrix (upper left 3
+  x 3 part of the transformation) should have positive values close
+  to one on the diagonal. There is a significant rotation around the
+  x axis as indicated by elements (3,2) and (2,3) of the rotation
+  matrix. The *x* and *y* values
+  of the translation should be small and the *z* value
+  should be negative, around -50 mm. An example of an initial
+  coordinate transformation is shown in :ref:`CHDFIHAC`.
+
+- Make the Digitizer data again
+  visible from the options of the viewer window. Note that the points
+  are now much closer to the scalp surface.
+
+.. _CHDFIHAC:
+
+.. figure:: pics/Digitizer-example.png
+    :alt: Example of an initial coordinate alignment
+
+    Example of an initial coordinate alignment.
+
+Refining the coordinate transformation
+======================================
+
+Before proceeding to the refinement procedure, it is useful
+to remove outlier digitizer points. When you rotate the image in
+the viewer window, you will notice that there is at least one such
+point over the right cheek. To discard this point:
+
+- Click on Discard in
+  the Adjust coordinate alignment window.
+
+- Enter 10 for the distance of the points to be discarded.
+
+- Click done. The outlier point disappears.
+
+The coordinate transformation can be adjusted manually with
+the arrow buttons in the middle part of the Adjust coordinate alignment dialog. These buttons move
+the digitizer points in the directions indicated by the amount listed
+next to each of the buttons.
+
+An automatic iterative procedure, Iterative Closest Point
+(ICP) matching, is also provided. At each iteration step:
+
+- For each digitizer point, transformed
+  from MEG to the MRI coordinate frame, the closest point on the triangulated
+  surface is determined.
+
+- The best coordinate transformation aligning the digitizer
+  points with the closest points on the head surface is computed.
+
+In step 2 of the iteration, the nasion is assigned five times
+the weight of the other points since it can be assumed that the
+nasion is the easiest point to identify reliably from the surface
+image.
+
+The ICP alignment can be invoked by entering the desired
+number of iterations next to the ICP align button
+followed by return or simply pressing the ICP align button.
+The iteration will converge in 10 to 20 steps.
+
+.. warning:: Use the ICP alignment option in mne_analyze with    caution. The iteration will not converge to a reasonable solution    unless an initial alignment is performed first according to :ref:`CHDEDCAE`. Outlier points should be excluded as described    above. No attempt is made to compensate for the possible distance    of the digitized EEG electrode locations from the scalp.
+
+Saving the transformation
+=========================
+
+To create an MRI fif description file which incorporates the
+coordinate transformation, click Save MRI set in
+the Adjust coordinate alignment dialog.
+This will create the MRI set file in the ``$SUBJECTS_DIR/sample/mri/T1-neuromag/sets`` directory,
+which was created by mne_setup_mri ,
+see :ref:`CHDBBAEJ`. The file will be called
+
+``COR-`` <*username*>- <*date*>- <*time*> .fif
+
+where <*username*> is
+your login name.
+
+You can also save the transformation to a fif file through the Save... button.
+If the file does not exist, it will only contain the coordinate
+transformation. If the file exists, the transformation will be inserted into the appropriate
+context. An existing transformation will not be replaced unless Overwrite existing transform is checked in the save dialog.
+
+Once you have saved the coordinate transformation, press Done and
+quit mne_analyze (File/Quit ).
+
+.. note:: If you dismiss the alignment dialog before    saving the transformation, it will be lost.
+
+The forward solution
+####################
+
+To compute the forward solution, say:
+
+``cd $SAMPLE/MEG/sample``
+
+``mne_do_forward_solution --mindist 5 --spacing oct-6 --bem sample-5120-5120-5120 --meas sample_audvis-ave.fif``
+
+This produces an EEG and MEG forward solution with source
+space points closer than 5 mm to the inner skull surface omitted.
+The source space created in :ref:`CHDIGEJG` will be employed.
+As the output from this command will indicate, the forward solution
+will be stored in the file ``sample_audvis-ave-oct-6-fwd.fif`` .
+
+This command uses the three-layer BEM model ``sample-5120-5120-5120-bem-sol.fif`` created
+in :ref:`CHDJDGBD`. If you want to use the single-compartment
+BEM ``sample-5120-bem-sol.fif`` , usable for MEG data only,
+say:
+
+``cd $SAMPLE/MEG/sample``
+
+``mne_do_forward_solution --mindist 5 --spacing oct-6 --meas sample_audvis-ave.fif --bem sample-5120 --megonly``
+
+The inverse operator decomposition
+##################################
+
+The computation of the inverse operator information, necessary
+for the MNEs and dSPMs, is accomplished by the command:
+
+``mne_do_inverse_operator --fwd sample_audvis-ave-oct-6-fwd.fif --depth --loose 0.2 --meg --eeg``
+
+This produces a depth-weighted inverse operator decomposition
+with 'loose' orientation constraint applied. More
+details on the convenience script mne_do_inverse_operator are
+provided in :ref:`CIHCFJEI`.
+
+The above command employs both EEG and MEG data. To create
+separate solutions for EEG and MEG, run the commands:
+
+``mne_do_inverse_operator --fwd sample_audvis-ave-oct-6-fwd.fif --depth --loose 0.2 --meg``
+
+and
+
+``mne_do_inverse_operator --fwd sample_audvis-ave-oct-6-fwd.fif --depth --loose 0.2 --eeg``
+
+.. note:: If you were using a single-compartment BEM to    compute the forward solution, you can only compute the MEG inverse    operator.
+
+Interactive analysis
+####################
+
+The most exciting part of this exercise is to explore the
+data and the current estimates in mne_analyze .
+This section contains some useful steps to get you started. A lot
+of information about the capabilities of mne_analyze is
+given in :ref:`ch_interactive_analysis`. Batch-mode processing with mne_make_movie is discussed
+in :ref:`CBBECEDE`. Cross-subject averaging is covered in :ref:`ch_morph`.
+
+Before launching mne_analyze it
+is advisable to go to the directory ``MEG/sample`` . The
+current working directory can be also changed from mne_analyze .
+
+Getting started
+===============
+
+Launch mne_analyze . Select Help/On GLX... , which brings up a window containing Open
+GL rendering context information. If the first line in the information
+dialog that pops up says Nondirect rendering context instead of Direct rendering context , you will experience slow graphics
+performance. To fix this, your system software, graphics adapter,
+or both need to be updated. Consult a computer support person for
+further information.
+
+Load surfaces
+=============
+
+It is reasonable to start the analysis by loading the display
+surfaces: choose the inflated surface for subject sample from
+the dialog that appears when you select File/Load surface... .
+
+Load the data
+=============
+
+Select File/Open... . Select ``sample_audvis-ave.fif`` as
+your data file and select the Left auditory data
+set. Select the inverse operator ``sample_audvis-ave-oct-6-meg-eeg-inv.fif`` and
+press OK . After a while the signals
+appear in the sample waveform and topographical displays. Click
+on the N100m peak in the auditory response. A dSPM map appears in
+the main surface display.
+
+Show field and potential maps
+=============================
+
+Select Windows/Show viewer... .
+After a while the viewer window appears. Click on the N100m peak
+again. Once the field map preparation computations are complete,
+the magnetic field and potential maps appear. Investigate the viewer
+window options with help of :ref:`CACEFFJF`.
+
+Show current estimates
+======================
+
+The options affecting the current estimates are accessible
+from Adjust/Estimate parameters... .
+With help of :ref:`CACBHDBF`, investigate the effects of the
+parameter settings.
+
+Labels and timecourses
+======================
+
+While in directory ``MEG/sample`` , create a directory
+called ``label`` :
+
+``mkdir label``
+
+Using the information in :ref:`CACJCFJJ`, create two
+labels ``A-lh.label`` and ``A-rh.label`` in the
+approximate location of the left and right auditory cortices. Save
+these labels in the newly created ``label`` directory.
+
+Load all labels from the ``label`` directory and
+investigate the timecourses in these two labels as well as at individual
+vertices. Information on label processing can be found from :ref:`CACCCFHH`.
+
+Morphing
+========
+
+Go to ``$SUBJECTS_DIR`` and create the directory ``morph-maps`` .
+Load the inflated surface for subject morph as
+the morphing surfaces. Try switching between the original and morphing
+surfaces. More information about morphing is available in :ref:`CACGBEIB` and
+in :ref:`ch_morph`.
+
+There is also a left-hemisphere occipital patch file available
+for subject morph . Load a right-hemifield
+visual response instead of the auditory one and investigate the mapping
+of the current estimates on the patch.
diff --git a/doc/source/manual/utilities.rst b/doc/source/manual/utilities.rst
new file mode 100644
index 0000000..69c1ad5
--- /dev/null
+++ b/doc/source/manual/utilities.rst
@@ -0,0 +1,1402 @@
+
+
+.. _ch_misc:
+
+=======================
+Miscellaneous utilities
+=======================
+
+Overview
+########
+
+This Chapter describes various utility programs included
+with the MNE software. Each utility documentation consists of a
+brief description of the purpose followed by the specification of
+command-line options.
+
+.. _CHDFIGBG:
+
+Finding software versions
+#########################
+
+The utility mne_list_versions lists
+version numbers and compilation dates of all software modules that
+provide this information. This administration utility is located
+in ``$MNE_ROOT/bin/admin`` . The output from mne_list_versions or
+the output of individual modules with the ``--version`` option
+is useful when bugs are reported to the developers of the MNE software.
+
+.. _CHDHEDEF:
+
+Listing contents of a fif file
+##############################
+
+The utility mne_show_fiff displays
+information about the contents of a fif file
+on the standard output. The command-line options for mne_show_fiff are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in  <*name*>**
+
+    Specifies the fif file whose contents will be listed.
+
+**\---verbose**
+
+    Produce a verbose output. The data of most tags is included in the output.
+    This excludes matrices and vectors. Only the first 80 characters
+    of strings are listed unless the ``--long`` option is present.
+
+**\---blocks**
+
+    Only list the blocks (the tree structure) of the file. The tags
+    within each block are not listed.
+
+**\---indent  <*number*>**
+
+    Number of spaces for indentation for each deeper level in the tree structure
+    of the fif files. The default indentation is 3 spaces in terse and
+    no spaces in verbose listing mode.
+
+**\---long**
+
+    List all data from string tags instead of the first 80 characters.
+    This option has no effect unless the ``--verbose`` option
+    is also present.
+
+**\---tag  <*number*>**
+
+    List only tags of this kind. Multiple ``--tag`` options
+    can be specified to list several different kinds of data.
+
+mne_show_fiff reads the
+explanations of tag kinds, block kinds, and units from ``$MNE_ROOT/share/mne/fiff_explanations.txt`` .
+
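+For example, to list only the block structure of a raw data file
+(the file name here is only an illustration)::
+
+    mne_show_fiff --in sample_audvis_raw.fif --blocks
+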
+Data file modification utilities
+################################
+
+This section contains utilities which can be used to add
+information or fix existing information in MEG/EEG data fif files.
+Unless otherwise noted, these utilities can be applied to both raw
+and evoked data files.
+
+.. _CHDDHBEE:
+
+Designating bad channels: mne_mark_bad_channels
+===============================================
+
+This utility adds or replaces information about unusable
+(bad) channels. The command line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---bad  <*filename*>**
+
+    Specify a text file containing the names of the bad channels, one channel
+    name per line. The names of the channels in this file must match
+    those in the data file exactly. If this option is missing, the bad channel
+    information is cleared.
+
+** <*data file name*>**
+
+    The remaining arguments are taken as data file names to be modified.
+
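+For example, a bad channel file ``bads.txt`` might contain (the channel
+names here are only illustrations)::
+
+    MEG 2443
+    EEG 053
+
+and could be applied with::
+
+    mne_mark_bad_channels --bad bads.txt sample_audvis_raw.fif
+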
+.. _CHDBFDIC:
+
+Fixing the encoding of the trigger channel: mne_fix_stim14
+==========================================================
+
+Some earlier versions of the Neuromag acquisition software
+had a problem with the encoding of the eighth bit on the digital
+stimulus channel STI 014. This problem has now been fixed. Old data
+files can be fixed with mne_fix_stim14 ,
+which takes raw data file names as arguments. mne_fix_stim14 also
+changes the calibration of STI 014 to unity. If the encoding of
+STI 014 is already correct, running mne_fix_stim14 will
+not have any effect on the raw data.
+
+In newer Neuromag Vectorview systems with 16-bit digital
+inputs the upper two bytes of the samples may be incorrectly set
+when stimulus input 16 is used and the data are acquired in the
+32-bit mode. This problem can be fixed by running mne_fix_stim14 on
+a raw data file with the ``--32`` option:
+
+``mne_fix_stim14 --32``  <*raw data file*>
+
+In this case, the correction will be applied to the stimulus
+channels 'STI101' and 'STI201'.
+
+.. _CHDJGGGC:
+
+Updating EEG location info: mne_check_eeg_locations
+===================================================
+
+Some versions of the Neuromag acquisition software did not
+copy the EEG channel location information properly from the Polhemus
+digitizer information data block to the EEG channel information
+records if the number of EEG channels exceeds 60. The purpose of mne_check_eeg_locations is
+to detect this problem and fix it, if requested. The command-line
+options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---file  <*name*>**
+
+    Specify the measurement data file to be checked or modified.
+
+**\---dig  <*name*>**
+
+    Name of the file containing the Polhemus digitizer information. Default
+    is the data file name.
+
+**\---fix**
+
+    By default mne_check_eeg_locations only
+    checks for missing EEG locations (locations close to the origin).
+    With the ``--fix`` option, mne_check_eeg_locations reads
+    the Polhemus data from the specified file and copies the EEG electrode
+    location information to the channel information records in the measurement
+    file. There is no harm running mne_check_eeg_locations on
+    a data file even if the EEG channel locations were correct in the
+    first place.
+
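+A typical sketch is to check a file first and then fix it in place
+(the file name is only an illustration)::
+
+    mne_check_eeg_locations --file data_raw.fif
+    mne_check_eeg_locations --file data_raw.fif --fix
+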
+.. _CHDGAAJC:
+
+Updating magnetometer coil types: mne_fix_mag_coil_types
+========================================================
+
+The purpose of mne_fix_mag_coil_types is
+to change coil type 3022 to 3024 in the MEG channel definition records
+in the data files specified on the command line.
+
+As shown in Tables 5.2 and 5.3, the Neuromag Vectorview systems
+can contain magnetometers with two different coil sizes (coil types
+3022 and 3023 vs. 3024). The systems incorporating coils of type
+3024 were introduced last. At some sites the data files still
+define the magnetometers to be of type 3022 to ensure compatibility
+with older versions of the Neuromag software. In the MNE software, as
+well as in the present version of the Neuromag software, coil type 3024
+is fully supported. Therefore, it is now safe to upgrade the data
+files to use the true coil type.
+
+If the ``--magnes`` option is specified, the 4D
+Magnes magnetometer coil type (4001) is changed to 4D Magnes gradiometer
+coil type (4002). Use this option always and *only
+if* your Magnes data comes from a system with axial gradiometers
+instead of magnetometers. The fif converter included with the Magnes
+system does not assign the gradiometer coil type correctly.
+
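+For example (the file names are only illustrations; use ``--magnes``
+only for Magnes data from a system with axial gradiometers)::
+
+    mne_fix_mag_coil_types run1_raw.fif run2_raw.fif
+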
+.. note:: The effect of the difference between the coil    sizes of magnetometer types 3022 and 3024 on the current estimates    computed by the MNE software is very small. Therefore the use of mne_fix_mag_coil_types is    not mandatory.
+
+.. _CHDCFEAJ:
+
+Modifying channel names and types: mne_rename_channels
+======================================================
+
+Sometimes it is necessary to change the names or types of channels
+in MEG/EEG data files. Such situations include:
+
+- Designating an EEG channel as an EOG channel.
+  For example, the EOG channels are not recognized as such in the
+  fif files converted from CTF data files.
+
+- Changing the name of the digital trigger channel of interest
+  to STI 014 so that mne_browse_raw and mne_process_raw will
+  recognize the correct channel without the need to specify the ``--digtrig``
+  option or the MNE_TRIGGER_CH_NAME environment variable every time a
+  data file is loaded.
+
+The utility mne_rename_channels was
+designed to meet the above needs. It recognizes the following command-line
+options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---fif  <*name*>**
+
+    Specifies the name of the data file to modify.
+
+**\---alias  <*name*>**
+
+    Specifies the text file which contains the modifications to be applied,
+    see below.
+
+**\---revert**
+
+    Reverse the roles of old and new channel names in the alias file.
+
+Each line in the alias file contains the old name and new
+name for a channel, separated by a colon. The old name is a name
+of one of the channels presently in the file and the new name is
+the name to be assigned to it. The old name must match an existing
+channel name in the file exactly. The new name may be followed by
+another colon and a number which is the channel type to be assigned
+to this channel. The channel type options are listed in :ref:`CHDFHGCA`.
+
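+For example, a minimal alias file might contain (the channel names and
+the EOG type number 202 from :ref:`CHDFHGCA` are only illustrations)::
+
+    EEG 061:EOG 061:202
+    STI 101:STI 014
+
+and could be applied with (the data file name is an illustration)::
+
+    mne_rename_channels --fif data_raw.fif --alias aliases.txt
+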
+.. _CHDFHGCA:
+
+.. table:: Channel types.
+
+    ==============  ======================
+    Channel type    Corresponding number
+    ==============  ======================
+    MEG             1
+    MCG             201
+    EEG             2
+    EOG             202
+    EMG             302
+    ECG             402
+    MISC            502
+    STIM            3
+    ==============  ======================
+
+.. warning:: Do not attempt to designate MEG channels    to EEG channels or vice versa. This may result in strange errors    during source estimation.
+
+.. note:: You might consider renaming the EEG channels    with descriptive labels related to the standard 10-20 system. This    allows you to use standard EEG channel names when defining derivations,    see :ref:`CHDHJABJ` and :ref:`CACFHAFH`, as well as in the    channel selection files used in mne_browse_raw ,    see :ref:`CACCJEJD`.
+
+.. _CHDBDDDF:
+
+Modifying trigger channel data: mne_add_triggers
+================================================
+
+Purpose
+-------
+
+The utility mne_add_triggers modifies
+the digital trigger channel (STI 014) in raw data files
+to include additional transitions. Since the raw data file is modified,
+it is possible to make irreversible changes. Use this utility with
+caution. It is recommended that you never run mne_add_triggers on
+an original raw data file.
+
+Command line options
+--------------------
+
+mne_add_triggers accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---raw  <*name*>**
+
+    Specifies the raw data file to be modified.
+
+**\---trg  <*name*>**
+
+    Specifies the trigger line modification list. This text file should
+    contain two entries per line: the sample number and the trigger
+    number to be added into the file. The number of the first sample
+    in the file is zero. It is recommended that trigger numbers whose
+    binary equivalent has lower eight bits equal to zero are used to
+    avoid conflicts with the ordinary triggers occurring in the file.
+
+**\---delete**
+
+    Delete the triggers defined by the trigger file instead of adding
+    them. This enables changing the file to its original state, provided
+    that the trigger file is preserved.
+
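+For example, a trigger modification file ``trig.txt`` might contain
+(the sample numbers and the trigger value 512, whose lower eight bits
+are zero as recommended above, are only illustrations)::
+
+    1000 512
+    5000 512
+
+and could be applied to a working copy of a raw data file with::
+
+    mne_add_triggers --raw copy_of_data_raw.fif --trg trig.txt
+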
+.. note:: Since mne_browse_raw and mne_process_raw can    employ an event file which effectively adds new trigger instants, mne_add_triggers is    for the most part obsolete but it has been retained in the MNE software    suite for backward compatibility.
+
+.. _CHDIJHIC:
+
+Removing identifying information
+================================
+
+Depending on the settings during acquisition in the Elekta-Neuromag EEG/MEG
+systems, the data files may contain subject identifying information
+in unencrypted form. The utility mne_anonymize was
+written to clear tags containing such information from a fif file.
+Specifically, this utility removes the following tags from the fif
+file:
+
+.. _CHDEHBCG:
+
+.. table:: Tags cleared by mne_anonymize .
+
+    ========================  ==============================================
+    Tag                       Description
+    ========================  ==============================================
+    FIFF_SUBJ_FIRST_NAME      First name of the subject
+    FIFF_SUBJ_MIDDLE_NAME     Middle name of the subject
+    FIFF_SUBJ_LAST_NAME       Last name of the subject
+    FIFF_SUBJ_BIRTH_DAY       Birthday of the subject (Julian day number)
+    FIFF_SUBJ_SEX             The sex of the subject
+    FIFF_SUBJ_HAND            Handedness of the subject
+    FIFF_SUBJ_WEIGHT          Weight of the subject in kg
+    FIFF_SUBJ_HEIGHT          Height of the subject in m
+    FIFF_SUBJ_COMMENT         Comment about the subject
+    ========================  ==============================================
+
+.. note:: mne_anonymize normally    keeps the FIFF_SUBJ_HIS_ID tag which can be used to identify the    subjects uniquely after the information listed in :ref:`CHDEHBCG` have    been removed. If the ``--his`` option is specified on the command line,    the FIFF_SUBJ_HIS_ID tag will be removed as well. The data of the    tags listed in :ref:`CHDEHBCG` and the optional FIFF_SUBJ_HIS_ID    tag are overwritten with zeros and the space claimed by omitting    these tags is added to the free sp [...]
+
+mne_anonymize recognizes
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---his**
+
+    Remove the FIFF_SUBJ_HIS_ID tag as well, see above.
+
+**\---file  <*name*>**
+
+    Specifies the name of the file to be modified.
+
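+For example, to clear the tags listed above together with the
+FIFF_SUBJ_HIS_ID tag (the file name is only an illustration)::
+
+    mne_anonymize --his --file subject_raw.fif
+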
+.. note:: You need write permission to the file to be    processed.
+
+.. _CJACECAH:
+
+Copying the processing history
+==============================
+
+In order for the inverse operator calculation to work correctly
+with data processed with the Elekta-Neuromag Maxfilter (TM) software,
+the so-called *processing history* block must
+be included in data files. Previous versions of the MNE Matlab functions
+did not copy processing history to files saved. As of March 30,
+2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have
+been enhanced to include these data in the output file as appropriate.
+If you have older raw data files created in Matlab from input which
+has been processed with Maxfilter, it is necessary to copy the *processing
+history* block from the original to the modified raw data
+file using the mne_copy_processing_history utility described
+below. The raw data processing programs mne_browse_raw and mne_process_raw have
+handled copying of the processing history since revision 2.5 of
+the MNE software.
+
+mne_copy_processing_history is
+simple to use:
+
+``mne_copy_processing_history --from``  <*from*> ``--to``  <*to*> ,
+
+where  <*from*> is an
+original raw data file containing the processing history and  <*to*> is
+a file output with older MNE Matlab routines. Be careful: this operation
+cannot be undone. If the  <*from*> file
+does not have the processing history block or the  <*to*> file
+already has it, the destination file remains unchanged.
+
+.. _CHDHJABJ:
+
+Creating a derivation file
+##########################
+
+Purpose
+=======
+
+In mne_browse_raw , channel
+derivations are defined as linear combinations of real channels
+existing in the data files. The utility mne_make_derivations reads
+derivation data from a suitably formatted text file and produces
+a fif file containing the weights of derived channels as a sparse
+matrix. Two input file formats are accepted:
+
+- A file containing arithmetic expressions
+  defining the derivations and
+
+- A file containing a matrix which specifies the weights of
+  the channels in each derivation.
+
+Both of these formats are described below.
+
+Command-line options
+====================
+
+mne_make_derivations recognizes
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in  <*name*>**
+
+    Specifies an input text file containing the derivations defined as
+    arithmetic expressions, see below.
+
+**\---inmat  <*name*>**
+
+    Specifies an input text file containing the derivation matrix, see below.
+
+**\---trans**
+
+    Indicates that the file specified with the ``--inmat`` option
+    contains a transpose of the derivation matrix.
+
+**\---thresh  <*value*>**
+
+    Specifies the threshold between values to be considered zero and non-zero
+    in the input file specified with the ``--inmat`` option.
+    The default threshold is :math:`10^{-6}`.
+
+**\---out  <*name*>**
+
+    Specifies output fif file to contain the derivation data. The recommended
+    name of the derivation file has the format  <:math:`name`> ``-deriv.fif`` .
+
+**\---list  <*name*>**
+
+    List the contents of a derivation file to standard output. If this
+    option is missing and ``--out`` is specified, the content
+    of the output file will be listed once it is complete. If neither ``--list`` nor ``--out`` is present,
+    and ``--in`` or ``--inmat`` is specified, the
+    interpreted contents of the input file is listed.
+
+Derivation file formats
+=======================
+
+All lines in the input files starting with the pound sign
+(#) are considered to be comments. The format of a derivation in
+an arithmetic input file is:
+
+.. math::    \langle name \rangle = [\langle w_1 \rangle *] \langle name_1 \rangle + [\langle w_2 \rangle *] \langle name_2 \rangle \dotso
+
+where <:math:`name`> is the
+name of the derived channel, :math:`name_k` are
+the names of the channels comprising the derivation, and :math:`w_k` are
+their weights. Note that spaces are necessary between the items.
+Channel names containing spaces must be put in quotes. For example,
+
+``EEG-diff = "EEG 003" - "EEG 002"``
+
+defines a channel ``EEG-diff`` which is a difference
+between ``EEG 003`` and ``EEG 002`` . Similarly,
+
+``EEG-der = 3 * "EEG 010" - 2 * "EEG 002"``
+
+defines a channel which is three times ``EEG 010`` minus
+two times ``EEG 002`` .
+
+The format of a matrix derivation file is:
+
+.. math::    \langle nrow \rangle \langle ncol \rangle \langle names\ of\ the\ input\ channels \rangle \langle name_1 \rangle \langle weights \rangle \dotso
+
+The combination of the two arithmetic examples above can
+thus be represented as::
+
+    2 3
+    "EEG 002" "EEG 003" "EEG 010"
+    EEG-diff -1 1 0
+    EEG-der  -2 0 3
+
+Before a derivation is accepted for use by mne_browse_raw ,
+the following criteria have to be met:
+
+- All channels to be combined into a single
+  derivation must have identical units of measure.
+
+- All channels in a single derivation have to be of the same
+  kind, *e.g.*, MEG channels or EEG channels.
+
+- All channels specified in a derivation have to be present
+  in the currently loaded data set.
+
+The validity check is done when a derivation file is loaded
+into mne_browse_raw , see :ref:`CACFHAFH`.
+
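+For example, to convert the arithmetic definitions above into a fif
+derivation file (the file names are only illustrations)::
+
+    mne_make_derivations --in eeg-derivations.txt --out eeg-deriv.fif
+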
+.. note:: You might consider renaming the EEG channels    with descriptive labels related to the standard 10-20 system using    the mne_rename_channels utility,    see :ref:`CHDCFEAJ`. This allows you to use standard EEG    channel names in the derivations you define as well as in the channel    selection files used in mne_browse_raw ,    see :ref:`CACCJEJD`.
+
+.. _CHDDGDJA:
+
+Creating a custom EEG layout
+############################
+
+Purpose
+=======
+
+Both MNE software (mne_analyze and mne_browse_raw)
+and Neuromag software (xplotter and xfit)
+employ text layout files to create topographical displays of MEG
+and EEG data. While the MEG channel layout is fixed, the EEG layout
+varies from experiment to experiment, depending on the number of
+electrodes used and the electrode cap configuration. The utility mne_make_eeg_layout was
+created to produce custom EEG layout files based on the EEG electrode
+location information included in the channel description records.
+
+mne_make_eeg_layout uses
+azimuthal equidistant projection to map the EEG channel locations
+onto a plane. The mapping consists of the following steps:
+
+- A sphere is fitted to the electrode
+  locations and the locations are translated by the location of the
+  origin of the best-fitting sphere.
+
+- The spherical coordinates (:math:`r_k`, :math:`\theta_k`, and :math:`\phi_k`)
+  corresponding to each translated electrode location are computed.
+
+- The projected locations :math:`u_k = R \theta_k \cos{\phi_k}` and :math:`v_k = R \theta_k \sin{\phi_k}` are
+  computed. By default, :math:`R = 20/(\pi/2)`, *i.e.*, at
+  the equator (:math:`\theta = \pi/2`) the multiplier is
+  20. This projection radius can be adjusted with the ``--prad`` option.
+  Increasing or decreasing :math:`R` makes
+  the spacing between the channel viewports larger or smaller, respectively.
+
+- A viewport with width 5 and height 4 is placed centered at
+  the projected location. The width and height of the viewport can
+  be adjusted with the ``--width`` and ``--height`` options.
+
+The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---lout  <*name*>**
+
+    Specifies the name of the layout file to be output.
+
+**\---nofit**
+
+    Do not fit a sphere to the electrode locations but use a standard sphere
+    center (:math:`x = y = 0`, and :math:`z = 40` mm) instead.
+
+**\---prad  <*value*>**
+
+    Specifies a non-standard projection radius :math:`R`,
+    see above.
+
+**\---width  <*value*>**
+
+    Specifies the width of the viewports. Default value = 5.
+
+**\---height  <*value*>**
+
+    Specifies the height of the viewports. Default value = 4.
+
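+For example, to create a layout with a larger projection radius and
+wider viewports (the ``--fif`` input option follows the usage shown in
+:ref:`ch_sample_data`; the file names are only illustrations)::
+
+    mne_make_eeg_layout --fif sample_audvis_raw.fif --lout $HOME/.mne/lout/sample-EEG.lout --prad 25 --width 6
+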
+.. _BEHCBCGG:
+
+Adding neighborhood/topology information to source spaces
+#########################################################
+
+Purpose
+=======
+
+The utility mne_add_patch_info uses
+the detailed cortical surface geometry information to add data about
+cortical patches corresponding to each source space point. A new
+copy of the source space(s) included in the input file is created
+with the patch information included. In addition to the patch information, mne_add_patch_info can
+optionally calculate distances, along the cortical surface, between
+the vertices selected for the source space.
+
+.. note:: Depending on the speed of your computer and the options selected, mne_add_patch_info takes 5 - 30 minutes to run.
+
+.. _CJAGCDCC:
+
+Command line options
+====================
+
+mne_add_patch_info accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---verbose**
+
+    Provide verbose output during the calculations.
+
+**\---dist  <*dist/mm*>**
+
+    Invokes the calculation of distances between vertices included in
+    the source space along the cortical surface. Only pairs whose distance in
+    the three-dimensional volume is less than the specified distance are
+    considered. For details, see :ref:`CJAIFJDD`, below.
+
+**\---src  <*name*>**
+
+    The input source space file. The source space files usually end
+    with ``-src.fif`` .
+
+**\---srcp  <*name*>**
+
+    The output source space file which will contain the patch information.
+    If the file exists, it will be overwritten without asking for permission.
+    A recommended naming convention is to add the letter ``p`` after the
+    source spacing included in the file name. For example, if the input
+    file is ``mh-7-src.fif`` , a recommended output file name
+    is ``mh-7p-src.fif`` .
+
+**\---w  <*name*>**
+
+    Name of a w file, which will contain the patch area information. Two
+    files will be created:  <*name*> ``-lh.w`` and  <*name*> ``-rh.w`` .
+    The numbers in the files are patch areas in :math:`\text{mm}^2`.
+    The source space vertices are marked with value 150.
+
+**\---labeldir  <*directory*>**
+
+    Create a label file corresponding to each of the patches in the
+    given directory. The directory must be created before running mne_add_patch_info .
+
+.. _CJAIFJDD:
+
+Computational details
+=====================
+
+By default, mne_add_patch_info creates
+a copy of the source space(s) with the following additional information
+for each vertex in the original dense triangulation of the cortex:
+
+- The number of the closest active source
+  space vertex and
+
+- The distance to this vertex.
+
+This information can be used to determine, *e.g.*,
+the sizes of the patches, their average normals, and the standard
+deviation of the normal directions. This information is also returned
+by the mne_read_source_space Matlab function as described in Table 10.28.
+
+The ``--dist`` option to mne_add_patch_info invokes
+the calculation of inter-vertex distances. These distances are computed
+along the cortical surface (usually the white matter) on which
+the source space vertices are located.
+
+Since the calculation of all possible distances would take
+a very long time, the distance given with the ``--dist`` option restricts
+the computation to the neighborhood of each source space vertex. This
+neighborhood is defined as the sphere around each source space vertex,
+with radius given by the ``--dist`` option. Because the distance calculation
+is done along the folded cortical surface whose details are given
+by the dense triangulation of the cortical surface produced by FreeSurfer,
+some of the distances computed will be larger than the value given
+with ``--dist``.
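+
+The neighborhood-limited computation can be illustrated with SciPy's
+sparse graph routines. The sketch below measures distances along mesh
+edges only, which merely approximates the true geodesic distances, and
+the ``limit`` argument requires a reasonably recent SciPy; it is an
+illustration of the idea, not the actual implementation::
+
+    import numpy as np
+    from scipy.sparse import coo_matrix
+    from scipy.sparse.csgraph import dijkstra
+
+    def source_distances(rr, tris, vertno, dist_limit):
+        # rr: (n, 3) vertex locations, tris: (m, 3) triangles,
+        # vertno: indices of the source space vertices
+        edges = np.vstack([tris[:, :2], tris[:, 1:], tris[:, ::2]])
+        lengths = np.sqrt(((rr[edges[:, 0]] - rr[edges[:, 1]]) ** 2).sum(axis=1))
+        n = len(rr)
+        graph = coo_matrix((lengths, (edges[:, 0], edges[:, 1])), shape=(n, n))
+        graph = (graph + graph.T).tocsr()    # symmetric edge-length graph
+        # pairs farther apart than dist_limit come back as infinity
+        return dijkstra(graph, indices=vertno, limit=dist_limit)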
+
+Converting covariance data into an SSP operator
+###############################################
+
+Purpose
+=======
+
+The utility mne_cov2proj picks
+eigenvectors from a covariance matrix and outputs them as a signal-space
+projection (SSP) file.
+
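+At its core, the conversion is an eigenvalue decomposition of the
+covariance matrix followed by the selection of eigenvectors. A minimal
+sketch (the handling of bad channels and channel-type selection is
+omitted, and the function name is illustrative)::
+
+    import numpy as np
+
+    def cov_to_proj(cov, n_vectors=1):
+        # eigh returns the eigenvalues in ascending order
+        eigval, eigvec = np.linalg.eigh(cov)
+        order = np.argsort(eigval)[::-1]       # largest eigenvalues first
+        return eigvec[:, order[:n_vectors]].T  # one projection vector per row
+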
+Command line options
+====================
+
+mne_cov2proj accepts the
+following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---cov  <*name*>**
+
+    The covariance matrix file to be used as a source. The covariance matrix
+    files usually end with ``-cov.fif`` .
+
+**\---proj  <*name*>**
+
+    The output file to contain the projection. It is recommended that
+    the file name ends with ``-proj.fif`` .
+
+**\---bad  <*name*>**
+
+    Specify channels not to be included when an eigenvalue decomposition
+    of the covariance matrix is computed.
+
+**\---include  <*val1*> [: <*val2*> ]**
+
+    Select an eigenvector or a range of eigenvectors to include. It
+    is recommended that magnetometers, gradiometers, and EEG data are handled
+    separately with the help of the ``--bad`` , ``--meg`` , ``--megmag`` , ``--meggrad`` ,
+    and ``--eeg`` options.
+
+**\---meg**
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to MEG channels are included.
+
+**\---eeg**
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to EEG channels are included.
+
+**\---megmag**
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to MEG magnetometer channels are included.
+
+**\---meggrad**
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to MEG planar gradiometer channels are included.
+
+.. note:: The ``--megmag`` and ``--meggrad`` options employ the Vectorview channel numbering scheme to recognize MEG magnetometers (channel names ending with '1') and planar gradiometers (other channels). Therefore, these options are only meaningful in conjunction with data acquired with a Neuromag Vectorview system.
+
+.. _CHDECHBF:
+
+Fitting a sphere to a surface
+#############################
+
+Purpose
+=======
+
+The utility mne_fit_sphere_to_surf finds
+the sphere which best fits a given surface.
+
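+Sphere fitting can be posed as a linear least-squares problem. The
+sketch below shows one standard formulation, not necessarily the
+algorithm used by mne_fit_sphere_to_surf::
+
+    import numpy as np
+
+    def fit_sphere(points):
+        # solve |x|^2 = 2 c . x + (r^2 - |c|^2) in the least-squares sense
+        A = np.c_[2.0 * points, np.ones(len(points))]
+        b = (points ** 2).sum(axis=1)
+        w = np.linalg.lstsq(A, b, rcond=None)[0]
+        center = w[:3]
+        radius = np.sqrt(w[3] + np.dot(center, center))
+        return center, radius
+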
+Command line options
+====================
+
+mne_fit_sphere_to_surf accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---bem  <*name*>**
+
+    A BEM file to use. The names of these files usually end with ``bem.fif`` or ``bem-sol.fif`` .
+
+**\---surf  <*name*>**
+
+    A FreeSurfer surface file to read. This is an alternative to using
+    a surface from the BEM file.
+
+**\---scalp**
+
+    Use the scalp surface instead of the inner skull surface in sphere
+    fitting. If the surface is specified with the ``--surf`` option,
+    this option has no effect.
+
+**\---mritrans  <*name*>**
+
+    A file containing a transformation matrix between the MEG head coordinates
+    and MRI coordinates. With this option, the sphere origin will be
+    output in MEG head coordinates. Otherwise the output will be in MRI
+    coordinates.
+
+.. _CHDDCBGI:
+
+Computing sensitivity maps
+##########################
+
+Purpose
+=======
+
+mne_sensitivity_map computes
+the size of the columns of the forward operator and outputs the
+result in w files.
+
+Command line options
+====================
+
+mne_sensitivity_map accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---fwd  <*name*>**
+
+    Specifies a forward solution file to analyze. By default the MEG
+    forward solution is considered.
+
+**\---proj  <*name*>**
+
+    Specifies a file containing an SSP operator to be applied. If necessary,
+    multiple ``--proj`` options can be specified. For map types 1 - 4 (see
+    below), SSP is applied to the forward model data. For map types
+    5 and 6, the effects of SSP are evaluated against the unmodified
+    forward model.
+
+**\---eeg**
+
+    Use the EEG forward solution instead of the MEG one. It does not make
+    sense to consider a combination because of the different units of
+    measure. For the same reason, gradiometers and magnetometers have
+    to be handled separately, see ``--mag`` option below. By
+    default MEG gradiometers are included.
+
+**\---mag**
+
+    Include MEG magnetometers instead of gradiometers.
+
+**\---w  <*name*>**
+
+    Specifies the stem of the output w files. To obtain the final output file
+    names, ``-lh.w`` and ``-rh.w`` are appended for
+    the left and right hemisphere, respectively.
+
+**\---smooth  <*number*>**
+
+    Specifies the number of smooth steps to apply to the resulting w files.
+    Default: no smoothing.
+
+**\---map  <*number*>**
+
+    Select the type of a sensitivity map to compute. At present, valid numbers
+    are 1 - 6. For details, see :ref:`CHDCDJIJ`, below.
+
+.. _CHDCDJIJ:
+
+Available sensitivity maps
+==========================
+
+In the following, let
+
+.. math::    G_k = [g_{xk} g_{yk} g_{zk}]
+
+denote the three consecutive columns of the gain matrix :math:`G` corresponding to
+the fields of three orthogonal dipoles at source space location :math:`k`.
+Further, let us assume that the source coordinate system has been
+selected so that the :math:`z` -axis points in the cortical normal direction and the :math:`xy` plane
+is thus the tangent plane of the cortex at the source space location :math:`k`.
+Next, compute the SVD
+
+.. math::    G_k = U_k \Lambda_k V_k^T
+
+and let :math:`g_{1k} = u_{1k} \lambda_{1k}`, where :math:`\lambda_{1k}` and :math:`u_{1k}` are
+the largest singular value and the corresponding left singular vector
+of :math:`G_k`, respectively. It is easy to see
+that :math:`g_{1k}` has the largest power
+among the signal distributions produced by unit dipoles at source
+space location :math:`k`.
+
+Furthermore, assume that the columns of the orthogonal matrix :math:`U_P` (:math:`U_P^T U_P = I`) contain
+the orthogonal basis of the noise subspace corresponding to the signal
+space projection (SSP) operator :math:`P` specified
+with one or more ``--proj`` options so that :math:`P = I - U_P U_P^T`.
+For more information on SSP, see :ref:`CACCHABI`.
+
+With these definitions the map selections defined with the ``--map`` option correspond
+to the following:
+
+**\---map 1**
+
+    Compute :math:`\sqrt{g_{1k}^T g_{1k}} = \lambda_{1k}` at each source space point.
+    Normalize the result so that the maximum value equals one.
+
+**\---map 2**
+
+    Compute :math:`\sqrt{g_z^T g_z}` at each source space point.
+    Normalize the result so that the maximum value equals one. This
+    is the amplitude of the signals produced by unit dipoles normal
+    to the cortical surface.
+
+**\---map 3**
+
+    Compute :math:`\sqrt{g_z^T g_z / g_{1k}^T g_{1k}}` at each source space point.
+
+**\---map 4**
+
+    Compute :math:`1 - \sqrt{g_z^T g_z / g_{1k}^T g_{1k}}` at each source space point.
+    This could be called the *radiality index*.
+
+**\---map 5**
+
+    Compute the subspace correlation between :math:`g_z` and :math:`U_P`: :math:`\text{subcorr}^2(g_z , U_P) = (g_z^T U_P U_P^T g_z)/(g_z^T g_z)`.
+    This index equals zero, if :math:`g_z` is
+    orthogonal to :math:`U_P` and one if :math:`g_z` lies
+    in the subspace defined by :math:`U_P`. This
+    map shows how close the field pattern of a dipole oriented perpendicular
+    to the cortex at each cortical location is to the subspace removed
+    by the SSP.
+
+**\---map 6**
+
+    Compute :math:`\sqrt{g_z^T P g_z / g_z^T g_z}`, which is the fraction
+    of the field pattern of a dipole oriented perpendicular to the cortex
+    at each cortical location remaining after applying the SSP.
+
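+In NumPy terms, maps 1 - 4 can be sketched as follows for a gain matrix
+whose columns come in triplets with the third (:math:`z`) direction
+normal to the cortex; this is an illustration, not the actual
+implementation::
+
+    import numpy as np
+
+    def sensitivity_maps(G):
+        # G: (n_channels, 3 * n_sources) gain matrix
+        n_src = G.shape[1] // 3
+        lam1 = np.empty(n_src)
+        gz = np.empty(n_src)
+        for k in range(n_src):
+            Gk = G[:, 3 * k:3 * k + 3]
+            lam1[k] = np.linalg.svd(Gk, compute_uv=False)[0]
+            gz[k] = np.linalg.norm(Gk[:, 2])
+        map1 = lam1 / lam1.max()   # --map 1
+        map2 = gz / gz.max()       # --map 2
+        map3 = gz / lam1           # --map 3
+        map4 = 1.0 - map3          # --map 4, the radiality index
+        return map1, map2, map3, map4
+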
+.. _CHDDDJCA:
+
+Transforming locations
+######################
+
+Purpose
+=======
+
+mne_transform_points applies
+the coordinate transformation relating the MEG head coordinates
+and the MRI coordinates to a set of locations listed in a text file.
+
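+In essence, the utility applies a 4-by-4 homogeneous coordinate
+transformation to each point. A minimal sketch, assuming ``trans`` is
+the 4 x 4 transformation matrix read from the fif file::
+
+    import numpy as np
+
+    def transform_points(points, trans):
+        # points: (N, 3) coordinates; trans: (4, 4) homogeneous transform
+        pts = np.c_[points, np.ones(len(points))]  # append a unit column
+        return np.dot(pts, trans.T)[:, :3]         # transformed (N, 3)
+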
+Command line options
+====================
+
+mne_transform_points accepts
+the following command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in  <*name*>**
+
+    Specifies the input file. The file must contain three numbers on
+    each line which are the *x*, *y*,
+    and *z* coordinates of a point in space. By default,
+    the input is in millimeters.
+
+**\---iso  <*name*>**
+
+    Specifies the name of a fif file containing Isotrak data. If this
+    option is present, this file will be used as the input instead of the
+    text file specified with the ``--in`` option.
+
+**\---trans  <*name*>**
+
+    Specifies the name of a fif file containing the coordinate transformation
+    between the MEG head coordinates and MRI coordinates. If this file
+    is not present, the transformation will be replaced by a unit transform.
+
+**\---out  <*name*>**
+
+    Specifies the output file. This file has the same format as the
+    input file.
+
+**\---hpts**
+
+    Output the data in the head points (hpts)
+    format accepted by tkmedit . In
+    this format, the coordinates are preceded by a point category (hpi,
+    cardinal or fiducial, eeg, extra) and a sequence number, see :ref:`CJADJEBH`.
+
+**\---meters**
+
+    The coordinates are listed in meters rather than millimeters.
+
+**\---tomri**
+
+    By default, the coordinates are transformed from MRI coordinates to
+    MEG head coordinates. This option reverses the transformation to
+    be from MEG head coordinates to MRI coordinates.
+
+.. _CHDDIDCC:
+
+Inquiring and changing baselines
+################################
+
+The utility mne_change_baselines computes
+baseline values and applies them to an evoked-response data file.
+The command-line options are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---in  <*name*>**
+
+    Specifies the input data file.
+
+**\---set  <*number*>**
+
+    The data set number to compute baselines from or to apply baselines
+    to. If this option is omitted, all average data sets in the input file
+    are processed.
+
+**\---out  <*name*>**
+
+    The output file.
+
+**\---baselines  <*name*>**
+
+    Specifies a text file which contains the baseline values to be applied. Each
+    line should contain a channel name, colon, and the baseline value
+    given in 'native' units (T/m, T, or V). If this
+    option is encountered, the limits specified by previous ``--bmin`` and ``--bmax`` options will not
+    have an effect.
+
+**\---list  <*name*>**
+
+    Specifies a text file to contain the baseline values. Listing is
+    provided only if a specific data set is selected with the ``--set`` option.
+
+**\---bmin  <*value/ms*>**
+
+    Lower limit of the baseline. Effective only if ``--baselines`` option is
+    not present. Both ``--bmin`` and ``--bmax`` must
+    be present to compute the baseline values. If either ``--bmin`` or ``--bmax`` is
+    encountered, any previous ``--baselines`` option will be ignored.
+
+**\---bmax  <*value/ms*>**
+
+    Upper limit of the baseline.
+
+.. _CHDECAFD:
+
+Data simulator
+##############
+
+Purpose
+=======
+
+The utility mne_simu creates
+simulated evoked response data for investigation of the properties
+of the inverse solutions. It computes MEG signals generated by dipoles
+normal to the cortical mantle at one or several ROIs defined with
+label files. Colored noise can be added to the signals.
+
+Command-line options
+====================
+
+mne_simu has the following
+command-line options:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---fwd  <*name*>**
+
+    Specify a forward solution file to employ in the simulation.
+
+**\---meg**
+
+    Provide MEG data in the output file.
+
+**\---eeg**
+
+    Provide EEG data in the output file.
+
+**\---out  <*name*>**
+
+    Specify the output file. By default, this will be an evoked data
+    file in the fif format.
+
+**\---raw**
+
+    Output the data as a raw data fif file instead of an evoked one.
+
+**\---mat**
+
+    Produce Matlab output of the simulated fields instead of the fif evoked
+    file.
+
+**\---label  <*name*>**
+
+    Define an ROI. Several label files can be present. By default, the sources
+    in the labels will have :math:`\cos^2` -shaped non-overlapping
+    timecourses, see below.
+
+**\---timecourse  <*name*>**
+
+    Specifies a text file which contains an expression for a source
+    time course, see :ref:`CHDCFIBH`. If no ``--timecourse`` options
+    are present, the standard source time courses described in :ref:`CHDFIIII` are used. Otherwise, the time course expressions
+    are read from the files specified. The time course expressions are
+    associated with the labels in the order they are specified. If the
+    number of expressions is smaller than the number of labels, the
+    last expression specified will be reused for the remaining labels.
+
+**\---sfreq  <*freq/Hz*>**
+
+    Specifies the sampling frequency of the output data (default = 1000 Hz). This
+    option is used only with the time course files.
+
+**\---tmin  <*time/ms*>**
+
+    Specifies the starting time of the data, used only with time course files
+    (default -200 ms).
+
+**\---tmax  <*time/ms*>**
+
+    Specifies the ending time of the data, used only with time course files
+    (default 500 ms).
+
+**\---seed  <*number*>**
+
+    Specifies the seed for random numbers. This seed is used both for adding
+    noise, see :ref:`CHDFBJIJ`, and for random numbers in source waveform
+    expressions, see :ref:`CHDCFIBH`. If no seed is specified, the
+    current time in seconds since Epoch (January 1, 1970) is used.
+
+**\---all**
+
+    Activate all sources on the cortical surface uniformly. This overrides the ``--label`` options.
+
+.. _CHDFBJIJ:
+
+Noise simulation
+================
+
+Noise is added to the signals if the ``--senscov`` option is
+present. If ``--nave`` is omitted, the number of averages
+is set to :math:`L = 100`. The noise is computed
+by first generating vectors of Gaussian random numbers :math:`n(t)` with :math:`n_j(t) \sim N(0,1)`.
+Thereafter, the noise-covariance matrix :math:`C` is
+used to color the noise:
+
+.. math::    n_c(t) = \frac{1}{\sqrt{L}} U \Lambda n(t)\ ,
+
+where we have used the eigenvalue decomposition of the positive-definite
+covariance matrix:
+
+.. math::    C = U \Lambda^2 U^T\ .
+
+Note that it is assumed that the noise-covariance matrix
+is given for raw data, *i.e.*, for :math:`L = 1`.
+
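+A NumPy sketch of this noise generation, assuming ``C`` is the
+noise-covariance matrix as a dense array (illustrative only)::
+
+    import numpy as np
+
+    def colored_noise(C, n_samples, nave=100, seed=0):
+        rng = np.random.RandomState(seed)
+        eigval, U = np.linalg.eigh(C)            # C = U Lambda^2 U^T
+        lam = np.sqrt(np.maximum(eigval, 0.0))   # guard tiny negative values
+        n = rng.standard_normal((len(C), n_samples))
+        return np.dot(U * lam, n) / np.sqrt(nave)
+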
+.. _CHDFIIII:
+
+Simulated data
+==============
+
+The default source waveform :math:`q_k` for
+the :math:`k^{th}` label is nonzero at times :math:`t_{kp} = (100(k - 1) + p)/f_s`, :math:`p = 0 \dotso 100` with:
+
+.. math::    q_k(t_{kp}) = Q_k \cos^2{(\frac{\pi p}{100} - \frac{\pi}{2})}\ ,
+
+i.e., the source waveforms are non-overlapping, 100-sample-wide
+:math:`\cos^2` pulses. The sampling frequency :math:`f_s = 600` Hz.
+The source amplitude :math:`Q_k` is determined
+so that the strength of each of the dipoles in a label will be :math:`50 \text{nAm}/N_k`.
+
+Let us denote the sums of the magnetic fields and electric
+potentials produced by the dipoles normal to the cortical mantle
+at label :math:`k` by :math:`x_k`. The simulated
+signals are then:
+
+.. math::    x(t_j) = \sum_{k = 1}^{N_s} {q_k(t_j) x_k} + n_c(t_j)\ ,
+
+where :math:`N_s` is the number of
+sources.
+
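+The default waveforms are straightforward to reproduce. In the sketch
+below the function name is illustrative and ``Q`` stands for the
+amplitude :math:`Q_k`::
+
+    import numpy as np
+
+    def default_waveform(k, n_samples, Q=1.0):
+        # cos^2 pulse for label k (k = 1, 2, ...), 100 samples wide,
+        # starting at sample 100 * (k - 1); needs n_samples >= 100 * k + 1
+        q = np.zeros(n_samples)
+        p = np.arange(101)
+        q[100 * (k - 1) + p] = Q * np.cos(np.pi * p / 100.0 - np.pi / 2.0) ** 2
+        return q
+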
+.. _CHDCFIBH:
+
+Source waveform expressions
+===========================
+
+The ``--timecourse`` option provides a flexible way
+to define the source waveforms in functional form. The source
+waveform expression files consist of lines of the form:
+
+ <*variable*> ``=``  <*arithmetic expression*>
+
+Each file may contain multiple lines. At the end of the evaluation,
+only the values in the variable ``y`` (or its synonym ``q``)
+are significant, see :ref:`CHDJBIEE`. They assume the role
+of :math:`q_k(t_j)` to compute the simulated signals
+as described in :ref:`CHDFIIII`, above.
+
+All expressions are case insensitive. The variables are vectors
+with the length equal to the number of samples in the responses,
+determined by the ``--tmin`` , ``--tmax`` , and ``--sfreq`` options.
+The available variables are listed in :ref:`CHDJBIEE`.
+
+.. _CHDJBIEE:
+
+.. table:: Available variable names in source waveform expressions.
+
+    ================  =======================================
+    Variable          Meaning
+    ================  =======================================
+    x                 time [s]
+    t                 current value of x in [ms]
+    y                 the source amplitude [Am]
+    q                 synonym for y
+    a , b , c , d     help variables, initialized to zeros
+    ================  =======================================
+
+The arithmetic expressions can use the usual arithmetic operations
+as well as the mathematical functions listed in :ref:`CHDJIBHA`.
+The arguments can be vectors or scalar numbers. In addition, standard
+relational operators (<, >, ==, <=, >=) and their textual
+equivalents (lt, gt, eq, le, ge) are available. Table :ref:`CHDDJEHH` gives some
+useful examples of source waveform expressions.
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.6\linewidth}|
+.. _CHDJIBHA:
+.. table:: Mathematical functions available for source waveform expressions
+
+    +-----------------------+---------------------------------------------------------------+
+    | Function              | Description                                                   |
+    +-----------------------+---------------------------------------------------------------+
+    | abs(x)                | absolute value                                                |
+    +-----------------------+---------------------------------------------------------------+
+    | acos(x)               | :math:`\cos^{-1}x`                                            |
+    +-----------------------+---------------------------------------------------------------+
+    | asin(x)               | :math:`\sin^{-1}x`                                            |
+    +-----------------------+---------------------------------------------------------------+
+    | atan(x)               | :math:`\tan^{-1}x`                                            |
+    +-----------------------+---------------------------------------------------------------+
+    | atan2(x,y)            | :math:`\tan^{-1}(^y/_x)`                                      |
+    +-----------------------+---------------------------------------------------------------+
+    | ceil(x)               | nearest integer larger than :math:`x`                         |
+    +-----------------------+---------------------------------------------------------------+
+    | cos(x)                | :math:`\cos x`                                                |
+    +-----------------------+---------------------------------------------------------------+
+    | cosw(x,a,b,c)         | :math:`\cos^2` -shaped window centered at :math:`b` with a    |
+    |                       | rising slope of length :math:`a` and a trailing slope of      |
+    |                       | length :math:`c`.                                             |
+    +-----------------------+---------------------------------------------------------------+
+    | deg(x)                | The value of :math:`x` converted from radians to degrees      |
+    +-----------------------+---------------------------------------------------------------+
+    | erf(x)                | :math:`\frac{2}{\sqrt{\pi}} \int_0^x{\text{exp}(-t^2)dt}`     |
+    +-----------------------+---------------------------------------------------------------+
+    | erfc(x)               | :math:`1 - \text{erf}(x)`                                     |
+    +-----------------------+---------------------------------------------------------------+
+    | exp(x)                | :math:`e^x`                                                   |
+    +-----------------------+---------------------------------------------------------------+
+    | floor(x)              | Largest integer value not larger than :math:`x`               |
+    +-----------------------+---------------------------------------------------------------+
+    | hypot(x,y)            | :math:`\sqrt{x^2 + y^2}`                                      |
+    +-----------------------+---------------------------------------------------------------+
+    | ln(x)                 | :math:`\ln x`                                                 |
+    +-----------------------+---------------------------------------------------------------+
+    | log(x)                | :math:`\log_{10} x`                                           |
+    +-----------------------+---------------------------------------------------------------+
+    | maxp(x,y)             | Takes the maximum between :math:`x` and :math:`y`             |
+    +-----------------------+---------------------------------------------------------------+
+    | minp(x,y)             | Takes the minimum between :math:`x` and :math:`y`             |
+    +-----------------------+---------------------------------------------------------------+
+    | mod(x,y)              | Gives the remainder of  :math:`x` divided by :math:`y`        |
+    +-----------------------+---------------------------------------------------------------+
+    | pi                    | Ratio of the circumference of a circle and its diameter.      |
+    +-----------------------+---------------------------------------------------------------+
+    | rand                  | Gives a vector of uniformly distributed random numbers        |
+    |                       | from 0 to 1.                                                  |
+    +-----------------------+---------------------------------------------------------------+
+    | rnorm(x,y)            | Gives a vector of Gaussian random numbers distributed as      |
+    |                       | :math:`N(x,y)`. Note that if :math:`x` and :math:`y` are      |
+    |                       | vectors, each number generated will have a different mean and |
+    |                       | variance according to the arguments.                          |
+    +-----------------------+---------------------------------------------------------------+
+    | shift(x,s)            | Shifts the values in the input vector :math:`x` by the number |
+    |                       | of positions given by :math:`s`. Note that :math:`s` must be  |
+    |                       | a scalar.                                                     |
+    +-----------------------+---------------------------------------------------------------+
+    | sin(x)                | :math:`\sin x`                                                |
+    +-----------------------+---------------------------------------------------------------+
+    | sqr(x)                | :math:`x^2`                                                   |
+    +-----------------------+---------------------------------------------------------------+
+    | sqrt(x)               | :math:`\sqrt{x}`                                              |
+    +-----------------------+---------------------------------------------------------------+
+    | tan(x)                | :math:`\tan x`                                                |
+    +-----------------------+---------------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.4\linewidth}|p{0.4\linewidth}|
+.. _CHDDJEHH:
+.. table:: Examples of source waveform expressions.
+
+    +---------------------------------------------+-------------------------------------------------------------+
+    | Expression                                  | Meaning                                                     |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 20e-9*sin(2*pi*10*x)                    | A 10-Hz sine wave with 20 nAm amplitude                     |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 20e-9*sin(2*pi*2*x)*sin(2*pi*10*x)      | A 10-Hz 20-nAm sine wave, amplitude modulated               |
+    |                                             | sinusoidally at 2 Hz.                                       |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 20e-9*cosw(t,100,100,100)               | :math:`\cos^2`-shaped pulse, centered at :math:`t` = 100 ms |
+    |                                             | with 100 ms leading and trailing slopes, 20 nAm amplitude   |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 30e-9*(t > 0)*(t < 300)*sin(2*pi*20*x)  | 20-Hz sine wave, 30 nAm amplitude, cropped in time to       |
+    |                                             | 0...300 ms.                                                 |
+    +---------------------------------------------+-------------------------------------------------------------+
+
+.. _CHDEDHCG:
+
+Converting parcellation data into labels
+########################################
+
+The utility mne_annot2labels converts
+cortical parcellation data into a set of labels. The parcellation
+data are read from the directory ``$SUBJECTS_DIR/$SUBJECT/label`` and
+the resulting labels are written to the current directory. mne_annot2labels requires
+that the environment variable ``$SUBJECTS_DIR`` is set.
+The command line options for mne_annot2labels are:
+
+**\---version**
+
+    Show the program version and compilation date.
+
+**\---help**
+
+    List the command-line options.
+
+**\---subject  <*name*>**
+
+    Specifies the name of the subject. If this option is not present
+    the ``$SUBJECT`` environment variable is consulted. If
+    the subject name cannot be determined, the program quits.
+
+**\---parc  <*name*>**
+
+    Specifies the parcellation name to convert. The corresponding parcellation
+    file names will be ``$SUBJECTS_DIR/$SUBJECT/label/``  <*hemi*> ``h.``  <*name*> ``.annot`` where  <*hemi*> is ``l`` or ``r`` for the
+    left and right hemisphere, respectively.
diff --git a/doc/source/mne-python.rst b/doc/source/mne-python.rst
new file mode 100644
index 0000000..b26b91e
--- /dev/null
+++ b/doc/source/mne-python.rst
@@ -0,0 +1,19 @@
+.. _mne_python:
+
+======================
+MNE with Python
+======================
+
+.. toctree::
+   :maxdepth: 1
+
+   getting_started.rst
+   python_tutorial.rst
+   auto_examples/index.rst
+   python_reference.rst
+   whats_new.rst
+   contributing.rst
+
+.. raw:: html
+
+    <a class="twitter-timeline" href="https://twitter.com/mne_python" data-widget-id="317730454184804352">Tweets by @mne_python</a>
diff --git a/doc/source/python_reference.rst b/doc/source/python_reference.rst
new file mode 100644
index 0000000..c85e9c4
--- /dev/null
+++ b/doc/source/python_reference.rst
@@ -0,0 +1,547 @@
+=========
+Reference
+=========
+
+.. automodule:: mne
+   :no-members:
+   :no-inherited-members:
+
+This is the reference for the classes and functions of mne-python. Functions are
+grouped thematically by analysis stage. In addition, all File I/O functions
+are collected in a separate section. Functions and classes that are not below
+a module heading are found in the :py:mod:`mne` namespace.
+
+
+Classes
+=======
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   fiff.Raw
+   Epochs
+   fiff.Evoked
+   SourceEstimate
+   Covariance
+   Label
+   BiHemiLabel
+   preprocessing.ICA
+
+
+Logging and Configuration
+=========================
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   get_config_path
+   get_config
+   set_log_level
+   set_log_file
+   set_config
+
+:py:mod:`mne.cuda`:
+
+.. automodule:: mne.cuda
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.cuda
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   init_cuda
+
+File I/O
+========
+
+.. currentmodule:: mne
+
+Classes:
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   fiff.Evoked
+   fiff.Raw
+
+Functions:
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   parse_config
+   read_bem_surfaces
+   read_cov
+   read_dip
+   read_epochs
+   read_events
+   read_forward_solution
+   read_label
+   read_proj
+   read_reject_parameters
+   read_selection
+   read_source_estimate
+   read_source_spaces
+   read_surface
+   read_trans
+   save_stc_as_volume
+   write_bem_surface
+   write_cov
+   write_events
+   write_forward_solution
+   write_label
+   write_proj
+   write_source_spaces
+   write_surface
+   write_trans
+
+.. currentmodule:: mne.fiff.bti
+
+:py:mod:`mne.fiff.bti`:
+
+Functions:
+
+.. autosummary::
+  :toctree: generated/
+  :template: function.rst
+
+  read_raw_bti
+
+:py:mod:`mne.datasets.sample`:
+
+.. automodule:: mne.datasets.sample
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.datasets.sample
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   data_path
+
+:py:mod:`mne.datasets.megsim`:
+
+.. automodule:: mne.datasets.megsim
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.datasets.megsim
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   data_path
+   load_data
+
+
+Visualization
+=============
+
+:py:mod:`mne.viz`:
+
+.. automodule:: mne.viz
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.viz
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   circular_layout
+   mne_analyze_colormap
+   plot_connectivity_circle
+   plot_cov
+   plot_drop_log
+   plot_evoked
+   plot_evoked_topomap
+   plot_ica_panel
+   plot_image_epochs
+   plot_raw
+   plot_source_estimates
+   plot_sparse_source_estimates
+   plot_topo
+   plot_topo_image_epochs
+   plot_topo_phase_lock
+   plot_topo_power
+   plot_topo_tfr
+   plot_topomap
+   compare_fiff
+
+.. currentmodule:: mne.fiff
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   show_fiff
+
+Preprocessing
+=============
+
+Projections:
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   compute_proj_epochs
+   compute_proj_evoked
+   compute_proj_raw
+   read_proj
+   write_proj
+
+:py:mod:`mne.preprocessing`:
+
+.. automodule:: mne.preprocessing
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.preprocessing
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   compute_proj_ecg
+   compute_proj_eog
+   find_ecg_events
+   find_eog_events
+   ica_find_ecg_events
+   ica_find_eog_events
+   read_ica
+   run_ica
+
+:py:mod:`mne.filter`:
+
+.. automodule:: mne.filter
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.filter
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   band_pass_filter
+   construct_iir_filter
+   high_pass_filter
+   low_pass_filter
+
+
+Events
+======
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   concatenate_events
+   find_events
+   find_stim_steps
+   make_fixed_length_events
+   merge_events
+   parse_config
+   pick_events
+   read_events
+   write_events
+
+.. currentmodule:: mne.event
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   define_target_events
+
+.. currentmodule:: mne.epochs
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   combine_event_ids
+   equalize_epoch_counts
+
+
+Sensor Space Data
+=================
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   fiff.pick_channels
+   fiff.pick_channels_cov
+   fiff.pick_channels_forward
+   fiff.pick_channels_regexp
+   fiff.pick_types
+   fiff.pick_types_evoked
+   fiff.pick_types_forward
+
+   read_epochs
+   read_reject_parameters
+   read_selection
+
+
+Covariance
+==========
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   compute_covariance
+   compute_raw_data_covariance
+   read_cov
+   write_cov
+
+
+Forward Modeling
+================
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   apply_forward
+   apply_forward_raw
+   average_forward_solutions
+   do_forward_solution
+   read_bem_surfaces
+   read_forward_solution
+   read_trans
+   read_source_spaces
+   read_surface
+   sensitivity_map
+   write_bem_surface
+   write_trans
+
+.. currentmodule:: mne.forward
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   restrict_forward_to_label
+   restrict_forward_to_stc
+
+
+Inverse Solutions
+=================
+
+:py:mod:`mne.minimum_norm`:
+
+.. automodule:: mne.minimum_norm
+  :no-members:
+  :no-inherited-members:
+
+.. currentmodule:: mne.minimum_norm
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   apply_inverse
+   apply_inverse_epochs
+   apply_inverse_raw
+   compute_rank_inverse
+   make_inverse_operator
+   read_inverse_operator
+   source_band_induced_power
+   source_induced_power
+   write_inverse_operator
+
+:py:mod:`mne.inverse_sparse`:
+
+.. automodule:: mne.inverse_sparse
+  :no-members:
+  :no-inherited-members:
+
+.. currentmodule:: mne.inverse_sparse
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   mixed_norm
+   tf_mixed_norm
+   gamma_map
+
+:py:mod:`mne.beamformer`:
+
+.. automodule:: mne.beamformer
+  :no-members:
+  :no-inherited-members:
+
+.. currentmodule:: mne.beamformer
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   lcmv
+   lcmv_epochs
+   lcmv_raw
+
+
+Source Space Data
+=================
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   compute_morph_matrix
+   extract_label_time_course
+   grade_to_tris
+   grade_to_vertices
+   grow_labels
+   labels_from_parc
+   label_sign_flip
+   morph_data
+   morph_data_precomputed
+   read_dip
+   read_label
+   read_source_estimate
+   save_stc_as_volume
+   stc_to_label
+   transform_coordinates
+   vertex_to_mni
+   write_label
+
+
+Time-Frequency
+==============
+
+:py:mod:`mne.time_frequency`:
+
+.. automodule:: mne.time_frequency
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.time_frequency
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   ar_raw
+   compute_raw_psd
+   iir_filter_raw
+   induced_power
+   morlet
+   single_trial_power
+   yule_walker
+   stft
+   istft
+   stftfreq
+
+
+Connectivity Estimation
+=======================
+
+:py:mod:`mne.connectivity`:
+
+.. automodule:: mne.connectivity
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.connectivity
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   seed_target_indices
+   spectral_connectivity
+   phase_slope_index
+
+
+Statistics
+==========
+
+:py:mod:`mne.stats`:
+
+.. automodule:: mne.stats
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.stats
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   bonferroni_correction
+   fdr_correction
+   permutation_cluster_test
+   permutation_cluster_1samp_test
+   permutation_t_test
+   spatio_temporal_cluster_1samp_test
+   ttest_1samp_no_p
+
+Functions to compute connectivity (adjacency) matrices for cluster-level statistics
+
+.. currentmodule:: mne
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   spatial_dist_connectivity
+   spatial_src_connectivity
+   spatial_tris_connectivity
+   spatio_temporal_src_connectivity
+   spatio_temporal_tris_connectivity
+   spatio_temporal_dist_connectivity
+
+
+Simulation
+==========
+
+:py:mod:`mne.simulation`:
+
+.. automodule:: mne.simulation
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: mne.simulation
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   generate_evoked
+   generate_sparse_stc
+   select_source_in_label
diff --git a/doc/source/python_tutorial.rst b/doc/source/python_tutorial.rst
new file mode 100644
index 0000000..d9cae03
--- /dev/null
+++ b/doc/source/python_tutorial.rst
@@ -0,0 +1,417 @@
+.. _mne_python_tutorial:
+
+=========================================================
+Tutorial: MEG and EEG data processing with MNE and Python
+=========================================================
+
+Python offers transparent scripting on top of MNE.
+It was designed to be an alternative to the MNE Matlab toolbox
+but now it can do much more (customize events, compute
+contrasts, statistics, time-frequency analysis, etc.).
+It uses the same files as the standard MNE Unix commands:
+there is no need to convert your files to a new system or database.
+
+What you're not supposed to do with MNE Python
+----------------------------------------------
+
+    - **Forward modeling**: BEM computation and mesh creation (see :ref:`ch_forward`)
+
+What you can do with MNE Python
+-------------------------------
+
+    - **Raw data visualization** to visualize recordings, can also use *mne_browse_raw* for extended functionality (see :ref:`ch_browse`)
+    - **Epoching**: Define epochs, baseline correction, handle conditions etc.
+    - **Averaging** to get Evoked data
+    - **Compute SSP projectors** to remove ECG and EOG artifacts
+    - **Compute ICA** to remove artifacts or select latent sources.
+    - **Linear inverse solvers** (dSPM, MNE)
+    - **Connectivity estimation** in sensor and source space
+    - **MNE source estimates visualization**
+    - **Time-frequency** analysis with Morlet wavelets (induced power, phase lock value) also in the source space
+    - **Spectrum estimation** using multi-taper method
+    - **Compute contrasts** between conditions, between sensors, across subjects etc.
+    - **Non-parametric statistics** in time, space and frequency (including cluster-level)
+    - **Scripting** (batch and parallel computing)
+
+.. note:: The package is based on the FIF file format from Neuromag, but it can work with CTF and 4D data after conversion to FIF.
+
+
+Installation of the required materials
+---------------------------------------
+
+See :ref:`getting_started` with Python.
+
+Get the code
+^^^^^^^^^^^^
+
+  You can manually get the latest version of the code at:
+
+  https://github.com/mne-tools/mne-python
+
+  Then from the mne-python folder (containing a setup.py file) you can install with::
+
+      python setup.py install
+
+  You can also install the latest release with easy_install::
+
+      easy_install -U mne
+
+  or with pip::
+
+      pip install mne --upgrade
+
+  For the latest development version (the most up to date)::
+
+      pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev
+
+
+Make life easier
+~~~~~~~~~~~~~~~~
+
+  For optimal performance we recommend using numpy / scipy with the multi-threaded
+  ATLAS, GotoBLAS2, or Intel MKL. The EPD Python distribution, for example, ships with
+  tested MKL-compiled numpy / scipy versions. Depending on the use case and your system,
+  this may speed up operations by a factor greater than 10.
+
+  The expected location for the MNE-sample data is my-path-to/mne-python/examples.
+  If you downloaded data and an example asks you whether to download it again, make sure
+  the data reside in the examples directory and that you run the script from within that directory.
+
+  From IPython e.g. say::
+
+   cd examples/preprocessing
+
+   %run plot_find_ecg_artifacts.py
+
+
+From raw data to evoked data
+----------------------------
+
+.. _ipython: http://ipython.scipy.org/
+
+Now, launch `ipython`_ (Advanced Python shell)::
+
+  $ ipython -pylab -wthread
+
+First, load the mne package:
+
+    >>> import mne
+
+If you'd like to turn information status messages off:
+
+    >>> mne.set_log_level('WARNING')
+
+But it's generally a good idea to leave them on:
+
+    >>> mne.set_log_level('INFO')
+
+You can set the default level by setting the environment variable
+"MNE_LOGGING_LEVEL", or by having mne-python write preferences to a file:
+
+    >>> mne.set_config('MNE_LOGGING_LEVEL','WARNING') # doctest: +SKIP
+
+Note that the location of the mne-python preferences file (for easier manual
+editing) can be found using:
+
+    >>> mne.get_config_path() # doctest: +SKIP
+
+By default logging messages print to the console, but look at
+mne.set_log_file() to save output to a file.
+
+Access raw data
+^^^^^^^^^^^^^^^
+
+    >>> from mne.datasets import sample
+    >>> data_path = sample.data_path()
+    >>> raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+    >>> print raw_fname # doctest: +SKIP
+    ./MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw.fif
+
+.. note:: The MNE sample dataset should be downloaded automatically but be patient (approx. 2GB)
+
+Read data from file:
+
+    >>> raw = mne.fiff.Raw(raw_fname) # doctest:+ELLIPSIS
+    Opening raw data ...
+    Ready.
+    >>> print raw
+    <Raw  |  n_channels x n_times : 376 x 41700>
+
+Look at the channels in raw:
+
+    >>> print raw.ch_names # doctest:+ELLIPSIS
+    ['MEG 0113', 'MEG 0112', ...]
+
+Read and plot a segment of raw data
+
+    >>> start, stop = raw.time_as_index([100, 115])  # 100 s to 115 s data segment
+    >>> data, times = raw[:, start:stop]
+    Reading 15015 ... 17266  =     99.998 ...   114.989 secs...
+    [done]
+    >>> print data.shape
+    (376, 2252)
+    >>> print times.shape
+    (2252,)
+    >>> data, times = raw[2:20:3, start:stop]  # take some Magnetometers
+    Reading 15015 ... 17266  =     99.998 ...   114.989 secs...
+    [done]
+
+.. figure:: _images/plot_read_and_write_raw_data.png
+    :alt: Raw data
+
+Save a segment of 150s of raw data (MEG only):
+
+    >>> picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, exclude='bads')
+    >>> raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks, overwrite=True) # doctest: +ELLIPSIS
+    Reading ...
+
+Define and read epochs
+^^^^^^^^^^^^^^^^^^^^^^
+
+First extract events:
+
+    >>> events = mne.find_events(raw, stim_channel='STI 014')
+    Reading 0 ... 41699  =      0.000 ...   277.709 secs...
+    [done]
+    319 events found
+    Events id: [ 1  2  3  4  5 32]
+    >>> print events[:5]
+    [[6994    0    2]
+     [7086    0    3]
+     [7192    0    1]
+     [7304    0    4]
+     [7413    0    2]]
+
+Note that, by default, we use stim_channel='STI 014'. If you have a different
+system (e.g., a newer system that uses channel 'STI101' by default), you can
+use the following to set the default stim channel to use for finding events:
+
+    >>> mne.set_config('MNE_STIM_CHANNEL', 'STI101') # doctest: +SKIP
+
+Events are stored as a 2D numpy array where the first column is the time instant
+and the last one is the event number, which makes events easy to manipulate.
+
+Define epochs parameters:
+
+    >>> event_id = dict(aud_l=1, aud_r=2)  # event trigger and conditions
+    >>> tmin = -0.2  # start of each epoch (200ms before the trigger)
+    >>> tmax = 0.5  # end of each epoch (500ms after the trigger)
+
+Exclude some channels (original bads + 2 more):
+
+    >>> raw.info['bads'] += ['MEG 2443', 'EEG 053']
+
+The variable raw.info['bads'] is just a python list.
+
+Pick the good channels, excluding raw.info['bads']:
+
+    >>> picks = mne.fiff.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False, exclude='bads')
+
+Alternatively one can restrict to magnetometers or gradiometers with:
+
+    >>> mag_picks = mne.fiff.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
+    >>> grad_picks = mne.fiff.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
+
+Define the baseline period:
+
+    >>> baseline = (None, 0)  # means from the first instant to t = 0
+
+Define peak-to-peak rejection parameters for gradiometers, magnetometers and EOG:
+
+    >>> reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+
+Read epochs:
+
+    >>> epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=baseline, preload=False, reject=reject)
+    Created an SSP operator (subspace dimension = 4)
+    4 projection items activated
+    145 matching events found
+    >>> print epochs
+    <Epochs  |  n_events : 145 (good & bad), tmin : -0.2 (s), tmax : 0.5 (s), baseline : (None, 0),
+     'aud_r': 73, 'aud_l': 72>
+
+Get single epochs for one condition:
+
+    >>> epochs_data = epochs['aud_l'].get_data() # doctest: +ELLIPSIS
+    Reading ...
+    >>> print epochs_data.shape
+    (55, 365, 106)
+
+epochs_data is a 3D array of dimension (55 epochs, 365 channels, 106 time instants).
+
+SciPy supports reading and writing of Matlab files. You can save your single trials with:
+
+    >>> from scipy import io
+    >>> io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')
+
+or if you want to keep all the information about the data you can save your epochs
+in a fif file:
+
+    >>> epochs.save('sample-epo.fif') # doctest: +ELLIPSIS
+    Reading ...
+
+and read them later with:
+
+    >>> saved_epochs = mne.read_epochs('sample-epo.fif') # doctest: +ELLIPSIS
+    Reading ...
+
+Compute evoked responses for auditory responses by averaging and plot it:
+
+    >>> evoked = epochs['aud_l'].average() # doctest: +ELLIPSIS
+    Reading ...
+    >>> print evoked
+    <Evoked  |  comment : 'aud_l', time : [-0.199795, 0.499488], n_epochs : 55, n_channels x n_times : 364 x 106>
+    >>> evoked.plot() # doctest:+SKIP
+
+.. figure:: _images/plot_read_epochs.png
+    :alt: Evoked data
+
+.. topic:: Exercise
+
+  1. Extract the max value of each epoch
+
+  >>> max_in_each_epoch = [e.max() for e in epochs['aud_l']] # doctest:+ELLIPSIS
+  Reading ...
+  >>> print max_in_each_epoch[:4] # doctest:+ELLIPSIS
+  [1.93751...e-05, 1.64055...e-05, 1.85453...e-05, 2.04128...e-05]
+
+It is also possible to read evoked data stored in a fif file:
+
+    >>> evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+    >>> evoked1 = mne.fiff.read_evoked(evoked_fname, setno='Left Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
+    Reading .../MNE-sample-data/MEG/sample/sample_audvis-ave.fif ...
+        Read a total of 4 projection items:
+            PCA-v1 (1 x 102) active
+            PCA-v2 (1 x 102) active
+            PCA-v3 (1 x 102) active
+            Average EEG reference (1 x 60) active
+        Found the data of interest:
+            t =    -199.80 ...     499.49 ms (Left Auditory)
+            0 CTF compensation matrices available
+            nave = 55 - aspect type = 100
+    Projections have already been applied. Doing nothing.
+    Applying baseline correction ... (mode: mean)
+
+Or another one stored in the same file:
+
+    >>> evoked2 = mne.fiff.read_evoked(evoked_fname, setno='Right Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
+    Reading ...
+
+Compute a contrast:
+
+    >>> contrast = evoked1 - evoked2
+
+    >>> print contrast
+    <Evoked  |  comment : 'Left Auditory - Right Auditory', time : [-0.199795, 0.499488], n_epochs : 116, n_channels x n_times : 376 x 421>
+
+Time-Frequency: Induced power and phase-locking values
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Define parameters:
+
+    >>> import numpy as np
+    >>> n_cycles = 2  # number of cycles in Morlet wavelet
+    >>> frequencies = np.arange(7, 30, 3)  # frequencies of interest
+    >>> Fs = raw.info['sfreq']  # sampling in Hz
+
+Compute induced power and phase-locking values:
+
+    >>> from mne.time_frequency import induced_power
+    >>> power, phase_lock = induced_power(epochs_data, Fs=Fs, frequencies=frequencies, n_cycles=2, n_jobs=1)
+
+.. figure:: _images/plot_time_frequency.png
+    :alt: Time-Frequency
+
+Inverse modeling: MNE and dSPM on evoked and raw data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Import the required functions:
+
+    >>> from mne.minimum_norm import apply_inverse, read_inverse_operator
+
+Read the inverse operator:
+
+    >>> fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+    >>> inverse_operator = read_inverse_operator(fname_inv) # doctest: +ELLIPSIS
+    Reading ...
+
+Define the inverse parameters:
+
+    >>> snr = 3.0
+    >>> lambda2 = 1.0 / snr ** 2
+    >>> method = "dSPM"
+
+Compute the inverse solution:
+
+    >>> stc = apply_inverse(evoked, inverse_operator, lambda2, method)
+    Preparing the inverse operator for use...
+        Scaled noise and source covariance from nave = 1 to nave = 55
+        Created the regularized inverter
+        Created an SSP operator (subspace dimension = 3)
+        Created the whitener using a full noise covariance matrix (3 small eigenvalues omitted)
+        Computing noise-normalization factors (dSPM)...
+    [done]
+    Picked 305 channels from the data
+    Computing inverse...
+    (eigenleads need to be weighted)...
+    combining the current components...
+    (dSPM)...
+    [done]
+
+Save the source time courses to disk:
+
+    >>> stc.save('mne_dSPM_inverse')
+    Writing STC to disk...
+    [done]
+
+Now, let's compute dSPM on a raw file within a label:
+
+    >>> fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
+    >>> label = mne.read_label(fname_label)
+
+Compute inverse solution during the first 15s:
+
+    >>> from mne.minimum_norm import apply_inverse_raw
+    >>> start, stop = raw.time_as_index([0, 15])  # read the first 15s of data
+    >>> stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label, start, stop)
+    Preparing the inverse operator for use...
+        Scaled noise and source covariance from nave = 1 to nave = 1
+        Created the regularized inverter
+        Created an SSP operator (subspace dimension = 3)
+        Created the whitener using a full noise covariance matrix (3 small eigenvalues omitted)
+        Computing noise-normalization factors (dSPM)...
+    [done]
+    Picked 305 channels from the data
+    Computing inverse...
+    Reading 0 ... 2251  =      0.000 ...    14.991 secs...
+    [done]
+    (eigenleads need to be weighted)...
+    combining the current components...
+    [done]
+
+Save result in stc files:
+
+    >>> stc.save('mne_dSPM_raw_inverse_Aud')
+    Writing STC to disk...
+    [done]
+
+What else can you do?
+^^^^^^^^^^^^^^^^^^^^^
+
+    - detect heart beat QRS component
+    - detect eye blinks and EOG artifacts
+    - compute SSP projections to remove ECG or EOG artifacts
+    - compute Independent Component Analysis (ICA) to remove artifacts or select latent sources
+    - estimate noise covariance matrix from Raw and Epochs
+    - visualize cross-trial response dynamics using epochs images
+    - estimate power in the source space
+    - estimate connectivity in sensor and source space
+    - morph stc from one brain to another for group studies
+    - visualize source estimates
+    - export raw, epochs, and evoked data to other Python data analysis libraries, e.g., pandas and nitime
+
+
+Want to know more?
+^^^^^^^^^^^^^^^^^^^
+
+Browse the :ref:`examples-index` gallery.
\ No newline at end of file
diff --git a/doc/source/this_project.inc b/doc/source/this_project.inc
new file mode 100644
index 0000000..23ade2e
--- /dev/null
+++ b/doc/source/this_project.inc
@@ -0,0 +1,5 @@
+.. mne-python
+.. _mne-python: http://mne-tools.github.com/mne-python-intro
+.. _`mne-python GitHub`: http://github.com/mne-tools/mne-python
+.. _`mne-python sample dataset`: ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE-sample-data-processed.tar.gz
+
diff --git a/doc/source/whats_new.rst b/doc/source/whats_new.rst
new file mode 100644
index 0000000..d2f5687
--- /dev/null
+++ b/doc/source/whats_new.rst
@@ -0,0 +1,444 @@
+What's new
+==========
+
+.. _changes_0_6:
+
+Version 0.6
+-----------
+
+Changelog
+~~~~~~~~~
+
+   - Linear (and zeroth-order) detrending for Epochs and Evoked by `Eric Larson`_
+
+   - Label morphing between subjects by `Eric Larson`_
+
+   - Define events based on time lag between reference and target event by `Denis Engemann`_
+
+   - ICA convenience function implementing an automated artifact removal workflow by `Denis Engemann`_
+
+   - Bad channels no longer included in epochs by default by `Eric Larson`_
+
+   - Support for diagonal noise covariances in inverse methods and rank computation by `Eric Larson`_
+
+   - Support for using CUDA in FFT-based FIR filtering (method='fft') and resampling by `Eric Larson`_
+
+   - Optimized FFT length selection for faster overlap-add filtering by `Martin Luessi`_
+
+   - Ability to exclude bad channels from evoked plots or show them in red by `Martin Luessi`_
+
+   - Option to show both hemispheres when plotting SourceEstimate with PySurfer by `Martin Luessi`_
+
+   - Optimized Raw reading and epoching routines to limit memory copies by `Eric Larson`_
+
+   - Advanced options to save raw files in short or double precision by `Eric Larson`_
+
+   - Option to detect decreasing events using find_events by `Simon Kornblith`_
+
+   - Option to change default stim_channel used for finding events by `Eric Larson`_
+
+   - Use average patch normal from surface-oriented forward solution in inverse calculation when possible by `Eric Larson`_
+
+   - Function to plot drop_log from Epochs instance by `Eric Larson`_
+
+   - Estimate rank of Raw data by `Eric Larson`_
+
+   - Support reading of BTi/4D data by `Denis Engemann`_
+
+   - Wrapper for generating forward solutions by `Eric Larson`_
+
+   - Averaging forward solutions by `Eric Larson`_
+
+   - Events now contain the pre-event stim channel value in the middle column, by `Christian Brodbeck`_
+
+   - New function `mne.find_stim_steps` for finding all steps in a stim channel by `Christian Brodbeck`_
+
+   - Get information about FIFF files using mne.fiff.show_fiff() by `Eric Larson`_ (see the sketch after this list)
+
+   - Compute forward fields sensitivity maps by `Alex Gramfort`_ and `Eric Larson`_
+
+   - Support reading of KIT data by `Teon Brooks`_ and `Christian Brodbeck`_
+
+   - Raw data visualization by `Eric Larson`_
+
+   - Smarter SourceEstimate object that contains linear inverse kernel and sensor space data for fast time-frequency transforms in source space by `Martin Luessi`_
+
+   - Add example of decoding/MVPA on MEG sensor data by `Alex Gramfort`_
+
+   - Add support for non-paired tests in spatiotemporal cluster stats by `Alex Gramfort`_
+
+   - Add unified SSP-projector API for Raw, Epochs and Evoked objects by `Denis Engemann`_, `Alex Gramfort`_, `Eric Larson`_ and `Martin Luessi`_
+
+   - Add support for delayed SSP application at the evoked stage by `Denis Engemann`_, `Alex Gramfort`_, `Eric Larson`_ and `Martin Luessi`_
+
+   - Support selective parameter updating in functions taking dicts as arguments by `Denis Engemann`_
+
+   - New ICA method `sources_as_epochs` to create Epochs in ICA space by `Denis Engemann`_
+
+   - New method in Evoked and Epochs classes to shift the time scale by `Mainak Jas`_
+
+   - Added option to specify EOG channel(s) when computing PCA/SSP projections for EOG artifacts by `Mainak Jas`_
+
+   - Improved connectivity interface to allow combinations of signals, e.g., seed time series and source estimates, by `Martin Luessi`_
+
+   - Effective connectivity estimation using Phase Slope Index (PSI) by `Martin Luessi`_
+
+   - Support for threshold-free cluster enhancement (TFCE) by `Eric Larson`_
+
+   - Support for "hat" variance regularization by `Eric Larson`_
+
+   - Access source estimates as Pandas DataFrame by `Denis Engemann`_.
+
+   - Add example of decoding/MVPA on MEG source space data by `Denis Engemann`_
+
+   - Add support for --tstart option in mne_compute_proj_eog.py by `Alex Gramfort`_
+
+   - Add two-way repeated measures ANOVA for mass-univariate statistics by `Denis Engemann`_, `Eric Larson`_ and `Alex Gramfort`_
+
+   - Add function for summarizing clusters from spatio-temporal-cluster permutation tests by `Denis Engemann`_ and `Eric Larson`_
+
+   - Add generator support for lcmv_epochs by `Denis Engemann`_
+
+   - Gamma-MAP sparse source localization method by `Martin Luessi`_ and `Alex Gramfort`_
+
+   - Add regular expression and substring support for selecting parcellation labels by `Denis Engemann`_
+
+   - New plot_evoked option for interactive and reversible selection of SSP projection vectors by `Denis Engemann`_
+
+   - Plot 2D flat topographies with interpolation for evoked and SSPs by `Christian Brodbeck`_ and `Alex Gramfort`_
+
+   - Support delayed SSP application for 2D flat topographies by `Denis Engemann`_, `Christian Brodbeck`_ and `Alex Gramfort`_
+
+   - Allow picking maximum power source, a.k.a. "optimal", orientation in LCMV beamformers by `Roman Goj`_, `Alex Gramfort`_, `Denis Engemann`_ and `Martin Luessi`_
+
+   - Add sensor type scaling parameter to plot_topo by `Andrew Dykstra`_, `Denis Engemann`_ and `Eric Larson`_
+
+   - Support delayed SSP application in plot_topo by `Denis Engemann`_
+
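+For a quick taste of two of the helpers named above (a sketch; the file name
+is illustrative and `raw` is a loaded Raw instance):
+
+    >>> mne.fiff.show_fiff('sample_audvis_raw.fif')  # summarize FIFF contents
+    >>> steps = mne.find_stim_steps(raw)  # all steps in the stim channel
+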
+API
+~~~
+
+   - Deprecated use of fiff.pick_types without specifying exclude -- use either [] (none), 'bads' (bad channels), or a list of strings (channel names); see the sketch after this list.
+
+   - Depth bias correction in make_inverse_operator (dSPM/MNE/sLORETA) is now done as in the C code: using only gradiometers if present, otherwise magnetometers, and EEG if no MEG channels are present.
+
+   - Fixed-orientation inverse solutions need to be made using the 'fixed=True' option (using non-surface-oriented forward solutions if no depth weighting is used) to maintain compatibility with the MNE C code.
+
+   - Raw.save() will only overwrite an existing destination file if overwrite=True is set.
+
+   - mne.utils.set_config(), get_config() and get_config_path() have moved to the mne namespace.
+
+   - Raw constructor argument proj_active deprecated -- use proj argument instead.
+
+   - Functions from the mne.mixed_norm module have been moved to the mne.inverse_sparse module.
+
+   - Deprecate CTF compensation (keep_comp and dest_comp) in Epochs and move it to Raw with a single compensation parameter.
+
+   - Removed the artifacts module. Artifact- and preprocessing-related functions can now be found in mne.preprocessing.
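+
+A short migration sketch for two of the items above (assuming `raw` is a
+loaded Raw instance; the file name is illustrative):
+
+    >>> picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, exclude='bads')
+    >>> raw.save('audvis_raw.fif', overwrite=True)  # refuses to overwrite otherwise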
+
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number
+of commits):
+
+   * 340  Eric Larson
+   * 330  Denis A. Engemann
+   * 204  Alexandre Gramfort
+   *  72  Christian Brodbeck
+   *  66  Roman Goj
+   *  65  Martin Luessi
+   *  37  Teon Brooks
+   *  18  Mainak Jas
+   *   9  Simon Kornblith
+   *   7  Daniel Strohmeier
+   *   6  Romain Trachel
+   *   5  Yousra BEKHTI
+   *   5  Brad Buran
+   *   1  Andrew Dykstra
+   *   1  Christoph Dinh
+
+.. _changes_0_5:
+
+Version 0.5
+-----------
+
+Changelog
+~~~~~~~~~
+
+   - Multi-taper PSD estimation for single epochs in source space using minimum norm by `Martin Luessi`_
+
+   - Read and visualize .dip files obtained with xfit or mne_dipole_fit by `Alex Gramfort`_
+
+   - Make EEG layout by `Eric Larson`_
+
+   - Ability to specify SSP projectors when computing covariance from raw by `Eric Larson`_
+
+   - Read and write txt based event files (.eve or .txt) by `Eric Larson`_
+
+   - Pass qrs threshold to preprocessing functions by `Eric Larson`_
+
+   - Compute SSP projections from continuous raw data by `Eric Larson`_
+
+   - Support for applied SSP projections when loading Raw by `Eric Larson`_ and `Alex Gramfort`_
+
+   - Support for loading Raw stored in different fif files by `Eric Larson`_
+
+   - IO of many Evoked in a single fif file + compute Epochs.standard_error by `Eric Larson`_ and `Alex Gramfort`_
+
+   - ICA computation on Raw and Epochs with automatic component selection by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Saving ICA sources to fif files and creating ICA topography layouts by
+     `Denis Engemann`_
+
+   - Save and restore ICA session to and from fif by `Denis Engemann`_
+
+   - Export raw, epochs and evoked data as data frame to the pandas library by `Denis Engemann`_
+
+   - Export raw, epochs and evoked data to the nitime library by `Denis Engemann`_
+
+   - Copy methods for raw and epochs objects by `Denis Engemann`_, `Martin Luessi`_ and `Alex Gramfort`_
+
+   - New raw objects method to get the time at certain indices by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Plot method for evoked objects by `Denis Engemann`_
+
+   - Enhancement of cluster-level stats (speed and memory efficiency) by `Eric Larson`_ and `Martin Luessi`_
+
+   - Reading of source space distances by `Eric Larson`_
+
+   - Support for filling / smoothing labels and speedup of morphing by `Eric Larson`_
+
+   - Adding options for morphing by `Eric Larson`_
+
+   - Plotting functions for time frequency and epochs image topographies by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Plotting ERP/ERF images by `Alex Gramfort`_
+
+   - See a detailed subplot when clicking on a channel inside a topography plot by `Martin Luessi`_, `Eric Larson`_ and `Denis Engemann`_
+
+   - Support for misc channel types in plotting functions by `Denis Engemann`_
+
+   - Improved logging support by `Eric Larson`_
+
+   - Whitening of evoked data for plotting and quality checking by `Alex Gramfort`_
+
+   - Transparent I/O of gzipped fif files (as .fif.gz) by `Eric Larson`_
+
+   - Spectral connectivity estimation in sensor and source space by `Martin Luessi`_
+
+   - Read and write Epochs in FIF files by `Alex Gramfort`_
+
+   - Resampling of Raw, Epochs, and Evoked by `Eric Larson`_
+
+   - Creating epochs objects for different conditions and accessing conditions via user-defined name by `Denis Engemann`_, `Eric Larson`_, `Alex Gramfort`_ and `Christian Brodbeck`_
+
+   - Visualizing evoked responses from different conditions in one topography plot by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Support for the L21 MxNE solver using coordinate descent from scikit-learn by `Alex Gramfort`_ and `Daniel Strohmeier`_
+
+   - Support IIR filters (Butterworth, Chebyshev, Bessel, etc.) by `Eric Larson`_
+
+   - Read labels from FreeSurfer parcellation by `Martin Luessi`_
+
+   - Combining labels in source space by `Christian Brodbeck`_
+
+   - Read and write source spaces, surfaces and coordinate transforms to and from files by `Christian Brodbeck`_
+
+   - Downsample epochs by `Christian Brodbeck`_ and `Eric Larson`_
+
+   - New labels class for handling source estimates by `Christian Brodbeck`_, `Martin Luessi`_ and `Alex Gramfort`_
+
+   - New plotting routines to easily display SourceEstimates using PySurfer by `Alex Gramfort`_
+
+   - Function to extract label time courses from SourceEstimate(s) by `Martin Luessi`_
+
+   - Function to visualize connectivity as circular graph by `Martin Luessi`_ and `Alex Gramfort`_
+
+   - Time-frequency Mixed Norm Estimates (TF-MxNE) by `Alex Gramfort`_ and `Daniel Strohmeier`_
+
+
+API
+~~~
+   - Added nave parameter to source_induced_power() and source_band_induced_power(); nave=1 is used by default (a wrong nave was used before).
+
+   - Use mne.layout.read_layout instead of mne.layout.Layout to read a layout file (.lout)
+
+   - Use raw.time_as_index instead of time_to_index (still works but is deprecated).
+
+   - The artifacts module (mne.artifacts) is now merged into mne.preprocessing
+
+   - Epochs objects now also take dicts as values for the event_id argument, so they can represent multiple conditions (see the sketch after this list).
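+
+For example, one Epochs object can now hold several conditions (a sketch;
+the event codes are hypothetical):
+
+    >>> event_id = dict(aud_l=1, aud_r=2)
+    >>> epochs = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5)
+    >>> epochs['aud_l']  # access a single condition by name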
+
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number
+of commits):
+
+   * 313  Eric Larson
+   * 226  Alexandre Gramfort
+   * 219  Denis A. Engemann
+   * 104  Christian Brodbeck
+   *  85  Martin Luessi
+   *   6  Daniel Strohmeier
+   *   4  Teon Brooks
+   *   1  Dan G. Wakeman
+
+
+.. _changes_0_4:
+
+Version 0.4
+-----------
+
+Changelog
+~~~~~~~~~
+
+   - Add function to compute source PSD using minimum norm by `Alex Gramfort`_
+
+   - L21 Mixed Norm Estimates (MxNE) by `Alex Gramfort`_ and `Daniel Strohmeier`_
+
+   - Generation of simulated evoked responses by `Alex Gramfort`_, `Daniel Strohmeier`_, and `Martin Luessi`_
+
+   - Fit AR models to raw data for temporal whitening by `Alex Gramfort`_.
+
+   - Speedup and reduced memory usage of mne.morph_data by `Alex Gramfort`_.
+
+   - Backporting scipy.signal.firwin2 so filtering works with old scipy by `Alex Gramfort`_.
+
+   - LCMV Beamformer for evoked data, single trials, and raw data by `Alex Gramfort`_ and `Martin Luessi`_.
+
+   - Add support for reading named channel selections by `Martin Luessi`_.
+
+   - Add Raw.filter method to more easily band pass data by `Alex Gramfort`_.
+
+   - Add tmin + tmax parameters in mne.compute_covariance to estimate noise covariance in epochs baseline without creating new epochs by `Alex Gramfort`_.
+
+   - Add support for sLORETA in apply_inverse, apply_inverse_raw, apply_inverse_epochs (API Change) by `Alex Gramfort`_.
+
+   - Add method to regularize a noise covariance by `Alex Gramfort`_.
+
+   - Read and write measurement info in forward and inverse operators for interactive visualization in mne_analyze by `Alex Gramfort`_.
+
+   - New mne_compute_proj_ecg.py and mne_compute_proj_eog.py scripts to estimate ECG/EOG PCA/SSP vectors by `Alex Gramfort`_ and `Martin Luessi`_.
+
+   - Wrapper function and script (mne_maxfilter.py) for Elekta Neuromag MaxFilter(TM) by `Martin Luessi`_
+
+   - Add method to eliminate stimulation artifacts from raw data by linear interpolation or windowing by `Daniel Strohmeier`_.
+
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number
+of commits):
+
+   * 118  Alexandre Gramfort
+   *  81  Martin Luessi
+   *  15  Daniel Strohmeier
+   *   4  Christian Brodbeck
+   *   4  Louis Thibault
+   *   2  Brad Buran
+
+.. _changes_0_3:
+
+Version 0.3
+-----------
+
+Changelog
+~~~~~~~~~
+
+   - Sign flip computation for robust label average of signed values by `Alex Gramfort`_.
+
+   - Reading and writing of .w files by `Martin Luessi`_.
+
+   - Support for modifying Raw object and allow raw data preloading with memory mapping by `Martin Luessi`_ and `Alex Gramfort`_.
+
+   - Support for arithmetic on Evoked data (useful to concatenate between runs and compute contrasts) by `Alex Gramfort`_.
+
+   - Support for computing sensor space data from a source estimate using an MNE forward solution by `Martin Luessi`_.
+
+   - Support for arithmetic on Covariance by `Alex Gramfort`_.
+
+   - Write BEM surfaces in Python by `Alex Gramfort`_.
+
+   - Filtering operations and apply_function interface for Raw object by `Martin Luessi`_.
+
+   - Support for complex valued raw fiff files and computation of analytic signal for Raw object by `Martin Luessi`_.
+
+   - Write inverse operators (surface and volume) by `Alex Gramfort`_.
+
+   - Covariance matrix computation with multiple event types by `Martin Luessi`_.
+
+   - New tutorial in the documentation and new classes and functions reference page by `Alex Gramfort`_.
+
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number
+of commits):
+
+    * 80  Alexandre Gramfort
+    * 51  Martin Luessi
+
+Version 0.2
+-----------
+
+Changelog
+~~~~~~~~~
+
+   - New stats functions for FDR correction and Bonferroni by `Alex Gramfort`_.
+
+   - Faster time-frequency using downsampling trick by `Alex Gramfort`_.
+
+   - Support for volume source spaces by `Alex Gramfort`_ (requires next MNE release or nightly).
+
+   - Improved Epochs handling by `Martin Luessi`_ (slicing, drop_bad_epochs).
+
+   - Bug fix in Epochs + ECG detection by Manfred Kitzbichler.
+
+   - New pick_types_evoked function by `Alex Gramfort`_.
+
+   - SourceEstimate now supports algebra by `Alex Gramfort`_.
+
+API changes summary
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here are the code migration instructions when upgrading from mne-python
+version 0.1:
+
+  - New return values for the function find_ecg_events
+
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number
+of commits):
+
+    * 33  Alexandre Gramfort
+    * 12  Martin Luessi
+    *  2  Yaroslav Halchenko
+    *  1  Manfred Kitzbichler
+
+.. _Alex Gramfort: http://alexandre.gramfort.net
+
+.. _Martin Luessi: http://www.nmr.mgh.harvard.edu/martinos/people/showPerson.php?people_id=1600
+
+.. _Yaroslav Halchenko: http://www.onerussian.com/
+
+.. _Daniel Strohmeier: http://www.tu-ilmenau.de/bmti/fachgebiete/biomedizinische-technik/dipl-ing-daniel-strohmeier/
+
+.. _Eric Larson: http://faculty.washington.edu/larsoner/
+
+.. _Denis Engemann: https://github.com/dengemann
+
+.. _Christian Brodbeck: https://github.com/christianmbrodbeck
+
+.. _Simon Kornblith: http://simonster.com
+
+.. _Teon Brooks: https://files.nyu.edu/tlb331/public/
+
+.. _Mainak Jas: http://ltl.tkk.fi/wiki/Mainak_Jas
+
+.. _Roman Goj: http://romanmne.blogspot.co.uk
+
+.. _Andrew Dykstra: https://github.com/adykstra
diff --git a/doc/sphinxext/README.txt b/doc/sphinxext/README.txt
new file mode 100644
index 0000000..0353408
--- /dev/null
+++ b/doc/sphinxext/README.txt
@@ -0,0 +1,25 @@
+===================
+ Sphinx Extensions
+===================
+
+We've copied these sphinx extensions over from nipy-core.  Any edits
+should be done upstream in nipy-core, not here in mne-python!
+
+These are a few sphinx extensions we are using to build the nipy
+documentation.  In this file we list where they each come from, since we intend
+to always push back upstream any modifications or improvements we make to them.
+
+It's worth noting that some of these are being carried (as copies) by more
+than one project.  Hopefully once they mature a little more, they will be
+incorporated back into sphinx itself, so that all projects can use a common
+base.
+
+* From numpy:
+  * docscrape.py
+  * docscrape_sphinx.py
+  * numpydoc.py
+
+* From matplotlib:
+  * inheritance_diagram.py
+  * ipython_console_highlighting.py
+  * only_directives.py
diff --git a/doc/sphinxext/docscrape.py b/doc/sphinxext/docscrape.py
new file mode 100644
index 0000000..a6d333e
--- /dev/null
+++ b/doc/sphinxext/docscrape.py
@@ -0,0 +1,497 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+    """A line-based string reader.
+
+    """
+    def __init__(self, data):
+        """
+        Parameters
+        ----------
+        data : str
+           String with lines separated by '\n'.
+
+        """
+        if isinstance(data,list):
+            self._str = data
+        else:
+            self._str = data.split('\n') # store string as list of lines
+
+        self.reset()
+
+    def __getitem__(self, n):
+        return self._str[n]
+
+    def reset(self):
+        self._l = 0 # current line nr
+
+    def read(self):
+        if not self.eof():
+            out = self[self._l]
+            self._l += 1
+            return out
+        else:
+            return ''
+
+    def seek_next_non_empty_line(self):
+        for l in self[self._l:]:
+            if l.strip():
+                break
+            else:
+                self._l += 1
+
+    def eof(self):
+        return self._l >= len(self._str)
+
+    def read_to_condition(self, condition_func):
+        start = self._l
+        for line in self[start:]:
+            if condition_func(line):
+                return self[start:self._l]
+            self._l += 1
+            if self.eof():
+                return self[start:self._l+1]
+        return []
+
+    def read_to_next_empty_line(self):
+        self.seek_next_non_empty_line()
+        def is_empty(line):
+            return not line.strip()
+        return self.read_to_condition(is_empty)
+
+    def read_to_next_unindented_line(self):
+        def is_unindented(line):
+            return (line.strip() and (len(line.lstrip()) == len(line)))
+        return self.read_to_condition(is_unindented)
+
+    def peek(self,n=0):
+        if self._l + n < len(self._str):
+            return self[self._l + n]
+        else:
+            return ''
+
+    def is_empty(self):
+        return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+    def __init__(self,docstring):
+        docstring = textwrap.dedent(docstring).split('\n')
+
+        self._doc = Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': [''],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Attributes': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'Warnings': [],
+            'References': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def __getitem__(self,key):
+        return self._parsed_data[key]
+
+    def __setitem__(self,key,val):
+        if not self._parsed_data.has_key(key):
+            warn("Unknown section %s" % key)
+        else:
+            self._parsed_data[key] = val
+
+    def _is_at_section(self):
+        self._doc.seek_next_non_empty_line()
+
+        if self._doc.eof():
+            return False
+
+        l1 = self._doc.peek().strip()  # e.g. Parameters
+
+        if l1.startswith('.. index::'):
+            return True
+
+        l2 = self._doc.peek(1).strip() #    ---------- or ==========
+        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+    def _strip(self,doc):
+        i = 0
+        j = 0
+        for i,line in enumerate(doc):
+            if line.strip(): break
+
+        for j,line in enumerate(doc[::-1]):
+            if line.strip(): break
+
+        return doc[i:len(doc)-j]
+
+    def _read_to_next_section(self):
+        section = self._doc.read_to_next_empty_line()
+
+        while not self._is_at_section() and not self._doc.eof():
+            if not self._doc.peek(-1).strip(): # previous line was empty
+                section += ['']
+
+            section += self._doc.read_to_next_empty_line()
+
+        return section
+
+    def _read_sections(self):
+        while not self._doc.eof():
+            data = self._read_to_next_section()
+            name = data[0].strip()
+
+            if name.startswith('..'): # index section
+                yield name, data[1:]
+            elif len(data) < 2:
+                yield StopIteration
+            else:
+                yield name, self._strip(data[2:])
+
+    def _parse_param_list(self,content):
+        r = Reader(content)
+        params = []
+        while not r.eof():
+            header = r.read().strip()
+            if ' : ' in header:
+                arg_name, arg_type = header.split(' : ')[:2]
+            else:
+                arg_name, arg_type = header, ''
+
+            desc = r.read_to_next_unindented_line()
+            desc = dedent_lines(desc)
+
+            params.append((arg_name,arg_type,desc))
+
+        return params
+
+
+    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+    def _parse_see_also(self, content):
+        """
+        func_name : Descriptive text
+            continued text
+        another_func_name : Descriptive text
+        func_name1, func_name2, :meth:`func_name`, func_name3
+
+        """
+        items = []
+
+        def parse_item_name(text):
+            """Match ':role:`name`' or 'name'"""
+            m = self._name_rgx.match(text)
+            if m:
+                g = m.groups()
+                if g[1] is None:
+                    return g[3], None
+                else:
+                    return g[2], g[1]
+            raise ValueError("%s is not a item name" % text)
+
+        def push_item(name, rest):
+            if not name:
+                return
+            name, role = parse_item_name(name)
+            items.append((name, list(rest), role))
+            del rest[:]
+
+        current_func = None
+        rest = []
+
+        for line in content:
+            if not line.strip(): continue
+
+            m = self._name_rgx.match(line)
+            if m and line[m.end():].strip().startswith(':'):
+                push_item(current_func, rest)
+                current_func, line = line[:m.end()], line[m.end():]
+                rest = [line.split(':', 1)[1].strip()]
+                if not rest[0]:
+                    rest = []
+            elif not line.startswith(' '):
+                push_item(current_func, rest)
+                current_func = None
+                if ',' in line:
+                    for func in line.split(','):
+                        push_item(func, [])
+                elif line.strip():
+                    current_func = line
+            elif current_func is not None:
+                rest.append(line.strip())
+        push_item(current_func, rest)
+        return items
+
+    def _parse_index(self, section, content):
+        """
+        .. index: default
+           :refguide: something, else, and more
+
+        """
+        def strip_each_in(lst):
+            return [s.strip() for s in lst]
+
+        out = {}
+        section = section.split('::')
+        if len(section) > 1:
+            out['default'] = strip_each_in(section[1].split(','))[0]
+        for line in content:
+            line = line.split(':')
+            if len(line) > 2:
+                out[line[1]] = strip_each_in(line[2].split(','))
+        return out
+
+    def _parse_summary(self):
+        """Grab signature (if given) and summary"""
+        if self._is_at_section():
+            return
+
+        summary = self._doc.read_to_next_empty_line()
+        summary_str = " ".join([s.strip() for s in summary]).strip()
+        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+            self['Signature'] = summary_str
+            if not self._is_at_section():
+                self['Summary'] = self._doc.read_to_next_empty_line()
+        else:
+            self['Summary'] = summary
+
+        if not self._is_at_section():
+            self['Extended Summary'] = self._read_to_next_section()
+
+    def _parse(self):
+        self._doc.reset()
+        self._parse_summary()
+
+        for (section,content) in self._read_sections():
+            if not section.startswith('..'):
+                section = ' '.join([s.capitalize() for s in section.split(' ')])
+            if section in ('Parameters', 'Attributes', 'Methods',
+                           'Returns', 'Raises', 'Warns'):
+                self[section] = self._parse_param_list(content)
+            elif section.startswith('.. index::'):
+                self['index'] = self._parse_index(section, content)
+            elif section == 'See Also':
+                self['See Also'] = self._parse_see_also(content)
+            else:
+                self[section] = content
+
+    # string conversion routines
+
+    def _str_header(self, name, symbol='-'):
+        return [name, len(name)*symbol]
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        if self['Signature']:
+            return [self['Signature'].replace('*','\*')] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        if self['Summary']:
+            return self['Summary'] + ['']
+        else:
+            return []
+
+    def _str_extended_summary(self):
+        if self['Extended Summary']:
+            return self['Extended Summary'] + ['']
+        else:
+            return []
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            for param,param_type,desc in self[name]:
+                out += ['%s : %s' % (param, param_type)]
+                out += self._str_indent(desc)
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += self[name]
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        if not self['See Also']: return []
+        out = []
+        out += self._str_header("See Also")
+        last_had_desc = True
+        for func, desc, role in self['See Also']:
+            if role:
+                link = ':%s:`%s`' % (role, func)
+            elif func_role:
+                link = ':%s:`%s`' % (func_role, func)
+            else:
+                link = "`%s`_" % func
+            if desc or last_had_desc:
+                out += ['']
+                out += [link]
+            else:
+                out[-1] += ", %s" % link
+            if desc:
+                out += self._str_indent([' '.join(desc)])
+                last_had_desc = True
+            else:
+                last_had_desc = False
+        out += ['']
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            out += ['   :%s: %s' % (section, ', '.join(references))]
+        return out
+
+    def __str__(self, func_role=''):
+        out = []
+        out += self._str_signature()
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters','Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_section('Warnings')
+        out += self._str_see_also(func_role)
+        for s in ('Notes','References','Examples'):
+            out += self._str_section(s)
+        out += self._str_index()
+        return '\n'.join(out)
+
+
+def indent(str,indent=4):
+    indent_str = ' '*indent
+    if str is None:
+        return indent_str
+    lines = str.split('\n')
+    return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+    """Deindent a list of lines maximally"""
+    return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+    return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+    def __init__(self, func, role='func', doc=None):
+        self._f = func
+        self._role = role # e.g. "func" or "meth"
+        if doc is None:
+            doc = inspect.getdoc(func) or ''
+        try:
+            NumpyDocString.__init__(self, doc)
+        except ValueError, e:
+            print '*'*78
+            print "ERROR: '%s' while parsing `%s`" % (e, self._f)
+            print '*'*78
+            #print "Docstring follows:"
+            #print doclines
+            #print '='*78
+
+        if not self['Signature']:
+            func, func_name = self.get_func()
+            try:
+                # try to read signature
+                argspec = inspect.getargspec(func)
+                argspec = inspect.formatargspec(*argspec)
+                argspec = argspec.replace('*','\*')
+                signature = '%s%s' % (func_name, argspec)
+            except TypeError, e:
+                signature = '%s()' % func_name
+            self['Signature'] = signature
+
+    def get_func(self):
+        func_name = getattr(self._f, '__name__', self.__class__.__name__)
+        if inspect.isclass(self._f):
+            func = getattr(self._f, '__call__', self._f.__init__)
+        else:
+            func = self._f
+        return func, func_name
+
+    def __str__(self):
+        out = ''
+
+        func, func_name = self.get_func()
+        signature = self['Signature'].replace('*', '\*')
+
+        roles = {'func': 'function',
+                 'meth': 'method'}
+
+        if self._role:
+            if not roles.has_key(self._role):
+                print "Warning: invalid role %s" % self._role
+            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
+                                             func_name)
+
+        out += super(FunctionDoc, self).__str__(func_role=self._role)
+        return out
+
+
+class ClassDoc(NumpyDocString):
+    def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+        self._name = cls.__name__
+        self._func_doc = func_doc
+
+        if doc is None:
+            doc = pydoc.getdoc(cls)
+
+        NumpyDocString.__init__(self, doc)
+
+    @property
+    def methods(self):
+        return [name for name,func in inspect.getmembers(self._cls)
+                if not name.startswith('_') and callable(func)]
+
+    def __str__(self):
+        out = ''
+        out += super(ClassDoc, self).__str__()
+        out += "\n\n"
+
+        #for m in self.methods:
+        #    print "Parsing `%s`" % m
+        #    out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
+        #    out += '.. index::\n   single: %s; %s\n\n' % (self._name, m)
+
+        return out
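+
+
+# Usage sketch (not part of the upstream module; `some_function` is a
+# placeholder): parse a NumPy-style docstring and inspect a section.
+#
+#     doc = NumpyDocString(some_function.__doc__)
+#     doc['Parameters']  # -> list of (name, type, description lines) tuples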
diff --git a/doc/sphinxext/docscrape_sphinx.py b/doc/sphinxext/docscrape_sphinx.py
new file mode 100644
index 0000000..eda6c35
--- /dev/null
+++ b/doc/sphinxext/docscrape_sphinx.py
@@ -0,0 +1,137 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+import re, inspect, textwrap, pydoc
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+    # string conversion routines
+    def _str_header(self, name, symbol='`'):
+        return ['.. rubric:: ' + name, '']
+
+    def _str_field_list(self, name):
+        return [':' + name + ':']
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        # NOTE: signature output is disabled; the code below is intentionally
+        # left unreachable.
+        return ['']
+        if self['Signature']:
+            return ['``%s``' % self['Signature']] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Extended Summary'] + ['']
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_field_list(name)
+            out += ['']
+            for param,param_type,desc in self[name]:
+                out += self._str_indent(['**%s** : %s' % (param.strip(),
+                                                          param_type)])
+                out += ['']
+                out += self._str_indent(desc,8)
+                out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += ['']
+            content = textwrap.dedent("\n".join(self[name])).split("\n")
+            out += content
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        out = []
+        if self['See Also']:
+            see_also = super(SphinxDocString, self)._str_see_also(func_role)
+            out = ['.. seealso::', '']
+            out += self._str_indent(see_also[2:])
+        return out
+
+    def _str_warnings(self):
+        out = []
+        if self['Warnings']:
+            out = ['.. warning::', '']
+            out += self._str_indent(self['Warnings'])
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        if len(idx) == 0:
+            return out
+
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            elif section == 'refguide':
+                out += ['   single: %s' % (', '.join(references))]
+            else:
+                out += ['   %s: %s' % (section, ','.join(references))]
+        return out
+
+    def _str_references(self):
+        out = []
+        if self['References']:
+            out += self._str_header('References')
+            if isinstance(self['References'], str):
+                self['References'] = [self['References']]
+            out.extend(self['References'])
+            out += ['']
+        return out
+
+    def __str__(self, indent=0, func_role="obj"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Attributes', 'Methods',
+                           'Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_warnings()
+        out += self._str_see_also(func_role)
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_section('Examples')
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+    pass
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+    pass
+
+def get_doc_object(obj, what=None, doc=None):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, '', doc=doc)
+    else:
+        if doc is None:
+            doc = pydoc.getdoc(obj)
+        return SphinxDocString(doc)
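+
+
+# Usage sketch (not part of the upstream module): render a docstring as
+# Sphinx-ready reST; numpy.mean is just an illustrative target.
+#
+#     import numpy as np
+#     print get_doc_object(np.mean)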
diff --git a/doc/sphinxext/gen_rst.py b/doc/sphinxext/gen_rst.py
new file mode 100644
index 0000000..6eedc90
--- /dev/null
+++ b/doc/sphinxext/gen_rst.py
@@ -0,0 +1,907 @@
+"""
+Example generation modified from scikit-learn
+
+Generate the rst files for the examples by iterating over the python
+example files.
+
+Files that generate images should start with 'plot'
+
+"""
+from time import time
+import os
+import shutil
+import traceback
+import glob
+import sys
+from StringIO import StringIO
+import cPickle
+import re
+import urllib2
+import gzip
+import posixpath
+
+try:
+    from PIL import Image
+except ImportError:
+    import Image
+
+import matplotlib
+matplotlib.use('Agg')
+
+import token
+import tokenize
+
+MAX_NB_LINES_STDOUT = 20
+
+###############################################################################
+# A tee object to redirect streams to multiple outputs
+
+
+class Tee(object):
+
+    def __init__(self, file1, file2):
+        self.file1 = file1
+        self.file2 = file2
+
+    def write(self, data):
+        self.file1.write(data)
+        self.file2.write(data)
+
+    def flush(self):
+        self.file1.flush()
+        self.file2.flush()
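+
+# Usage sketch (mirrors how this script uses Tee further below): duplicate
+# stdout into an in-memory buffer while still echoing to the real stdout.
+#
+#     my_buffer = StringIO()
+#     sys.stdout = Tee(sys.stdout, my_buffer)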
+
+###############################################################################
+# Documentation link resolver objects
+
+
+def get_data(url):
+    """Helper function to get data over http or from a local file"""
+    if url.startswith('http://'):
+        resp = urllib2.urlopen(url)
+        encoding = resp.headers.dict.get('content-encoding', 'plain')
+        data = resp.read()
+        if encoding == 'plain':
+            pass
+        elif encoding == 'gzip':
+            data = StringIO(data)
+            data = gzip.GzipFile(fileobj=data).read()
+        else:
+            raise RuntimeError('unknown encoding')
+    else:
+        with open(url, 'r') as fid:
+            data = fid.read()
+
+    return data
+
+
+def parse_sphinx_searchindex(searchindex):
+    """Parse a Sphinx search index
+
+    Parameters
+    ----------
+    searchindex : str
+        The Sphinx search index (contents of searchindex.js)
+
+    Returns
+    -------
+    filenames : list of str
+        The file names parsed from the search index.
+    objects : dict
+        The objects parsed from the search index.
+    """
+    def _select_block(str_in, start_tag, end_tag):
+        """Select first block delimited by start_tag and end_tag"""
+        start_pos = str_in.find(start_tag)
+        if start_pos < 0:
+            raise ValueError('start_tag not found')
+        depth = 0
+        for pos in range(start_pos, len(str_in)):
+            if str_in[pos] == start_tag:
+                depth += 1
+            elif str_in[pos] == end_tag:
+                depth -= 1
+
+            if depth == 0:
+                break
+        sel = str_in[start_pos + 1:pos]
+        return sel
+
+    def _parse_dict_recursive(dict_str):
+        """Parse a dictionary from the search index"""
+        dict_out = dict()
+        pos_last = 0
+        pos = dict_str.find(':')
+        while pos >= 0:
+            key = dict_str[pos_last:pos]
+            if dict_str[pos + 1] == '[':
+                # value is a list
+                pos_tmp = dict_str.find(']', pos + 1)
+                if pos_tmp < 0:
+                    raise RuntimeError('error when parsing dict')
+                value = dict_str[pos + 2: pos_tmp].split(',')
+                # try to convert elements to int
+                for i in range(len(value)):
+                    try:
+                        value[i] = int(value[i])
+                    except ValueError:
+                        pass
+            elif dict_str[pos + 1] == '{':
+                # value is another dictionary
+                subdict_str = _select_block(dict_str[pos:], '{', '}')
+                value = _parse_dict_recursive(subdict_str)
+                pos_tmp = pos + len(subdict_str)
+            else:
+                raise ValueError('error when parsing dict: unknown elem')
+
+            key = key.strip('"')
+            if len(key) > 0:
+                dict_out[key] = value
+
+            pos_last = dict_str.find(',', pos_tmp)
+            if pos_last < 0:
+                break
+            pos_last += 1
+            pos = dict_str.find(':', pos_last)
+
+        return dict_out
+
+    # parse objects
+    query = 'objects:'
+    pos = searchindex.find(query)
+    if pos < 0:
+        raise ValueError('"objects:" not found in search index')
+
+    sel = _select_block(searchindex[pos:], '{', '}')
+    objects = _parse_dict_recursive(sel)
+
+    # parse filenames
+    query = 'filenames:'
+    pos = searchindex.find(query)
+    if pos < 0:
+        raise ValueError('"filenames:" not found in search index')
+    filenames = searchindex[pos + len(query) + 1:]
+    filenames = filenames[:filenames.find(']')]
+    filenames = [f.strip('"') for f in filenames.split(',')]
+
+    return filenames, objects
+
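+# Usage sketch (the URL is illustrative): fetch and parse the search index of
+# a built Sphinx site.
+#
+#     sindex = get_data('http://example.com/docs/searchindex.js')
+#     filenames, objects = parse_sphinx_searchindex(sindex)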
+
+class SphinxDocLinkResolver(object):
+    """ Resolve documentation links using searchindex.js generated by Sphinx
+
+    Parameters
+    ----------
+    doc_url : str
+        The base URL of the project website.
+    searchindex : str
+        Filename of searchindex, relative to doc_url.
+    extra_modules_test : list of str
+        List of extra module names to test.
+    relative : bool
+        Return relative links (only useful for links to documentation of this
+        package).
+    """
+
+    def __init__(self, doc_url, searchindex='searchindex.js',
+                 extra_modules_test=[], relative=False):
+        self.doc_url = doc_url
+        self.relative = relative
+        self._link_cache = {}
+
+        self.extra_modules_test = extra_modules_test
+        self._page_cache = {}
+        if doc_url.startswith('http://'):
+            if relative:
+                raise ValueError('Relative links are only supported for local '
+                                 'URLs (doc_url cannot start with "http://")')
+            searchindex_url = doc_url + '/' + searchindex
+        else:
+            searchindex_url = os.path.join(doc_url, searchindex)
+
+        # detect if we are using relative links on a Windows system
+        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
+            if not relative:
+                raise ValueError('You have to use relative=True for the local '
+                                 'package on a Windows system.')
+            self._is_windows = True
+        else:
+            self._is_windows = False
+
+        # download and initialize the search index
+        sindex = get_data(searchindex_url)
+        filenames, objects = parse_sphinx_searchindex(sindex)
+
+        self._searchindex = dict(filenames=filenames, objects=objects)
+
+    def _get_link(self, cobj):
+        """Get a valid link, False if not found"""
+
+        fname_idx = None
+        modules_test = [cobj['module_short']] + self.extra_modules_test
+
+        for module in modules_test:
+            full_name = module + '.' + cobj['name']
+            if full_name in self._searchindex['objects']:
+                value = self._searchindex['objects'][full_name]
+                if isinstance(value, dict):
+                    value = value[value.keys()[0]]
+                fname_idx = value[0]
+            elif module in self._searchindex['objects']:
+                value = self._searchindex['objects'][module]
+                if cobj['name'] in value.keys():
+                    fname_idx = value[cobj['name']][0]
+            if fname_idx is not None:
+                break
+
+        if fname_idx is not None:
+            fname = self._searchindex['filenames'][fname_idx] + '.html'
+
+            if self._is_windows:
+                fname = fname.replace('/', '\\')
+                link = os.path.join(self.doc_url, fname)
+            else:
+                link = posixpath.join(self.doc_url, fname)
+
+            if link in self._page_cache:
+                html = self._page_cache[link]
+            else:
+                html = get_data(link)
+                self._page_cache[link] = html
+
+            # test if cobj appears in page
+            url = False
+            for comb_name in ['%s.%s' % (module, cobj['name']) for module
+                              in modules_test]:
+                if html.find(comb_name) >= 0:
+                    url = link + '#' + comb_name
+            link = url
+        else:
+            link = False
+
+        return link
+
+    def resolve(self, cobj, this_url):
+        """Resolve the link to the documentation, returns None if not found
+
+        Parameters
+        ----------
+        cobj : dict
+            Dict with information about the "code object" for which we are
+            resolving a link.
+            cobj['name'] : function or class name (str)
+            cobj['module_short'] : shortened module name (str)
+            cobj['module'] : module name (str)
+        this_url : str
+            URL of the current page. Needed to construct relative URLs
+            (only used if relative=True in constructor).
+
+        Returns
+        -------
+        link : str | None
+            The link (URL) to the documentation.
+        """
+        full_name = cobj['module_short'] + '.' + cobj['name']
+        link = self._link_cache.get(full_name, None)
+        if link is None:
+            # we don't have it cached
+            link = self._get_link(cobj)
+            # cache it for the future
+            self._link_cache[full_name] = link
+
+        if link is False or link is None:
+            # failed to resolve
+            return None
+
+        if self.relative:
+            link = os.path.relpath(link, start=this_url)
+            if self._is_windows:
+                # replace '\' with '/' so it works on the web
+                link = link.replace('\\', '/')
+
+            # for some reason, the relative link goes one directory too high up
+            link = link[3:]
+
+        return link
+
+
+###############################################################################
+rst_template = """
+
+.. _example_%(short_fname)s:
+
+%(docstring)s
+
+**Python source code:** :download:`%(fname)s <%(fname)s>`
+
+.. literalinclude:: %(fname)s
+    :lines: %(end_row)s-
+    """
+
+plot_rst_template = """
+
+.. _example_%(short_fname)s:
+
+%(docstring)s
+
+%(image_list)s
+
+%(stdout)s
+
+**Python source code:** :download:`%(fname)s <%(fname)s>`
+
+.. literalinclude:: %(fname)s
+    :lines: %(end_row)s-
+
+**Total running time of the example:** %(time_elapsed) 4i seconds
+
+.. raw:: html
+
+    <div class="social-button-container">
+        <div class="social-button">
+            <a href="https://twitter.com/share" class="twitter-share-button">Tweet</a>
+        </div>
+        <div class="social-button">
+            <g:plusone annotation="inline" width="120" size="medium"></g:plusone>
+        </div>
+        <div class="social-button">
+            <div id="fb-root"></div>
+            <script>(function(d, s, id) {
+                var js, fjs = d.getElementsByTagName(s)[0];
+                if (d.getElementById(id)) return;
+                js = d.createElement(s); js.id = id;
+                js.src = "//connect.facebook.net/en_US/all.js#xfbml=1";
+                fjs.parentNode.insertBefore(js, fjs);
+                }(document, 'script', 'facebook-jssdk'));
+            </script>
+            <div class="fb-like" data-send="false" data-width="450" data-show-faces="false"></div>
+        </div>
+    </div>
+    """
+
+# The following strings are used when we have several pictures: we use
+# an html div tag that our CSS uses to turn the lists into horizontal
+# lists.
+HLIST_HEADER = """
+.. rst-class:: horizontal
+
+"""
+
+HLIST_IMAGE_TEMPLATE = """
+    *
+
+      .. image:: images/%s
+            :scale: 47
+"""
+
+SINGLE_IMAGE = """
+.. image:: images/%s
+    :align: center
+"""
+
+
+def extract_docstring(filename):
+    """ Extract a module-level docstring, if any
+    """
+    lines = file(filename).readlines()
+    start_row = 0
+    if lines[0].startswith('#!'):
+        lines.pop(0)
+        start_row = 1
+
+    docstring = ''
+    first_par = ''
+    tokens = tokenize.generate_tokens(iter(lines).next)
+    for tok_type, tok_content, _, (erow, _), _ in tokens:
+        tok_type = token.tok_name[tok_type]
+        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
+            continue
+        elif tok_type == 'STRING':
+            docstring = eval(tok_content)
+            # If the docstring is formatted with several paragraphs, extract
+            # the first one:
+            paragraphs = '\n'.join(line.rstrip() for line in
+                                   docstring.split('\n')).split('\n\n')
+            if len(paragraphs) > 0:
+                first_par = paragraphs[0]
+        break
+    return docstring, first_par, erow + 1 + start_row
+
+
+def generate_example_rst(app):
+    """ Generate the list of examples, as well as the contents of
+        examples.
+    """
+    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
+    example_dir = os.path.abspath(app.builder.srcdir + '/../../' + 'examples')
+    try:
+        plot_gallery = eval(app.builder.config.plot_gallery)
+    except TypeError:
+        plot_gallery = bool(app.builder.config.plot_gallery)
+    if not os.path.exists(example_dir):
+        os.makedirs(example_dir)
+    if not os.path.exists(root_dir):
+        os.makedirs(root_dir)
+
+    # we create an index.rst with all examples
+    fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
+    #Note: The sidebar button has been removed from the examples page for now
+    #      due to how it messes up the layout. Will be fixed at a later point
+    fhindex.write("""\
+
+.. raw:: html
+
+
+    <style type="text/css">
+
+    div#sidebarbutton {
+        display: none;
+    }
+
+    .figure {
+        float: left;
+        margin: 10px;
+        width: auto;
+        height: 200px;
+        width: 180px;
+    }
+
+    .figure img {
+        display: inline;
+        }
+
+    .figure .caption {
+        width: 180px;
+        text-align: center !important;
+    }
+    </style>
+
+Examples
+========
+
+.. _examples-index:
+""")
+    # Here we don't use an os.walk, but we recurse only twice: flat is
+    # better than nested.
+    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
+    for dir in sorted(os.listdir(example_dir)):
+        if os.path.isdir(os.path.join(example_dir, dir)):
+            generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
+    fhindex.flush()
+
+
+def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
+    """ Generate the rst file for an example directory.
+    """
+    if not dir == '.':
+        target_dir = os.path.join(root_dir, dir)
+        src_dir = os.path.join(example_dir, dir)
+    else:
+        target_dir = root_dir
+        src_dir = example_dir
+    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
+        print 80 * '_'
+        print ('Example directory %s does not have a README.txt file'
+               % src_dir)
+        print 'Skipping this directory'
+        print 80 * '_'
+        return
+    fhindex.write("""
+
+
+%s
+
+
+""" % file(os.path.join(src_dir, 'README.txt')).read())
+    if not os.path.exists(target_dir):
+        os.makedirs(target_dir)
+
+    def sort_key(a):
+        # put last elements without a plot
+        if not a.startswith('plot') and a.endswith('.py'):
+            return 'zz' + a
+        return a
+    for fname in sorted(os.listdir(src_dir), key=sort_key):
+        if fname.endswith('py'):
+            generate_file_rst(fname, target_dir, src_dir, plot_gallery)
+            thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
+            link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
+            fhindex.write('.. figure:: %s\n' % thumb)
+            if link_name.startswith('._'):
+                link_name = link_name[2:]
+            if dir != '.':
+                fhindex.write('   :target: ./%s/%s.html\n\n' % (dir,
+                                                                fname[:-3]))
+            else:
+                fhindex.write('   :target: ./%s.html\n\n' % link_name[:-3])
+            fhindex.write("""   :ref:`example_%s`
+
+.. toctree::
+   :hidden:
+
+   %s/%s
+
+""" % (link_name, dir, fname[:-3]))
+    fhindex.write("""
+.. raw:: html
+
+    <div style="clear: both"></div>
+    """)  # clear at the end of the section
+
+# modules for which we embed links into example code
+DOCMODULES = ['mne', 'matplotlib', 'numpy', 'scipy', 'mayavi']
+
+
+def make_thumbnail(in_fname, out_fname, width, height):
+    """Make a thumbnail with the same aspect ratio centered in an
+       image with a given width and height
+    """
+    img = Image.open(in_fname)
+    width_in, height_in = img.size
+    scale_w = width / float(width_in)
+    scale_h = height / float(height_in)
+
+    if height_in * scale_w <= height:
+        scale = scale_w
+    else:
+        scale = scale_h
+
+    width_sc = int(round(scale * width_in))
+    height_sc = int(round(scale * height_in))
+
+    # resize the image
+    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
+
+    # insert centered
+    thumb = Image.new('RGB', (width, height), (255, 255, 255))
+    pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
+    thumb.paste(img, pos_insert)
+
+    thumb.save(out_fname)
+
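+# Usage sketch (paths are hypothetical): shrink a generated figure into a
+# centered 180x120 thumbnail on a white background.
+#
+#     make_thumbnail('images/plot_foo_1.png', 'images/thumb/plot_foo.png',
+#                    180, 120)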
+
+def get_short_module_name(module_name, obj_name):
+    """ Get the shortest possible module name """
+    parts = module_name.split('.')
+    short_name = module_name
+    for i in range(len(parts) - 1, 0, -1):
+        short_name = '.'.join(parts[:i])
+        try:
+            exec('from %s import %s' % (short_name, obj_name))
+        except ImportError:
+            # get the last working module name
+            short_name = '.'.join(parts[:(i + 1)])
+            break
+    return short_name
+
+
+def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
+    """ Generate the rst file for a given example.
+    """
+    base_image_name = os.path.splitext(fname)[0]
+    image_fname = '%s_%%s.png' % base_image_name
+
+    this_template = rst_template
+    last_dir = os.path.split(src_dir)[-1]
+    # to avoid leading . in file names, and wrong names in links
+    if last_dir == '.' or last_dir == 'examples':
+        last_dir = ''
+    else:
+        last_dir += '_'
+    short_fname = last_dir + fname
+    src_file = os.path.join(src_dir, fname)
+    example_file = os.path.join(target_dir, fname)
+    shutil.copyfile(src_file, example_file)
+
+    # The following is a list containing all the figure names
+    figure_list = []
+
+    image_dir = os.path.join(target_dir, 'images')
+    thumb_dir = os.path.join(image_dir, 'thumb')
+    if not os.path.exists(image_dir):
+        os.makedirs(image_dir)
+    if not os.path.exists(thumb_dir):
+        os.makedirs(thumb_dir)
+    image_path = os.path.join(image_dir, image_fname)
+    stdout_path = os.path.join(image_dir,
+                               'stdout_%s.txt' % base_image_name)
+    time_path = os.path.join(image_dir,
+                             'time_%s.txt' % base_image_name)
+    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
+    time_elapsed = 0
+    if plot_gallery:
+        # generate the plot as png image if file name
+        # starts with plot and if it is more recent than an
+        # existing image.
+        first_image_file = image_path % 1
+        if os.path.exists(stdout_path):
+            stdout = open(stdout_path).read()
+        else:
+            stdout = ''
+        if os.path.exists(time_path):
+            time_elapsed = float(open(time_path).read())
+
+        if (not os.path.exists(first_image_file) or
+                os.stat(first_image_file).st_mtime
+                <= os.stat(src_file).st_mtime):
+            # We need to execute the code
+            print 'plotting %s' % fname
+            t0 = time()
+            import matplotlib.pyplot as plt
+            plt.close('all')
+
+            try:
+                from mayavi import mlab
+            except Exception, e:
+                from enthought.mayavi import mlab
+            mlab.close(all=True)
+
+            cwd = os.getcwd()
+            try:
+                # First cd into the original example dir, so that any files
+                # created by the example are created in this directory
+                orig_stdout = sys.stdout
+                os.chdir(os.path.dirname(src_file))
+                my_buffer = StringIO()
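+                # Tee (defined earlier in this file) echoes everything to
+                # the real stdout while keeping a copy in my_buffer for
+                # the generated rst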
+                my_stdout = Tee(sys.stdout, my_buffer)
+                sys.stdout = my_stdout
+                my_globals = {'pl': plt}
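+                # seed the globals so the executed example can refer to
+                # ``pl`` as an alias for matplotlib.pyplot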
+                execfile(os.path.basename(src_file), my_globals)
+                time_elapsed = time() - t0
+                sys.stdout = orig_stdout
+                my_stdout = my_buffer.getvalue()
+
+                # get variables so we can later add links to the documentation
+                example_code_obj = {}
+                for var_name, var in my_globals.iteritems():
+                    if not hasattr(var, '__module__'):
+                        continue
+                    if not isinstance(var.__module__, basestring):
+                        continue
+                    if var.__module__.split('.')[0] not in DOCMODULES:
+                        continue
+
+                    # get the type as a string with other things stripped
+                    tstr = str(type(var))
+                    tstr = (tstr[tstr.find('\'')
+                            + 1:tstr.rfind('\'')].split('.')[-1])
+                    # get shortened module name
+                    module_short = get_short_module_name(var.__module__,
+                                                         tstr)
+                    cobj = {'name': tstr, 'module': var.__module__,
+                            'module_short': module_short,
+                            'obj_type': 'object'}
+                    example_code_obj[var_name] = cobj
+
+                # find functions so we can later add links to the documentation
+                funregex = re.compile('[\w.]+\(')
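+                # matches dotted call sites such as 'mne.read_events(';
+                # the trailing '(' is stripped from each match below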
+                with open(src_file, 'rt') as fid:
+                    for line in fid.readlines():
+                        if line.startswith('#'):
+                            continue
+                        for match in funregex.findall(line):
+                            fun_name = match[:-1]
+                            try:
+                                exec('this_fun = %s' % fun_name, my_globals)
+                            except Exception as err:
+                                print 'extracting function failed'
+                                print err
+                                continue
+                            this_fun = my_globals['this_fun']
+                            if not callable(this_fun):
+                                continue
+                            if not hasattr(this_fun, '__module__'):
+                                continue
+                            if not isinstance(this_fun.__module__, basestring):
+                                continue
+                            if (this_fun.__module__.split('.')[0]
+                                    not in DOCMODULES):
+                                continue
+
+                            # get shortened module name
+                            fun_name_short = fun_name.split('.')[-1]
+                            module_short = get_short_module_name(
+                                this_fun.__module__, fun_name_short)
+                            cobj = {'name': fun_name_short,
+                                    'module': this_fun.__module__,
+                                    'module_short': module_short,
+                                    'obj_type': 'function'}
+                            example_code_obj[fun_name] = cobj
+
+                if len(example_code_obj) > 0:
+                    # save the dictionary, so we can later add hyperlinks
+                    codeobj_fname = example_file[:-3] + '_codeobj.pickle'
+                    with open(codeobj_fname, 'wb') as fid:
+                        cPickle.dump(example_code_obj, fid,
+                                     cPickle.HIGHEST_PROTOCOL)
+                if '__doc__' in my_globals:
+                    # The __doc__ is often printed in the example;
+                    # we don't wish to echo it
+                    my_stdout = my_stdout.replace(my_globals['__doc__'],
+                                                  '')
+                my_stdout = my_stdout.strip()
+                if my_stdout:
+                    output_lines = my_stdout.split('\n')
+                    if len(output_lines) > MAX_NB_LINES_STDOUT:
+                        output_lines = output_lines[:MAX_NB_LINES_STDOUT]
+                        output_lines.append('...')
+                    stdout = ('**Script output**::\n\n  %s\n\n'
+                              % ('\n  '.join(output_lines)))
+                open(stdout_path, 'w').write(stdout)
+                open(time_path, 'w').write('%f' % time_elapsed)
+                os.chdir(cwd)
+
+                # In order to save every figure we have two solutions:
+                # * iterate from 1 to infinity and call plt.fignum_exists(n)
+                #   (this requires the figures to be numbered
+                #    incrementally: 1, 2, 3 and not 1, 2, 5)
+                # * iterate over [fig_mngr.num for fig_mngr in
+                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
+                last_fig_num = 0
+                for fig_num in (fig_mngr.num for fig_mngr in
+                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
+                    # make fig_num the current figure, since only the
+                    # current figure can be saved
+                    plt.figure(fig_num)
+                    # hack to keep black bg
+                    facecolor = plt.gcf().get_facecolor()
+                    if facecolor == (0.0, 0.0, 0.0, 1.0):
+                        plt.savefig(image_path % fig_num, facecolor='black')
+                    else:
+                        plt.savefig(image_path % fig_num)
+                    figure_list.append(image_fname % fig_num)
+                    last_fig_num = fig_num
+
+                e = mlab.get_engine()
+                for scene in e.scenes:
+                    last_fig_num += 1
+                    mlab.savefig(image_path % last_fig_num)
+                    figure_list.append(image_fname % last_fig_num)
+                    mlab.close(scene)
+
+            except:
+                print 80 * '_'
+                print '%s failed to execute:' % fname
+                traceback.print_exc()
+                print 80 * '_'
+            finally:
+                os.chdir(cwd)
+                sys.stdout = orig_stdout
+
+            print " - time elapsed : %.2g sec" % time_elapsed
+        else:
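+            # images are newer than the source: reuse what is on disk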
+            figure_list = [f[len(image_dir):]
+                           for f in glob.glob(image_path % '[1-9]')]
+
+        # generate thumb file
+        this_template = plot_rst_template
+        if os.path.exists(first_image_file):
+            make_thumbnail(first_image_file, thumb_file, 180, 120)
+
+    if not os.path.exists(thumb_file):
+        # use the default thumbnail
+        make_thumbnail('source/_images/mne_helmet.png', thumb_file, 180, 120)
+
+    docstring, short_desc, end_row = extract_docstring(example_file)
+
+    # Depending on whether we have one or more figures, we're using a
+    # horizontal list or a single rst call to 'image'.
+    if len(figure_list) == 1:
+        figure_name = figure_list[0]
+        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
+    else:
+        image_list = HLIST_HEADER
+        for figure_name in figure_list:
+            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
+
+    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
+    f.write(this_template % locals())
+    f.flush()
+
+
+def embed_code_links(app, exception):
+    """Embed hyperlinks to documentation into example code"""
+    if exception is not None:
+        return
+    print 'Embedding documentation hyperlinks in examples..'
+
+    # Add resolvers for the packages for which we want to show links
+    doc_resolvers = {}
+    doc_resolvers['mne'] = SphinxDocLinkResolver(app.builder.outdir,
+                                                 relative=True)
+
+    doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
+        'http://matplotlib.org')
+
+    doc_resolvers['numpy'] = SphinxDocLinkResolver(
+        'http://docs.scipy.org/doc/numpy-1.6.0')
+
+    doc_resolvers['scipy'] = SphinxDocLinkResolver(
+        'http://docs.scipy.org/doc/scipy-0.11.0/reference')
+
+    doc_resolvers['mayavi'] = SphinxDocLinkResolver(
+        'http://docs.enthought.com/mayavi/mayavi',
+        extra_modules_test=['mayavi.mlab'])
+
+    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
+    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
+                                                    'auto_examples'))
+    # patterns for replacement
+    link_pattern = '<a href="%s">%s</a>'
+    orig_pattern = '<span class="n">%s</span>'
+    period = '<span class="o">.</span>'
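+    # pygments renders a dotted name such as mne.fiff as
+    # <span class="n">mne</span><span class="o">.</span><span class="n">fiff</span>,
+    # so the replacement strings below are assembled from these fragments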
+
+    for dirpath, _, filenames in os.walk(html_example_dir):
+        for fname in filenames:
+            print '\tprocessing: %s' % fname
+            full_fname = os.path.join(html_example_dir, dirpath, fname)
+            subpath = dirpath[len(html_example_dir) + 1:]
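+            # map 'foo.html' back to the 'foo_codeobj.pickle' written
+            # while generating the example rst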
+            pickle_fname = os.path.join(example_dir, subpath,
+                                        fname[:-5] + '_codeobj.pickle')
+
+            if os.path.exists(pickle_fname):
+                # we have a pickle file with the objects to embed links for
+                with open(pickle_fname, 'rb') as fid:
+                    example_code_obj = cPickle.load(fid)
+                str_repl = {}
+                # generate replacement strings with the links
+                for name, cobj in example_code_obj.iteritems():
+                    this_module = cobj['module'].split('.')[0]
+
+                    if this_module not in doc_resolvers:
+                        continue
+
+                    link = doc_resolvers[this_module].resolve(cobj,
+                                                              full_fname)
+                    if link is not None:
+                        parts = name.split('.')
+                        name_html = orig_pattern % parts[0]
+                        for part in parts[1:]:
+                            name_html += period + orig_pattern % part
+                        str_repl[name_html] = link_pattern % (link, name_html)
+                # do the replacement in the html file
+                if len(str_repl) > 0:
+                    with open(full_fname, 'rt') as fid:
+                        lines_in = fid.readlines()
+                    with open(full_fname, 'wt') as fid:
+                        for line in lines_in:
+                            for name, link in str_repl.iteritems():
+                                line = line.replace(name, link)
+                            fid.write(line)
+
+    print '[done]'
+
+
+def setup(app):
+    app.connect('builder-inited', generate_example_rst)
+    app.add_config_value('plot_gallery', True, 'html')
+
+    # embed links after build is finished
+    app.connect('build-finished', embed_code_links)
+
+    # Sphinx hack: sphinx copies generated images to the build directory
+    #  each time the docs are made.  If the desired image name already
+    #  exists, it appends a digit to prevent overwrites.  The problem is,
+    #  the directory is never cleared.  This means that each time you build
+    #  the docs, the number of images in the directory grows.
+    #
+    # This question has been asked on the sphinx development list, but there
+    #  was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
+    #
+    # The following is a hack that prevents this behavior by clearing the
+    #  image build directory each time the docs are built.  If sphinx
+    #  changes their layout between versions, this will not work (though
+    #  it should probably not cause a crash).  Tested successfully
+    #  on Sphinx 1.0.7
+    build_image_dir = 'build/html/_images'
+    if os.path.exists(build_image_dir):
+        filelist = os.listdir(build_image_dir)
+        for filename in filelist:
+            if filename.endswith('png'):
+                os.remove(os.path.join(build_image_dir, filename))
diff --git a/doc/sphinxext/ipython_console_highlighting.py b/doc/sphinxext/ipython_console_highlighting.py
new file mode 100644
index 0000000..7d024b5
--- /dev/null
+++ b/doc/sphinxext/ipython_console_highlighting.py
@@ -0,0 +1,103 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""reST directive for syntax-highlighting ipython interactive sessions.
+"""
+
+#-----------------------------------------------------------------------------
+# Needed modules
+
+# Standard library
+import re
+
+# Third party
+from pygments.lexer import Lexer, do_insertions
+from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
+                                   PythonTracebackLexer)
+from pygments.token import Comment, Generic
+
+from sphinx import highlighting
+
+
+#-----------------------------------------------------------------------------
+# Global constants
+line_re = re.compile('.*?\n')
+
+#-----------------------------------------------------------------------------
+# Code begins - classes and functions
+
+class IPythonConsoleLexer(Lexer):
+    """
+    For IPython console output or doctests, such as:
+
+    .. sourcecode:: ipython
+
+      In [1]: a = 'foo'
+
+      In [2]: a
+      Out[2]: 'foo'
+
+      In [3]: print a
+      foo
+
+      In [4]: 1 / 0
+
+    Notes:
+
+      - Tracebacks are not currently supported.
+
+      - It assumes the default IPython prompts, not customized ones.
+    """
+
+    name = 'IPython console session'
+    aliases = ['ipython']
+    mimetypes = ['text/x-ipython-console']
+    input_prompt = re.compile("(In \[[0-9]+\]: )|(   \.\.\.+:)")
+    output_prompt = re.compile("(Out\[[0-9]+\]: )|(   \.\.\.+:)")
+    continue_prompt = re.compile("   \.\.\.+:")
+    tb_start = re.compile("\-+")
+
+    def get_tokens_unprocessed(self, text):
+        pylexer = PythonLexer(**self.options)
+        tblexer = PythonTracebackLexer(**self.options)
+
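+        # accumulate console input in curcode and record prompt tokens
+        # as insertions; flush through the Python lexer whenever a plain
+        # output line is reached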
+        curcode = ''
+        insertions = []
+        for match in line_re.finditer(text):
+            line = match.group()
+            input_prompt = self.input_prompt.match(line)
+            continue_prompt = self.continue_prompt.match(line.rstrip())
+            output_prompt = self.output_prompt.match(line)
+            if line.startswith("#"):
+                insertions.append((len(curcode),
+                                   [(0, Comment, line)]))
+            elif input_prompt is not None:
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, input_prompt.group())]))
+                curcode += line[input_prompt.end():]
+            elif continue_prompt is not None:
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, continue_prompt.group())]))
+                curcode += line[continue_prompt.end():]
+            elif output_prompt is not None:
+                insertions.append((len(curcode),
+                                   [(0, Generic.Output, output_prompt.group())]))
+                curcode += line[output_prompt.end():]
+            else:
+                if curcode:
+                    for item in do_insertions(insertions,
+                                              pylexer.get_tokens_unprocessed(curcode)):
+                        yield item
+                    curcode = ''
+                    insertions = []
+                yield match.start(), Generic.Output, line
+        if curcode:
+            for item in do_insertions(insertions,
+                                      pylexer.get_tokens_unprocessed(curcode)):
+                yield item
+
+#-----------------------------------------------------------------------------
+# Register the extension as a valid pygments lexer
+highlighting.lexers['ipython'] = IPythonConsoleLexer()
diff --git a/doc/sphinxext/numpy_ext/__init__.py b/doc/sphinxext/numpy_ext/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/doc/sphinxext/numpy_ext/docscrape.py b/doc/sphinxext/numpy_ext/docscrape.py
new file mode 100644
index 0000000..ad5998c
--- /dev/null
+++ b/doc/sphinxext/numpy_ext/docscrape.py
@@ -0,0 +1,500 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+    """A line-based string reader.
+
+    """
+    def __init__(self, data):
+        """
+        Parameters
+        ----------
+        data : str
+           String with lines separated by '\n'.
+
+        """
+        if isinstance(data,list):
+            self._str = data
+        else:
+            self._str = data.split('\n') # store string as list of lines
+
+        self.reset()
+
+    def __getitem__(self, n):
+        return self._str[n]
+
+    def reset(self):
+        self._l = 0 # current line nr
+
+    def read(self):
+        if not self.eof():
+            out = self[self._l]
+            self._l += 1
+            return out
+        else:
+            return ''
+
+    def seek_next_non_empty_line(self):
+        for l in self[self._l:]:
+            if l.strip():
+                break
+            else:
+                self._l += 1
+
+    def eof(self):
+        return self._l >= len(self._str)
+
+    def read_to_condition(self, condition_func):
+        start = self._l
+        for line in self[start:]:
+            if condition_func(line):
+                return self[start:self._l]
+            self._l += 1
+            if self.eof():
+                return self[start:self._l+1]
+        return []
+
+    def read_to_next_empty_line(self):
+        self.seek_next_non_empty_line()
+        def is_empty(line):
+            return not line.strip()
+        return self.read_to_condition(is_empty)
+
+    def read_to_next_unindented_line(self):
+        def is_unindented(line):
+            return (line.strip() and (len(line.lstrip()) == len(line)))
+        return self.read_to_condition(is_unindented)
+
+    def peek(self,n=0):
+        if self._l + n < len(self._str):
+            return self[self._l + n]
+        else:
+            return ''
+
+    def is_empty(self):
+        return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+    def __init__(self, docstring, config={}):
+        docstring = textwrap.dedent(docstring).split('\n')
+
+        self._doc = Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': [''],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Attributes': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'Warnings': [],
+            'References': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def __getitem__(self,key):
+        return self._parsed_data[key]
+
+    def __setitem__(self,key,val):
+        if not self._parsed_data.has_key(key):
+            warn("Unknown section %s" % key)
+        else:
+            self._parsed_data[key] = val
+
+    def _is_at_section(self):
+        self._doc.seek_next_non_empty_line()
+
+        if self._doc.eof():
+            return False
+
+        l1 = self._doc.peek().strip()  # e.g. Parameters
+
+        if l1.startswith('.. index::'):
+            return True
+
+        l2 = self._doc.peek(1).strip() #    ---------- or ==========
+        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+    def _strip(self,doc):
+        i = 0
+        j = 0
+        for i,line in enumerate(doc):
+            if line.strip(): break
+
+        for j,line in enumerate(doc[::-1]):
+            if line.strip(): break
+
+        return doc[i:len(doc)-j]
+
+    def _read_to_next_section(self):
+        section = self._doc.read_to_next_empty_line()
+
+        while not self._is_at_section() and not self._doc.eof():
+            if not self._doc.peek(-1).strip(): # previous line was empty
+                section += ['']
+
+            section += self._doc.read_to_next_empty_line()
+
+        return section
+
+    def _read_sections(self):
+        while not self._doc.eof():
+            data = self._read_to_next_section()
+            name = data[0].strip()
+
+            if name.startswith('..'): # index section
+                yield name, data[1:]
+            elif len(data) < 2:
+                yield StopIteration
+            else:
+                yield name, self._strip(data[2:])
+
+    def _parse_param_list(self,content):
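+        # e.g. the content lines "x : int" / "    Description of x."
+        # parse to the tuple ('x', 'int', ['Description of x.'])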
+        r = Reader(content)
+        params = []
+        while not r.eof():
+            header = r.read().strip()
+            if ' : ' in header:
+                arg_name, arg_type = header.split(' : ')[:2]
+            else:
+                arg_name, arg_type = header, ''
+
+            desc = r.read_to_next_unindented_line()
+            desc = dedent_lines(desc)
+
+            params.append((arg_name,arg_type,desc))
+
+        return params
+
+
+    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+    def _parse_see_also(self, content):
+        """
+        func_name : Descriptive text
+            continued text
+        another_func_name : Descriptive text
+        func_name1, func_name2, :meth:`func_name`, func_name3
+
+        """
+        items = []
+
+        def parse_item_name(text):
+            """Match ':role:`name`' or 'name'"""
+            m = self._name_rgx.match(text)
+            if m:
+                g = m.groups()
+                if g[1] is None:
+                    return g[3], None
+                else:
+                    return g[2], g[1]
+            raise ValueError("%s is not an item name" % text)
+
+        def push_item(name, rest):
+            if not name:
+                return
+            name, role = parse_item_name(name)
+            items.append((name, list(rest), role))
+            del rest[:]
+
+        current_func = None
+        rest = []
+
+        for line in content:
+            if not line.strip(): continue
+
+            m = self._name_rgx.match(line)
+            if m and line[m.end():].strip().startswith(':'):
+                push_item(current_func, rest)
+                current_func, line = line[:m.end()], line[m.end():]
+                rest = [line.split(':', 1)[1].strip()]
+                if not rest[0]:
+                    rest = []
+            elif not line.startswith(' '):
+                push_item(current_func, rest)
+                current_func = None
+                if ',' in line:
+                    for func in line.split(','):
+                        push_item(func, [])
+                elif line.strip():
+                    current_func = line
+            elif current_func is not None:
+                rest.append(line.strip())
+        push_item(current_func, rest)
+        return items
+
+    def _parse_index(self, section, content):
+        """
+        .. index: default
+           :refguide: something, else, and more
+
+        """
+        def strip_each_in(lst):
+            return [s.strip() for s in lst]
+
+        out = {}
+        section = section.split('::')
+        if len(section) > 1:
+            out['default'] = strip_each_in(section[1].split(','))[0]
+        for line in content:
+            line = line.split(':')
+            if len(line) > 2:
+                out[line[1]] = strip_each_in(line[2].split(','))
+        return out
+
+    def _parse_summary(self):
+        """Grab signature (if given) and summary"""
+        if self._is_at_section():
+            return
+
+        summary = self._doc.read_to_next_empty_line()
+        summary_str = " ".join([s.strip() for s in summary]).strip()
+        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+            self['Signature'] = summary_str
+            if not self._is_at_section():
+                self['Summary'] = self._doc.read_to_next_empty_line()
+        else:
+            self['Summary'] = summary
+
+        if not self._is_at_section():
+            self['Extended Summary'] = self._read_to_next_section()
+
+    def _parse(self):
+        self._doc.reset()
+        self._parse_summary()
+
+        for (section,content) in self._read_sections():
+            if not section.startswith('..'):
+                section = ' '.join([s.capitalize() for s in section.split(' ')])
+            if section in ('Parameters', 'Attributes', 'Methods',
+                           'Returns', 'Raises', 'Warns'):
+                self[section] = self._parse_param_list(content)
+            elif section.startswith('.. index::'):
+                self['index'] = self._parse_index(section, content)
+            elif section == 'See Also':
+                self['See Also'] = self._parse_see_also(content)
+            else:
+                self[section] = content
+
+    # string conversion routines
+
+    def _str_header(self, name, symbol='-'):
+        return [name, len(name)*symbol]
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        if self['Signature']:
+            return [self['Signature'].replace('*','\*')] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        if self['Summary']:
+            return self['Summary'] + ['']
+        else:
+            return []
+
+    def _str_extended_summary(self):
+        if self['Extended Summary']:
+            return self['Extended Summary'] + ['']
+        else:
+            return []
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            for param,param_type,desc in self[name]:
+                out += ['%s : %s' % (param, param_type)]
+                out += self._str_indent(desc)
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += self[name]
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        if not self['See Also']: return []
+        out = []
+        out += self._str_header("See Also")
+        last_had_desc = True
+        for func, desc, role in self['See Also']:
+            if role:
+                link = ':%s:`%s`' % (role, func)
+            elif func_role:
+                link = ':%s:`%s`' % (func_role, func)
+            else:
+                link = "`%s`_" % func
+            if desc or last_had_desc:
+                out += ['']
+                out += [link]
+            else:
+                out[-1] += ", %s" % link
+            if desc:
+                out += self._str_indent([' '.join(desc)])
+                last_had_desc = True
+            else:
+                last_had_desc = False
+        out += ['']
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            out += ['   :%s: %s' % (section, ', '.join(references))]
+        return out
+
+    def __str__(self, func_role=''):
+        out = []
+        out += self._str_signature()
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters','Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_section('Warnings')
+        out += self._str_see_also(func_role)
+        for s in ('Notes','References','Examples'):
+            out += self._str_section(s)
+        for param_list in ('Attributes', 'Methods'):
+            out += self._str_param_list(param_list)
+        out += self._str_index()
+        return '\n'.join(out)
+
+
+def indent(str,indent=4):
+    indent_str = ' '*indent
+    if str is None:
+        return indent_str
+    lines = str.split('\n')
+    return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+    """Deindent a list of lines maximally"""
+    return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+    return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+    def __init__(self, func, role='func', doc=None, config={}):
+        self._f = func
+        self._role = role # e.g. "func" or "meth"
+
+        if doc is None:
+            if func is None:
+                raise ValueError("No function or docstring given")
+            doc = inspect.getdoc(func) or ''
+        NumpyDocString.__init__(self, doc)
+
+        if not self['Signature'] and func is not None:
+            func, func_name = self.get_func()
+            try:
+                # try to read signature
+                argspec = inspect.getargspec(func)
+                argspec = inspect.formatargspec(*argspec)
+                argspec = argspec.replace('*','\*')
+                signature = '%s%s' % (func_name, argspec)
+            except TypeError, e:
+                signature = '%s()' % func_name
+            self['Signature'] = signature
+
+    def get_func(self):
+        func_name = getattr(self._f, '__name__', self.__class__.__name__)
+        if inspect.isclass(self._f):
+            func = getattr(self._f, '__call__', self._f.__init__)
+        else:
+            func = self._f
+        return func, func_name
+
+    def __str__(self):
+        out = ''
+
+        func, func_name = self.get_func()
+        signature = self['Signature'].replace('*', '\*')
+
+        roles = {'func': 'function',
+                 'meth': 'method'}
+
+        if self._role:
+            if not roles.has_key(self._role):
+                print "Warning: invalid role %s" % self._role
+            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
+                                             func_name)
+
+        out += super(FunctionDoc, self).__str__(func_role=self._role)
+        return out
+
+
+class ClassDoc(NumpyDocString):
+    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
+                 config={}):
+        if not inspect.isclass(cls) and cls is not None:
+            raise ValueError("Expected a class or None, but got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+
+        if doc is None:
+            if cls is None:
+                raise ValueError("No class or documentation string given")
+            doc = pydoc.getdoc(cls)
+
+        NumpyDocString.__init__(self, doc)
+
+        if config.get('show_class_members', True):
+            if not self['Methods']:
+                self['Methods'] = [(name, '', '')
+                                   for name in sorted(self.methods)]
+            if not self['Attributes']:
+                self['Attributes'] = [(name, '', '')
+                                      for name in sorted(self.properties)]
+
+    @property
+    def methods(self):
+        if self._cls is None:
+            return []
+        return [name for name,func in inspect.getmembers(self._cls)
+                if not name.startswith('_') and callable(func)]
+
+    @property
+    def properties(self):
+        if self._cls is None:
+            return []
+        return [name for name,func in inspect.getmembers(self._cls)
+                if not name.startswith('_') and func is None]
diff --git a/doc/sphinxext/numpy_ext/docscrape_sphinx.py b/doc/sphinxext/numpy_ext/docscrape_sphinx.py
new file mode 100644
index 0000000..9f4350d
--- /dev/null
+++ b/doc/sphinxext/numpy_ext/docscrape_sphinx.py
@@ -0,0 +1,231 @@
+import re, inspect, textwrap, pydoc
+import sphinx
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+    def __init__(self, docstring, config={}):
+        self.use_plots = config.get('use_plots', False)
+        NumpyDocString.__init__(self, docstring, config=config)
+
+    # string conversion routines
+    def _str_header(self, name, symbol='`'):
+        return ['.. rubric:: ' + name, '']
+
+    def _str_field_list(self, name):
+        return [':' + name + ':']
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
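+        # NOTE: the early return below leaves the rest of this method
+        # unreachable; the signature is instead produced through the
+        # autodoc hooks registered in numpydoc.py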
+        return ['']
+        if self['Signature']:
+            return ['``%s``' % self['Signature']] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Extended Summary'] + ['']
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_field_list(name)
+            out += ['']
+            for param,param_type,desc in self[name]:
+                out += self._str_indent(['**%s** : %s' % (param.strip(),
+                                                          param_type)])
+                out += ['']
+                out += self._str_indent(desc,8)
+                out += ['']
+        return out
+
+    @property
+    def _obj(self):
+        if hasattr(self, '_cls'):
+            return self._cls
+        elif hasattr(self, '_f'):
+            return self._f
+        return None
+
+    def _str_member_list(self, name):
+        """
+        Generate a member listing: an autosummary:: table where possible,
+        and a plain table where not.
+
+        """
+        out = []
+        if self[name]:
+            out += ['.. rubric:: %s' % name, '']
+            prefix = getattr(self, '_name', '')
+
+            if prefix:
+                prefix = '~%s.' % prefix
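+                # the '~' prefix makes sphinx display only the final
+                # component of the dotted name in the autosummary table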
+
+            autosum = []
+            others = []
+            for param, param_type, desc in self[name]:
+                param = param.strip()
+                if not self._obj or hasattr(self._obj, param):
+                    autosum += ["   %s%s" % (prefix, param)]
+                else:
+                    others.append((param, param_type, desc))
+
+            if autosum:
+                out += ['.. autosummary::', '   :toctree:', '']
+                out += autosum
+
+            if others:
+                maxlen_0 = max([len(x[0]) for x in others])
+                maxlen_1 = max([len(x[1]) for x in others])
+                hdr = "="*maxlen_0 + "  " + "="*maxlen_1 + "  " + "="*10
+                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
+                n_indent = maxlen_0 + maxlen_1 + 4
+                out += [hdr]
+                for param, param_type, desc in others:
+                    out += [fmt % (param.strip(), param_type)]
+                    out += self._str_indent(desc, n_indent)
+                out += [hdr]
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += ['']
+            content = textwrap.dedent("\n".join(self[name])).split("\n")
+            out += content
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        out = []
+        if self['See Also']:
+            see_also = super(SphinxDocString, self)._str_see_also(func_role)
+            out = ['.. seealso::', '']
+            out += self._str_indent(see_also[2:])
+        return out
+
+    def _str_warnings(self):
+        out = []
+        if self['Warnings']:
+            out = ['.. warning::', '']
+            out += self._str_indent(self['Warnings'])
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        if len(idx) == 0:
+            return out
+
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            elif section == 'refguide':
+                out += ['   single: %s' % (', '.join(references))]
+            else:
+                out += ['   %s: %s' % (section, ','.join(references))]
+        return out
+
+    def _str_references(self):
+        out = []
+        if self['References']:
+            out += self._str_header('References')
+            if isinstance(self['References'], str):
+                self['References'] = [self['References']]
+            out.extend(self['References'])
+            out += ['']
+            # Latex collects all references to a separate bibliography,
+            # so we need to insert links to it
+            if sphinx.__version__ >= "0.6":
+                out += ['.. only:: latex','']
+            else:
+                out += ['.. latexonly::','']
+            items = []
+            for line in self['References']:
+                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
+                if m:
+                    items.append(m.group(1))
+            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
+        return out
+
+    def _str_examples(self):
+        examples_str = "\n".join(self['Examples'])
+
+        if (self.use_plots and 'import matplotlib' in examples_str
+                and 'plot::' not in examples_str):
+            out = []
+            out += self._str_header('Examples')
+            out += ['.. plot::', '']
+            out += self._str_indent(self['Examples'])
+            out += ['']
+            return out
+        else:
+            return self._str_section('Examples')
+
+    def __str__(self, indent=0, func_role="obj"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Returns', 'Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_warnings()
+        out += self._str_see_also(func_role)
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_examples()
+        for param_list in ('Attributes', 'Methods'):
+            out += self._str_member_list(param_list)
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+    def __init__(self, obj, doc=None, config={}):
+        self.use_plots = config.get('use_plots', False)
+        FunctionDoc.__init__(self, obj, doc=doc, config=config)
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+    def __init__(self, obj, doc=None, func_doc=None, config={}):
+        self.use_plots = config.get('use_plots', False)
+        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
+
+class SphinxObjDoc(SphinxDocString):
+    def __init__(self, obj, doc=None, config={}):
+        self._f = obj
+        SphinxDocString.__init__(self, doc, config=config)
+
+def get_doc_object(obj, what=None, doc=None, config={}):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
+                              config=config)
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, doc=doc, config=config)
+    else:
+        if doc is None:
+            doc = pydoc.getdoc(obj)
+        return SphinxObjDoc(obj, doc, config=config)
diff --git a/doc/sphinxext/numpy_ext/numpydoc.py b/doc/sphinxext/numpy_ext/numpydoc.py
new file mode 100644
index 0000000..630a432
--- /dev/null
+++ b/doc/sphinxext/numpy_ext/numpydoc.py
@@ -0,0 +1,168 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
+
+"""
+
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+from sphinx.util.compat import Directive
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+                      reference_offset=[0]):
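+    # NB: the mutable default reference_offset persists across calls,
+    # acting as a running counter so renumbered references stay unique
+    # across the whole build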
+
+    cfg = dict(use_plots=app.config.numpydoc_use_plots,
+               show_class_members=app.config.numpydoc_show_class_members)
+
+    if what == 'module':
+        # Strip top title
+        title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+                              re.I|re.S)
+        lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
+    else:
+        doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
+        lines[:] = unicode(doc).split(u"\n")
+
+    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+           obj.__name__:
+        if hasattr(obj, '__module__'):
+            v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
+        else:
+            v = dict(full_name=obj.__name__)
+        lines += [u'', u'.. htmlonly::', '']
+        lines += [u'    %s' % x for x in
+                  (app.config.numpydoc_edit_link % v).split("\n")]
+
+    # replace reference numbers so that there are no duplicates
+    references = []
+    for line in lines:
+        line = line.strip()
+        m = re.match(ur'^.. \[([a-z0-9_.-]+)\]', line, re.I)
+        if m:
+            references.append(m.group(1))
+
+    # start renaming from the longest string, to avoid overwriting parts
+    references.sort(key=lambda x: -len(x))
+    if references:
+        for i, line in enumerate(lines):
+            for r in references:
+                if re.match(ur'^\d+$', r):
+                    new_r = u"R%d" % (reference_offset[0] + int(r))
+                else:
+                    new_r = u"%s%d" % (r, reference_offset[0])
+                lines[i] = lines[i].replace(u'[%s]_' % r,
+                                            u'[%s]_' % new_r)
+                lines[i] = lines[i].replace(u'.. [%s]' % r,
+                                            u'.. [%s]' % new_r)
+
+    reference_offset[0] += len(references)
+
+def mangle_signature(app, what, name, obj, options, sig, retann):
+    # Do not try to inspect classes that don't define `__init__`
+    if (inspect.isclass(obj) and
+        (not hasattr(obj, '__init__') or
+        'initializes x; see ' in pydoc.getdoc(obj.__init__))):
+        return '', ''
+
+    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
+    if not hasattr(obj, '__doc__'): return
+
+    doc = SphinxDocString(pydoc.getdoc(obj))
+    if doc['Signature']:
+        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
+        return sig, u''
+
+def setup(app, get_doc_object_=get_doc_object):
+    global get_doc_object
+    get_doc_object = get_doc_object_
+
+    app.connect('autodoc-process-docstring', mangle_docstrings)
+    app.connect('autodoc-process-signature', mangle_signature)
+    app.add_config_value('numpydoc_edit_link', None, False)
+    app.add_config_value('numpydoc_use_plots', None, False)
+    app.add_config_value('numpydoc_show_class_members', True, True)
+
+    # Extra mangling domains
+    app.add_domain(NumpyPythonDomain)
+    app.add_domain(NumpyCDomain)
+
+#------------------------------------------------------------------------------
+# Docstring-mangling domains
+#------------------------------------------------------------------------------
+
+from docutils.statemachine import ViewList
+from sphinx.domains.c import CDomain
+from sphinx.domains.python import PythonDomain
+
+class ManglingDomainBase(object):
+    directive_mangling_map = {}
+
+    def __init__(self, *a, **kw):
+        super(ManglingDomainBase, self).__init__(*a, **kw)
+        self.wrap_mangling_directives()
+
+    def wrap_mangling_directives(self):
+        for name, objtype in self.directive_mangling_map.items():
+            self.directives[name] = wrap_mangling_directive(
+                self.directives[name], objtype)
+
+class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
+    name = 'np'
+    directive_mangling_map = {
+        'function': 'function',
+        'class': 'class',
+        'exception': 'class',
+        'method': 'function',
+        'classmethod': 'function',
+        'staticmethod': 'function',
+        'attribute': 'attribute',
+    }
+
+class NumpyCDomain(ManglingDomainBase, CDomain):
+    name = 'np-c'
+    directive_mangling_map = {
+        'function': 'function',
+        'member': 'attribute',
+        'macro': 'function',
+        'type': 'class',
+        'var': 'object',
+    }
+
+def wrap_mangling_directive(base_directive, objtype):
+    class directive(base_directive):
+        def run(self):
+            env = self.state.document.settings.env
+
+            name = None
+            if self.arguments:
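+                # split the directive argument into optional prefix,
+                # bare name, and optional '(args)'; keep the bare name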
+                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
+                name = m.group(2).strip()
+
+            if not name:
+                name = self.arguments[0]
+
+            lines = list(self.content)
+            mangle_docstrings(env.app, objtype, name, None, None, lines)
+            self.content = ViewList(lines, self.content.parent)
+
+            return base_directive.run(self)
+
+    return directive
diff --git a/doc/sphinxext/numpy_ext_old/__init__.py b/doc/sphinxext/numpy_ext_old/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/doc/sphinxext/numpy_ext_old/docscrape.py b/doc/sphinxext/numpy_ext_old/docscrape.py
new file mode 100644
index 0000000..fb4b544
--- /dev/null
+++ b/doc/sphinxext/numpy_ext_old/docscrape.py
@@ -0,0 +1,490 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+    """A line-based string reader.
+
+    """
+    def __init__(self, data):
+        """
+        Parameters
+        ----------
+        data : str
+           String with lines separated by '\n'.
+
+        """
+        if isinstance(data,list):
+            self._str = data
+        else:
+            self._str = data.split('\n') # store string as list of lines
+
+        self.reset()
+
+    def __getitem__(self, n):
+        return self._str[n]
+
+    def reset(self):
+        self._l = 0 # current line nr
+
+    def read(self):
+        if not self.eof():
+            out = self[self._l]
+            self._l += 1
+            return out
+        else:
+            return ''
+
+    def seek_next_non_empty_line(self):
+        for l in self[self._l:]:
+            if l.strip():
+                break
+            else:
+                self._l += 1
+
+    def eof(self):
+        return self._l >= len(self._str)
+
+    def read_to_condition(self, condition_func):
+        start = self._l
+        for line in self[start:]:
+            if condition_func(line):
+                return self[start:self._l]
+            self._l += 1
+            if self.eof():
+                return self[start:self._l+1]
+        return []
+
+    def read_to_next_empty_line(self):
+        self.seek_next_non_empty_line()
+        def is_empty(line):
+            return not line.strip()
+        return self.read_to_condition(is_empty)
+
+    def read_to_next_unindented_line(self):
+        def is_unindented(line):
+            return (line.strip() and (len(line.lstrip()) == len(line)))
+        return self.read_to_condition(is_unindented)
+
+    def peek(self,n=0):
+        if self._l + n < len(self._str):
+            return self[self._l + n]
+        else:
+            return ''
+
+    def is_empty(self):
+        return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+    def __init__(self,docstring):
+        docstring = textwrap.dedent(docstring).split('\n')
+
+        self._doc = Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': [''],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Attributes': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'Warnings': [],
+            'References': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def __getitem__(self,key):
+        return self._parsed_data[key]
+
+    def __setitem__(self,key,val):
+        if not self._parsed_data.has_key(key):
+            warn("Unknown section %s" % key)
+        else:
+            self._parsed_data[key] = val
+
+    def _is_at_section(self):
+        self._doc.seek_next_non_empty_line()
+
+        if self._doc.eof():
+            return False
+
+        l1 = self._doc.peek().strip()  # e.g. Parameters
+
+        if l1.startswith('.. index::'):
+            return True
+
+        l2 = self._doc.peek(1).strip() #    ---------- or ==========
+        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+    def _strip(self,doc):
+        i = 0
+        j = 0
+        for i,line in enumerate(doc):
+            if line.strip(): break
+
+        for j,line in enumerate(doc[::-1]):
+            if line.strip(): break
+
+        return doc[i:len(doc)-j]
+
+    def _read_to_next_section(self):
+        section = self._doc.read_to_next_empty_line()
+
+        while not self._is_at_section() and not self._doc.eof():
+            if not self._doc.peek(-1).strip(): # previous line was empty
+                section += ['']
+
+            section += self._doc.read_to_next_empty_line()
+
+        return section
+
+    def _read_sections(self):
+        while not self._doc.eof():
+            data = self._read_to_next_section()
+            name = data[0].strip()
+
+            if name.startswith('..'): # index section
+                yield name, data[1:]
+            elif len(data) < 2:
+                yield StopIteration
+            else:
+                yield name, self._strip(data[2:])
+
+    def _parse_param_list(self,content):
+        r = Reader(content)
+        params = []
+        while not r.eof():
+            header = r.read().strip()
+            if ' : ' in header:
+                arg_name, arg_type = header.split(' : ')[:2]
+            else:
+                arg_name, arg_type = header, ''
+
+            desc = r.read_to_next_unindented_line()
+            desc = dedent_lines(desc)
+
+            params.append((arg_name,arg_type,desc))
+
+        return params
+
+
+    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+    def _parse_see_also(self, content):
+        """
+        func_name : Descriptive text
+            continued text
+        another_func_name : Descriptive text
+        func_name1, func_name2, :meth:`func_name`, func_name3
+
+        """
+        items = []
+
+        def parse_item_name(text):
+            """Match ':role:`name`' or 'name'"""
+            m = self._name_rgx.match(text)
+            if m:
+                g = m.groups()
+                if g[1] is None:
+                    return g[3], None
+                else:
+                    return g[2], g[1]
+            raise ValueError("%s is not an item name" % text)
+
+        def push_item(name, rest):
+            if not name:
+                return
+            name, role = parse_item_name(name)
+            items.append((name, list(rest), role))
+            del rest[:]
+
+        current_func = None
+        rest = []
+
+        for line in content:
+            if not line.strip(): continue
+
+            m = self._name_rgx.match(line)
+            if m and line[m.end():].strip().startswith(':'):
+                push_item(current_func, rest)
+                current_func, line = line[:m.end()], line[m.end():]
+                rest = [line.split(':', 1)[1].strip()]
+                if not rest[0]:
+                    rest = []
+            elif not line.startswith(' '):
+                push_item(current_func, rest)
+                current_func = None
+                if ',' in line:
+                    for func in line.split(','):
+                        push_item(func, [])
+                elif line.strip():
+                    current_func = line
+            elif current_func is not None:
+                rest.append(line.strip())
+        push_item(current_func, rest)
+        return items
+
+    def _parse_index(self, section, content):
+        """
+        .. index: default
+           :refguide: something, else, and more
+
+        """
+        def strip_each_in(lst):
+            return [s.strip() for s in lst]
+
+        out = {}
+        section = section.split('::')
+        if len(section) > 1:
+            out['default'] = strip_each_in(section[1].split(','))[0]
+        for line in content:
+            line = line.split(':')
+            if len(line) > 2:
+                out[line[1]] = strip_each_in(line[2].split(','))
+        return out
+
+    def _parse_summary(self):
+        """Grab signature (if given) and summary"""
+        if self._is_at_section():
+            return
+
+        summary = self._doc.read_to_next_empty_line()
+        summary_str = " ".join([s.strip() for s in summary]).strip()
+        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+            self['Signature'] = summary_str
+            if not self._is_at_section():
+                self['Summary'] = self._doc.read_to_next_empty_line()
+        else:
+            self['Summary'] = summary
+
+        if not self._is_at_section():
+            self['Extended Summary'] = self._read_to_next_section()
+
+    def _parse(self):
+        self._doc.reset()
+        self._parse_summary()
+
+        for (section,content) in self._read_sections():
+            if not section.startswith('..'):
+                section = ' '.join([s.capitalize() for s in section.split(' ')])
+            if section in ('Parameters', 'Attributes', 'Methods',
+                           'Returns', 'Raises', 'Warns'):
+                self[section] = self._parse_param_list(content)
+            elif section.startswith('.. index::'):
+                self['index'] = self._parse_index(section, content)
+            elif section == 'See Also':
+                self['See Also'] = self._parse_see_also(content)
+            else:
+                self[section] = content
+
+    # string conversion routines
+
+    def _str_header(self, name, symbol='-'):
+        return [name, len(name)*symbol]
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        if self['Signature']:
+            return [self['Signature'].replace('*','\*')] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        if self['Summary']:
+            return self['Summary'] + ['']
+        else:
+            return []
+
+    def _str_extended_summary(self):
+        if self['Extended Summary']:
+            return self['Extended Summary'] + ['']
+        else:
+            return []
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            for param,param_type,desc in self[name]:
+                out += ['%s : %s' % (param, param_type)]
+                out += self._str_indent(desc)
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += self[name]
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        if not self['See Also']: return []
+        out = []
+        out += self._str_header("See Also")
+        last_had_desc = True
+        for func, desc, role in self['See Also']:
+            if role:
+                link = ':%s:`%s`' % (role, func)
+            elif func_role:
+                link = ':%s:`%s`' % (func_role, func)
+            else:
+                link = "`%s`_" % func
+            if desc or last_had_desc:
+                out += ['']
+                out += [link]
+            else:
+                out[-1] += ", %s" % link
+            if desc:
+                out += self._str_indent([' '.join(desc)])
+                last_had_desc = True
+            else:
+                last_had_desc = False
+        out += ['']
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            out += ['   :%s: %s' % (section, ', '.join(references))]
+        return out
+
+    def __str__(self, func_role=''):
+        out = []
+        out += self._str_signature()
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters','Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_section('Warnings')
+        out += self._str_see_also(func_role)
+        for s in ('Notes','References','Examples'):
+            out += self._str_section(s)
+        out += self._str_index()
+        return '\n'.join(out)
+
+
+def indent(str,indent=4):
+    indent_str = ' '*indent
+    if str is None:
+        return indent_str
+    lines = str.split('\n')
+    return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+    """Deindent a list of lines maximally"""
+    return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+    return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+    def __init__(self, func, role='func'):
+        self._f = func
+        self._role = role # e.g. "func" or "meth"
+        try:
+            NumpyDocString.__init__(self,inspect.getdoc(func) or '')
+        except ValueError, e:
+            print '*'*78
+            print "ERROR: '%s' while parsing `%s`" % (e, self._f)
+            print '*'*78
+            #print "Docstring follows:"
+            #print doclines
+            #print '='*78
+
+        if not self['Signature']:
+            func, func_name = self.get_func()
+            try:
+                # try to read signature
+                argspec = inspect.getargspec(func)
+                argspec = inspect.formatargspec(*argspec)
+                argspec = argspec.replace('*','\*')
+                signature = '%s%s' % (func_name, argspec)
+            except TypeError, e:
+                signature = '%s()' % func_name
+            self['Signature'] = signature
+
+    def get_func(self):
+        func_name = getattr(self._f, '__name__', self.__class__.__name__)
+        if inspect.isclass(self._f):
+            func = getattr(self._f, '__call__', self._f.__init__)
+        else:
+            func = self._f
+        return func, func_name
+
+    def __str__(self):
+        out = ''
+
+        func, func_name = self.get_func()
+        signature = self['Signature'].replace('*', '\*')
+
+        roles = {'func': 'function',
+                 'meth': 'method'}
+
+        if self._role:
+            if not roles.has_key(self._role):
+                print "Warning: invalid role %s" % self._role
+            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
+                                             func_name)
+
+        out += super(FunctionDoc, self).__str__(func_role=self._role)
+        return out
+
+
+class ClassDoc(NumpyDocString):
+    def __init__(self,cls,modulename='',func_doc=FunctionDoc):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+        self._name = cls.__name__
+        self._func_doc = func_doc
+
+        NumpyDocString.__init__(self, pydoc.getdoc(cls))
+
+    @property
+    def methods(self):
+        return [name for name,func in inspect.getmembers(self._cls)
+                if not name.startswith('_') and callable(func)]
+
+    def __str__(self):
+        out = ''
+        out += super(ClassDoc, self).__str__()
+        out += "\n\n"
+
+        #for m in self.methods:
+        #    print "Parsing `%s`" % m
+        #    out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
+        #    out += '.. index::\n   single: %s; %s\n\n' % (self._name, m)
+
+        return out
diff --git a/doc/sphinxext/numpy_ext_old/docscrape_sphinx.py b/doc/sphinxext/numpy_ext_old/docscrape_sphinx.py
new file mode 100644
index 0000000..d431ecd
--- /dev/null
+++ b/doc/sphinxext/numpy_ext_old/docscrape_sphinx.py
@@ -0,0 +1,133 @@
+import re, inspect, textwrap, pydoc
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+    # string conversion routines
+    def _str_header(self, name, symbol='`'):
+        return ['.. rubric:: ' + name, '']
+
+    def _str_field_list(self, name):
+        return [':' + name + ':']
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        # signature output is suppressed here; mangle_signature in
+        # numpydoc.py supplies the signature via autodoc instead
+        return ['']
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Extended Summary'] + ['']
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_field_list(name)
+            out += ['']
+            for param,param_type,desc in self[name]:
+                out += self._str_indent(['**%s** : %s' % (param.strip(),
+                                                          param_type)])
+                out += ['']
+                out += self._str_indent(desc,8)
+                out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += ['']
+            content = textwrap.dedent("\n".join(self[name])).split("\n")
+            out += content
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        out = []
+        if self['See Also']:
+            see_also = super(SphinxDocString, self)._str_see_also(func_role)
+            out = ['.. seealso::', '']
+            out += self._str_indent(see_also[2:])
+        return out
+
+    def _str_warnings(self):
+        out = []
+        if self['Warnings']:
+            out = ['.. warning::', '']
+            out += self._str_indent(self['Warnings'])
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        if len(idx) == 0:
+            return out
+
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            elif section == 'refguide':
+                out += ['   single: %s' % (', '.join(references))]
+            else:
+                out += ['   %s: %s' % (section, ','.join(references))]
+        return out
+
+    def _str_references(self):
+        out = []
+        if self['References']:
+            out += self._str_header('References')
+            if isinstance(self['References'], str):
+                self['References'] = [self['References']]
+            out.extend(self['References'])
+            out += ['']
+        return out
+
+    def __str__(self, indent=0, func_role="obj"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Attributes', 'Methods',
+                           'Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_warnings()
+        out += self._str_see_also(func_role)
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_section('Examples')
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+    pass
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+    pass
+
+def get_doc_object(obj, what=None):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc)
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, '')
+    else:
+        return SphinxDocString(pydoc.getdoc(obj))
diff --git a/doc/sphinxext/numpy_ext_old/numpydoc.py b/doc/sphinxext/numpy_ext_old/numpydoc.py
new file mode 100644
index 0000000..5e979ea
--- /dev/null
+++ b/doc/sphinxext/numpy_ext_old/numpydoc.py
@@ -0,0 +1,111 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
+
+"""
+
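+# Typical use (a sketch, assuming a standard Sphinx project): list this
+# module in the `extensions` variable of conf.py; setup() below then
+# connects mangle_docstrings and mangle_signature to the autodoc events.
+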
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+                      reference_offset=[0]):
+    if what == 'module':
+        # Strip top title
+        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+                              re.I|re.S)
+        lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
+    else:
+        doc = get_doc_object(obj, what)
+        lines[:] = str(doc).split("\n")
+
+    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+           obj.__name__:
+        v = dict(full_name=obj.__name__)
+        lines += [''] + (app.config.numpydoc_edit_link % v).split("\n")
+
+    # replace reference numbers so that there are no duplicates
+    references = []
+    for l in lines:
+        l = l.strip()
+        if l.startswith('.. ['):
+            try:
+                references.append(int(l[len('.. ['):l.index(']')]))
+            except ValueError:
+                print "WARNING: invalid reference in %s docstring" % name
+
+    # Start renaming from the biggest number, otherwise we may
+    # overwrite references.
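+    # (e.g. if an earlier docstring already used [1]_, this docstring's
+    # [1]_ is renumbered to [2]_ via reference_offset)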
+    references.sort()
+    if references:
+        for i, line in enumerate(lines):
+            for r in references:
+                new_r = reference_offset[0] + r
+                lines[i] = lines[i].replace('[%d]_' % r,
+                                            '[%d]_' % new_r)
+                lines[i] = lines[i].replace('.. [%d]' % r,
+                                            '.. [%d]' % new_r)
+
+    reference_offset[0] += len(references)
+
+def mangle_signature(app, what, name, obj, options, sig, retann):
+    # Do not try to inspect classes that don't define `__init__`
+    if (inspect.isclass(obj) and
+        'initializes x; see ' in pydoc.getdoc(obj.__init__)):
+        return '', ''
+
+    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
+    if not hasattr(obj, '__doc__'): return
+
+    doc = SphinxDocString(pydoc.getdoc(obj))
+    if doc['Signature']:
+        sig = re.sub("^[^(]*", "", doc['Signature'])
+        return sig, ''
+
+def initialize(app):
+    try:
+        app.connect('autodoc-process-signature', mangle_signature)
+    except Exception:
+        monkeypatch_sphinx_ext_autodoc()
+
+def setup(app, get_doc_object_=get_doc_object):
+    global get_doc_object
+    get_doc_object = get_doc_object_
+
+    app.connect('autodoc-process-docstring', mangle_docstrings)
+    app.connect('builder-inited', initialize)
+    app.add_config_value('numpydoc_edit_link', None, True)
+
+#------------------------------------------------------------------------------
+# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
+#------------------------------------------------------------------------------
+
+def monkeypatch_sphinx_ext_autodoc():
+    global _original_format_signature
+    import sphinx.ext.autodoc
+
+    if sphinx.ext.autodoc.format_signature is our_format_signature:
+        return
+
+    print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
+    _original_format_signature = sphinx.ext.autodoc.format_signature
+    sphinx.ext.autodoc.format_signature = our_format_signature
+
+def our_format_signature(what, obj):
+    r = mangle_signature(None, what, None, obj, None, None, None)
+    if r is not None:
+        return r[0]
+    else:
+        return _original_format_signature(what, obj)
diff --git a/doc/sphinxext/only_directives.py b/doc/sphinxext/only_directives.py
new file mode 100644
index 0000000..c5046bf
--- /dev/null
+++ b/doc/sphinxext/only_directives.py
@@ -0,0 +1,65 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+#
+# A pair of directives for inserting content that will only appear in
+# either html or latex.
+#
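+# Usage in a reST source (illustrative; the names come from setup() below):
+#
+#   .. htmlonly::
+#
+#      This paragraph appears only in HTML output.
+#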
+
+from docutils.nodes import Body, Element
+from docutils.parsers.rst import directives
+
+class only_base(Body, Element):
+    def dont_traverse(self, *args, **kwargs):
+        return []
+
+class html_only(only_base):
+    pass
+
+class latex_only(only_base):
+    pass
+
+def run(content, node_class, state, content_offset):
+    text = '\n'.join(content)
+    node = node_class(text)
+    state.nested_parse(content, content_offset, node)
+    return [node]
+
+def html_only_directive(name, arguments, options, content, lineno,
+                        content_offset, block_text, state, state_machine):
+    return run(content, html_only, state, content_offset)
+
+def latex_only_directive(name, arguments, options, content, lineno,
+                         content_offset, block_text, state, state_machine):
+    return run(content, latex_only, state, content_offset)
+
+def builder_inited(app):
+    if app.builder.name == 'html':
+        latex_only.traverse = only_base.dont_traverse
+    else:
+        html_only.traverse = only_base.dont_traverse
+
+def setup(app):
+    app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0))
+    app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0))
+    app.add_node(html_only)
+    app.add_node(latex_only)
+
+    # This will *really* never see the light of day. As it turns out,
+    # this results in "broken" image nodes since they never get
+    # processed, so best not to do this.
+    # app.connect('builder-inited', builder_inited)
+
+    # Add visit/depart methods to HTML-Translator:
+    def visit_perform(self, node):
+        pass
+    def depart_perform(self, node):
+        pass
+    def visit_ignore(self, node):
+        node.children = []
+    def depart_ignore(self, node):
+        node.children = []
+
+    app.add_node(html_only, html=(visit_perform, depart_perform))
+    app.add_node(html_only, latex=(visit_ignore, depart_ignore))
+    app.add_node(latex_only, latex=(visit_perform, depart_perform))
+    app.add_node(latex_only, html=(visit_ignore, depart_ignore))
diff --git a/doc/upload_html.sh b/doc/upload_html.sh
new file mode 100755
index 0000000..2196279
--- /dev/null
+++ b/doc/upload_html.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
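+# rsync flags: -rltvz = recurse, preserve symlinks and mtimes, verbose,
+# compress; --delete removes remote files absent locally; -essh uses ssh.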
+#scp -r build/html/* martinos-data:/web/html/mne/
+rsync -rltvz --delete --perms --chmod=g+w build/html/ martinos-data:/web/html/mne/ -essh
diff --git a/doc/utils/extract_config_doc.py b/doc/utils/extract_config_doc.py
new file mode 100755
index 0000000..c101bcf
--- /dev/null
+++ b/doc/utils/extract_config_doc.py
@@ -0,0 +1,73 @@
+#! /usr/bin/env python
+
+"""
+This script will extract the documentation from the full_configbase.py
+module, reformat it somewhat, and write it as a reST document in
+$PYFIFF/doc/source.
+
+"""
+
+import os
+import re
+from fiff import __file__ as fifffile
+
+fiffpath = os.path.join(os.path.split(fifffile)[0], os.pardir)
+
+confid = open(os.path.join(
+    fiffpath, "data", "configfiles", "full_configbase.py"), "r")
+docfid = open(os.path.join(
+    fiffpath, "doc", "source", "config_doc.rst"), "w")
+
+docfid.write(".. _config_doc:\n\n")
+
+write = False
+space = False
+
+def flip(value):
+    return not value
+
+sectionhead = re.compile("(<)([\w\s]+)(>)")
+
+def get_head(line):
+    m = sectionhead.search(line)
+    if not m:
+        return ""
+    head = m.groups()[1]
+    return "\n\n%s\n%s\n\n" % (head, "-" * len(head))
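+# e.g. a line containing "<Analysis Options>" (hypothetical section name)
+# becomes the reST heading "Analysis Options" underlined with dashes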
+
+for num, line in enumerate(confid):
+
+    if re.match("-+\n", line):
+        space = True
+        newline = ""
+        for i in range(len(line) - 1):
+            newline = "%s^" % newline
+        line = "%s\n" % newline
+    elif re.match("[ \t\n]+", line):
+        space = False
+    if line.startswith("#-"):
+        docfid.write(get_head(line))
+    else:
+        if line.startswith("\"\"\""):
+            write = flip(write)
+            lastflip = num
+    if space:
+        line = "%s\n" % line
+
+    if write and not num == lastflip:
+        docfid.write(line)
+
+confid.close()
+docfid.close()
diff --git a/doc/utils/lut2sphinxtbl.py b/doc/utils/lut2sphinxtbl.py
new file mode 100755
index 0000000..02051ed
--- /dev/null
+++ b/doc/utils/lut2sphinxtbl.py
@@ -0,0 +1,65 @@
+#! /usr/bin/env python
+"""
+Usage: lut2sphinxtbl.py lutfile sphinxfile atlasname
+"""
+import os
+import re
+import sys
+import numpy as np
+
+if len(sys.argv) < 3:  # lutfile and sphinxfile are both required
+    print __doc__
+    sys.exit(0)
+
+
+lutfile = sys.argv[1]
+spxfile = sys.argv[2]
+atlasname = " ".join(sys.argv[3:])
+
+lutarr = np.genfromtxt(lutfile, str)
+lutarr = lutarr[:,:2]
+maxid = max(len(row[0]) for row in lutarr)
+maxname = max(len(row[1]) for row in lutarr)
+leftbar = max(maxid, 3)
+rightbar = max(maxname, 20)
+
+fid = open(spxfile, "w")
+
+fid.write(".. _%s:\n\n" % os.path.splitext(os.path.split(spxfile)[1])[0])
+fid.write("%s\n" % atlasname)
+for i in range(len(atlasname)):
+    fid.write("-")
+fid.write("\n\n")
+leftline = ""
+for i in range(leftbar):
+    leftline = "".join([leftline, "="])
+rightline = ""
+for i in range(rightbar):
+    rightline = "".join([rightline, "="])
+fid.write("%s   %s\nID     Region\n%s   %s\n" % (leftline, rightline, leftline, rightline))
+for row in lutarr:
+    name = row[1]
+    if not re.match("[rR](h|ight|\-).*", name) and not re.match("[Uu]nknown", name):
+        id = row[0]
+        if len(id) > 3:
+            id = int(id[-3:])  # keep only the last three digits
+        else:
+            id = int(id)
+        m = re.match("(([lL])(h|eft|\-)(\-*))(.*)", name)
+        if m:
+            name = name[len(m.group(1)):].capitalize()
+        space = ""
+        for i in range(7-len(str(id))):
+            space = "".join([space, " "])
+        fid.write("%d%s%s\n" % (id, space, name))
+
+fid.write("%s   %s\n\n" % (leftline, rightline))
diff --git a/doc/utils/make_clean_config.py b/doc/utils/make_clean_config.py
new file mode 100755
index 0000000..ce35036
--- /dev/null
+++ b/doc/utils/make_clean_config.py
@@ -0,0 +1,30 @@
+#! /usr/bin/env python
+
+import os
+
+fullfid = open(os.path.join(os.path.split(__file__)[0], os.path.pardir, os.path.pardir,
+                            "data", "configfiles", "full_configbase.py"), "r")
+cleanfid = open(os.path.join(os.path.split(__file__)[0], os.path.pardir, os.path.pardir,
+                            "data", "configfiles", "clean_configbase.py"), "w")
+
+write = True
+lastflip = None
+
+def flip(value):
+    return not value
+
+for num, line in enumerate(fullfid):
+
+    if not line.startswith("#--"):
+        if line.startswith("\"\"\"") and num > 15:
+            write = flip(write)
+            lastflip = num
+
+        if write and not lastflip == num:
+            cleanfid.write(line)
+
+fullfid.close()
+cleanfid.close()
diff --git a/examples/README.txt b/examples/README.txt
new file mode 100644
index 0000000..aebe569
--- /dev/null
+++ b/examples/README.txt
@@ -0,0 +1,6 @@
+
+General examples
+----------------
+
+General-purpose and introductory examples to MNE.
+
diff --git a/examples/connectivity/README.txt b/examples/connectivity/README.txt
new file mode 100644
index 0000000..73de5f5
--- /dev/null
+++ b/examples/connectivity/README.txt
@@ -0,0 +1,6 @@
+
+Connectivity Analysis Examples
+------------------------------
+
+Examples demonstrating connectivity analysis in sensor and source space.
+
diff --git a/examples/connectivity/plot_cwt_sensor_connectivity.py b/examples/connectivity/plot_cwt_sensor_connectivity.py
new file mode 100644
index 0000000..001afde
--- /dev/null
+++ b/examples/connectivity/plot_cwt_sensor_connectivity.py
@@ -0,0 +1,79 @@
+"""
+==============================================================
+Compute seed based time-frequency connectivity in sensor space
+==============================================================
+
+Computes the connectivity between a seed gradiometer close to the visual cortex
+and all other gradiometers. The connectivity is computed in the time-frequency
+domain using Morlet wavelets, and the debiased Squared Weighted Phase Lag Index
+[1] is used as the connectivity metric.
+
+[1] Vinck et al. "An improved index of phase-synchronization for electro-
+    physiological data in the presence of volume-conduction, noise and
+    sample-size bias" NeuroImage, vol. 55, no. 4, pp. 1548-1565, Apr. 2011.
+"""
+
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne import fiff
+from mne.connectivity import spectral_connectivity, seed_target_indices
+from mne.datasets import sample
+from mne.layouts import read_layout
+from mne.viz import plot_topo_tfr
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# Add a bad channel
+raw.info['bads'] += ['MEG 2443']
+
+# Pick MEG gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+                        exclude='bads')
+
+# Create epochs for left-visual condition
+event_id, tmin, tmax = 3, -0.2, 0.5
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
+
+# Use 'MEG 2343' as seed
+seed_ch = 'MEG 2343'
+picks_ch_names = [raw.ch_names[i] for i in picks]
+
+# Create seed-target indices for connectivity computation
+seed = picks_ch_names.index(seed_ch)
+targets = np.arange(len(picks))
+indices = seed_target_indices(seed, targets)
+
+# Define wavelet frequencies and number of cycles
+cwt_frequencies = np.arange(7, 30, 2)
+cwt_n_cycles = cwt_frequencies / 7.
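+# (cycles scale with frequency, so each wavelet spans a roughly constant
+# time window of n_cycles / freq = 1/7 s)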
+
+# Run the connectivity analysis using 2 parallel jobs
+sfreq = raw.info['sfreq']  # the sampling frequency
+con, freqs, times, _, _ = spectral_connectivity(epochs, indices=indices,
+    method='wpli2_debiased', mode='cwt_morlet', sfreq=sfreq,
+    cwt_frequencies=cwt_frequencies, cwt_n_cycles=cwt_n_cycles, n_jobs=2)
+
+# Mark the seed channel with a value of 1.0, so we can see it in the plot
+con[np.where(indices[1] == seed)] = 1.0
+
+# Show topography of connectivity from seed
+import pylab as pl
+layout = read_layout('Vectorview-all')
+title = 'WPLI2 - Visual - Seed %s' % seed_ch
+plot_topo_tfr(epochs, con, freqs, layout, title=title)
+pl.show()
diff --git a/examples/connectivity/plot_mne_inverse_coherence_epochs.py b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
new file mode 100644
index 0000000..0232a15
--- /dev/null
+++ b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
@@ -0,0 +1,118 @@
+"""
+==============================================================
+Compute coherence in source space using a MNE inverse solution
+==============================================================
+
+This example computes the coherence between a seed in the left
+auditory cortex and the rest of the brain based on single-trial
+MNE-dSPM inverse solutions.
+
+"""
+
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.minimum_norm import apply_inverse, apply_inverse_epochs,\
+                             read_inverse_operator
+from mne.connectivity import seed_target_indices, spectral_connectivity
+
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+label_name_lh = 'Aud-lh'
+fname_label_lh = data_path + '/MEG/sample/labels/%s.label' % label_name_lh
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+
+# Load data
+inverse_operator = read_inverse_operator(fname_inv)
+label_lh = mne.read_label(fname_label_lh)
+raw = Raw(fname_raw)
+events = mne.read_events(fname_event)
+
+# Add a bad channel
+raw.info['bads'] += ['MEG 2443']
+
+# pick MEG channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                   exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
+                                                    eog=150e-6))
+
+# First, we find the most active vertex in the left auditory cortex, which
+# we will later use as seed for the connectivity computation
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
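+# lambda2 is the regularization parameter of the minimum-norm inverse (1 / SNR^2)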
+evoked = epochs.average()
+stc = apply_inverse(evoked, inverse_operator, lambda2, method,
+                    pick_normal=True)
+
+# Restrict the source estimate to the label in the left auditory cortex
+stc_label = stc.in_label(label_lh)
+
+# Find number and index of vertex with most power
+src_pow = np.sum(stc_label.data ** 2, axis=1)
+seed_vertno = stc_label.vertno[0][np.argmax(src_pow)]
+seed_idx = np.searchsorted(stc.vertno[0], seed_vertno)  # index in original stc
+
+# Generate index parameter for seed-based connectivity analysis
+n_sources = stc.data.shape[0]
+indices = seed_target_indices([seed_idx], np.arange(n_sources))
+
+# Compute the inverse solution for each epoch. By using "return_generator=True"
+# stcs will be a generator object instead of a list. This allows us to
+# compute the coherence without having to keep all source estimates in memory.
+
+snr = 1.0  # use lower SNR for single epochs
+lambda2 = 1.0 / snr ** 2
+stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
+                            pick_normal=True, return_generator=True)
+
+# Now we are ready to compute the coherence in the alpha and beta bands.
+# fmin and fmax specify the lower and upper frequency of each band, respectively.
+fmin = (8., 13.)
+fmax = (13., 30.)
+sfreq = raw.info['sfreq']  # the sampling frequency
+
+# Now we compute connectivity. To speed things up, we use 2 parallel jobs
+# and use mode='fourier', which uses an FFT with a Hanning window
+# to compute the spectra (instead of multitaper estimation, which has a
+# lower variance but is slower). By using faverage=True, we directly
+# average the coherence in the alpha and beta bands, i.e., we will only
+# get 2 frequency bins.
+coh, freqs, times, n_epochs, n_tapers = spectral_connectivity(stcs,
+    method='coh', mode='fourier', indices=indices,
+    sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, n_jobs=2)
+
+print 'Frequencies in Hz over which coherence was averaged for alpha: '
+print freqs[0]
+print 'Frequencies in Hz over which coherence was averaged for beta: '
+print freqs[1]
+
+# Generate a SourceEstimate with the coherence. This is simple since we
+# used a single seed. For more than one seed we would have to split coh.
+# Note: We use a hack to save the frequency axis as time.
+tmin = np.mean(freqs[0])
+tstep = np.mean(freqs[1]) - tmin
+coh_stc = mne.SourceEstimate(coh, vertices=stc.vertno, tmin=1e-3 * tmin,
+                             tstep=1e-3 * tstep, subject='sample')
+
+# Now we can visualize the coherence using the plot method
+brain = coh_stc.plot('sample', 'inflated', 'rh', fmin=0.25, fmid=0.4,
+                     fmax=0.65, time_label='Coherence %0.1f Hz',
+                     subjects_dir=subjects_dir)
+brain.show_view('lateral')
diff --git a/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py b/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
new file mode 100644
index 0000000..95aefd7
--- /dev/null
+++ b/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
@@ -0,0 +1,98 @@
+"""
+==============================================================
+Compute full spectrum source space connectivity between labels
+==============================================================
+
+The connectivity is computed between 4 labels across the spectrum
+between 5 and 40 Hz.
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
+from mne.connectivity import spectral_connectivity
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+
+# Load data
+inverse_operator = read_inverse_operator(fname_inv)
+raw = Raw(fname_raw)
+events = mne.read_events(fname_event)
+
+# Add a bad channel
+raw.info['bads'] += ['MEG 2443']
+
+# Pick MEG channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                   exclude='bads')
+
+# Define epochs for left-auditory condition
+event_id, tmin, tmax = 1, -0.2, 0.5
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
+                                                    eog=150e-6))
+
+# Compute the inverse solution for each epoch. By using "return_generator=True"
+# stcs will be a generator object instead of a list.
+snr = 1.0  # use lower SNR for single epochs
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
+                            pick_normal=True, return_generator=True)
+
+# Read some labels
+names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh']
+labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name)
+          for name in names]
+
+# Average the source estimates within each label using sign-flips to reduce
+# signal cancellation; here, too, we return a generator
+src = inverse_operator['src']
+label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
+                                         return_generator=True)
+
+fmin, fmax = 5., 40.
+sfreq = raw.info['sfreq']  # the sampling frequency
+
+con, freqs, times, n_epochs, n_tapers = spectral_connectivity(label_ts,
+        method='wpli2_debiased', mode='multitaper', sfreq=sfreq, fmin=fmin,
+        fmax=fmax, mt_adaptive=True, n_jobs=2)
+
+import pylab as pl
+n_rows, n_cols = con.shape[:2]
+fig, axes = pl.subplots(n_rows, n_cols, sharex=True, sharey=True)
+pl.suptitle('Between labels connectivity')
+for i in range(n_rows):
+    for j in range(i + 1):
+        if i == j:
+            axes[i, j].set_axis_off()
+            continue
+
+        axes[i, j].plot(freqs, con[i, j, :])
+        axes[j, i].plot(freqs, con[i, j, :])
+
+        if j == 0:
+            axes[i, j].set_ylabel(names[i])
+            axes[0, i].set_title(names[i])
+        if i == (n_rows - 1):
+            axes[i, j].set_xlabel(names[j])
+        axes[i, j].set_xlim([fmin, fmax])
+        axes[j, i].set_xlim([fmin, fmax])
+
+        # Show band limits
+        for f in [8, 12, 18, 35]:
+            axes[i, j].axvline(f, color='k')
+            axes[j, i].axvline(f, color='k')
+pl.show()
diff --git a/examples/connectivity/plot_mne_inverse_label_connectivity.py b/examples/connectivity/plot_mne_inverse_label_connectivity.py
new file mode 100644
index 0000000..0728874
--- /dev/null
+++ b/examples/connectivity/plot_mne_inverse_label_connectivity.py
@@ -0,0 +1,125 @@
+"""
+=========================================================================
+Compute source space connectivity and visualize it using a circular graph
+=========================================================================
+
+This example computes the all-to-all connectivity between 68 regions in
+source space based on dSPM inverse solutions and a FreeSurfer cortical
+parcellation. The connectivity is visualized using a circular graph which
+is ordered based on the locations of the regions.
+"""
+
+# Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
+from mne.connectivity import spectral_connectivity
+from mne.viz import circular_layout, plot_connectivity_circle
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+
+# Load data
+inverse_operator = read_inverse_operator(fname_inv)
+raw = Raw(fname_raw)
+events = mne.read_events(fname_event)
+
+# Add a bad channel
+raw.info['bads'] += ['MEG 2443']
+
+# Pick MEG channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                   exclude='bads')
+
+# Define epochs for left-auditory condition
+event_id, tmin, tmax = 1, -0.2, 0.5
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
+                                                    eog=150e-6))
+
+# Compute the inverse solution for each epoch. By using "return_generator=True"
+# stcs will be a generator object instead of a list.
+snr = 1.0  # use lower SNR for single epochs
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
+                            pick_normal=True, return_generator=True)
+
+# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
+labels, label_colors = mne.labels_from_parc('sample', parc='aparc',
+                                            subjects_dir=subjects_dir)
+
+# Average the source estimates within each label using sign-flips to reduce
+# signal cancellation; here, too, we return a generator
+src = inverse_operator['src']
+label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
+                                         return_generator=True)
+
+# Now we are ready to compute the connectivity in the alpha band. Notice
+# from the status messages how mne-python: 1) reads an epoch from the raw
+# file, 2) applies SSP and baseline correction, 3) computes the inverse to
+# obtain a source estimate, 4) averages the source estimate to obtain a
+# time series for each label, 5) includes the label time series in the
+# connectivity computation, and then moves on to the next epoch. This
+# behaviour occurs because we are using generators, which allow us to
+# compute connectivity in a computationally efficient manner where the
+# amount of memory (RAM) needed is independent of the number of epochs.
+fmin = 8.
+fmax = 13.
+sfreq = raw.info['sfreq']  # the sampling frequency
+
+con, freqs, times, n_epochs, n_tapers = spectral_connectivity(label_ts,
+        method='wpli2_debiased', mode='multitaper', sfreq=sfreq, fmin=fmin,
+        fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=2)
+
+# con is a 3D array, get the connectivity for the first (and only) freq. band
+con = con[:, :, 0]
+
+# Now, we visualize the connectivity using a circular graph layout
+
+# First, we reorder the labels based on their location in the left hemi
+label_names = [label.name for label in labels]
+
+lh_labels = [name for name in label_names if name.endswith('lh')]
+
+# Get the y-location of the label
+label_ypos = list()
+for name in lh_labels:
+    idx = label_names.index(name)
+    ypos = np.mean(labels[idx].pos[:, 1])
+    label_ypos.append(ypos)
+
+# Reorder the labels based on their location
+lh_labels = [label for (ypos, label) in sorted(zip(label_ypos, lh_labels))]
+
+# For the right hemi
+rh_labels = [label[:-2] + 'rh' for label in lh_labels]
+
+# Save the plot order and create a circular layout
+node_order = list()
+node_order.extend(lh_labels[::-1])  # reverse the order
+node_order.extend(rh_labels)
+
+node_angles = circular_layout(label_names, node_order, start_pos=90)
+
+# Plot the graph using node colors from the FreeSurfer parcellation. We only
+# show the 300 strongest connections.
+plot_connectivity_circle(con, label_names, n_lines=300, node_angles=node_angles,
+                         node_colors=label_colors,
+                         title='All-to-All Connectivity left-Auditory '
+                               'Condition')
+import pylab as pl
+pl.savefig('circle.png', facecolor='black')
+pl.show()
diff --git a/examples/connectivity/plot_mne_inverse_psi_visual.py b/examples/connectivity/plot_mne_inverse_psi_visual.py
new file mode 100644
index 0000000..4828a68
--- /dev/null
+++ b/examples/connectivity/plot_mne_inverse_psi_visual.py
@@ -0,0 +1,115 @@
+"""
+=====================================================================
+Compute Phase Slope Index (PSI) in source space for a visual stimulus
+=====================================================================
+
+This example demonstrates how the Phase Slope Index (PSI) [1] can be computed
+in source space based on single-trial dSPM source estimates. In addition,
+the example shows advanced usage of the connectivity estimation routines
+by first extracting a label time course for each epoch and then combining
+the label time course with the single-trial source estimates to compute the
+connectivity.
+
+The result clearly shows how the activity in the visual label precedes more
+widespread activity (a positive PSI means the label time course is leading).
+
+References
+----------
+[1] Nolte et al. "Robustly Estimating the Flow Direction of Information in
+Complex Physical Systems", Physical Review Letters, vol. 100, no. 23,
+pp. 1-4, Jun. 2008.
+"""
+
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs
+from mne.connectivity import seed_target_indices, phase_slope_index
+from mne.viz import mne_analyze_colormap
+
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+fname_label = data_path + '/MEG/sample/labels/Vis-lh.label'
+
+event_id, tmin, tmax = 4, -0.2, 0.3
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+
+# Load data
+inverse_operator = read_inverse_operator(fname_inv)
+raw = Raw(fname_raw)
+events = mne.read_events(fname_event)
+
+# pick MEG channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                   exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
+                                                    eog=150e-6))
+
+# Compute the inverse solution for each epoch. Note that since we are passing
+# the output to both extract_label_time_course and the phase_slope_index
+# functions, we have to use "return_generator=False", since it is only possible
+# to iterate over generators once.
+snr = 1.0  # use lower SNR for single epochs
+lambda2 = 1.0 / snr ** 2
+stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
+                            pick_normal=True, return_generator=False)
+
+# Now, we generate seed time series by averaging the activity in the left
+# visual cortex
+label = mne.read_label(fname_label)
+src = inverse_operator['src']  # the source space used
+seed_ts = mne.extract_label_time_course(stcs, label, src, mode='mean_flip')
+
+# Combine the seed time course with the source estimates. There will be a total
+# of 7500 signals:
+# index 0: time course extracted from label
+# index 1..7499: dSPM source space time courses
+comb_ts = zip(seed_ts, stcs)
+
+# Construct indices to estimate connectivity between the label time course
+# and all source space time courses
+vertices = [src[i]['vertno'] for i in range(2)]
+n_signals_tot = 1 + len(vertices[0]) + len(vertices[1])
+
+indices = seed_target_indices([0], np.arange(1, n_signals_tot))
+
+# Compute the PSI in the frequency range 8Hz..30Hz. We exclude the baseline
+# period from the connectivity estimation
+fmin = 8.
+fmax = 30.
+tmin_con = 0.
+sfreq = raw.info['sfreq']  # the sampling frequency
+
+psi, freqs, times, n_epochs, _ = phase_slope_index(comb_ts, mode='multitaper',
+    indices=indices, sfreq=sfreq, fmin=fmin, fmax=fmax, tmin=tmin_con)
+
+# Generate a SourceEstimate with the PSI. This is simple since we used a single
+# seed (inspect the indices variable to see how the PSI scores are arranged in
+# the output)
+psi_stc = mne.SourceEstimate(psi, vertices=vertices, tmin=0, tstep=1,
+                             subject='sample')
+
+# Now we can visualize the PSI using the plot method. We use a custom colormap
+# to show signed values
+v_max = np.max(np.abs(psi))
+colormap = mne_analyze_colormap(limits=[0, v_max / 3, v_max])
+brain = psi_stc.plot(surface='inflated', hemi='lh',
+                     time_label='Phase Slope Index (PSI)',
+                     subjects_dir=subjects_dir, colormap=colormap)
+brain.scale_data_colormap(fmin=-v_max, fmid=0., fmax=v_max, transparent=False)
+brain.show_view('medial')
+brain.add_label(fname_label, color='green', alpha=0.7)
diff --git a/examples/connectivity/plot_sensor_connectivity.py b/examples/connectivity/plot_sensor_connectivity.py
new file mode 100644
index 0000000..984c460
--- /dev/null
+++ b/examples/connectivity/plot_sensor_connectivity.py
@@ -0,0 +1,118 @@
+"""
+===============================================
+Compute all-to-all connectivity in sensor space
+===============================================
+
+Computes the Phase Lag Index (PLI) between all gradiometers and shows the
+connectivity in 3D using the helmet geometry. The left visual stimulation data
+are used, which produce strong connectivity in the right occipital sensors.
+"""
+
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+from scipy import linalg
+
+import mne
+from mne import fiff
+from mne.connectivity import spectral_connectivity
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# Add a bad channel
+raw.info['bads'] += ['MEG 2443']
+
+# Pick MEG gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+                        exclude='bads')
+
+# Create epochs for the visual condition
+event_id, tmin, tmax = 3, -0.2, 0.5
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
+
+# Compute connectivity for the band containing the evoked response.
+# We exclude the baseline period.
+fmin, fmax = 3., 9.
+sfreq = raw.info['sfreq']  # the sampling frequency
+tmin = 0.0  # exclude the baseline period
+con, freqs, times, n_epochs, n_tapers = spectral_connectivity(epochs,
+    method='pli', mode='multitaper', sfreq=sfreq,
+    fmin=fmin, fmax=fmax, faverage=True, tmin=tmin,
+    mt_adaptive=False, n_jobs=2)
+
+# the epochs contain an EOG channel, which we remove now
+ch_names = epochs.ch_names
+idx = [ch_names.index(name) for name in ch_names if name.startswith('MEG')]
+con = con[idx][:, idx]
+
+# con is a 3D array where the last dimension is size one since we averaged
+# over frequencies in a single band. Here we make it 2D
+con = con[:, :, 0]
+
+# Now, visualize the connectivity in 3D
+try:
+    from enthought.mayavi import mlab
+except ImportError:
+    from mayavi import mlab
+
+mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
+
+# Plot the sensor locations
+sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx]
+sens_loc = np.array(sens_loc)
+
+pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
+                    color=(0, 0, 1), opacity=0.5, scale_factor=0.01)
+
+# Get the strongest connections
+n_con = 20  # show up to 20 connections
+min_dist = 0.05  # exclude sensors that are less than 5cm apart
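+# the n_con-th largest value of the flattened matrix serves as threshold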
+threshold = np.sort(con, axis=None)[-n_con]
+ii, jj = np.where(con >= threshold)
+
+# Remove close connections
+con_nodes = list()
+con_val = list()
+for i, j in zip(ii, jj):
+    if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
+        con_nodes.append((i, j))
+        con_val.append(con[i, j])
+
+con_val = np.array(con_val)
+
+# Show the connections as tubes between sensors
+vmax = np.max(con_val)
+vmin = np.min(con_val)
+for val, nodes in zip(con_val, con_nodes):
+    x1, y1, z1 = sens_loc[nodes[0]]
+    x2, y2, z2 = sens_loc[nodes[1]]
+    mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
+                vmin=vmin, vmax=vmax, tube_radius=0.002)
+
+mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4)
+
+# Add the sensor names for the connections shown
+nodes_shown = list(set([n[0] for n in con_nodes] +
+                       [n[1] for n in con_nodes]))
+
+for node in nodes_shown:
+    x, y, z = sens_loc[node]
+    mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005,
+                color=(0, 0, 0))
+
+view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2]))
+mlab.view(*view)
diff --git a/examples/decoding/README.txt b/examples/decoding/README.txt
new file mode 100644
index 0000000..ab26871
--- /dev/null
+++ b/examples/decoding/README.txt
@@ -0,0 +1,5 @@
+
+Decoding / MVPA
+---------------
+
+Decoding, a.k.a. MVPA or machine learning examples.
diff --git a/examples/decoding/plot_decoding_sensors.py b/examples/decoding/plot_decoding_sensors.py
new file mode 100644
index 0000000..3e8b12e
--- /dev/null
+++ b/examples/decoding/plot_decoding_sensors.py
@@ -0,0 +1,99 @@
+"""
+==========================
+Decoding sensor space data
+==========================
+
+Decoding, a.k.a. MVPA or supervised machine learning, applied to MEG
+data in sensor space. Here the classifier is applied to every time
+point.
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+import pylab as pl
+import numpy as np
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+
+data_path = sample.data_path()
+
+pl.close('all')
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname, preload=True)
+raw.filter(2, None, method='iir')  # replace baselining with high-pass
+events = mne.read_events(event_fname)
+
+# Set up pick list: EEG + MEG - bad channels (modify to your needs)
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
+                        exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=None, preload=True,
+                    reject=dict(grad=4000e-13, eog=150e-6))
+
+epochs_list = [epochs[k] for k in event_id]
+mne.epochs.equalize_epoch_counts(epochs_list)
+
+###############################################################################
+# Decoding in sensor space using a linear SVM
+n_times = len(epochs.times)
+# Take only the data channels (here the gradiometers)
+data_picks = fiff.pick_types(epochs.info, meg=True, exclude='bads')
+# Make arrays X and y such that:
+# X is 3d, with X.shape[0] the total number of epochs to classify
+# y is filled with integers coding for the class to predict
+# We must have X.shape[0] equal to y.shape[0]
+X = [e.get_data()[:, data_picks, :] for e in epochs_list]
+y = [k * np.ones(len(this_X)) for k, this_X in enumerate(X)]
+X = np.concatenate(X)
+y = np.concatenate(y)
+
+from sklearn.svm import SVC
+from sklearn.cross_validation import cross_val_score, ShuffleSplit
+
+clf = SVC(C=1, kernel='linear')
+# Define a Monte Carlo cross-validation generator (to reduce variance):
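+# (here: 10 random splits, each holding out 20% of the epochs for testing)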
+cv = ShuffleSplit(len(X), 10, test_size=0.2)
+
+scores = np.empty(n_times)
+std_scores = np.empty(n_times)
+
+for t in xrange(n_times):
+    Xt = X[:, :, t]
+    # Standardize features
+    Xt -= Xt.mean(axis=0)
+    Xt /= Xt.std(axis=0)
+    # Run cross-validation
+    # Note: for sklearn, the Xt matrix should be 2d (n_samples x n_features)
+    scores_t = cross_val_score(clf, Xt, y, cv=cv, n_jobs=1)
+    scores[t] = scores_t.mean()
+    std_scores[t] = scores_t.std()
+
+times = 1e3 * epochs.times
+scores *= 100  # convert to percentage
+std_scores *= 100
+pl.plot(times, scores, label="Classif. score")
+pl.axhline(50, color='k', linestyle='--', label="Chance level")
+pl.axvline(0, color='r', label='stim onset')
+pl.legend()
+hyp_limits = (scores - std_scores, scores + std_scores)
+pl.fill_between(times, hyp_limits[0], y2=hyp_limits[1], color='b', alpha=0.5)
+pl.xlabel('Times (ms)')
+pl.ylabel('CV classification score (% correct)')
+pl.ylim([30, 100])
+pl.title('Sensor space decoding')
+pl.show()
diff --git a/examples/decoding/plot_decoding_spatio_temporal_source.py b/examples/decoding/plot_decoding_spatio_temporal_source.py
new file mode 100644
index 0000000..dbaac14
--- /dev/null
+++ b/examples/decoding/plot_decoding_spatio_temporal_source.py
@@ -0,0 +1,154 @@
+"""
+==========================
+Decoding source space data
+==========================
+
+Decoding, a.k.a. MVPA or supervised machine learning, applied to MEG
+data in source space on the left cortical surface. Here, f-test feature
+selection is employed to confine the classification to the potentially
+relevant features. The classifier is then trained on the selected features
+of the epochs in source space.
+"""
+
+# Author: Denis A. Engemann <d.engemann at fz-juelich.de>
+#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+import os
+import numpy as np
+from mne import fiff
+from mne.datasets import sample
+from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
+
+data_path = sample.data_path()
+fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
+fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
+subjects_dir = data_path + '/subjects'
+subject = os.environ['SUBJECT'] = subjects_dir + '/sample'
+os.environ['SUBJECTS_DIR'] = subjects_dir
+
+###############################################################################
+# Set parameters
+
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+label_names = 'Aud-rh', 'Vis-rh'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_r=2, vis_r=4)  # load contra-lateral conditions
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname, preload=True)
+raw.filter(2, None, method='iir')  # replace baselining with high-pass
+events = mne.read_events(event_fname)
+
+# Set up pick list: MEG - bad channels (modify to your needs)
+raw.info['bads'] += ['MEG 2443']  # mark bads
+picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                        exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=None, preload=True,
+                    reject=dict(grad=4000e-13, eog=150e-6))
+
+epochs.equalize_event_counts(event_id.keys(), 'mintime', copy=False)
+epochs_list = [epochs[k] for k in event_id]
+
+# Compute inverse solution
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+n_times = len(epochs.times)
+n_vertices = 3732
+n_epochs = len(epochs.events)
+
+# Load data and compute inverse solution and stcs for each epoch.
+
+noise_cov = mne.read_cov(fname_cov)
+inverse_operator = read_inverse_operator(fname_inv)
+X = np.zeros([n_epochs, n_vertices, n_times])
+
+# to save memory, we'll load and transform our epochs step by step.
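+# (epochs of the first condition fill X[:n_epochs / 2], those of the
+# second condition fill the remaining rows)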
+for condition_count, ep in zip([0, n_epochs / 2], epochs_list):
+    stcs = apply_inverse_epochs(ep, inverse_operator, lambda2,
+                            method, pick_normal=True,  # this saves us memory
+                            return_generator=True)
+    for jj, stc in enumerate(stcs):
+        X[condition_count + jj] = stc.lh_data
+
+###############################################################################
+# Decoding in source space using a linear SVM
+
+# Make arrays X and y such that:
+# X is 3d with X.shape[0] being the total number of epochs to classify
+# y is filled with integers coding for the class to predict
+# We must have X.shape[0] equal to y.shape[0]
+
+# we know the first half of the epochs belongs to the first class,
+# the second half to the second class
+y = np.repeat([0, 1], len(X) / 2)
+X = X.reshape(n_epochs, n_vertices * n_times)
+# we have to normalize the data before supplying them to our classifier
+X -= X.mean(axis=0)
+X /= X.std(axis=0)
+
+# prepare classifier
+from sklearn.svm import SVC
+from sklearn.cross_validation import ShuffleSplit
+
+# Define a Monte Carlo cross-validation generator (to reduce variance):
+n_splits = 10
+clf = SVC(C=1, kernel='linear')
+cv = ShuffleSplit(len(X), n_splits, test_size=0.2)
+
+# setup feature selection and classification pipeline
+from sklearn.feature_selection import SelectKBest, f_classif
+from sklearn.pipeline import Pipeline
+
+# we will use an ANOVA f-test to preselect relevant spatio-temporal units
+feature_selection = SelectKBest(f_classif, k=500)  # take the best 500
+# to make life easier we will create a pipeline object
+anova_svc = Pipeline([('anova', feature_selection), ('svc', clf)])
+
+# initialize score and feature weights result arrays
+scores = np.zeros(n_splits)
+feature_weights = np.zeros([n_vertices, n_times])
+
+# hold on, this may take a moment
+for ii, (train, test) in enumerate(cv):
+    anova_svc.fit(X[train], y[train])
+    y_pred = anova_svc.predict(X[test])
+    y_test = y[test]
+    scores[ii] = np.sum(y_pred == y_test) / float(len(y_test))
+    feature_weights += feature_selection.inverse_transform(clf.coef_) \
+        .reshape(n_vertices, n_times)
+
+print 'Average prediction accuracy: %0.3f | standard deviation: %0.3f' % \
+    (scores.mean(), scores.std())
+
+# prepare feature weights for visualization
+feature_weights /= (ii + 1)  # create average weights
+# create mask to avoid division error
+feature_weights = np.ma.masked_array(feature_weights, feature_weights == 0)
+# normalize scores for visualization purposes
+feature_weights /= feature_weights.std(axis=1)[:, None]
+feature_weights -= feature_weights.mean(axis=1)[:, None]
+
+# unmask, take absolute values, emulate f-value scale
+feature_weights = np.abs(feature_weights.data) * 10
+
+vertices = [stc.lh_vertno, np.array([])]  # empty array for right hemisphere
+stc_feat = mne.SourceEstimate(feature_weights, vertices=vertices,
+                              tmin=stc.tmin, tstep=stc.tstep,
+                              subject='sample')
+
+brain = stc_feat.plot(subject=subject, fmin=1, fmid=5.5, fmax=20)
+brain.set_time(100)
+brain.show_view('l')
+# take the medial view to further explore visual areas
diff --git a/examples/export/README.txt b/examples/export/README.txt
new file mode 100644
index 0000000..bc9e455
--- /dev/null
+++ b/examples/export/README.txt
@@ -0,0 +1,5 @@
+
+Export of MNE data for use in other packages
+--------------------------------------------
+
+Export as data frames in Pandas, TimeSeries in nitime etc.
diff --git a/examples/export/plot_epochs_as_data_frame.py b/examples/export/plot_epochs_as_data_frame.py
new file mode 100644
index 0000000..dde36fb
--- /dev/null
+++ b/examples/export/plot_epochs_as_data_frame.py
@@ -0,0 +1,230 @@
+"""
+=================================
+Export epochs to Pandas DataFrame
+=================================
+
+In this example the pandas exporter will be used to produce a DataFrame
+object. After exploring some basic features, a split-apply-combine
+workflow will be conducted to examine the latencies of the response
+maxima across epochs and conditions.
+Note. Equivalent methods are available for raw and evoked data objects.
+
+Short Pandas Primer
+-------------------
+
+Pandas Data Frames
+~~~~~~~~~~~~~~~~~~
+A data frame can be thought of as a combination of matrix, list and dict:
+It knows about linear algebra and element-wise operations but is size mutable
+and allows for labeled access to its data. In addition, the pandas data frame
+class provides many useful methods for restructuring, reshaping and
+visualizing data. As most methods return data frame instances, operations can
+be chained with ease; this allows one to write efficient one-liners.
+Technically a DataFrame can be seen as a high-level container for numpy
+arrays, and hence switching back and forth between numpy arrays and
+DataFrames is very easy. Taken together, these features qualify data frames
+for interoperation with databases and for interactive data exploration /
+analysis. Additionally, pandas interfaces with the R statistical computing
+language, which covers a huge amount of statistical functionality.
+
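+For instance (a minimal sketch, independent of MNE, with numpy imported
+as np and pandas imported as pd):
+
+    df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
+    arr = df.values  # back to a plain numpy array
+    df2 = pd.DataFrame(arr, columns=df.columns)  # ... and a DataFrame again
+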
+Export Options
+~~~~~~~~~~~~~~
+The pandas exporter comes with a few options worth commenting on.
+
+Pandas DataFrame objects use a so-called hierarchical index. This can be
+thought of as an array of unique tuples, in our case, representing the higher
+dimensional MEG data in a 2D data table. The column names are the channel names
+from the epoch object. The channels can be accessed like entries of a
+dictionary:
+
+    df['MEG 2333']
+
+Epochs and time slices can be accessed with the .ix method:
+
+    epochs_df.ix[(1, 2), 'MEG 2333']
+
+However, it is also possible to include this index as regular categorical
+data columns, which yields a long table format typically used for repeated
+measures designs. To take control of this feature, on export you can specify
+which of the three dimensions 'condition', 'epoch' and 'time' is passed to
+the Pandas index using the index parameter. Note that this decision is
+reversible at any time, as demonstrated below.
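+
+For example (a sketch, where epochs denotes the epochs object created
+below), the long table format is obtained with:
+
+    df_long = epochs.as_data_frame(index=None)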
+
+Similarly, for convenience, it is possible to scale the times, e.g. from
+seconds to milliseconds.
+
+Some Instance Methods
+~~~~~~~~~~~~~~~~~~~~~
+Most numpy methods and many ufuncs can be found as instance methods, e.g.
+mean, median, var, std, mul, max, argmax, etc.
+Below is an incomplete listing of additional useful data frame instance
+methods:
+
+apply : apply function to data.
+    Any kind of custom function can be applied to the data. In combination with
+    lambda this can be very useful.
+describe : quickly generate summary stats
+    Very useful for exploring data.
+groupby : generate subgroups and initialize a 'split-apply-combine' operation.
+    Creates a group object. Subsequently, methods like apply, agg, or transform
+    can be used to manipulate the underlying data separately but
+    simultaneously. Finally, reset_index can be used to combine the results
+    back into a data frame (a minimal sketch follows this listing).
+plot : wrapper around plt.plot
+    However, it comes with some special options. For examples, see below.
+shape : shape attribute
+    gets the dimensions of the data frame.
+values :
+    return underlying numpy array.
+to_records :
+    export data as numpy record array.
+to_dict :
+    export data as dict of arrays.
+
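+A minimal split-apply-combine sketch (assuming a data frame df with a
+'condition' column, as created in the code below):
+
+    df.groupby('condition').mean()
+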
+Reference
+~~~~~~~~~
+More information and additional introductory materials can be found at the
+pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
+"""
+# Author: Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+import pylab as pl
+import numpy as np
+from mne.fiff import Raw
+from mne.datasets import sample
+
+
+# turn on interactive mode
+pl.ion()
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+
+raw = Raw(raw_fname)
+
+# For simplicity we will only consider the first 10 epochs
+events = mne.read_events(event_fname)[:10]
+
+# Add a bad channel
+raw.info['bads'] += ['MEG 2443']
+picks = mne.fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                            stim=False, exclude='bads')
+
+tmin, tmax = -0.2, 0.5
+baseline = (None, 0)
+reject = dict(grad=4000e-13, eog=150e-6)
+
+event_id = dict(auditory_l=1, auditory_r=2, visual_l=3, visual_r=4)
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
+                    baseline=baseline, preload=True, reject=reject)
+
+###############################################################################
+# Export DataFrame
+
+# The following parameters will scale the channels and times to make them
+# plotting friendly. The info columns 'epoch' and 'time' will be used as
+# hierarchical index whereas the condition is treated as categorical data.
+# Note that this is optional. By passing None you could also print out all
+# nesting factors in a long table style commonly used for analyzing repeated
+# measures designs.
+
+index, scale_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)
+
+df = epochs.as_data_frame(picks=None, scalings=scalings, scale_time=scale_time,
+                          index=index)
+
+# Create MEG channel selector and drop EOG channel.
+meg_chs = [c for c in df.columns if 'MEG' in c]
+
+df.pop('EOG 061')  # this works just like with a dict.
+
+###############################################################################
+# Explore Pandas MultiIndex
+
+# Pandas is using a MultiIndex or hierarchical index to handle higher
+# dimensionality while at the same time representing data in a flat 2d manner.
+
+print df.index.names, df.index.levels
+
+# Inspecting the index object reveals that 'epoch' and 'time' are used
+# for subsetting data. We can take advantage of that by using the
+# .ix attribute, where in this case the first position indexes the MultiIndex
+# and the second the columns, that is, channels.
+
+# Plot some channels across the first three epochs
+xticks, sel = np.arange(3, 600, 120), meg_chs[:15]
+df.ix[:3, sel].plot(xticks=xticks)
+mne.viz.tight_layout()
+
+# slice the time starting at t0 in epoch 2 and ending 500ms after
+# the base line in epoch 3. Note that the second part of the tuple
+# represents time in milliseconds from stimulus onset.
+df.ix[(1, 0):(3, 500), sel].plot(xticks=xticks)
+mne.viz.tight_layout()
+
+# Note: For convenience the time index was converted from floating point
+# values to integer values. To restore the original values you can e.g. say
+# df['times'] = np.tile(epochs.times, len(epochs.events))
+
+# We now reset the index of the DataFrame to expose some Pandas
+# pivoting functionality. To simplify the groupby operation we
+# drop the indices to treat epoch and time as categorical factors.
+
+df = df.reset_index()
+
+# The ensuing DataFrame then is split into subsets reflecting a crossing
+# between condition and trial number. The idea is that we can broadcast
+# operations into each cell simultaneously.
+
+factors = ['condition', 'epoch']
+sel = factors + ['MEG 1332', 'MEG 1342']
+grouped = df[sel].groupby(factors)
+
+# To make the plot labels more readable, let's edit the values of 'condition'.
+df.condition = df.condition.apply(lambda name: name + ' ')
+
+# Now we compare the mean responses of two channels across conditions.
+grouped.mean().plot(kind='bar', stacked=True, title='Mean MEG Response',
+                    color=['steelblue', 'orange'])
+mne.viz.tight_layout()
+
+# We can even accomplish more complicated tasks in a few lines by calling
+# the apply method and passing a function. Assume we wanted to know the time
+# slice of the maximum response for each condition.
+
+max_latency = grouped[sel[2]].apply(lambda x: df.time[x.argmax()])
+
+print max_latency
+
+pl.figure()
+max_latency.plot(kind='barh', title='Latency of Maximum Response',
+                 color='steelblue')
+mne.viz.tight_layout()
+
+# Finally, we will again remove the index to create a proper data table that
+# can be used with statistical packages like statsmodels or R.
+
+final_df = max_latency.reset_index()
+# Rename the aggregated column, as the index is oblivious of names.
+final_df = final_df.rename(columns={0: sel[2]})
+
+# The index is now written into regular columns so it can be used as factor.
+print final_df
+
+# To save as csv file, uncomment the next line.
+# final_df.to_csv('my_epochs.csv')
+
+# Note. Data Frames can be easily concatenated, e.g., across subjects.
+# For instance:
+#
+# import pandas as pd
+# group = pd.concat([df_1, df_2])
+# group['subject'] = np.r_[np.ones(len(df_1)), np.ones(len(df_2)) + 1]
diff --git a/examples/export/plot_epochs_to_nitime.py b/examples/export/plot_epochs_to_nitime.py
new file mode 100644
index 0000000..37ceaf9
--- /dev/null
+++ b/examples/export/plot_epochs_to_nitime.py
@@ -0,0 +1,65 @@
+"""
+=======================
+Export epochs to NiTime
+=======================
+
+This script shows how to export Epochs to the NiTime library
+for further signal processing and data analysis.
+
+"""
+
+# Author: Denis Engemann <d.engemann at fz-juelich.de>
+#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne import fiff
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# Set up pick list: MEG gradiometers - bad channels (modify to your needs)
+raw.info['bads'] += ['MEG 2443', 'EEG 053']
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+                        exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=(None, 0), preload=True,
+                    reject=dict(grad=4000e-13, eog=150e-6))
+
+# Export to NiTime
+epochs_ts = epochs.to_nitime(picks=np.arange(20), collapse=True)
+
+###############################################################################
+# Now use nitime's OO-interface to compute coherence between sensors
+
+from nitime.analysis import MTCoherenceAnalyzer
+from nitime.viz import drawmatrix_channels
+import matplotlib.pylab as pl
+
+# setup coherency analyzer
+C = MTCoherenceAnalyzer(epochs_ts)
+
+# confine analysis to 10 - 30 Hz
+freq_idx = np.where((C.frequencies > 10) * (C.frequencies < 30))[0]
+
+# compute average coherence
+coh = np.mean(C.coherence[:, :, freq_idx], -1)  # Averaging on last dimension
+drawmatrix_channels(coh, epochs.ch_names, color_anchor=0,
+                    title='MEG gradiometer coherence')
+
+pl.show()
diff --git a/examples/export/plot_evoked_to_nitime.py b/examples/export/plot_evoked_to_nitime.py
new file mode 100644
index 0000000..d0a2dfd
--- /dev/null
+++ b/examples/export/plot_evoked_to_nitime.py
@@ -0,0 +1,34 @@
+"""
+============================
+Export evoked data to Nitime
+============================
+
+"""
+# Author: Denis Engemann <d.engemann at fz-juelich.de>
+#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+from mne import fiff
+from mne.datasets import sample
+from nitime.viz import plot_tseries
+import pylab as pl
+
+
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+# Reading
+evoked = fiff.Evoked(fname, setno=0, baseline=(None, 0), proj=True)
+
+# Pick channels to view
+picks = fiff.pick_types(evoked.info, meg='grad', eeg=False, exclude='bads')
+
+evoked_ts = evoked.to_nitime(picks=picks)
+
+plot_tseries(evoked_ts)
+
+pl.show()
diff --git a/examples/export/plot_raw_to_nitime.py b/examples/export/plot_raw_to_nitime.py
new file mode 100644
index 0000000..5b8a81d
--- /dev/null
+++ b/examples/export/plot_raw_to_nitime.py
@@ -0,0 +1,83 @@
+"""
+============================
+Export Raw Objects to NiTime
+============================
+
+This script shows how to export raw files to the NiTime library
+for further signal processing and data analysis.
+
+"""
+
+# Author: Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+
+from mne.fiff import Raw
+from mne.datasets import sample
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+###############################################################################
+# get raw data
+raw = Raw(raw_fname)
+
+# set picks
+picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
+                            stim=False, exclude='bads')
+
+# pick times relative to the onset of the MEG measurement.
+start, stop = raw.time_as_index([100, 115], use_first_samp=False)
+
+# export to nitime using a copy of the data
+raw_ts = raw.to_nitime(start=start, stop=stop, picks=picks, copy=True)
+
+###############################################################################
+# explore some nitime timeseries features
+
+# get start
+print raw_ts.t0
+
+# get duration
+print raw_ts.duration
+
+# get sample duration (sampling interval)
+print raw_ts.sampling_interval
+
+# get exported raw info
+print raw_ts.metadata.keys()
+
+# index at certain time
+print raw_ts.at(110.5)
+
+# get channel names (attribute added during export)
+print raw_ts.ch_names[:3]
+
+###############################################################################
+# investigate spectral density
+
+import matplotlib.pylab as pl
+
+import nitime.algorithms as tsa
+
+ch_sel = raw_ts.ch_names.index('MEG 0122')
+
+data_ch = raw_ts.data[ch_sel]
+
+f, psd_mt, nu = tsa.multi_taper_psd(data_ch, Fs=raw_ts.sampling_rate,
+                                    BW=1, adaptive=False, jackknife=False)
+
+# Convert PSD to dB
+psd_mt = 10 * np.log10(psd_mt)
+
+pl.close('all')
+pl.plot(f, psd_mt)
+pl.xlabel('Frequency (Hz)')
+pl.ylabel('Power Spectral Density (dB/Hz)')
+pl.title('Multitaper Power Spectrum \n %s' % raw_ts.ch_names[ch_sel])
+pl.show()
diff --git a/examples/extract_events_from_raw.py b/examples/extract_events_from_raw.py
new file mode 100644
index 0000000..cf88a27
--- /dev/null
+++ b/examples/extract_events_from_raw.py
@@ -0,0 +1,31 @@
+"""
+=========================
+Find events in a raw file
+=========================
+
+Find events from the stimulation/trigger channel in the raw data.
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw
+
+data_path = sample.data_path()
+fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+
+# Reading events
+raw = Raw(fname)
+
+events = mne.find_events(raw, stim_channel='STI 014')
+
+# Writing events
+mne.write_events('events.fif', events)
+
+for ind, before, after in events[:5]:
+    print "At sample %d stim channel went from %d to %d" % (
+                                                    ind, before, after)
diff --git a/examples/inverse/README.txt b/examples/inverse/README.txt
new file mode 100644
index 0000000..69559c3
--- /dev/null
+++ b/examples/inverse/README.txt
@@ -0,0 +1,7 @@
+
+Inverse problem and source analysis
+-----------------------------------
+
+Estimate source activations, extract activations in
+labels, morph data between subjects etc.
+
diff --git a/examples/inverse/plot_compute_mne_inverse.py b/examples/inverse/plot_compute_mne_inverse.py
new file mode 100644
index 0000000..36e358b
--- /dev/null
+++ b/examples/inverse/plot_compute_mne_inverse.py
@@ -0,0 +1,56 @@
+"""
+================================================
+Compute MNE-dSPM inverse solution on evoked data
+================================================
+
+Compute dSPM inverse solution on MNE evoked dataset
+and store the solution in stc files for visualisation.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+from mne.datasets import sample
+from mne.fiff import Evoked
+from mne.minimum_norm import apply_inverse, read_inverse_operator
+
+
+data_path = sample.data_path()
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
+subjects_dir = data_path + '/subjects'
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+
+# Load data
+evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+inverse_operator = read_inverse_operator(fname_inv)
+
+# Compute inverse solution
+stc = apply_inverse(evoked, inverse_operator, lambda2, method,
+                    pick_normal=False)
+
+# Save result in stc files
+stc.save('mne_%s_inverse' % method)
+
+###############################################################################
+# View activation time-series
+pl.plot(1e3 * stc.times, stc.data[::100, :].T)
+pl.xlabel('time (ms)')
+pl.ylabel('%s value' % method)
+pl.show()
+
+# Plot brain in 3D with PySurfer if available. Note that the subject name
+# is already known by the SourceEstimate stc object.
+brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir)
+brain.set_data_time_index(180)
+brain.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
+brain.show_view('lateral')
+brain.save_image('dSPM_map.png')
diff --git a/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py b/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
new file mode 100644
index 0000000..a7f057c
--- /dev/null
+++ b/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
@@ -0,0 +1,79 @@
+"""
+==================================================
+Compute MNE-dSPM inverse solution on single epochs
+==================================================
+
+Compute dSPM inverse solution on single trial epochs restricted
+to a brain label.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import pylab as pl
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
+
+
+data_path = sample.data_path()
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+label_name = 'Aud-lh'
+fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+snr = 1.0  # use smaller SNR for single trial data
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+
+# Load data
+inverse_operator = read_inverse_operator(fname_inv)
+label = mne.read_label(fname_label)
+raw = Raw(fname_raw)
+events = mne.read_events(fname_event)
+
+# Set up pick list
+include = []
+
+# Add a bad channel
+raw.info['bads'] += ['EEG 053']  # bads + 1 more
+
+# pick MEG channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                   include=include, exclude='bads')
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
+                                                    eog=150e-6))
+
+# Compute inverse solution and stcs for each epoch
+stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, label,
+                            pick_normal=True)
+
+mean_stc = sum(stcs) / len(stcs)
+
+# compute sign flip to avoid signal cancellation when averaging signed values
+flip = mne.label_sign_flip(label, inverse_operator['src'])
+
+label_mean = np.mean(mean_stc.data, axis=0)
+label_mean_flip = np.mean(flip[:, np.newaxis] * mean_stc.data, axis=0)
+
+###############################################################################
+# View activation time-series
+pl.figure()
+h0 = pl.plot(1e3 * stcs[0].times, mean_stc.data.T, 'k')
+h1, = pl.plot(1e3 * stcs[0].times, label_mean, 'r', linewidth=3)
+h2, = pl.plot(1e3 * stcs[0].times, label_mean_flip, 'g', linewidth=3)
+pl.legend((h0[0], h1, h2), ('all dipoles in label', 'mean',
+                            'mean with sign flip'))
+pl.xlabel('time (ms)')
+pl.ylabel('dSPM value')
+pl.show()
diff --git a/examples/inverse/plot_compute_mne_inverse_raw_in_label.py b/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
new file mode 100644
index 0000000..92d0d08
--- /dev/null
+++ b/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
@@ -0,0 +1,54 @@
+"""
+=============================================
+Compute sLORETA inverse solution on raw data
+=============================================
+
+Compute sLORETA inverse solution on raw dataset restricted
+to a brain label and store the solution in stc files for
+visualisation.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw
+from mne.minimum_norm import apply_inverse_raw, read_inverse_operator
+
+
+data_path = sample.data_path()
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
+label_name = 'Aud-lh'
+fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
+
+snr = 1.0  # use smaller SNR for raw data
+lambda2 = 1.0 / snr ** 2
+method = "sLORETA"  # use sLORETA method (could also be MNE or dSPM)
+
+# Load data
+raw = Raw(fname_raw)
+inverse_operator = read_inverse_operator(fname_inv)
+label = mne.read_label(fname_label)
+
+start, stop = raw.time_as_index([0, 15])  # read the first 15s of data
+
+# Compute inverse solution
+stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
+                        start, stop, pick_normal=False)
+
+# Save result in stc files
+stc.save('mne_%s_raw_inverse_%s' % (method, label_name))
+
+###############################################################################
+# View activation time-series
+pl.plot(1e3 * stc.times, stc.data[::100, :].T)
+pl.xlabel('time (ms)')
+pl.ylabel('%s value' % method)
+pl.show()
diff --git a/examples/inverse/plot_compute_mne_inverse_volume.py b/examples/inverse/plot_compute_mne_inverse_volume.py
new file mode 100644
index 0000000..2451c6d
--- /dev/null
+++ b/examples/inverse/plot_compute_mne_inverse_volume.py
@@ -0,0 +1,55 @@
+"""
+=======================================================================
+Compute MNE-dSPM inverse solution on evoked data in volume source space
+=======================================================================
+
+Compute dSPM inverse solution on MNE evoked dataset in a volume source
+space and store the solution in a nifti file for visualisation.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import pylab as pl
+import mne
+from mne.datasets import sample
+from mne.fiff import Evoked
+from mne.minimum_norm import apply_inverse, read_inverse_operator
+
+data_path = sample.data_path()
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif'
+fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+
+# Load data
+evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+inverse_operator = read_inverse_operator(fname_inv)
+src = inverse_operator['src']
+
+# Compute inverse solution
+stc = apply_inverse(evoked, inverse_operator, lambda2, method)
+stc.crop(0.0, 0.2)
+
+# Save result in a 4D nifti file
+img = mne.save_stc_as_volume('mne_%s_inverse.nii.gz' % method, stc,
+        src, mri_resolution=False)  # set to True for full MRI resolution
+data = img.get_data()
+
+# plot result (one slice)
+coronal_slice = data[:, 10, :, 60]
+pl.close('all')
+pl.imshow(np.ma.masked_less(coronal_slice, 8), cmap=pl.cm.Reds,
+          interpolation='nearest')
+pl.colorbar()
+pl.contour(coronal_slice != 0, 1, colors=['black'])
+pl.xticks([])
+pl.yticks([])
+pl.show()
diff --git a/examples/inverse/plot_dipole_fit_result.py b/examples/inverse/plot_dipole_fit_result.py
new file mode 100644
index 0000000..5142834
--- /dev/null
+++ b/examples/inverse/plot_dipole_fit_result.py
@@ -0,0 +1,82 @@
+"""
+==============================================================
+Reading a .dip file from xfit and view with source space in 3D
+==============================================================
+
+Here the .dip file was generated with the mne_dipole_fit command.
+
+The detailed Unix command is:
+
+$mne_dipole_fit --meas sample_audvis-ave.fif --set 1 --meg --tmin 40 --tmax 95 \
+    --bmin -200 --bmax 0 --noise sample_audvis-cov.fif \
+    --bem ../../subjects/sample/bem/sample-5120-bem-sol.fif \
+    --origin 0:0:40 --mri sample_audvis-meg-oct-6-fwd.fif \
+    --dip sample_audvis_set1.dip
+
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne.datasets import sample
+
+data_path = sample.data_path()
+fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
+dip_fname = data_path + '/MEG/sample/sample_audvis_set1.dip'
+bem_fname = data_path + '/subjects/sample/bem/sample-5120-bem-sol.fif'
+
+brain_surface = mne.read_bem_surfaces(bem_fname, add_geom=True)[0]
+points = brain_surface['rr']
+faces = brain_surface['tris']
+
+fwd = mne.read_forward_solution(fwd_fname)
+src = fwd['src']
+
+# read dipoles
+time, pos, amplitude, ori, gof = mne.read_dip(dip_fname)
+
+print "Time (ms): %s" % time
+print "Amplitude (nAm): %s" % amplitude
+print "GOF (%%): %s" % gof
+
+# only plot those for which GOF is above 50%
+pos = pos[gof > 50.]
+ori = ori[gof > 50.]
+time = time[gof > 50.]
+
+###############################################################################
+# Show result on 3D source space
+try:
+    from enthought.mayavi import mlab
+except ImportError:
+    from mayavi import mlab
+
+lh_points = src[0]['rr']
+lh_faces = src[0]['use_tris']
+mlab.figure(size=(600, 600), bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
+
+# show brain surface after proper coordinate system transformation
+points = brain_surface['rr']
+faces = brain_surface['tris']
+coord_trans = fwd['mri_head_t']['trans']
+points = np.dot(coord_trans[:3, :3], points.T).T + coord_trans[:3, -1]
+mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
+                     faces, color=(1, 1, 0), opacity=0.3)
+
+# show one cortical surface
+mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+                     lh_faces, color=(0.7, ) * 3)
+
+# show dipoles as small cones
+dipoles = mlab.quiver3d(pos[:, 0], pos[:, 1], pos[:, 2],
+                        ori[:, 0], ori[:, 1], ori[:, 2],
+                        opacity=1., scale_factor=4e-4, scalars=time,
+                        mode='cone')
+mlab.colorbar(dipoles, title='Dipole fit time (ms)')
+
+# proper 3D orientation
+mlab.get_engine().scenes[0].scene.x_plus_view()
diff --git a/examples/inverse/plot_gamma_map_inverse.py b/examples/inverse/plot_gamma_map_inverse.py
new file mode 100644
index 0000000..2bbb751
--- /dev/null
+++ b/examples/inverse/plot_gamma_map_inverse.py
@@ -0,0 +1,66 @@
+"""
+===============================================================================
+Compute a sparse inverse solution using the Gamma-Map empirical Bayesian method
+===============================================================================
+
+See Wipf et al. "A unified Bayesian framework for MEG/EEG source imaging."
+NeuroImage, vol. 44, no. 3, pp. 947-966, Mar. 2009.
+"""
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import pylab as pl
+
+import mne
+from mne.datasets import sample
+from mne.inverse_sparse import gamma_map
+from mne.viz import plot_sparse_source_estimates
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+# Read the evoked response and crop it
+setno = 'Left visual'
+evoked = mne.fiff.read_evoked(evoked_fname, setno=setno, baseline=(None, 0))
+evoked.crop(tmin=-50e-3, tmax=300e-3)
+
+# Read the forward solution
+forward = mne.read_forward_solution(fwd_fname, surf_ori=True,
+                                    force_fixed=False)
+
+# Read noise covariance matrix and regularize it
+cov = mne.read_cov(cov_fname)
+cov = mne.cov.regularize(cov, evoked.info)
+
+# Run the Gamma-MAP method
+alpha = 0.5
+stc, residual = gamma_map(evoked, forward, cov, alpha, xyz_same_gamma=True,
+                          return_residual=True)
+
+# View in 2D and 3D ("glass" brain like 3D plot)
+
+# Show the sources as spheres scaled by their strength
+scale_factors = np.max(np.abs(stc.data), axis=1)
+scale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors))
+
+plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
+    modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None),
+    fig_name="Gamma-MAP")
+
+# Show the evoked response and the residual for gradiometers
+ylim = dict(grad=[-120, 120])
+evoked = mne.fiff.pick_types_evoked(evoked, meg='grad', exclude='bads')
+pl.figure()
+evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim,
+            proj=True)
+
+residual = mne.fiff.pick_types_evoked(residual, meg='grad', exclude='bads')
+pl.figure()
+residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim,
+              proj=True)
diff --git a/examples/inverse/plot_label_activation_from_stc.py b/examples/inverse/plot_label_activation_from_stc.py
new file mode 100644
index 0000000..dd1d865
--- /dev/null
+++ b/examples/inverse/plot_label_activation_from_stc.py
@@ -0,0 +1,62 @@
+"""
+==================================================
+Extracting time course from source_estimate object
+==================================================
+
+Load a SourceEstimate object from stc files and
+extract the time course of activation in
+individual labels, as well as in a complex label
+formed through merging two labels.
+
+"""
+# Author: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import os
+
+import mne
+from mne.datasets import sample
+import pylab as pl
+
+data_path = sample.data_path()
+os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
+meg_path = data_path + '/MEG/sample'
+
+# load the stc
+stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
+
+# load the labels
+aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
+aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')
+
+# extract the time course for different labels from the stc
+stc_lh = stc.in_label(aud_lh)
+stc_rh = stc.in_label(aud_rh)
+stc_bh = stc.in_label(aud_lh + aud_rh)
+
+# calculate center of mass and transform to mni coordinates
+vtx, _, t_lh = stc_lh.center_of_mass('sample')
+mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
+vtx, _, t_rh = stc_rh.center_of_mass('sample')
+mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]
+
+# plot the activation
+pl.figure()
+pl.axes([.1, .275, .85, .625])
+hl = pl.plot(stc.times, stc_lh.data.mean(0), 'b')
+hr = pl.plot(stc.times, stc_rh.data.mean(0), 'g')
+hb = pl.plot(stc.times, stc_bh.data.mean(0), 'r')
+pl.xlabel('Time (s)')
+pl.ylabel('Source amplitude (dSPM)')
+pl.xlim(stc.times[0], stc.times[-1])
+
+# add a legend including center-of-mass mni coordinates to the plot
+labels = ['LH: center of mass = %s' % mni_lh.round(2),
+          'RH: center of mass = %s' % mni_rh.round(2),
+          'Combined LH & RH']
+pl.figlegend([hl[0], hr[0], hb[0]], labels, 'lower center')
+pl.suptitle('Average activation in auditory cortex labels', fontsize=20)
+pl.show()
diff --git a/examples/inverse/plot_label_source_activations.py b/examples/inverse/plot_label_source_activations.py
new file mode 100644
index 0000000..3d258cb
--- /dev/null
+++ b/examples/inverse/plot_label_source_activations.py
@@ -0,0 +1,64 @@
+"""
+====================================================
+Extracting the time series of activations in a label
+====================================================
+
+We first apply a dSPM inverse operator to get signed activations
+in a label (with positive and negative values) and we then
+compare different strategies to average the time series
+in a label. We compare a simple average, an average
+using the dipole normals (flip mode), and a PCA,
+also using a sign flip.
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+from mne.minimum_norm import read_inverse_operator, apply_inverse
+from mne.fiff import Evoked
+
+data_path = sample.data_path()
+label_name = 'Aud-lh'
+label_fname = data_path + '/MEG/sample/labels/%s.label' % label_name
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+
+# Load data
+evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+inverse_operator = read_inverse_operator(fname_inv)
+src = inverse_operator['src']
+
+# Compute inverse solution
+pick_normal = True  # get signed values to see the effect of sign flip
+stc = apply_inverse(evoked, inverse_operator, lambda2, method,
+                    pick_normal=pick_normal)
+
+label = mne.read_label(label_fname)
+
+stc_label = stc.in_label(label)
+mean = stc.extract_label_time_course(label, src, mode='mean')
+mean_flip = stc.extract_label_time_course(label, src, mode='mean_flip')
+pca = stc.extract_label_time_course(label, src, mode='pca_flip')
+
+print "Number of vertices : %d" % len(stc_label.data)
+
+# View source activations
+import pylab as pl
+pl.figure()
+pl.plot(1e3 * stc_label.times, stc_label.data.T, 'k', linewidth=0.5)
+h0, = pl.plot(1e3 * stc_label.times, mean.T, 'r', linewidth=3)
+h1, = pl.plot(1e3 * stc_label.times, mean_flip.T, 'g', linewidth=3)
+h2, = pl.plot(1e3 * stc_label.times, pca.T, 'b', linewidth=3)
+pl.legend([h0, h1, h2], ['mean', 'mean flip', 'PCA flip'])
+pl.xlabel('Time (ms)')
+pl.ylabel('Source amplitude')
+pl.title('Activations in Label : %s' % label_name)
+pl.show()
diff --git a/examples/inverse/plot_lcmv_beamformer.py b/examples/inverse/plot_lcmv_beamformer.py
new file mode 100644
index 0000000..519ec68
--- /dev/null
+++ b/examples/inverse/plot_lcmv_beamformer.py
@@ -0,0 +1,85 @@
+"""
+======================================
+Compute LCMV beamformer on evoked data
+======================================
+
+Compute LCMV beamformer solutions on evoked dataset for two different choices
+of source orientation and store the solutions in stc files for visualisation.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+import numpy as np
+
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.beamformer import lcmv
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+label_name = 'Aud-lh'
+fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
+
+###############################################################################
+# Get epochs
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = Raw(raw_fname)
+raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+events = mne.read_events(event_fname)
+
+# Set up pick list: EEG + MEG - bad channels (modify to your needs)
+left_temporal_channels = mne.read_selection('Left-temporal')
+picks = pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                   exclude='bads', selection=left_temporal_channels)
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=(None, 0), preload=True,
+                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+evoked = epochs.average()
+
+forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
+
+noise_cov = mne.read_cov(fname_cov)
+noise_cov = mne.cov.regularize(noise_cov, evoked.info,
+                               mag=0.05, grad=0.05, eeg=0.1, proj=True)
+
+data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
+
+pl.close('all')
+
+pick_oris = [None, 'max-power']
+names = ['free', 'max-power']
+descriptions = ['Free orientation', 'Max-power orientation']
+colors = ['b', 'r']
+
+for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
+    stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
+               pick_ori=pick_ori)
+
+    # Save result in stc files
+    stc.save('lcmv-' + name)
+
+    # View activation time-series
+    data, times, _ = mne.label_time_courses(fname_label, "lcmv-" + name +
+                                            "-lh.stc")
+    pl.plot(1e3 * times, np.mean(data, axis=0), color, hold=True, label=desc)
+
+pl.xlabel('Time (ms)')
+pl.ylabel('LCMV value')
+pl.ylim(-0.8, 2.2)
+pl.title('LCMV in %s' % label_name)
+pl.legend()
+pl.show()
diff --git a/examples/inverse/plot_lcmv_beamformer_volume.py b/examples/inverse/plot_lcmv_beamformer_volume.py
new file mode 100644
index 0000000..4f57a73
--- /dev/null
+++ b/examples/inverse/plot_lcmv_beamformer_volume.py
@@ -0,0 +1,90 @@
+"""
+===================================================================
+Compute LCMV inverse solution on evoked data in volume source space
+===================================================================
+
+Compute LCMV inverse solution on an auditory evoked dataset in a volume source
+space. It stores the solution in a nifti file for visualisation e.g. with
+Freeview.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import pylab as pl
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.beamformer import lcmv
+
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+###############################################################################
+# Get epochs
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = Raw(raw_fname)
+raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+events = mne.read_events(event_fname)
+
+# Set up pick list: EEG + MEG - bad channels (modify to your needs)
+left_temporal_channels = mne.read_selection('Left-temporal')
+picks = pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                   exclude='bads', selection=left_temporal_channels)
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=(None, 0), preload=True,
+                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+evoked = epochs.average()
+
+forward = mne.read_forward_solution(fname_fwd)
+
+noise_cov = mne.read_cov(fname_cov)
+noise_cov = mne.cov.regularize(noise_cov, evoked.info,
+                               mag=0.05, grad=0.05, eeg=0.1, proj=True)
+
+data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
+
+# Run free orientation (vector) beamformer. Source orientation can be
+# restricted by setting pick_ori to 'max-power' (see the sketch below).
+stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, pick_ori=None)
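+# A max-power variant would read as follows (a sketch, not run here,
+# reusing the arguments from above):
+# stc_pow = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
+#                pick_ori='max-power')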
+
+# Save result in stc files
+stc.save('lcmv-vol')
+
+stc.crop(0.0, 0.2)
+
+# Save result in a 4D nifti file
+img = mne.save_stc_as_volume('lcmv_inverse.nii.gz', stc,
+        forward['src'], mri_resolution=False)  # True for full MRI resolution
+
+# plot result (one slice)
+pl.close('all')
+data = img.get_data()
+coronal_slice = data[:, 10, :, 60]
+pl.figure()
+pl.imshow(np.ma.masked_less(coronal_slice, 1), cmap=pl.cm.Reds,
+          interpolation='nearest')
+pl.colorbar()
+pl.contour(coronal_slice != 0, 1, colors=['black'])
+pl.xticks([])
+pl.yticks([])
+
+# plot source time courses with the maximum peak amplitudes
+pl.figure()
+pl.plot(stc.times, stc.data[np.argsort(np.max(stc.data, axis=1))[-40:]].T)
+pl.xlabel('Time (s)')
+pl.ylabel('LCMV value')
+pl.show()
diff --git a/examples/inverse/plot_make_inverse_operator.py b/examples/inverse/plot_make_inverse_operator.py
new file mode 100644
index 0000000..d198576
--- /dev/null
+++ b/examples/inverse/plot_make_inverse_operator.py
@@ -0,0 +1,89 @@
+"""
+===============================================================
+Assemble inverse operator and compute MNE-dSPM inverse solution
+===============================================================
+
+Assemble M/EEG, MEG, and EEG inverse operators and compute dSPM
+inverse solutions on MNE evoked dataset and store the solutions
+in stc files for visualisation.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+import mne
+from mne.datasets import sample
+from mne.fiff import Evoked
+from mne.minimum_norm import make_inverse_operator, apply_inverse, \
+                             write_inverse_operator
+
+data_path = sample.data_path()
+fname_fwd_meeg = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+fname_fwd_eeg = data_path + '/MEG/sample/sample_audvis-eeg-oct-6-fwd.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+
+# Load data
+evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
+forward_meeg = mne.read_forward_solution(fname_fwd_meeg, surf_ori=True)
+noise_cov = mne.read_cov(fname_cov)
+
+# regularize noise covariance
+noise_cov = mne.cov.regularize(noise_cov, evoked.info,
+                               mag=0.05, grad=0.05, eeg=0.1, proj=True)
+
+# Restrict forward solution as necessary for MEG
+forward_meg = mne.fiff.pick_types_forward(forward_meeg, meg=True, eeg=False)
+# Alternatively, you can just load a forward solution that is restricted
+forward_eeg = mne.read_forward_solution(fname_fwd_eeg, surf_ori=True)
+
+# make M/EEG, MEG-only, and EEG-only inverse operators
+info = evoked.info
+inverse_operator_meeg = make_inverse_operator(info, forward_meeg, noise_cov,
+                                              loose=0.2, depth=0.8)
+inverse_operator_meg = make_inverse_operator(info, forward_meg, noise_cov,
+                                             loose=0.2, depth=0.8)
+inverse_operator_eeg = make_inverse_operator(info, forward_eeg, noise_cov,
+                                             loose=0.2, depth=0.8)
+
+write_inverse_operator('sample_audvis-meeg-oct-6-inv.fif',
+                       inverse_operator_meeg)
+write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
+                       inverse_operator_meg)
+write_inverse_operator('sample_audvis-eeg-oct-6-inv.fif',
+                       inverse_operator_eeg)
+
+# Compute inverse solution
+stcs = dict()
+stcs['meeg'] = apply_inverse(evoked, inverse_operator_meeg, lambda2, "dSPM",
+                             pick_normal=False)
+stcs['meg'] = apply_inverse(evoked, inverse_operator_meg, lambda2, "dSPM",
+                            pick_normal=False)
+stcs['eeg'] = apply_inverse(evoked, inverse_operator_eeg, lambda2, "dSPM",
+                            pick_normal=False)
+
+# Save result in stc files
+names = ['meeg', 'meg', 'eeg']
+for name in names:
+    stcs[name].save('mne_dSPM_inverse-%s' % name)
+
+###############################################################################
+# View activation time-series
+pl.close('all')
+pl.figure(figsize=(8, 6))
+for ii in range(len(stcs)):
+    name = names[ii]
+    stc = stcs[name]
+    pl.subplot(len(stcs), 1, ii + 1)
+    pl.plot(1e3 * stc.times, stc.data[::150, :].T)
+    pl.ylabel('%s\ndSPM value' % str.upper(name))
+pl.xlabel('time (ms)')
+pl.show()
\ No newline at end of file
diff --git a/examples/inverse/plot_mixed_norm_L21_inverse.py b/examples/inverse/plot_mixed_norm_L21_inverse.py
new file mode 100644
index 0000000..edb1e1e
--- /dev/null
+++ b/examples/inverse/plot_mixed_norm_L21_inverse.py
@@ -0,0 +1,69 @@
+"""
+================================================================
+Compute sparse inverse solution based on L1/L2 mixed norm (MxNE)
+================================================================
+
+See
+Gramfort A., Kowalski M. and Hamalainen M.,
+Mixed-norm estimates for the M/EEG inverse problem using accelerated
+gradient methods, Physics in Medicine and Biology, 2012
+http://dx.doi.org/10.1088/0031-9155/57/7/1937
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.inverse_sparse import mixed_norm
+from mne.minimum_norm import make_inverse_operator, apply_inverse
+from mne.viz import plot_sparse_source_estimates
+
+data_path = sample.data_path()
+fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+# Read noise covariance matrix
+cov = mne.read_cov(cov_fname)
+# Handling average file
+setno = 0
+evoked = fiff.read_evoked(ave_fname, setno=setno, baseline=(None, 0))
+evoked.crop(tmin=0, tmax=0.3)
+# Handling forward solution
+forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
+
+cov = mne.cov.regularize(cov, evoked.info)
+
+import pylab as pl
+pl.figure()
+ylim = dict(eeg=[-10, 10], grad=[-400, 400], mag=[-600, 600])
+evoked.plot(ylim=ylim, proj=True)
+
+###############################################################################
+# Run solver
+alpha = 70  # regularization parameter between 0 and 100 (100 is high)
+loose, depth = 0.2, 0.9  # loose orientation & depth weighting
+
+# Compute dSPM solution to be used as weights in MxNE
+inverse_operator = make_inverse_operator(evoked.info, forward, cov,
+                                         loose=None, depth=depth, fixed=True)
+stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
+                         method='dSPM')
+
+# Compute MxNE inverse solution
+stc, residual = mixed_norm(evoked, forward, cov, alpha, loose=loose,
+                           depth=depth, maxit=3000, tol=1e-4,
+                           active_set_size=10, debias=True,
+                           weights=stc_dspm, weights_min=8.,
+                           return_residual=True)
+
+pl.figure()
+residual.plot(ylim=ylim, proj=True)
+
+###############################################################################
+# View in 2D and 3D ("glass" brain like 3D plot)
+plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
+                             opacity=0.1, fig_name="MxNE (cond %s)" % setno)
diff --git a/examples/inverse/plot_morph_data.py b/examples/inverse/plot_morph_data.py
new file mode 100644
index 0000000..df1aab3
--- /dev/null
+++ b/examples/inverse/plot_morph_data.py
@@ -0,0 +1,57 @@
+"""
+==========================================================
+Morph source estimates from one subject to another subject
+==========================================================
+
+A source estimate from a given subject 'sample' is morphed
+to the anatomy of another subject 'fsaverage'. The output
+is a source estimate defined on the anatomy of 'fsaverage'.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+import numpy as np
+from mne.datasets import sample
+
+data_path = sample.data_path()
+
+subject_from = 'sample'
+subject_to = 'fsaverage'
+
+fname = data_path + '/MEG/sample/sample_audvis-meg'
+src_fname = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
+
+# Read input stc file
+stc_from = mne.read_source_estimate(fname)
+# Morph using one method (supplying the vertices in fsaverage's source
+# space makes it faster). Note that for any generic subject, you could do:
+#     vertices_to = mne.grade_to_vertices(subject_to, grade=5)
+# But fsaverage's source space was set up so we can just do this:
+vertices_to = [np.arange(10242), np.arange(10242)]
+stc_to = mne.morph_data(subject_from, subject_to, stc_from, n_jobs=1,
+                        grade=vertices_to)
+stc_to.save('%s_audvis-meg' % subject_to)
+
+# Morph using another method -- useful if you're going to do a lot of the
+# same inter-subject morphing operations; you could save and load morph_mat
+morph_mat = mne.compute_morph_matrix(subject_from, subject_to, stc_from.vertno,
+                                     vertices_to)
+stc_to_2 = mne.morph_data_precomputed(subject_from, subject_to,
+                                      stc_from, vertices_to, morph_mat)
+stc_to_2.save('%s_audvis-meg_2' % subject_to)
+
+# View source activations
+import pylab as pl
+pl.plot(stc_from.times, stc_from.data.mean(axis=0), 'r', label='from')
+pl.plot(stc_to.times, stc_to.data.mean(axis=0), 'b', label='to')
+pl.plot(stc_to_2.times, stc_to_2.data.mean(axis=0), 'g', label='to_2')
+pl.xlabel('time (s)')
+pl.ylabel('Mean Source amplitude')
+pl.legend()
+pl.show()
diff --git a/examples/inverse/plot_read_inverse.py b/examples/inverse/plot_read_inverse.py
new file mode 100644
index 0000000..dff0af1
--- /dev/null
+++ b/examples/inverse/plot_read_inverse.py
@@ -0,0 +1,41 @@
+"""
+=======================================================
+Reading an inverse operator and view source space in 3D
+=======================================================
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+from mne.datasets import sample
+from mne.minimum_norm import read_inverse_operator
+
+data_path = sample.data_path()
+fname = data_path
+fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+
+inv = read_inverse_operator(fname)
+
+print "Method: %s" % inv['methods']
+print "fMRI prior: %s" % inv['fmri_prior']
+print "Number of sources: %s" % inv['nsource']
+print "Number of channels: %s" % inv['nchan']
+
+###############################################################################
+# Show result on 3D source space
+lh_points = inv['src'][0]['rr']
+lh_faces = inv['src'][0]['use_tris']
+rh_points = inv['src'][1]['rr']
+rh_faces = inv['src'][1]['use_tris']
+try:
+    from enthought.mayavi import mlab
+except ImportError:
+    from mayavi import mlab
+
+mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
+mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+                     lh_faces)
+mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
+                     rh_faces)
diff --git a/examples/inverse/plot_read_source_space.py b/examples/inverse/plot_read_source_space.py
new file mode 100644
index 0000000..d5027c1
--- /dev/null
+++ b/examples/inverse/plot_read_source_space.py
@@ -0,0 +1,36 @@
+"""
+==============================================
+Reading a source space from a forward operator
+==============================================
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import os.path as op
+
+import mne
+from mne.datasets import sample
+
+data_path = sample.data_path()
+fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-eeg-oct-6p-fwd.fif')
+
+add_geom = True  # include high resolution source space
+src = mne.read_source_spaces(fname, add_geom=add_geom)
+
+# 3D source space (high sampling)
+lh_points = src[0]['rr']
+lh_faces = src[0]['tris']
+rh_points = src[1]['rr']
+rh_faces = src[1]['tris']
+try:
+    from enthought.mayavi import mlab
+except ImportError:
+    from mayavi import mlab
+mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
+mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+                     lh_faces)
+mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
+                     rh_faces)
diff --git a/examples/inverse/plot_read_stc.py b/examples/inverse/plot_read_stc.py
new file mode 100644
index 0000000..f1b3aa4
--- /dev/null
+++ b/examples/inverse/plot_read_stc.py
@@ -0,0 +1,32 @@
+"""
+===================
+Reading an STC file
+===================
+
+STC files contain activations on the cortex, i.e. source
+reconstructions.
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+
+data_path = sample.data_path()
+fname = data_path + '/MEG/sample/sample_audvis-meg'
+
+stc = mne.read_source_estimate(fname)
+
+n_vertices, n_samples = stc.data.shape
+print "stc data size: %s (nb of vertices) x %s (nb of samples)" % (
+                                                    n_vertices, n_samples)
+
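+# As a rough illustration of working with the data array, numpy can locate
+# the vertex index and time point of the maximum absolute activation
+# (a sketch, not part of the original example):
+import numpy as np
+peak_vertex, peak_time = np.unravel_index(np.abs(stc.data).argmax(),
+                                          stc.data.shape)
+print "Peak: vertex index %d at t = %0.3f s" % (peak_vertex,
+                                                stc.times[peak_time])
+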
+# View source activations
+import pylab as pl
+pl.plot(stc.times, stc.data[::100, :].T)
+pl.xlabel('time (s)')
+pl.ylabel('Source amplitude')
+pl.show()
diff --git a/examples/inverse/plot_time_frequency_mixed_norm_inverse.py b/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
new file mode 100644
index 0000000..754a3fd
--- /dev/null
+++ b/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
@@ -0,0 +1,123 @@
+"""
+=============================================
+Compute MxNE with time-frequency sparse prior
+=============================================
+
+The TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA)
+that promotes focal (sparse) sources, as dipole fitting techniques do.
+The benefits of this approach are that:
+
+  - it is spatio-temporal without assuming stationarity (source properties
+    can vary over time);
+  - activations are localized in space, time and frequency in one step;
+  - with a built-in filtering process based on a short-time Fourier
+    transform (STFT), the data do not need to be low-passed (just
+    high-passed to make the signals zero mean);
+  - the solver solves a convex optimization problem, hence it cannot be
+    trapped in local minima.
+
+References:
+
+A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
+non-stationary source activations
+Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
+DOI: 10.1016/j.neuroimage.2012.12.051.
+
+A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+Functional Brain Imaging with M/EEG Using Structured Sparsity in
+Time-Frequency Dictionaries
+Proceedings Information Processing in Medical Imaging
+Lecture Notes in Computer Science, 2011, Volume 6801/2011,
+600-611, DOI: 10.1007/978-3-642-22092-0_49
+http://dx.doi.org/10.1007/978-3-642-22092-0_49
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.minimum_norm import make_inverse_operator, apply_inverse
+from mne.inverse_sparse import tf_mixed_norm
+from mne.viz import plot_sparse_source_estimates
+
+data_path = sample.data_path()
+fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+# Read noise covariance matrix
+cov = mne.read_cov(cov_fname)
+
+# Handling average file
+setno = 'Left visual'
+evoked = fiff.read_evoked(ave_fname, setno=setno, baseline=(None, 0))
+evoked = fiff.pick.pick_channels_evoked(evoked)
+# We make the window slightly larger than what you'll eventually be interested
+# in ([-0.05, 0.3]) to avoid edge effects.
+evoked.crop(tmin=-0.1, tmax=0.4)
+
+# Handling forward solution
+forward = mne.read_forward_solution(fwd_fname, force_fixed=False,
+                                    surf_ori=True)
+
+cov = mne.cov.regularize(cov, evoked.info)
+
+###############################################################################
+# Run solver
+
+# alpha_space regularization parameter is between 0 and 100 (100 is high)
+alpha_space = 50.  # spatial regularization parameter
+# alpha_time parameter promotes temporal smoothness
+# (0 means no temporal regularization)
+alpha_time = 1.  # temporal regularization parameter
+
+loose, depth = 0.2, 0.9  # loose orientation & depth weighting
+
+# Compute dSPM solution to be used as weights in MxNE
+inverse_operator = make_inverse_operator(evoked.info, forward, cov,
+                                         loose=loose, depth=depth)
+stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
+                         method='dSPM')
+
+# Compute TF-MxNE inverse solution
+stc, residual = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
+                              loose=loose, depth=depth, maxit=200, tol=1e-4,
+                              weights=stc_dspm, weights_min=8., debias=True,
+                              wsize=16, tstep=4, window=0.05,
+                              return_residual=True)
+
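+# For orientation, the STFT window and step sizes can be converted from
+# samples to milliseconds (a back-of-the-envelope sketch, assuming the
+# wsize and tstep values above are expressed in samples):
+sfreq = evoked.info['sfreq']
+print "STFT window: %0.1f ms, step: %0.1f ms" % (16 * 1000. / sfreq,
+                                                 4 * 1000. / sfreq)
+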
+# Crop to remove edges
+stc.crop(tmin=-0.05, tmax=0.3)
+evoked.crop(tmin=-0.05, tmax=0.3)
+residual.crop(tmin=-0.05, tmax=0.3)
+
+import pylab as pl
+pl.figure()
+ylim = dict(eeg=[-10, 10], grad=[-200, 250], mag=[-600, 600])
+picks = fiff.pick_types(evoked.info, meg='grad', exclude='bads')
+evoked.plot(picks=picks, ylim=ylim, proj=True,
+            titles=dict(grad='Evoked Response (grad)'))
+
+pl.figure()
+picks = fiff.pick_types(residual.info, meg='grad', exclude='bads')
+residual.plot(picks=picks, ylim=ylim, proj=True,
+              titles=dict(grad='Residual (grad)'))
+
+###############################################################################
+# View in 2D and 3D ("glass" brain like 3D plot)
+plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
+                             opacity=0.1, fig_name="TF-MxNE (cond %s)" % setno,
+                             modes=['sphere'], scale_factors=[1.])
+
+time_label = 'TF-MxNE time=%0.2f ms'
+brain = stc.plot('sample', 'inflated', 'rh', fmin=10e-9, fmid=15e-9,
+                 fmax=20e-9, time_label=time_label, smoothing_steps=5,
+                 subjects_dir=data_path + '/subjects')
+brain.show_view('medial')
+brain.set_data_time_index(120)
+brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True)
+brain.add_label("V2", color="red", scalar_thresh=.5, borders=True)
diff --git a/examples/plot_channel_epochs_image.py b/examples/plot_channel_epochs_image.py
new file mode 100644
index 0000000..74121d5
--- /dev/null
+++ b/examples/plot_channel_epochs_image.py
@@ -0,0 +1,72 @@
+"""
+===========================================
+Visualize a channel over epochs as an image
+===========================================
+
+This will produce what is sometimes called an event-related
+potential / field (ERP/ERF) image.
+
+Two images are produced: one for a good channel and one for a channel
+that does not see any evoked field.
+
+It is also demonstrated how to reorder the epochs using a 1d spectral
+embedding as described in:
+
+Graph-based variability estimation in single-trial event-related neural
+responses A. Gramfort, R. Keriven, M. Clerc, 2010,
+Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
+http://hal.inria.fr/inria-00497023
+"""
+print __doc__
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import pylab as pl
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# Set up pick list: EEG + MEG - bad channels (modify to your needs)
+raw.info['bads'] = ['MEG 2443', 'EEG 053']
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
+                        exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=(None, 0), preload=True,
+                    reject=dict(grad=4000e-13, eog=150e-6))
+
+###############################################################################
+# Show event-related field images
+
+# ... and order the epochs using spectral reordering.
+# If you don't have scikit-learn installed, set order_func to None.
+from sklearn.cluster.spectral import spectral_embedding
+
+
+def order_func(times, data):
+    this_data = data[:, (times > 0.0) & (times < 0.350)]
+    return np.argsort(spectral_embedding(np.corrcoef(this_data),
+                      n_components=1, random_state=0).ravel())
+
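+# If scikit-learn is not available, a simpler ordering with the same
+# (times, data) -> indices signature can be used instead, e.g. sorting by
+# mean post-stimulus amplitude (a hedged alternative, not equivalent to
+# spectral reordering):
+def order_func_simple(times, data):
+    return np.argsort(data[:, times > 0.0].mean(axis=1))
+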
+good_pick = 97  # channel with a clear evoked response
+bad_pick = 98  # channel with no evoked response
+
+pl.close('all')
+mne.viz.plot_image_epochs(epochs, [good_pick, bad_pick], sigma=0.5,
+                          vmin=-100, vmax=250, colorbar=True,
+                          order=order_func, show=True)
diff --git a/examples/plot_define_target_events.py b/examples/plot_define_target_events.py
new file mode 100644
index 0000000..468f867
--- /dev/null
+++ b/examples/plot_define_target_events.py
@@ -0,0 +1,98 @@
+"""
+============================================================
+Define target events based on time lag, plot evoked response
+============================================================
+
+This script shows how to define higher-order events based on the
+time lag between reference and target events. For
+illustration, we will put the presented face stimuli into two
+classes, that is, 1) followed by an early button press
+(within 590 milliseconds) and 2) followed by a late button
+press (later than 590 milliseconds). Finally, we will
+visualize the evoked responses to both 'quickly-processed'
+and 'slowly-processed' face stimuli.
+
+"""
+# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.event import define_target_events
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+#   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
+include = []  # or stim channels ['STI 014']
+raw.info['bads'] += ['EEG 053']  # bads
+
+# pick MEG channels
+picks = fiff.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
+                        include=include, exclude='bads')
+
+###############################################################################
+# Find stimulus event followed by quick button presses
+
+reference_id = 5  # presentation of a smiley face
+target_id = 32  # button press
+sfreq = raw.info['sfreq']  # sampling rate
+tmin = 0.1  # trials leading to very early responses will be rejected
+tmax = 0.59  # ignore face stimuli followed by button press later than 590 ms
+new_id = 42  # the new event id for a hit. If None, reference_id is used.
+fill_na = 99  # the fill value for misses
+
+events_, lag = define_target_events(events, reference_id, target_id,
+                                    sfreq, tmin, tmax, new_id, fill_na)
+
+print events_  # The 99 indicates missing or too late button presses
+
+# Besides the events, the lag between target and reference is also returned;
+# it could e.g. be used as a parametric regressor in subsequent analyses.
+
+print lag[lag != fill_na]  # lag in milliseconds
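+
+# As a quick sanity check, the response latencies can be summarized
+# (a small sketch; units follow the lag values printed above):
+valid_lag = lag[lag != fill_na]
+print "Mean / SD latency: %0.1f / %0.1f ms" % (valid_lag.mean(),
+                                               valid_lag.std())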
+
+# #############################################################################
+# Construct epochs
+
+tmin_ = -0.2
+tmax_ = 0.4
+event_id = dict(early=new_id, late=fill_na)
+
+epochs = mne.Epochs(raw, events_, event_id, tmin_,
+                    tmax_, picks=picks, baseline=(None, 0),
+                    reject=dict(mag=4e-12))
+
+# average epochs and get an Evoked dataset.
+
+early, late = [epochs[k].average() for k in ('early', 'late')]
+
+###############################################################################
+# View evoked response
+
+import pylab as pl
+
+times = 1e3 * epochs.times  # time in milliseconds
+title = 'Evoked response followed by %s button press'
+
+pl.clf()
+ax = pl.subplot(2, 1, 1)
+early.plot(axes=ax)
+pl.title(title % 'early')
+pl.ylabel('Evoked field (fT)')
+ax = pl.subplot(2, 1, 2)
+late.plot(axes=ax)
+pl.title(title % 'late')
+pl.ylabel('Evoked field (fT)')
+pl.show()
diff --git a/examples/plot_estimate_covariance_matrix_baseline.py b/examples/plot_estimate_covariance_matrix_baseline.py
new file mode 100644
index 0000000..613fb41
--- /dev/null
+++ b/examples/plot_estimate_covariance_matrix_baseline.py
@@ -0,0 +1,55 @@
+"""
+===============================================
+Estimate covariance matrix from Epochs baseline
+===============================================
+
+We first define a set of Epochs from events and a raw file.
+Then we estimate the noise covariance of prestimulus data,
+a.k.a. baseline.
+
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+
+data_path = sample.data_path()
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+#   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
+include = []  # or stim channels ['STI 014']
+raw.info['bads'] += ['EEG 053']  # bads + 1 more
+
+# pick MEG, EEG and EOG channels
+picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
+                        include=include, exclude='bads')
+# Read epochs, with proj off by default so we can plot either way later
+reject = dict(grad=4000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject, proj=False)
+
+# Compute the covariance on baseline
+cov = mne.compute_covariance(epochs, tmin=None, tmax=0)
+print cov
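+
+# The estimated covariance could be saved for later reuse (the file name
+# below is hypothetical):
+cov.save('sample_audvis-baseline-cov.fif')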
+
+###############################################################################
+# Show covariance
+mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=True)
+# try setting proj to False to see the effect
diff --git a/examples/plot_estimate_covariance_matrix_raw.py b/examples/plot_estimate_covariance_matrix_raw.py
new file mode 100644
index 0000000..bcbfc15
--- /dev/null
+++ b/examples/plot_estimate_covariance_matrix_raw.py
@@ -0,0 +1,38 @@
+"""
+==============================================
+Estimate covariance matrix from a raw FIF file
+==============================================
+
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+
+data_path = sample.data_path()
+fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+
+raw = fiff.Raw(fname)
+
+include = []  # or stim channels ['STI 014']
+raw.info['bads'] += ['EEG 053']  # bads + 1 more
+
+# pick MEG, EEG and EOG channels
+picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
+                        include=include, exclude='bads')
+# setup rejection
+reject = dict(eeg=80e-6, eog=150e-6)
+
+# Compute the covariance from the raw data
+cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=reject)
+print cov
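+
+# If only part of the recording is artifact-free, the estimate can be
+# restricted in time (a sketch assuming compute_raw_data_covariance
+# accepts tmin/tmax arguments in seconds):
+cov_segment = mne.compute_raw_data_covariance(raw, tmin=0, tmax=30,
+                                              picks=picks, reject=reject)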
+
+###############################################################################
+# Show covariance
+mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=True)
+# try setting proj to False to see the effect
diff --git a/examples/plot_evoked_delayed_ssp.py b/examples/plot_evoked_delayed_ssp.py
new file mode 100644
index 0000000..b41c65f
--- /dev/null
+++ b/examples/plot_evoked_delayed_ssp.py
@@ -0,0 +1,95 @@
+"""
+=========================================
+Create evoked objects in delayed SSP mode
+=========================================
+
+This script shows how to apply SSP projectors delayed, that is,
+at the evoked stage. This is particularly useful to support decisions
+related to the trade-off between denoising and preserving signal.
+We first will extract Epochs and create evoked objects
+with the required settings for delayed SSP application.
+Then we will explore the impact of the particular SSP projectors
+on the evoked data.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+import mne
+from mne import fiff
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# pick magnetometer channels
+picks = fiff.pick_types(raw.info, meg='mag', stim=False, eog=True,
+                        include=[], exclude='bads')
+
+# If we suspend SSP projection at the epochs stage, we might reject
+# more epochs than necessary. To deal with this we set proj to `delayed`
+# while passing the reject parameters. Each epoch will then be projected
+# before peak-to-peak amplitude rejection; if it survives the rejection
+# procedure, the unprojected raw epoch is kept instead.
+# As a consequence, the point in time at which the projection is applied
+# has no impact on the final results.
+# We will make use of this option to prepare for interactively selecting
+# projections at the evoked stage.
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12),
+                    proj='delayed')
+
+evoked = epochs.average()  # average epochs and get an Evoked dataset.
+
+###############################################################################
+# Interactively select / deselect the SSP projection vectors
+
+# Here we expose the details of how to apply SSPs reversibly
+title = 'Incremental SSP application'
+
+# let's first move the proj list to another location
+projs, evoked.info['projs'] = evoked.info['projs'], []
+fig, axes = pl.subplots(2, 2)  # create 4 subplots for our four vectors
+
+# As the bulk of projectors was extracted from the same source, we can simply
+# iterate over our collection of projs and add them step by step to see how
+# the signals change as a function of the SSPs applied. As this operation
+# can't be undone we will operate on copies of the original evoked object to
+# keep things reversible.
+
+for proj, ax in zip(projs, axes.flatten()):
+    evoked.add_proj(proj)  # add projection vectors loop by loop.
+    evoked.copy().apply_proj().plot(axes=ax)  # apply on a copy of evoked
+    ax.set_title('+ %s' % proj['desc'])  # extract description.
+pl.suptitle(title)
+pl.show()
+
+# We could also have visualized the impact of each single projection vector
+# by deleting the vector directly after visualizing the change,
+# e.g. by appending the following line to our loop:
+#   `evoked.del_proj(-1)`
+# A worked-out variant of this idea follows below.
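+
+# Written out, that variant might look as follows (a sketch reusing the
+# objects defined above; each projector is applied to a fresh,
+# projection-free average so its effect is shown in isolation):
+fig2, axes2 = pl.subplots(2, 2)
+for proj, ax in zip(projs, axes2.flatten()):
+    ev = epochs.average()
+    ev.info['projs'] = []  # start from a projection-free copy
+    ev.add_proj(proj)
+    ev.copy().apply_proj().plot(axes=ax)  # same pattern as the loop above
+    ax.set_title('only %s' % proj['desc'])
+pl.show()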
+
+# Often, it is desirable to interactively explore data. To make this more
+# convenient we can make use of the 'interactive' option. This will open a
+# check box that allows us to reversibly select projection vectors. Any
+# modification of the selection will immediately cause the figure to update.
+
+pl.figure()
+evoked.plot(proj='interactive')
+pl.show()
+
+# Hint: the same works with evoked.plot_topomap
diff --git a/examples/plot_evoked_topomap.py b/examples/plot_evoked_topomap.py
new file mode 100644
index 0000000..b4ade71
--- /dev/null
+++ b/examples/plot_evoked_topomap.py
@@ -0,0 +1,35 @@
+"""
+========================================
+Plotting topographic maps of evoked data
+========================================
+
+Load evoked data and plot topomaps for selected time points.
+
+"""
+# Author: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+
+path = mne.datasets.sample.data_path()
+fname = path + '/MEG/sample/sample_audvis-ave.fif'
+
+# load evoked and subtract baseline
+evoked = mne.fiff.read_evoked(fname, 'Left Auditory', baseline=(None, 0))
+
+# plot magnetometer data as a topomap at a single time point: 100 ms
+evoked.plot_topomap(0.1, ch_type='mag', size=3, colorbar=False)
+
+# set time instants in seconds (from 50 to 150 ms in steps of 10 ms)
+times = np.arange(0.05, 0.15, 0.01)
+# If times is set to None, only 10 regularly spaced topographies will be shown
+
+# plot magnetometer data as topomaps
+evoked.plot_topomap(times, ch_type='mag')
+
+# plot gradiometer data (plots the RMS for each pair of gradiometers)
+evoked.plot_topomap(times, ch_type='grad')
diff --git a/examples/plot_evoked_topomap_delayed_ssp.py b/examples/plot_evoked_topomap_delayed_ssp.py
new file mode 100644
index 0000000..3f80e8d
--- /dev/null
+++ b/examples/plot_evoked_topomap_delayed_ssp.py
@@ -0,0 +1,61 @@
+"""
+===============================================
+Create topographic ERF maps in delayed SSP mode
+===============================================
+
+This script shows how to apply SSP projectors delayed, that is,
+at the evoked stage. This is particularly useful to support decisions
+related to the trade-off between denoising and preserving signal.
+In this example we demonstrate how to use topographic maps for delayed
+SSP application.
+"""
+# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+#          Christian Brodbeck <christianbrodbeck at nyu.edu>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import mne
+from mne import fiff
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# delete the EEG projection (we know it's the last one)
+raw.del_proj(-1)
+# add ECG projs for magnetometers
+for p in mne.read_proj(ecg_fname):
+    if 'axial' in p['desc']:
+        raw.add_proj(p)
+
+# pick magnetometer channels
+picks = fiff.pick_types(raw.info, meg='mag', stim=False, eog=True,
+                        include=[], exclude='bads')
+
+# We will make use of the proj `delayed` option to
+# interactively select projections at the evoked stage.
+# More information can be found in examples/plot_evoked_delayed_ssp.py
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12), proj='delayed')
+
+evoked = epochs.average()  # average epochs and get an Evoked dataset.
+
+###############################################################################
+# Interactively select / deselect the SSP projection vectors
+
+# set time instants in seconds (from 50 to 150 ms in steps of 10 ms)
+times = np.arange(0.05, 0.15, 0.01)
+
+evoked.plot_topomap(times, proj='interactive')
+# Hint: the same works for evoked.plot and viz.plot_topo
diff --git a/examples/plot_evoked_whitening.py b/examples/plot_evoked_whitening.py
new file mode 100644
index 0000000..0d4b98a
--- /dev/null
+++ b/examples/plot_evoked_whitening.py
@@ -0,0 +1,48 @@
+"""
+=============================================
+Whitening evoked data with a noise covariance
+=============================================
+
+Evoked data are loaded and then whitened using a given
+noise covariance matrix. This is an excellent
+quality check: the baseline signals should match the assumption
+of Gaussian white noise, i.e. values around zero and mostly
+within 2 standard deviations.
+
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+# Reading
+evoked = mne.fiff.Evoked(fname, setno=0, baseline=(None, 0), proj=True)
+noise_cov = mne.read_cov(cov_fname)
+
+###############################################################################
+# Show result
+
+# Pick channels to view
+picks = mne.fiff.pick_types(evoked.info, meg=True, eeg=True, exclude='bads')
+evoked.plot(picks=picks)
+
+noise_cov = mne.cov.regularize(noise_cov, evoked.info,
+                               grad=0.1, mag=0.1, eeg=0.1)
+
+evoked_white = mne.whiten_evoked(evoked, noise_cov, picks, diag=True)
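+
+# A rough numerical check of the statement above: for well-whitened data,
+# roughly 95% of baseline samples should lie within +/- 2 (a sketch):
+import numpy as np
+baseline_data = evoked_white.data[:, evoked_white.times <= 0]
+print "%0.1f%% of baseline samples within 2 std" % (
+    100. * np.mean(np.abs(baseline_data) < 2))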
+
+# plot the whitened evoked data to see if the baseline signals match the
+# assumption of Gaussian white noise, i.e. values around zero and mostly
+# within 2 standard deviations.
+import pylab as pl
+pl.figure()
+evoked_white.plot(picks=picks, unit=False, hline=[-2, 2])
diff --git a/examples/plot_from_raw_to_epochs_to_evoked.py b/examples/plot_from_raw_to_epochs_to_evoked.py
new file mode 100644
index 0000000..3590879
--- /dev/null
+++ b/examples/plot_from_raw_to_epochs_to_evoked.py
@@ -0,0 +1,65 @@
+"""
+========================================================
+Extract epochs, average and save evoked response to disk
+========================================================
+
+This script shows how to read the epochs from a raw file given
+a list of events. The epochs are averaged to produce evoked
+data and then saved to disk.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.viz import plot_drop_log
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+#   Plot raw data
+fig = raw.plot(events=events)
+
+#   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
+include = []  # or stim channels ['STI 014']
+raw.info['bads'] += ['EEG 053']  # bads + 1 more
+
+# pick EEG channels
+picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
+                        include=include, exclude='bads')
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
+evoked = epochs.average()  # average epochs and get an Evoked dataset.
+
+evoked.save('sample_audvis_eeg-ave.fif')  # save evoked data to disk
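+
+# The file written above can be read back for verification (a small
+# sketch; setno=0 selects the first and only data set in the file):
+evoked_read = fiff.read_evoked('sample_audvis_eeg-ave.fif', setno=0)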
+
+###############################################################################
+# View evoked response
+times = 1e3 * epochs.times  # time in milliseconds
+import pylab as pl
+pl.figure()
+evoked.plot()
+pl.xlim([times[0], times[-1]])
+pl.xlabel('time (ms)')
+pl.ylabel('Potential (uV)')
+pl.title('EEG evoked potential')
+pl.show()
+
+# Look at channels that caused dropped events, showing that the subject's
+# blinks were likely to blame for most epochs being dropped
+epochs.drop_bad_epochs()
+plot_drop_log(epochs.drop_log, subject='sample')
diff --git a/examples/plot_from_raw_to_multiple_epochs_to_evoked.py b/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
new file mode 100644
index 0000000..671bbcd
--- /dev/null
+++ b/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
@@ -0,0 +1,71 @@
+"""
+====================================================================
+Extract epochs for multiple conditions, save evoked response to disk
+====================================================================
+
+This script shows how to read the epochs for multiple conditions from
+a raw file given a list of events. The epochs are averaged to produce
+evoked data and then saved to disk.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.epochs import combine_event_ids
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
+tmin = -0.2
+tmax = 0.5
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+#   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
+include = []  # or stim channels ['STI 014']
+raw.info['bads'] += ['EEG 053']  # bads + 1 more
+
+# pick EEG channels
+picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
+                        include=include, exclude='bads')
+# Read epochs
+epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
+# Let's equalize the trial counts in each condition
+epochs.equalize_event_counts(['AudL', 'AudR', 'VisL', 'VisR'], copy=False)
+# Now let's combine some conditions
+combine_event_ids(epochs, ['AudL', 'AudR'], {'Auditory': 12}, copy=False)
+combine_event_ids(epochs, ['VisL', 'VisR'], {'Visual': 34}, copy=False)
+
+# average epochs and get Evoked datasets
+evokeds = [epochs[cond].average() for cond in ['Auditory', 'Visual']]
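+
+# Quick check: after equalizing, both conditions should be built from the
+# same number of trials; the nave attribute records this.
+print [(e.comment, e.nave) for e in evokeds]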
+
+# save evoked data to disk
+fiff.write_evoked('sample_auditory_and_visual_eeg-ave.fif', evokeds)
+
+###############################################################################
+# View evoked response
+import pylab as pl
+pl.clf()
+ax = pl.subplot(2, 1, 1)
+evokeds[0].plot(axes=ax)
+pl.title('EEG evoked potential, auditory trials')
+pl.ylabel('Potential (uV)')
+ax = pl.subplot(2, 1, 2)
+evokeds[1].plot(axes=ax)
+pl.title('EEG evoked potential, visual trials')
+pl.ylabel('Potential (uV)')
+pl.show()
diff --git a/examples/plot_megsim_data.py b/examples/plot_megsim_data.py
new file mode 100644
index 0000000..1126357
--- /dev/null
+++ b/examples/plot_megsim_data.py
@@ -0,0 +1,53 @@
+"""
+===========================================
+MEGSIM experimental and simulation datasets
+===========================================
+
+The MEGSIM dataset consists of experimental and simulated MEG data
+which can be useful for reproducing research results.
+
+The MEGSIM files will be downloaded automatically.
+
+The datasets are documented in:
+Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
+Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
+(2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
+Realistic Simulated and Empirical Data. Neuroinformatics 10:141-158
+"""
+
+import pylab as pl
+import mne
+from mne.datasets.megsim import load_data
+
+condition = 'visual'  # or 'auditory' or 'somatosensory'
+
+# Load experimental RAW files for the visual condition
+raw_fnames = load_data(condition=condition, data_format='raw',
+                       data_type='experimental')
+
+# Load simulation evoked files for the visual condition
+evoked_fnames = load_data(condition=condition, data_format='evoked',
+                          data_type='simulation')
+
+raw = mne.fiff.Raw(raw_fnames[0])
+events = mne.find_events(raw, stim_channel="STI 014")
+
+# Visualize raw file
+raw.plot()
+
+# Make an evoked file from the experimental data
+picks = mne.fiff.pick_types(raw.info, meg=True, eog=True, exclude='bads')
+
+# Read epochs
+event_id, tmin, tmax = 9, -0.2, 0.5
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0),
+                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+evoked = epochs.average()  # average epochs and get an Evoked dataset.
+pl.figure()
+evoked.plot()
+
+# Compare to the simulated data
+evoked_sim = mne.fiff.Evoked(evoked_fnames[0])
+pl.figure()
+evoked_sim.plot()
diff --git a/examples/plot_megsim_data_single_trial.py b/examples/plot_megsim_data_single_trial.py
new file mode 100644
index 0000000..3d87ef1
--- /dev/null
+++ b/examples/plot_megsim_data_single_trial.py
@@ -0,0 +1,35 @@
+"""
+======================================
+MEGSIM single trial simulation dataset
+======================================
+
+The MEGSIM dataset consists of experimental and simulated MEG data
+which can be useful for reproducing research results.
+
+The MEGSIM files will be downloaded automatically.
+
+The datasets are documented in:
+Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
+Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
+(2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
+Realistic Simulated and Empirical Data. Neuroinformatics 10:141-158
+"""
+
+import pylab as pl
+import mne
+from mne.datasets.megsim import load_data
+
+condition = 'visual'  # or 'auditory' or 'somatosensory'
+
+# Load single-trial simulation files for the visual condition
+epochs_fnames = load_data(condition=condition, data_format='single-trial',
+                          data_type='simulation')
+
+# Take only the first 10 trials from the same simulation setup.
+epochs_fnames = [f for f in epochs_fnames if 'sim6_trial_' in f][:10]
+
+evokeds = [mne.fiff.read_evoked(f) for f in epochs_fnames]
+mean_evoked = sum(evokeds[1:], evokeds[0])
+
+# Visualize the average
+mean_evoked.plot()
diff --git a/examples/plot_read_and_write_raw_data.py b/examples/plot_read_and_write_raw_data.py
new file mode 100644
index 0000000..a048422
--- /dev/null
+++ b/examples/plot_read_and_write_raw_data.py
@@ -0,0 +1,44 @@
+"""
+=============================
+Reading and writing raw files
+=============================
+
+In this example we read a raw file, plot a segment of data
+restricted to MEG channels, and save these data to a new
+raw file.
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+from mne import fiff
+from mne.datasets import sample
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+
+raw = fiff.Raw(fname)
+
+# Set up pick list: MEG + STI 014 - bad channels
+want_meg = True
+want_eeg = False
+want_stim = False
+include = ['STI 014']
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bad channels + 2 more
+
+picks = fiff.pick_types(raw.info, meg=want_meg, eeg=want_eeg, stim=want_stim,
+                        include=include, exclude='bads')
+
+some_picks = picks[:5]  # take the first 5 channels
+start, stop = raw.time_as_index([0, 15])  # read the first 15s of data
+data, times = raw[some_picks, start:(stop + 1)]
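+
+# A quick look at the extracted segment (a sketch; the channels are in
+# their native units, so the amplitude scale is arbitrary here):
+import pylab as pl
+pl.plot(times, data.T)
+pl.xlabel('time (s)')
+pl.show()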
+
+# save 150 s of MEG data to a FIF file
+raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks,
+         overwrite=True)
+
+###############################################################################
+# Show MEG data
+raw.plot()
diff --git a/examples/plot_read_bem_surfaces.py b/examples/plot_read_bem_surfaces.py
new file mode 100644
index 0000000..0d946d2
--- /dev/null
+++ b/examples/plot_read_bem_surfaces.py
@@ -0,0 +1,40 @@
+"""
+============================================
+Reading BEM surfaces from a forward solution
+============================================
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+
+data_path = sample.data_path()
+fname = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
+
+surfaces = mne.read_bem_surfaces(fname, add_geom=True)
+
+print "Number of surfaces : %d" % len(surfaces)
+
+###############################################################################
+# Show result
+head_col = (0.95, 0.83, 0.83)  # light pink
+skull_col = (0.91, 0.89, 0.67)
+brain_col = (0.67, 0.89, 0.91)  # light blue
+colors = [head_col, skull_col, brain_col]
+
+# 3D source space
+try:
+    from enthought.mayavi import mlab
+except ImportError:
+    from mayavi import mlab
+
+mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
+for c, surf in zip(colors, surfaces):
+    points = surf['rr']
+    faces = surf['tris']
+    mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces,
+                         color=c, opacity=0.3)
diff --git a/examples/plot_read_epochs.py b/examples/plot_read_epochs.py
new file mode 100644
index 0000000..fd64d53
--- /dev/null
+++ b/examples/plot_read_epochs.py
@@ -0,0 +1,47 @@
+"""
+==================================
+Reading epochs from a raw FIF file
+==================================
+
+This script shows how to read the epochs from a raw file given
+a list of events. For illustration, we compute the evoked responses
+for both MEG and EEG data by averaging all the epochs.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.viz import plot_evoked
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# Set up pick list: EEG + MEG - bad channels (modify to your needs)
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
+                        exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=(None, 0), preload=True,
+                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+evoked = epochs.average()  # average epochs to get the evoked response
+
+###############################################################################
+# Show result
+plot_evoked(evoked)
diff --git a/examples/plot_read_evoked.py b/examples/plot_read_evoked.py
new file mode 100644
index 0000000..60e54be
--- /dev/null
+++ b/examples/plot_read_evoked.py
@@ -0,0 +1,28 @@
+"""
+==================================
+Reading and writing an evoked file
+==================================
+
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+from mne import fiff
+from mne.datasets import sample
+from mne.viz import plot_evoked
+
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+# Reading
+evoked = fiff.Evoked(fname, setno='Left Auditory',
+                     baseline=(None, 0), proj=True)
+
+###############################################################################
+# Show result:
+# By using exclude=[] bad channels are not excluded and are shown in red
+plot_evoked(evoked, exclude=[])
diff --git a/examples/plot_read_forward.py b/examples/plot_read_forward.py
new file mode 100644
index 0000000..bef623c
--- /dev/null
+++ b/examples/plot_read_forward.py
@@ -0,0 +1,44 @@
+"""
+====================================================
+Read a forward operator and display sensitivity maps
+====================================================
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+
+fwd = mne.read_forward_solution(fname, surf_ori=True)
+leadfield = fwd['sol']['data']
+
+print "Leadfield size : %d x %d" % leadfield.shape
+
+grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
+mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
+eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
+
+###############################################################################
+# Show the gain matrix (a.k.a. leadfield matrix) with the sensitivity map
+
+import pylab as pl
+pl.matshow(leadfield[:, :500])
+pl.xlabel('sources')
+pl.ylabel('sensors')
+pl.title('Lead field matrix (500 dipoles only)')
+
+pl.figure()
+pl.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
+        bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'])
+pl.legend()
+pl.title('Normal orientation sensitivity')
+pl.show()
+
+args = dict(fmin=0.1, fmid=0.5, fmax=0.9, smoothing_steps=7)
+grad_map.plot(subject='sample', time_label='Gradiometers sensitivity', **args)
diff --git a/examples/plot_read_noise_covariance_matrix.py b/examples/plot_read_noise_covariance_matrix.py
new file mode 100644
index 0000000..a997660
--- /dev/null
+++ b/examples/plot_read_noise_covariance_matrix.py
@@ -0,0 +1,29 @@
+"""
+=========================================
+Reading/Writing a noise covariance matrix
+=========================================
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+
+data_path = sample.data_path()
+fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+cov = mne.Covariance(fname)
+print cov
+
+###############################################################################
+# Show covariance
+
+# Note: if you have the measurement info you can use mne.viz.plot_cov
+
+import pylab as pl
+pl.matshow(cov.data)
+pl.title('Noise covariance matrix (%d channels)' % cov.data.shape[0])
+pl.show()
diff --git a/examples/plot_shift_evoked.py b/examples/plot_shift_evoked.py
new file mode 100644
index 0000000..4b6175c
--- /dev/null
+++ b/examples/plot_shift_evoked.py
@@ -0,0 +1,43 @@
+"""
+==================================
+Shifting time-scale in evoked data
+==================================
+
+"""
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+from mne import fiff
+from mne.datasets import sample
+
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+# Reading evoked data
+evoked = fiff.Evoked(fname, setno='Left Auditory',
+                     baseline=(None, 0), proj=True)
+
+picks = fiff.pick_channels(ch_names=evoked.info['ch_names'],
+                           include=["MEG 2332"], exclude=["bad"])
+
+# Create subplots
+f, axarr = pl.subplots(3)
+evoked.plot(exclude=[], picks=picks, axes=axarr[0],
+            titles=dict(grad='Before time shifting'))
+
+# Apply relative time-shift of 500 ms
+evoked.shift_time(0.5, relative=True)
+
+evoked.plot(exclude=[], picks=picks, axes=axarr[1],
+            titles=dict(grad='Relative shift: 500 ms'))
+
+# Apply absolute time-shift of 500 ms
+evoked.shift_time(0.5, relative=False)
+
+evoked.plot(exclude=[], picks=picks, axes=axarr[2],
+            titles=dict(grad='Absolute shift: 500 ms'))
diff --git a/examples/plot_simulate_evoked_data.py b/examples/plot_simulate_evoked_data.py
new file mode 100644
index 0000000..4e36d26
--- /dev/null
+++ b/examples/plot_simulate_evoked_data.py
@@ -0,0 +1,85 @@
+"""
+==============================
+Generate simulated evoked data
+==============================
+
+"""
+# Author: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import pylab as pl
+
+import mne
+from mne.fiff.pick import pick_types_evoked, pick_types_forward
+from mne.datasets import sample
+from mne.time_frequency import iir_filter_raw, morlet
+from mne.viz import plot_evoked, plot_sparse_source_estimates
+from mne.simulation import generate_sparse_stc, generate_evoked
+
+###############################################################################
+# Load real data as templates
+data_path = sample.data_path()
+
+raw = mne.fiff.Raw(data_path + '/MEG/sample/sample_audvis_raw.fif')
+proj = mne.read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')
+raw.info['projs'] += proj
+raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels
+
+fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+fwd = mne.read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
+fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
+
+cov = mne.read_cov(cov_fname)
+
+evoked_template = mne.fiff.read_evoked(ave_fname, setno=0, baseline=None)
+evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
+                                    exclude=raw.info['bads'])
+
+label_names = ['Aud-lh', 'Aud-rh']
+labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
+          for ln in label_names]
+
+###############################################################################
+# Generate source time courses and the corresponding evoked data
+snr = 6  # dB
+tmin = -0.1
+sfreq = 1000.  # Hz
+tstep = 1. / sfreq
+n_samples = 600
+times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
+
+# Generate time series from 2 Morlet wavelets
+stc_data = np.zeros((len(labels), len(times)))
+Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
+stc_data[0][:len(Ws[0])] = np.real(Ws[0])
+stc_data[1][:len(Ws[1])] = np.real(Ws[1])
+stc_data *= 100 * 1e-9  # use nAm as unit
+
+# time translation
+stc_data[1] = np.roll(stc_data[1], 80)
+stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
+                          random_state=0)
+
+###############################################################################
+# Generate noisy evoked data
+picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads')
+iir_filter = iir_filter_raw(raw, order=5, picks=picks, tmin=60, tmax=180)
+evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
+                         tmin=0.0, tmax=0.2, iir_filter=iir_filter)
+
+###############################################################################
+# Plot
+plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
+                             opacity=0.5, high_resolution=True)
+
+pl.figure()
+pl.psd(evoked.data[0])
+
+pl.figure()
+plot_evoked(evoked)
diff --git a/examples/plot_ssp_projs_sensitivity_map.py b/examples/plot_ssp_projs_sensitivity_map.py
new file mode 100644
index 0000000..79a19cd
--- /dev/null
+++ b/examples/plot_ssp_projs_sensitivity_map.py
@@ -0,0 +1,38 @@
+"""
+==================================
+Sensitivity map of SSP projections
+==================================
+
+This example shows the sources that have a forward field
+similar to the first SSP vector correcting for ECG.
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
+
+fwd = mne.read_forward_solution(fname, surf_ori=True)
+projs = mne.read_proj(ecg_fname)
+projs = projs[3:][::2]  # take only one projection per channel type
+
+# Compute sensitivity map
+ssp_ecg_map = mne.sensitivity_map(fwd, ch_type='grad', projs=projs,
+                                  mode='angle')
+
+###############################################################################
+# Show sensitivity map
+
+import pylab as pl
+pl.hist(ssp_ecg_map.data.ravel())
+pl.show()
+
+args = dict(fmin=0.2, fmid=0.6, fmax=1., smoothing_steps=7, hemi='rh')
+ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args)
diff --git a/examples/plot_ssp_projs_topomaps.py b/examples/plot_ssp_projs_topomaps.py
new file mode 100644
index 0000000..00451a6
--- /dev/null
+++ b/examples/plot_ssp_projs_topomaps.py
@@ -0,0 +1,31 @@
+"""
+=================================
+Plot SSP projection topographies
+=================================
+
+This example shows how to display topographies of SSP projection vectors.
+The projections used are the ones correcting for ECG artifacts.
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+import mne
+from mne.datasets import sample
+data_path = sample.data_path()
+
+ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
+ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+evoked = mne.fiff.read_evoked(ave_fname, setno='Left Auditory')
+projs = mne.read_proj(ecg_fname)
+
+layouts = [mne.layouts.read_layout('Vectorview-all'),
+           mne.layouts.make_eeg_layout(evoked.info)]
+
+pl.figure(figsize=(10, 6))
+mne.viz.plot_projs_topomap(projs, layout=layouts)
+mne.viz.tight_layout()
diff --git a/examples/plot_topo_channel_epochs_image.py b/examples/plot_topo_channel_epochs_image.py
new file mode 100644
index 0000000..f657e71
--- /dev/null
+++ b/examples/plot_topo_channel_epochs_image.py
@@ -0,0 +1,55 @@
+"""
+=============================================================
+Visualize channels over epochs as images in sensor topography
+=============================================================
+
+This will produce what is sometimes called event-related
+potential / field (ERP/ERF) images.
+
+One sensor topography plot is produced with the evoked field images from
+the selected channels.
+"""
+print __doc__
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import pylab as pl
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.layouts import read_layout
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# Set up pick list: EEG + MEG - bad channels (modify to your needs)
+raw.info['bads'] = ['MEG 2443', 'EEG 053']
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
+                        exclude='bads')
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=(None, 0), preload=True,
+                    reject=dict(grad=4000e-13, eog=150e-6))
+
+###############################################################################
+# Show event-related field images
+
+layout = read_layout('Vectorview-all')
+
+title = 'ERF images - MNE sample data'
+mne.viz.plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
+                               colorbar=True, title=title)
+pl.show()
diff --git a/examples/plot_topo_compare_conditions.py b/examples/plot_topo_compare_conditions.py
new file mode 100644
index 0000000..230c59f
--- /dev/null
+++ b/examples/plot_topo_compare_conditions.py
@@ -0,0 +1,71 @@
+"""
+=================================================
+Compare evoked responses for different conditions
+=================================================
+
+In this example, an Epochs object for visual and
+auditory responses is created. Both conditions
+are then accessed by their respective names to
+create a sensor layout plot of the related
+evoked responses.
+
+"""
+
+# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+import mne
+
+from mne.fiff import Raw, pick_types
+from mne.viz import plot_topo
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id = 1
+tmin = -0.2
+tmax = 0.5
+
+#   Setup for reading the raw data
+raw = Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+#   Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
+include = []  # or stim channels ['STI 014']
+# bad channels in raw.info['bads'] will be automatically excluded
+
+#   Set up amplitude-peak rejection values for MEG channels
+reject = dict(grad=4000e-13, mag=4e-12)
+
+# pick MEG channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                   include=include, exclude='bads')
+
+# Create epochs including different events
+epochs = mne.Epochs(raw, events, dict(audio_l=1, visual_r=3), tmin, tmax,
+                    picks=picks, baseline=(None, 0), reject=reject)
+
+# Generate list of evoked objects from conditions names
+evokeds = [epochs[name].average() for name in ('audio_l', 'visual_r')]
+
+###############################################################################
+# Show topography for two different conditions
+
+colors = 'yellow', 'green'
+title = 'MNE sample data - left auditory and visual'
+
+plot_topo(evokeds, color=colors, title=title)
+
+conditions = [e.comment for e in evokeds]
+for cond, col, pos in zip(conditions, colors, (0.025, 0.07)):
+    pl.figtext(0.775, pos, cond, color=col, fontsize=12)
+
+pl.show()
diff --git a/examples/plot_topography.py b/examples/plot_topography.py
new file mode 100644
index 0000000..a478988
--- /dev/null
+++ b/examples/plot_topography.py
@@ -0,0 +1,30 @@
+"""
+=================================
+Plot topographies for MEG sensors
+=================================
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+
+from mne import fiff
+from mne.viz import plot_topo
+from mne.datasets import sample
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+# Reading
+evoked = fiff.read_evoked(fname, setno=0, baseline=(None, 0))
+
+###############################################################################
+# Show topography
+title = 'MNE sample data (condition : %s)' % evoked.comment
+plot_topo(evoked, title=title)
+pl.show()
diff --git a/examples/preprocessing/README.txt b/examples/preprocessing/README.txt
new file mode 100644
index 0000000..ff3ee6e
--- /dev/null
+++ b/examples/preprocessing/README.txt
@@ -0,0 +1,6 @@
+
+Preprocessing
+-------------
+
+Examples related to data preprocessing (artifact detection / rejection etc.)
+
diff --git a/examples/preprocessing/plot_find_ecg_artifacts.py b/examples/preprocessing/plot_find_ecg_artifacts.py
new file mode 100644
index 0000000..3d665a2
--- /dev/null
+++ b/examples/preprocessing/plot_find_ecg_artifacts.py
@@ -0,0 +1,48 @@
+"""
+==================
+Find ECG artifacts
+==================
+
+Locate the QRS component of the ECG.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import pylab as pl
+import mne
+from mne import fiff
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+
+event_id = 999
+ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
+                                                     ch_name='MEG 1531')
+
+# Read epochs
+picks = fiff.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
+                        include=['MEG 1531'], exclude='bads')
+tmin, tmax = -0.1, 0.1
+epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
+                    proj=False)
+data = epochs.get_data()
+
+print "Number of detected ECG artifacts : %d" % len(data)
+
+###############################################################################
+# Plot ECG artifacts
+pl.plot(1e3 * epochs.times, np.squeeze(data).T)
+pl.xlabel('Times (ms)')
+pl.ylabel('ECG')
+pl.show()
diff --git a/examples/preprocessing/plot_find_eog_artifacts.py b/examples/preprocessing/plot_find_eog_artifacts.py
new file mode 100644
index 0000000..6945ff6
--- /dev/null
+++ b/examples/preprocessing/plot_find_eog_artifacts.py
@@ -0,0 +1,46 @@
+"""
+==================
+Find EOG artifacts
+==================
+
+Locate peaks of EOG to spot blinks and general EOG artifacts.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import pylab as pl
+import mne
+from mne import fiff
+from mne.datasets import sample
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+
+event_id = 998
+eog_events = mne.preprocessing.find_eog_events(raw, event_id)
+
+# Read epochs
+picks = fiff.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
+                        exclude='bads')
+tmin, tmax = -0.2, 0.2
+epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
+data = epochs.get_data()
+
+print "Number of detected EOG artifacts : %d" % len(data)
+
+###############################################################################
+# Plot EOG artifacts
+pl.plot(1e3 * epochs.times, np.squeeze(data).T)
+pl.xlabel('Times (ms)')
+pl.ylabel('EOG (muV)')
+pl.show()
diff --git a/examples/preprocessing/plot_ica_from_epochs.py b/examples/preprocessing/plot_ica_from_epochs.py
new file mode 100644
index 0000000..95dc0c7
--- /dev/null
+++ b/examples/preprocessing/plot_ica_from_epochs.py
@@ -0,0 +1,136 @@
+"""
+================================
+Compute ICA components on epochs
+================================
+
+ICA is used to decompose the raw data into 49 to 50 sources.
+The source matching the ECG is found automatically
+and displayed. Finally, after the cleaned epochs are
+compared to the uncleaned epochs, evoked ICA sources
+are investigated using sensor space ERF plotting
+techniques.
+
+"""
+print __doc__
+
+# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import matplotlib.pylab as pl
+import numpy as np
+import mne
+from mne.fiff import Raw
+from mne.preprocessing.ica import ICA
+from mne.datasets import sample
+
+###############################################################################
+# Setup paths and prepare epochs data
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+raw = Raw(raw_fname, preload=True)
+
+picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+                            ecg=True, stim=False, exclude='bads')
+
+tmin, tmax, event_id = -0.2, 0.5, 1
+baseline = (None, 0)
+reject = None
+
+events = mne.find_events(raw, stim_channel='STI 014')
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False, picks=picks,
+                    baseline=baseline, preload=True, reject=reject)
+
+###############################################################################
+# Set up the ICA seed, decompose the data, then access and plot sources.
+# For more background information, see the plot_ica_from_raw.py example.
+
+# fit sources from epochs or from raw (either works for epochs)
+ica = ICA(n_components=0.90, n_pca_components=64, max_pca_components=100,
+          noise_cov=None, random_state=0)
+
+ica.decompose_epochs(epochs)
+print ica
+
+###############################################################################
+# Automatically find ECG and EOG component using correlation coefficient.
+
+# As we don't have an ECG channel, we use one that correlates strongly with
+# heart beats: 'MEG 1531'. We can directly pass the channel name to the
+# find_sources_epochs method, which returns an array of correlation scores,
+# one per ICA source.
+
+ecg_scores = ica.find_sources_epochs(epochs, target='MEG 1531',
+                                     score_func='pearsonr')
+
+# get maximum correlation index for ECG
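+# (absolute value because the sign of an ICA source is arbitrary)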
+ecg_source_idx = np.abs(ecg_scores).argmax()
+
+# get sources from concatenated epochs
+sources = ica.get_sources_epochs(epochs, concatenate=True)
+
+# plot first epoch
+times = epochs.times
+first_trial = np.arange(len(times))
+
+pl.figure()
+pl.title('Source most correlated with the ECG channel')
+pl.plot(times, sources[ecg_source_idx, first_trial].T, color='r')
+pl.xlabel('Time (s)')
+pl.ylabel('AU')
+pl.show()
+
+# As we have an EOG channel, we can use it to detect the source.
+eog_scores = ica.find_sources_epochs(epochs, target='EOG 061',
+                                     score_func='pearsonr')
+
+# get maximum correlation index for EOG
+eog_source_idx = np.abs(eog_scores).argmax()
+
+# compute times for concatenated epochs
+times = np.linspace(times[0], times[-1] * len(epochs), sources.shape[1])
+
+# As the subject did not constantly move her eyes, the movement artifacts
+# may remain hidden when plotting single epochs.
+# Plotting the identified source across epochs reveals
+# considerable EOG artifacts.
+
+pl.figure()
+pl.title('Source most correlated with the EOG channel')
+pl.plot(times, sources[eog_source_idx].T, color='r')
+pl.xlabel('Time (s)')
+pl.ylabel('AU')
+pl.xlim(times[[0, -1]])
+pl.show()
+
+###############################################################################
+# Reject artifact sources and compare results
+
+# Add the detected artifact indices to ica.exclude
+ica.exclude += [ecg_source_idx, eog_source_idx]
+
+# Restore sensor space data
+epochs_ica = ica.pick_sources_epochs(epochs)
+
+# First show unprocessed, then cleaned epochs
+for e in epochs, epochs_ica:
+    pl.figure()
+    e.average().plot()
+    pl.show()
+
+###############################################################################
+# Inspect evoked ICA sources
+
+# create ICA Epochs object.
+ica_epochs = ica.sources_as_epochs(epochs)
+
+# pass an empty list so that no sources are excluded
+ica_picks = mne.fiff.pick_types(ica_epochs.info, misc=True, exclude=[])
+ica_evoked = ica_epochs.average(ica_picks)
+pl.figure()
+ica_evoked.plot(titles=dict(misc='ICA sources'))
+
+# Tip: use this for epochs constructed around ECG r-peaks to check whether all
+# ECG components were identified.
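+#
+# A minimal sketch of that idea (hypothetical event id and window; reuses
+# find_ecg_events as in plot_find_ecg_artifacts.py; commented out so this
+# example runs unchanged):
+# ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, 999,
+#                                                      ch_name='MEG 1531')
+# ecg_epochs = mne.Epochs(raw, ecg_events, 999, tmin=-0.1, tmax=0.1)
+# ica.sources_as_epochs(ecg_epochs).average(ica_picks).plot()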
diff --git a/examples/preprocessing/plot_ica_from_raw.py b/examples/preprocessing/plot_ica_from_raw.py
new file mode 100644
index 0000000..8579184
--- /dev/null
+++ b/examples/preprocessing/plot_ica_from_raw.py
@@ -0,0 +1,208 @@
+"""
+==================================
+Compute ICA components on raw data
+==================================
+
+ICA is used to decompose the raw data into 49 to 50 sources.
+The source matching the ECG is found automatically
+and displayed. Subsequently, the cleaned data is compared
+with the uncleaned data. The last section shows how to export
+the sources into a fiff file for further processing and displaying, e.g.
+using mne_browse_raw.
+
+"""
+print __doc__
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import pylab as pl
+
+import mne
+from mne.fiff import Raw
+from mne.preprocessing.ica import ICA
+from mne.datasets import sample
+
+###############################################################################
+# Setup paths and prepare raw data
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+raw = Raw(raw_fname, preload=True)
+
+picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
+                            stim=False, exclude='bads')
+
+###############################################################################
+# Set up the ICA seed, decompose the data, then access and plot sources.
+
+# Instead of the actual number of components, we here pass a float value
+# between 0 and 1 to select n_components by the fraction of explained
+# variance. We also decide to use 64 PCA components before mixing back into
+# sensor space: the PCA components supplied to ICA plus additional PCA
+# components up to rank 64 of the MEG data. This makes it possible to
+# control the trade-off between denoising and preserving signal.
+
+ica = ICA(n_components=0.90, n_pca_components=64, max_pca_components=100,
+          noise_cov=None, random_state=0)
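+
+# A minimal alternative sketch (assumption: an integer n_components selects
+# a fixed number of components rather than a variance fraction; commented
+# out so this example runs unchanged):
+# ica_fixed = ICA(n_components=20, max_pca_components=100, random_state=0)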
+
+# One minute of data should be sufficient for artifact detection. However,
+# rejection performance may improve significantly when using the entire
+# data range.
+
+start, stop = 100., 160.  # floats, otherwise interpreted as sample indices
+
+# decompose sources for raw data
+ica.decompose_raw(raw, start=start, stop=stop, picks=picks)
+print ica
+
+sources = ica.get_sources_raw(raw, start=start, stop=stop)
+
+# plot reasonable time window for inspection
+start_plot, stop_plot = 100., 103.
+ica.plot_sources_raw(raw, start=start_plot, stop=stop_plot)
+
+###############################################################################
+# Automatically find the ECG component using correlation with ECG signal.
+
+# First, we create a helper function that iteratively applies the Pearson
+# correlation function to the sources and returns an array of r values.
+# This illustrates how ica.find_sources_raw works; in fact, this is the
+# default score_func.
+
+from scipy.stats import pearsonr
+
+corr = lambda x, y: np.array([pearsonr(a, y.ravel()) for a in x])[:, 0]
+
+# As we don't have an ECG channel, we use one that correlates strongly with
+# heart beats: 'MEG 1531'. We can directly pass the channel name to the
+# find_sources_raw method, which returns an array of correlation scores,
+# one per ICA source.
+
+ecg_scores = ica.find_sources_raw(raw, target='MEG 1531', score_func=corr)
+
+# get sources
+sources = ica.get_sources_raw(raw, start=start_plot, stop=stop_plot)
+
+# compute times
+times = np.linspace(start_plot, stop_plot, sources.shape[1])
+
+# get maximum correlation index for ECG
+ecg_source_idx = np.abs(ecg_scores).argmax()
+
+pl.figure()
+pl.plot(times, sources[ecg_source_idx], color='r')
+pl.title('ICA source matching ECG')
+pl.xlabel('Time (s)')
+pl.ylabel('AU')
+pl.show()
+
+# Let us have a look at which other components resemble the ECG. We can do
+# this by reordering the plot by our scores, passing sort indices for the
+# sources via the order argument:
+
+ecg_order = np.abs(ecg_scores).argsort()[::-1]  # descending order
+
+ica.plot_sources_raw(raw, order=ecg_order, start=start_plot, stop=stop_plot)
+
+# Let's make our ECG component selection more liberal and include sources
+# for which the explained variance, in terms of r ** 2, exceeds 5 percent.
+# We will directly extend the ica.exclude list with the result.
+
+ica.exclude.extend(np.where(np.abs(ecg_scores) ** 2 > .05)[0])
+
+###############################################################################
+# Automatically find the EOG component using correlation with EOG signal.
+
+# As we have an EOG channel, we can use it to detect the source.
+
+eog_scores = ica.find_sources_raw(raw, target='EOG 061', score_func=corr)
+
+# get maximum correlation index for EOG
+eog_source_idx = np.abs(eog_scores).argmax()
+
+# plot the component that correlates most with the EOG
+pl.figure()
+pl.plot(times, sources[eog_source_idx], color='r')
+pl.title('ICA source matching EOG')
+pl.xlabel('Time (s)')
+pl.ylabel('AU')
+pl.show()
+
+###############################################################################
+# Show MEG data before and after ICA cleaning.
+
+# We now add the EOG artifact source to the ica.exclude list
+ica.exclude += [eog_source_idx]
+
+# Restore sensor space data
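+# (sources listed in ica.exclude are dropped during reconstruction)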
+raw_ica = ica.pick_sources_raw(raw, include=None)
+
+start_compare, stop_compare = raw.time_as_index([100, 106])
+
+data, times = raw[picks, start_compare:stop_compare]
+data_clean, _ = raw_ica[picks, start_compare:stop_compare]
+
+pl.figure()
+pl.plot(times, data.T)
+pl.xlabel('time (s)')
+pl.xlim(100, 106)
+pl.ylabel('Raw MEG data (T)')
+y0, y1 = pl.ylim()
+
+pl.figure()
+pl.plot(times, data_clean.T)
+pl.xlabel('time (s)')
+pl.xlim(100, 106)
+pl.ylabel('Denoised MEG data (T)')
+pl.ylim(y0, y1)
+pl.show()
+
+###############################################################################
+# Compare the affected channel before and after ICA cleaning.
+
+affected_idx = raw.ch_names.index('MEG 1531')
+
+# plot the affected channel before cleaning
+pl.figure()
+pl.plot(times, data[affected_idx], color='k')
+pl.title('Affected channel MEG 1531 before cleaning.')
+y0, y1 = pl.ylim()
+
+# plot the affected channel after cleaning
+pl.figure()
+pl.plot(times, data_clean[affected_idx], color='k')
+pl.title('Affected channel MEG 1531 after cleaning.')
+pl.ylim(y0, y1)
+pl.show()
+
+###############################################################################
+# Export ICA as raw for subsequent processing steps in ICA space.
+
+from mne.layouts import make_grid_layout
+
+ica_raw = ica.sources_as_raw(raw, start=start, stop=stop, picks=None)
+
+print ica_raw.ch_names[:5]  # just a few
+
+ica_lout = make_grid_layout(ica_raw.info)
+
+# Uncomment the following two lines to save sources and layout
+# (the second line additionally requires `import os`).
+# ica_raw.save('ica_raw.fif')
+# ica_lout.save(os.path.join(os.environ['HOME'], '.mne/lout/ica.lout'))
+
+###############################################################################
+# To save an ICA session you can say:
+# ica.save('my_ica.fif')
+#
+# You can later restore the session by saying:
+# >>> from mne.preprocessing import read_ica
+# >>> read_ica('my_ica.fif')
+#
+# The ICA functionality exposed in this example will then be available at
+# any later point in time, provided the data have the same structure as the
+# data initially supplied to ICA.
diff --git a/examples/read_events.py b/examples/read_events.py
new file mode 100644
index 0000000..a088010
--- /dev/null
+++ b/examples/read_events.py
@@ -0,0 +1,31 @@
+"""
+=====================
+Reading an event file
+=====================
+
+Read events from a file.
+"""
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne.datasets import sample
+
+data_path = sample.data_path()
+fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+
+# Reading events
+# events = mne.read_events(fname)  # all
+events = mne.read_events(fname, include=1)  # restricted to event 1
+events = mne.read_events(fname, include=[1, 2])  # restricted to event 1 or 2
+events = mne.read_events(fname, exclude=[4, 32])  # keep all but 4 and 32
+
+# Writing events
+mne.write_events('events.fif', events)
+
+for ind, before, after in events[:5]:
+    print "At sample %d stim channel went from %d to %d" % (ind, before,
+                                                            after)
diff --git a/examples/stats/README.txt b/examples/stats/README.txt
new file mode 100644
index 0000000..b395d8f
--- /dev/null
+++ b/examples/stats/README.txt
@@ -0,0 +1,6 @@
+
+Statistics Examples
+-------------------
+
+Some examples of how to compute statistics on M/EEG data with MNE.
+
diff --git a/examples/stats/plot_cluster_1samp_test_time_frequency.py b/examples/stats/plot_cluster_1samp_test_time_frequency.py
new file mode 100644
index 0000000..2a1e46b
--- /dev/null
+++ b/examples/stats/plot_cluster_1samp_test_time_frequency.py
@@ -0,0 +1,139 @@
+"""
+===============================================================
+Non-parametric 1 sample cluster statistic on single trial power
+===============================================================
+
+This script shows how to estimate significant clusters
+in time-frequency power estimates. It uses a non-parametric
+statistical procedure based on permutations and cluster
+level statistics.
+
+The procedure consists of:
+
+  - extracting epochs
+  - computing single trial power estimates
+  - baseline-correcting the power estimates (power ratios)
+  - computing statistics to see if the ratio deviates from 1.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+
+import mne
+from mne import fiff
+from mne.time_frequency import single_trial_power
+from mne.stats import permutation_cluster_1samp_test
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_id = 1
+tmin = -0.3
+tmax = 0.6
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.find_events(raw, stim_channel='STI 014')
+
+include = []
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                                stim=False, include=include, exclude='bads')
+
+# Load condition 1
+event_id = 1
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
+data = epochs.get_data()  # as 3D matrix
+data *= 1e13  # change unit to fT / cm
+# Time vector
+times = 1e3 * epochs.times  # change unit to ms
+
+# Take only one channel
+ch_name = raw.info['ch_names'][97]
+data = data[:, 97:98, :]
+
+evoked_data = np.mean(data, 0)
+
+# data -= evoked_data[None,:,:] # remove evoked component
+# evoked_data = np.mean(data, 0)
+
+# Factor to down-sample the temporal dimension of the PSD computed by
+# single_trial_power.  Decimation occurs after frequency decomposition and can
+# be used to reduce memory usage (and possibly computational time of downstream
+# operations such as nonparametric statistics) if you don't need high
+# spectrotemporal resolution.
+decim = 5
+frequencies = np.arange(8, 40, 2)  # define frequencies of interest
+Fs = raw.info['sfreq']  # sampling in Hz
+epochs_power = single_trial_power(data, Fs=Fs, frequencies=frequencies,
+                                  n_cycles=4, use_fft=False, n_jobs=1,
+                                  baseline=(-100, 0), times=times,
+                                  baseline_mode='ratio', decim=decim)
+
+# Crop in time to keep only what is between 0 and 400 ms
+time_mask = (times > 0) & (times < 400)
+evoked_data = evoked_data[:, time_mask]
+times = times[time_mask]
+
+# The time vector reflects the original time points, not the decimated time
+# points returned by single trial power. Be sure to decimate the time mask
+# appropriately.
+epochs_power = epochs_power[..., time_mask[::decim]]
+
+epochs_power = epochs_power[:, 0, :, :]
+epochs_power = np.log10(epochs_power)  # take log of ratio
+# under the null hypothesis epochs_power should now be 0
+# (a power ratio of 1 has log10 equal to 0)
+
+###############################################################################
+# Compute statistic
+threshold = 2.5
+T_obs, clusters, cluster_p_values, H0 = \
+    permutation_cluster_1samp_test(epochs_power, n_permutations=100,
+                                   threshold=threshold, tail=0)
+
+###############################################################################
+# View time-frequency plots
+import pylab as pl
+pl.clf()
+pl.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
+pl.subplot(2, 1, 1)
+pl.plot(times, evoked_data.T)
+pl.title('Evoked response (%s)' % ch_name)
+pl.xlabel('time (ms)')
+pl.ylabel('Magnetic Field (fT/cm)')
+pl.xlim(times[0], times[-1])
+pl.ylim(-100, 250)
+
+pl.subplot(2, 1, 2)
+
+# Create new stats image with only significant clusters
+T_obs_plot = np.nan * np.ones_like(T_obs)
+for c, p_val in zip(clusters, cluster_p_values):
+    if p_val <= 0.05:
+        T_obs_plot[c] = T_obs[c]
+
+vmax = np.max(np.abs(T_obs))
+vmin = -vmax
+pl.imshow(T_obs, cmap=pl.cm.gray, extent=[times[0], times[-1],
+                                          frequencies[0], frequencies[-1]],
+                                  aspect='auto', origin='lower',
+                                  vmin=vmin, vmax=vmax)
+pl.imshow(T_obs_plot, cmap=pl.cm.jet, extent=[times[0], times[-1],
+                                          frequencies[0], frequencies[-1]],
+                                  aspect='auto', origin='lower',
+                                  vmin=vmin, vmax=vmax)
+pl.colorbar()
+pl.xlabel('time (ms)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Induced power (%s)' % ch_name)
+pl.show()
diff --git a/examples/stats/plot_cluster_methods_tutorial.py b/examples/stats/plot_cluster_methods_tutorial.py
new file mode 100644
index 0000000..f4c0af4
--- /dev/null
+++ b/examples/stats/plot_cluster_methods_tutorial.py
@@ -0,0 +1,197 @@
+"""
+======================================================
+Permutation t-test on toy data with spatial clustering
+======================================================
+
+Following the illustrative example of Ridgway et al. 2012,
+this demonstrates some basic ideas behind both the "hat"
+variance adjustment method, as well as threshold-free
+cluster enhancement (TFCE) methods in mne-python.
+
+This toy dataset consists of a 40 x 40 square with a "signal"
+present in the center (at pixel [20, 20]) with white noise
+added and a 5-pixel-SD normal smoothing kernel applied.
+
+For more information, see:
+Ridgway et al. 2012, "The problem of low variance voxels in
+statistical parametric mapping; a new hat avoids a 'haircut'",
+NeuroImage. 2012 Feb 1;59(3):2131-41.
+
+Smith and Nichols 2009, "Threshold-free cluster enhancement:
+addressing problems of smoothing, threshold dependence, and
+localisation in cluster inference", NeuroImage 44 (2009) 83-98.
+
+In the top row, the first plot shows the T statistic over space, peaking
+toward the center. Note that it has peaky edges. Second, with the "hat"
+variance correction/regularization, the peak becomes correctly centered.
+Third, the TFCE approach also corrects for these edge artifacts. Fourth,
+the two methods combined provide a tighter estimate, for better or
+worse.
+
+Now considering multiple-comparisons corrected statistics on these
+variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
+mis-localize the peak due to sharpness in the T statistic driven by
+low-variance pixels toward the edge of the plateau. Standard clustering
+(first plot in the second row) identifies the correct region, but the
+whole area must be declared significant, so no peak analysis can be done.
+Also, the peak is broad. In this method, all significances are
+family-wise error rate (FWER) corrected, and the method is
+non-parametric so assumptions of Gaussian data distributions (which do
+actually hold for this example) don't need to be satisfied. Adding the
+"hat" technique tightens the estimate of significant activity (second
+plot). The TFCE approach (third plot) allows analyzing each significant
+point independently, but still has a broadened estimate. Note that
+this is also FWER corrected. Finally, combining the TFCE and "hat"
+methods tightens the area declared significant (again FWER corrected),
+and allows for evaluation of each point independently instead of as
+a single, broad cluster.
+"""
+
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+from scipy import stats
+from functools import partial
+
+from mne.stats import spatio_temporal_cluster_1samp_test, \
+                      bonferroni_correction, ttest_1samp_no_p
+
+try:
+    from sklearn.feature_extraction.image import grid_to_graph
+except ImportError:
+    from scikits.learn.feature_extraction.image import grid_to_graph
+
+###############################################################################
+# Set parameters
+width = 40
+n_subjects = 10
+signal_mean = 100
+signal_sd = 100
+noise_sd = 0.01
+gaussian_sd = 5
+sigma = 1e-3  # sigma for the "hat" method
+threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
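+# (the positive t value with upper-tail probability 0.05, matching the
+#  one-tailed tests below)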
+threshold_tfce = dict(start=0, step=0.2)
+n_permutations = 1024  # 2 ** n_subjects sign flips, so the test is exact
+
+###############################################################################
+# Construct simulated data
+#    Make the connectivity matrix just next-neighbor spatially
+n_src = width * width
+connectivity = grid_to_graph(width, width)
+
+#    For each "subject", make a smoothed noisy signal with a centered peak
+rng = np.random.RandomState(42)
+X = noise_sd * rng.randn(n_subjects, width, width)
+#    Add a signal at the dead center
+X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
+#    Spatially smooth with a 2D Gaussian kernel
+size = width // 2 - 1
+gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
+for si in range(X.shape[0]):
+    for ri in range(X.shape[1]):
+        X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
+    for ci in range(X.shape[2]):
+        X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
+
+###############################################################################
+# Do some statistics
+
+#    Note that X needs to be a multi-dimensional array of shape
+#    samples (subjects) x time x space, so we permute dimensions
+X = X.reshape((n_subjects, 1, n_src))
+
+#    Now let's do some clustering using the standard method. Note that not
+#    specifying a connectivity matrix implies grid-like connectivity, which
+#    we want here:
+T_obs, clusters, p_values, H0 = \
+    spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold,
+                                       connectivity=connectivity,
+                                       tail=1, n_permutations=n_permutations)
+
+#    Let's put the cluster data in a readable format
+ps = np.zeros(width * width)
+for cl, p in zip(clusters, p_values):
+    ps[cl[1]] = -np.log10(p)
+ps = ps.reshape((width, width))
+T_obs = T_obs.reshape((width, width))
+
+#     To do a Bonferroni correction on these data is simple:
+p = stats.distributions.t.sf(T_obs, n_subjects - 1)
+p_bon = -np.log10(bonferroni_correction(p)[1])
+
+#    Now let's do some clustering using the standard method with "hat":
+stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
+T_obs_hat, clusters, p_values, H0 = \
+    spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold,
+                                       connectivity=connectivity,
+                                       tail=1, n_permutations=n_permutations,
+                                       stat_fun=stat_fun)
+
+#    Let's put the cluster data in a readable format
+ps_hat = np.zeros(width * width)
+for cl, p in zip(clusters, p_values):
+    ps_hat[cl[1]] = -np.log10(p)
+ps_hat = ps_hat.reshape((width, width))
+T_obs_hat = T_obs_hat.reshape((width, width))
+
+#    Now the threshold-free cluster enhancement method (TFCE):
+T_obs_tfce, clusters, p_values, H0 = \
+    spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold_tfce,
+                                       connectivity=connectivity,
+                                       tail=1, n_permutations=n_permutations)
+T_obs_tfce = T_obs_tfce.reshape((width, width))
+ps_tfce = -np.log10(p_values.reshape((width, width)))
+
+#    Now the TFCE with "hat" variance correction:
+T_obs_tfce_hat, clusters, p_values, H0 = \
+    spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold_tfce,
+                                       connectivity=connectivity,
+                                       tail=1, n_permutations=n_permutations,
+                                       stat_fun=stat_fun)
+T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
+ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
+
+###############################################################################
+# Visualize results
+
+import pylab as pl
+from mpl_toolkits.mplot3d import Axes3D  # this changes hidden matplotlib vars
+pl.ion()
+fig = pl.figure(facecolor='w')
+
+x, y = np.mgrid[0:width, 0:width]
+kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
+
+Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
+titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
+for ii, (t, title) in enumerate(zip(Ts, titles)):
+    ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
+    ax.plot_surface(x, y, t, **kwargs)
+    ax.set_xticks([])
+    ax.set_yticks([])
+    ax.set_title(title)
+
+p_lims = [1.3, -np.log10(1.0 / n_permutations)]
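+# (the lower limit 1.3 corresponds to p = 0.05, since -log10(0.05) ~= 1.3)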
+pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
+titles = ['Standard clustering', 'Clust. w/"hat"',
+          'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
+axs = []
+for ii, (p, title) in enumerate(zip(pvals, titles)):
+    ax = fig.add_subplot(2, 4, 5 + ii)
+    pl.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
+    ax.set_xticks([])
+    ax.set_yticks([])
+    ax.set_title(title)
+    axs.append(ax)
+
+pl.tight_layout()
+for ax in axs:
+    cbar = pl.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
+                       fraction=0.1, pad=0.025)
+    cbar.set_label('-log10(p)')
+    cbar.set_ticks(p_lims)
+    cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
diff --git a/examples/stats/plot_cluster_stats_evoked.py b/examples/stats/plot_cluster_stats_evoked.py
new file mode 100644
index 0000000..3644408
--- /dev/null
+++ b/examples/stats/plot_cluster_stats_evoked.py
@@ -0,0 +1,88 @@
+"""
+=======================================================
+Permutation F-test on sensor data with 1D cluster level
+=======================================================
+
+Tests whether the evoked response is significantly different between
+conditions. The multiple comparisons problem is addressed with a
+cluster-level permutation test.
+
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.stats import permutation_cluster_test
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id = 1
+tmin = -0.2
+tmax = 0.5
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+channel = 'MEG 1332'  # include only this channel in analysis
+include = [channel]
+
+###############################################################################
+# Read epochs for the channel of interest
+picks = fiff.pick_types(raw.info, meg=False, eog=True, include=include,
+                        exclude='bads')
+event_id = 1
+reject = dict(grad=4000e-13, eog=150e-6)
+epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0), reject=reject)
+condition1 = epochs1.get_data()  # as 3D matrix
+
+event_id = 2
+epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0), reject=reject)
+condition2 = epochs2.get_data()  # as 3D matrix
+
+condition1 = condition1[:, 0, :]  # take only one channel to get a 2D array
+condition2 = condition2[:, 0, :]  # take only one channel to get a 2D array
+
+###############################################################################
+# Compute statistic
+threshold = 6.0
+T_obs, clusters, cluster_p_values, H0 = \
+                permutation_cluster_test([condition1, condition2],
+                            n_permutations=1000, threshold=threshold, tail=1,
+                            n_jobs=2)
+
+###############################################################################
+# Plot
+times = epochs1.times
+import pylab as pl
+pl.close('all')
+pl.subplot(211)
+pl.title('Channel : ' + channel)
+pl.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
+        label="ERF Contrast (Event 1 - Event 2)")
+pl.ylabel("MEG (T / m)")
+pl.legend()
+pl.subplot(212)
+for i_c, c in enumerate(clusters):
+    c = c[0]
+    if cluster_p_values[i_c] <= 0.05:
+        h = pl.axvspan(times[c.start], times[c.stop - 1], color='r', alpha=0.3)
+    else:
+        pl.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
+                   alpha=0.3)
+hf = pl.plot(times, T_obs, 'g')
+pl.legend((h, ), ('cluster p-value < 0.05', ))
+pl.xlabel("time (ms)")
+pl.ylabel("f-values")
+pl.show()
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal.py b/examples/stats/plot_cluster_stats_spatio_temporal.py
new file mode 100644
index 0000000..6a79b95
--- /dev/null
+++ b/examples/stats/plot_cluster_stats_spatio_temporal.py
@@ -0,0 +1,179 @@
+"""
+=================================================================
+Permutation t-test on source data with spatio-temporal clustering
+=================================================================
+
+Tests if the evoked response is significantly different between
+conditions across subjects (simulated here using one subject's data).
+The multiple comparisons problem is addressed with a cluster-level
+permutation test across space and time.
+
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD (3-clause)
+
+print __doc__
+
+import os.path as op
+import numpy as np
+from numpy.random import randn
+from scipy import stats as stats
+
+import mne
+from mne import fiff, spatial_tris_connectivity, compute_morph_matrix,\
+    grade_to_tris
+from mne.epochs import equalize_epoch_counts
+from mne.stats import spatio_temporal_cluster_1samp_test,\
+                      summarize_clusters_stc
+from mne.minimum_norm import apply_inverse, read_inverse_operator
+from mne.datasets import sample
+from mne.viz import mne_analyze_colormap
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+subjects_dir = data_path + '/subjects'
+
+tmin = -0.2
+tmax = 0.3  # Use a lower tmax to reduce multiple comparisons
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+###############################################################################
+# Read epochs for all channels, removing a bad one
+raw.info['bads'] += ['MEG 2443']
+picks = fiff.pick_types(raw.info, meg=True, eog=True, exclude='bads')
+event_id = 1  # L auditory
+reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
+epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0), reject=reject, preload=True)
+
+event_id = 3  # L visual
+epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0), reject=reject, preload=True)
+
+#    Equalize trial counts to eliminate bias (which would otherwise be
+#    introduced by the abs() performed below)
+equalize_epoch_counts([epochs1, epochs2])
+
+###############################################################################
+# Transform to source space
+
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
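+# regularization parameter set from the assumed SNR (a common MNE convention)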
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+inverse_operator = read_inverse_operator(fname_inv)
+sample_vertices = [s['vertno'] for s in inverse_operator['src']]
+
+#    Let's average and compute inverse, resampling to speed things up
+evoked1 = epochs1.average()
+evoked1.resample(50)
+condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)
+evoked2 = epochs2.average()
+evoked2.resample(50)
+condition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)
+
+#    Let's only deal with t > 0, cropping to reduce multiple comparisons
+condition1.crop(0, None)
+condition2.crop(0, None)
+tmin = condition1.tmin
+tstep = condition1.tstep
+
+###############################################################################
+# Transform to common cortical space
+
+#    Normally you would read in estimates across several subjects and morph
+#    them to the same cortical space (e.g. fsaverage). For example purposes,
+#    we will simulate this by just having each "subject" have the same
+#    response (just noisy in source space) here. Note that for 7 subjects
+#    with a two-sided statistical test, the minimum significance under a
+#    permutation test is only p = 1/(2 ** 6) = 0.015, which is large.
+n_vertices_sample, n_times = condition1.data.shape
+n_subjects = 7
+print 'Simulating data for %d subjects.' % n_subjects
+
+#    Let's make sure our results replicate, so set the seed.
+np.random.seed(0)
+X = randn(n_vertices_sample, n_times, n_subjects, 2) * 10
+X[:, :, :, 0] += condition1.data[:, :, np.newaxis]
+X[:, :, :, 1] += condition2.data[:, :, np.newaxis]
+
+#    It's a good idea to spatially smooth the data, and for visualization
+#    purposes, let's morph these to fsaverage, which is a grade 5 source space
+#    with vertices 0:10242 for each hemisphere. Usually you'd have to morph
+#    each subject's data separately (and you might want to use morph_data
+#    instead), but here since all estimates are on 'sample' we can use one
+#    morph matrix for all the heavy lifting.
+fsave_vertices = [np.arange(10242), np.arange(10242)]
+morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
+                                 fsave_vertices, 20, subjects_dir)
+n_vertices_fsave = morph_mat.shape[0]
+
+#    We have to change the shape for the dot() to work properly
+X = X.reshape(n_vertices_sample, n_times * n_subjects * 2)
+print 'Morphing data.'
+X = morph_mat.dot(X)  # morph_mat is a sparse matrix
+X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)
+
+#    Finally, we want to compare the overall activity levels in each condition,
+#    the diff is taken along the last axis (condition). The negative sign makes
+#    it so condition1 > condition2 shows up as "red blobs" (instead of blue).
+X = np.abs(X)  # only magnitude
+X = X[:, :, :, 0] - X[:, :, :, 1]  # make paired contrast
+
+
+###############################################################################
+# Compute statistic
+
+#    To use an algorithm optimized for spatio-temporal clustering, we
+#    just pass the spatial connectivity matrix (instead of spatio-temporal)
+print 'Computing connectivity.'
+connectivity = spatial_tris_connectivity(grade_to_tris(5))
+
+#    Note that X needs to be a multi-dimensional array of shape
+#    samples (subjects) x time x space, so we permute dimensions
+X = np.transpose(X, [2, 1, 0])
+
+#    Now let's actually do the clustering. This can take a long time...
+#    Here we set the threshold quite high to reduce computation.
+p_threshold = 0.001
+t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
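+# (p_threshold / 2 with the sign flip gives the positive two-tailed t value)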
+print 'Clustering.'
+T_obs, clusters, cluster_p_values, H0 = clu = \
+    spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=2,
+                                       threshold=t_threshold)
+#    Now select the clusters that are sig. at p < 0.05 (note that this value
+#    is multiple-comparisons corrected).
+good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
+
+###############################################################################
+# Visualize the clusters
+
+print 'Visualizing clusters.'
+
+#    Now let's build a convenient representation of each cluster, where each
+#    cluster becomes a "time point" in the SourceEstimate
+stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
+                                vertno=fsave_vertices, subject='fsaverage')
+
+#    Let's actually plot the first "time point" in the SourceEstimate, which
+#    shows all the clusters, weighted by duration
+colormap = mne_analyze_colormap(limits=[0, 10, 50])
+subjects_dir = op.join(data_path, 'subjects')
+# blue blobs are for condition A < condition B, red for A > B
+brains = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both', colormap,
+                                  subjects_dir=subjects_dir,
+                                  time_label='Duration significant (ms)')
+for idx, brain in enumerate(brains):
+    brain.set_data_time_index(0)
+    # The colormap requires brain data to be scaled -fmax -> fmax
+    brain.scale_data_colormap(fmin=-50, fmid=0, fmax=50, transparent=False)
+    brain.show_view('lateral')
+    brain.save_image('clusters-%s.png' % ('lh' if idx == 0 else 'rh'))
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py b/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
new file mode 100644
index 0000000..a891817
--- /dev/null
+++ b/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
@@ -0,0 +1,107 @@
+"""
+=========================================================================
+2 samples permutation test on source data with spatio-temporal clustering
+=========================================================================
+
+Tests if the source space data are significantly different between
+2 groups of subjects (simulated here using one subject's data).
+The multiple comparisons problem is addressed with a cluster-level
+permutation test across space and time.
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD (3-clause)
+
+print __doc__
+
+import os.path as op
+import numpy as np
+from scipy import stats as stats
+
+import mne
+from mne import spatial_tris_connectivity, grade_to_tris
+from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
+subjects_dir = data_path + '/subjects'
+
+# Load the stc and morph it into the common cortical space (fsaverage)
+stc = mne.read_source_estimate(stc_fname)
+stc.resample(50)
+
+stc = mne.morph_data('sample', 'fsaverage', stc, grade=5, smooth=20,
+                     subjects_dir=subjects_dir)
+n_vertices_fsave, n_times = stc.data.shape
+tstep = stc.tstep
+
+n_subjects1, n_subjects2 = 7, 9
+print 'Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2)
+
+#    Let's make sure our results replicate, so set the seed.
+np.random.seed(0)
+X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
+X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
+X1[:, :, :] += stc.data[:, :, np.newaxis]
+# make the activity bigger for the second set of subjects
+X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
+
+#    We want to compare the overall activity levels for each subject
+X1 = np.abs(X1)  # only magnitude
+X2 = np.abs(X2)  # only magnitude
+
+###############################################################################
+# Compute statistic
+
+#    To use an algorithm optimized for spatio-temporal clustering, we
+#    just pass the spatial connectivity matrix (instead of spatio-temporal)
+print 'Computing connectivity.'
+connectivity = spatial_tris_connectivity(grade_to_tris(5))
+
+#    Note that X needs to be a list of multi-dimensional arrays of shape
+#    samples (subjects_k) x time x space, so we permute dimensions
+X1 = np.transpose(X1, [2, 1, 0])
+X2 = np.transpose(X2, [2, 1, 0])
+X = [X1, X2]
+
+#    Now let's actually do the clustering. This can take a long time...
+#    Here we set the threshold quite high to reduce computation.
+p_threshold = 0.0001
+f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
+                                        n_subjects1 - 1, n_subjects2 - 1)
+print 'Clustering.'
+T_obs, clusters, cluster_p_values, H0 = clu =\
+    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
+                                 threshold=f_threshold)
+#    Now select the clusters that are sig. at p < 0.05 (note that this value
+#    is multiple-comparisons corrected).
+good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
+
+###############################################################################
+# Visualize the clusters
+
+print 'Visualizing clusters.'
+
+#    Now let's build a convenient representation of each cluster, where each
+#    cluster becomes a "time point" in the SourceEstimate
+fsave_vertices = [np.arange(10242), np.arange(10242)]
+stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
+                                vertno=fsave_vertices, subject='fsaverage')
+
+#    Let's actually plot the first "time point" in the SourceEstimate, which
+#    shows all the clusters, weighted by duration
+subjects_dir = op.join(data_path, 'subjects')
+# blue blobs are for condition A != condition B
+brains = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both',
+                                  subjects_dir=subjects_dir,
+                                  time_label='Duration significant (ms)',
+                                  fmin=0, fmid=25, fmax=50)
+for idx, brain in enumerate(brains):
+    brain.set_data_time_index(0)
+    brain.scale_data_colormap(fmin=0, fmid=25, fmax=50, transparent=True)
+    brain.show_view('lateral')
+    brain.save_image('clusters-%s.png' % ('lh' if idx == 0 else 'rh'))
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py b/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
new file mode 100644
index 0000000..d57915a
--- /dev/null
+++ b/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
@@ -0,0 +1,266 @@
+"""
+======================================================================
+Repeated measures ANOVA on source data with spatio-temporal clustering
+======================================================================
+
+This example illustrates how to make use of the clustering functions
+for arbitrary, self-defined contrasts beyond standard t-tests. In this
+case we will test whether the differences in evoked responses between
+stimulation modalities (visual vs. auditory) depend on the stimulus
+location (left vs. right) for a group of subjects (simulated here
+using one subject's data). For this purpose we will compute an
+interaction effect using a repeated measures ANOVA. The multiple
+comparisons problem is addressed with a cluster-level permutation test
+across space and time.
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import os.path as op
+import numpy as np
+from numpy.random import randn
+
+import mne
+from mne import fiff, spatial_tris_connectivity, compute_morph_matrix,\
+    grade_to_tris
+from mne.stats import spatio_temporal_cluster_test, f_threshold_twoway_rm, \
+    f_twoway_rm, summarize_clusters_stc
+
+from mne.minimum_norm import apply_inverse, read_inverse_operator
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+subjects_dir = data_path + '/subjects'
+
+tmin = -0.2
+tmax = 0.3  # Use a lower tmax to reduce multiple comparisons
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+###############################################################################
+# Read epochs for all channels, removing a bad one
+raw.info['bads'] += ['MEG 2443']
+picks = fiff.pick_types(raw.info, meg=True, eog=True, exclude='bads')
+# we'll load all four conditions that make up the 'two ways' of our ANOVA
+
+event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
+reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject, preload=True)
+
+#    Equalize trial counts to eliminate bias (which would otherwise be
+#    introduced by the abs() performed below)
+epochs.equalize_event_counts(event_id, copy=False)
+
+###############################################################################
+# Transform to source space
+
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+inverse_operator = read_inverse_operator(fname_inv)
+
+# we'll only use one hemisphere to speed up this example
+# instead of a second vertex array we'll pass an empty array
+sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([])]
+
+#    Let's average and compute inverse, then resample to speed things up
+conditions = []
+for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']:  # order is important
+    evoked = epochs[cond].average()
+    evoked.resample(50)
+    condition = apply_inverse(evoked, inverse_operator, lambda2, method)
+    #    Let's only deal with t > 0, cropping to reduce multiple comparisons
+    condition.crop(0, None)
+    conditions.append(condition)
+
+tmin = conditions[0].tmin
+tstep = conditions[0].tstep
+
+###############################################################################
+# Transform to common cortical space
+
+#    Normally you would read in estimates across several subjects and morph
+#    them to the same cortical space (e.g. fsaverage). For example purposes,
+#    we will simulate this by just having each "subject" have the same
+#    response (just noisy in source space) here.
+
+# we'll only consider the left hemisphere in this example.
+n_vertices_sample, n_times = conditions[0].lh_data.shape
+n_subjects = 7
+print 'Simulating data for %d subjects.' % n_subjects
+
+#    Let's make sure our results replicate, so set the seed.
+np.random.seed(0)
+X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
+for ii, condition in enumerate(conditions):
+    X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
+
+#    It's a good idea to spatially smooth the data, and for visualization
+#    purposes, let's morph these to fsaverage, which is a grade 5 source space
+#    with vertices 0:10242 for each hemisphere. Usually you'd have to morph
+#    each subject's data separately (and you might want to use morph_data
+#    instead), but here since all estimates are on 'sample' we can use one
+#    morph matrix for all the heavy lifting.
+fsave_vertices = [np.arange(10242), np.array([])]  # right hemisphere is empty
+morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
+                                 fsave_vertices, 20, subjects_dir)
+n_vertices_fsave = morph_mat.shape[0]
+
+#    We have to change the shape for the dot() to work properly
+X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
+print 'Morphing data.'
+X = morph_mat.dot(X)  # morph_mat is a sparse matrix
+X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
+
+#    Now we need to prepare the group matrix for the ANOVA statistic.
+#    To make the clustering function work correctly with the
+#    ANOVA function X needs to be a list of multi-dimensional arrays
+#    (one per condition) of shape: samples (subjects) x time x space
+
+X = np.transpose(X, [2, 1, 0, 3])  # First we permute dimensions
+# finally we split the array into a list of conditions and discard the
+# empty dimension resulting from the split using numpy squeeze
+X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
+
+###############################################################################
+# Prepare function for arbitrary contrast
+
+# As our ANOVA function is a multi-purpose tool we need to apply a few
+# modifications to integrate it with the clustering function. This
+# includes reshaping data, setting default arguments and processing
+# the return values. For this reason we'll write a tiny dummy function.
+
+# We will tell the ANOVA how to interpret the data matrix in terms of
+# factors. This is done via the factor_levels argument, which is a list of
+# the number of levels for each factor.
+factor_levels = [2, 2]
+
+# Finally we will pick the interaction effect by passing 'A:B'.
+# (this notation is borrowed from the R formula language)
+effects = 'A:B'  # Without this, the main effects would also be returned.
+# Tell the ANOVA not to compute p-values which we don't need for clustering
+return_pvals = False
+
+# a few more convenient bindings
+n_times = X[0].shape[1]
+n_conditions = 4
+
+
+# A stat_fun must deal with a variable number of input arguments.
+def stat_fun(*args):
+    # Inside the clustering function each condition will be passed as a
+    # flattened array, as required by the clustering procedure. The ANOVA,
+    # however, expects an input array of dimensions:
+    # subjects x conditions x observations (optional).
+    # The following expression catches the list input, swaps the first and
+    # the second dimension, and puts the remaining observations in the
+    # third dimension.
+    data = np.swapaxes(np.asarray(args), 1, 0).reshape(n_subjects, \
+        n_conditions, n_times * n_vertices_fsave)
+    return f_twoway_rm(data, factor_levels=factor_levels, effects=effects,
+                return_pvals=return_pvals)[0]  # drop p-values (empty array).
+    # Note. for further details on this ANOVA function consider the
+    # corresponding time frequency example.
+
+###############################################################################
+# Compute clustering statistic
+
+#    To use an algorithm optimized for spatio-temporal clustering, we
+#    just pass the spatial connectivity matrix (instead of spatio-temporal)
+
+source_space = grade_to_tris(5)
+# as we only have one hemisphere, we only need half the connectivity
+lh_source_space = source_space[source_space[:, 0] < 10242]
+print 'Computing connectivity.'
+connectivity = spatial_tris_connectivity(lh_source_space)
+
+#    Now let's actually do the clustering. Please relax: on a small
+#    notebook with only a single thread this will take a couple of
+#    minutes. To speed things up a bit we will ...
+pthresh = 0.001
+f_thresh = f_threshold_twoway_rm(n_subjects, factor_levels, effects, pthresh)
+n_permutations = 100  # ... run fewer permutations (reduces sensitivity)
+
+print 'Clustering.'
+T_obs, clusters, cluster_p_values, H0 = clu = \
+    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
+                                 threshold=f_thresh, stat_fun=stat_fun,
+                                 n_permutations=n_permutations)
+#    Now select the clusters that are sig. at p < 0.05 (note that this value
+#    is multiple-comparisons corrected).
+good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
+
+###############################################################################
+# Visualize the clusters
+
+print 'Visualizing clusters.'
+
+#    Now let's build a convenient representation of each cluster, where each
+#    cluster becomes a "time point" in the SourceEstimate
+stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
+                                vertno=fsave_vertices, subject='fsaverage')
+
+#    Let's actually plot the first "time point" in the SourceEstimate, which
+#    shows all the clusters, weighted by duration
+
+subjects_dir = op.join(data_path, 'subjects')
+# The brighter the color, the stronger the interaction between
+# stimulus modality and stimulus location
+
+brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'lh',
+                                  subjects_dir=subjects_dir,
+                                  time_label='Duration significant (ms)')
+
+brain.set_data_time_index(0)
+brain.scale_data_colormap(fmin=5, fmid=10, fmax=30, transparent=True)
+brain.show_view('lateral')
+brain.save_image('cluster-lh.png')
+brain.show_view('medial')
+
+###############################################################################
+# Finally, let's investigate the interaction effect by reconstructing the
+# time courses
+
+import pylab as pl
+inds_t, inds_v = clusters[good_cluster_inds[0]]  # first cluster
+
+times = np.arange(X[0].shape[1]) * tstep * 1e3
+
+pl.clf()
+colors = ['y', 'b', 'g', 'purple']
+for ii, (condition, color, eve_id) in enumerate(
+    zip(X, colors, ['l_aud', 'r_aud', 'l_vis', 'r_vis'])):
+    # extract time course at cluster vertices
+    condition = condition[:, :, inds_v]
+    # normally we would normalize values across subjects but
+    # here we use data from the same subject so we're good to just
+    # create average time series across subjects and vertices.
+    mean_tc = condition.mean(axis=2).mean(axis=0)
+    std_tc = condition.mean(axis=2).std(axis=0)  # std across subjects
+    pl.plot(times, mean_tc.T, color=color, label=eve_id)
+    pl.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
+            alpha=0.5, label='')
+
+pl.xlabel('Time (ms)')
+pl.ylabel('Activation (F-values)')
+pl.xlim(times[[0, -1]])
+pl.fill_betweenx(np.arange(*pl.ylim()), times[inds_t[0]],
+        times[inds_t[-1]], color='orange', alpha=0.3)
+pl.legend()
+pl.title('Interaction between stimulus-modality and location.')
+pl.show()
diff --git a/examples/stats/plot_cluster_stats_time_frequency.py b/examples/stats/plot_cluster_stats_time_frequency.py
new file mode 100644
index 0000000..a331e9b
--- /dev/null
+++ b/examples/stats/plot_cluster_stats_time_frequency.py
@@ -0,0 +1,149 @@
+"""
+=========================================================================
+Non-parametric between conditions cluster statistic on single trial power
+=========================================================================
+
+This script shows how to compare clusters in time-frequency
+power estimates between conditions. It uses a non-parametric
+statistical procedure based on permutations and cluster
+level statistics.
+
+The procedure consists in:
+
+  - extracting epochs for 2 conditions
+  - computing single trial power estimates
+  - baseline-correcting the power estimates (power ratios)
+  - computing stats to see if the power estimates are significantly
+    different between conditions.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+
+import mne
+from mne import fiff
+from mne.time_frequency import single_trial_power
+from mne.stats import permutation_cluster_test
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+event_id = 1
+tmin = -0.2
+tmax = 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+include = []
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                        stim=False, include=include, exclude='bads')
+
+ch_name = raw.info['ch_names'][picks[0]]
+
+# Load condition 1
+reject = dict(grad=4000e-13, eog=150e-6)
+event_id = 1
+epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                reject=reject)
+data_condition_1 = epochs_condition_1.get_data()  # as 3D matrix
+data_condition_1 *= 1e13  # change unit to fT / cm
+
+# Load condition 2
+event_id = 2
+epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                reject=reject)
+data_condition_2 = epochs_condition_2.get_data()  # as 3D matrix
+data_condition_2 *= 1e13  # change unit to fT / cm
+
+# Take only one channel
+data_condition_1 = data_condition_1[:, 97:98, :]
+data_condition_2 = data_condition_2[:, 97:98, :]
+
+# Time vector
+times = 1e3 * epochs_condition_1.times  # change unit to ms
+
+# Factor to downsample the temporal dimension of the PSD computed by
+# single_trial_power.  Decimation occurs after frequency decomposition and can
+# be used to reduce memory usage (and possibly computational time of
+# downstream operations such as nonparametric statistics) if you don't need
+# high spectrotemporal resolution.
+decim = 2
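+# e.g. with decim = 2 the time axis of the returned power corresponds to
+# times[::2], which is why the baseline mask below uses the decimated times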
+frequencies = np.arange(7, 30, 3)  # define frequencies of interest
+Fs = raw.info['sfreq']  # sampling in Hz
+n_cycles = 1.5
+epochs_power_1 = single_trial_power(data_condition_1, Fs=Fs,
+                                    frequencies=frequencies,
+                                    n_cycles=n_cycles, use_fft=False,
+                                    decim=decim)
+
+epochs_power_2 = single_trial_power(data_condition_2, Fs=Fs,
+                                    frequencies=frequencies,
+                                    n_cycles=n_cycles, use_fft=False,
+                                    decim=decim)
+
+epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
+epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix
+
+# Compute ratio with baseline power (be sure to correct time vector with
+# decimation factor)
+baseline_mask = times[::decim] < 0
+epochs_baseline_1 = np.mean(epochs_power_1[:, :, baseline_mask], axis=2)
+epochs_power_1 /= epochs_baseline_1[..., np.newaxis]
+epochs_baseline_2 = np.mean(epochs_power_2[:, :, baseline_mask], axis=2)
+epochs_power_2 /= epochs_baseline_2[..., np.newaxis]
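+# the [..., np.newaxis] broadcasts the (n_epochs, n_freqs) baseline means
+# across the time axis of the (n_epochs, n_freqs, n_times) power arrays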
+
+###############################################################################
+# Compute statistic
+threshold = 6.0
+T_obs, clusters, cluster_p_values, H0 = \
+                   permutation_cluster_test([epochs_power_1, epochs_power_2],
+                               n_permutations=100, threshold=threshold, tail=0)
+
+###############################################################################
+# View time-frequency plots
+import pylab as pl
+pl.clf()
+pl.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
+pl.subplot(2, 1, 1)
+evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
+pl.plot(times, evoked_contrast.T)
+pl.title('Contrast of evoked response (%s)' % ch_name)
+pl.xlabel('time (ms)')
+pl.ylabel('Magnetic Field (fT/cm)')
+pl.xlim(times[0], times[-1])
+pl.ylim(-100, 200)
+
+pl.subplot(2, 1, 2)
+
+# Create new stats image with only significant clusters
+T_obs_plot = np.nan * np.ones_like(T_obs)
+for c, p_val in zip(clusters, cluster_p_values):
+    if p_val <= 0.05:
+        T_obs_plot[c] = T_obs[c]
+
+pl.imshow(T_obs, cmap=pl.cm.gray, extent=[times[0], times[-1],
+                                          frequencies[0], frequencies[-1]],
+                                  aspect='auto', origin='lower')
+pl.imshow(T_obs_plot, cmap=pl.cm.jet, extent=[times[0], times[-1],
+                                              frequencies[0], frequencies[-1]],
+                                  aspect='auto', origin='lower')
+
+pl.xlabel('time (ms)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Induced power (%s)' % ch_name)
+pl.show()
diff --git a/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py b/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py
new file mode 100644
index 0000000..480d704
--- /dev/null
+++ b/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py
@@ -0,0 +1,224 @@
+"""
+====================================================================
+Mass-univariate twoway repeated measures ANOVA on single trial power
+====================================================================
+
+This script shows how to conduct a mass-univariate repeated measures
+ANOVA. As the model to be fitted assumes two fully crossed factors,
+we will study the interplay between perceptual modality
+(auditory vs. visual) and the location of stimulus presentation
+(left vs. right). Here we use single trials as replications
+(subjects) while iterating over time slices plus frequency bands
+to fit our mass-univariate model. For the sake of simplicity we
+will confine this analysis to one single channel that we know
+exposes a strong induced response. We will then visualize
+each effect by creating a corresponding mass-univariate effect
+image. We conclude by accounting for multiple comparisons with
+a permutation clustering test using the ANOVA as the
+clustering function. The results will finally be compared to
+multiple comparisons corrected using the False Discovery Rate method.
+"""
+# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+
+import mne
+from mne import fiff
+from mne.time_frequency import single_trial_power
+from mne.stats import f_threshold_twoway_rm, f_twoway_rm, fdr_correction
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+event_id = 1
+tmin = -0.2
+tmax = 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+include = []
+raw.info['bads'] += ['MEG 2443']  # bads
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                        stim=False, include=include, exclude='bads')
+
+ch_name = raw.info['ch_names'][picks[0]]
+
+# Load conditions
+reject = dict(grad=4000e-13, eog=150e-6)
+event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                reject=reject)
+
+# make sure all conditions have the same counts, as the ANOVA expects a
+# fully balanced data matrix and does not gracefully handle imbalanced
+# designs (risk of type-I error)
+epochs.equalize_event_counts(event_id, copy=False)
+# Time vector
+times = 1e3 * epochs.times  # change unit to ms
+
+# Factor to down-sample the temporal dimension of the PSD computed by
+# single_trial_power.
+decim = 2
+frequencies = np.arange(7, 30, 3)  # define frequencies of interest
+Fs = raw.info['sfreq']  # sampling in Hz
+n_cycles = frequencies / frequencies[0]
+baseline_mask = times[::decim] < 0
+
+# now create TFR representations for all conditions
+epochs_power = []
+for condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:
+    this_power = single_trial_power(condition, Fs=Fs, frequencies=frequencies,
+        n_cycles=n_cycles, use_fft=False, decim=decim)
+    this_power = this_power[:, 0, :, :]  # we only have one channel.
+    # Compute ratio with baseline power (be sure to correct time vector with
+    # decimation factor)
+    epochs_baseline = np.mean(this_power[:, :, baseline_mask], axis=2)
+    this_power /= epochs_baseline[..., np.newaxis]
+    epochs_power.append(this_power)
+
+###############################################################################
+# Setup repeated measures ANOVA
+
+n_conditions = len(epochs.event_id)
+n_replications = epochs.events.shape[0] / n_conditions
+# we will tell the ANOVA how to interpret the data matrix in terms of
+# factors. This is done via the factor_levels argument, which is a list
+# of the number of factor levels for each factor.
+factor_levels = [2, 2]  # number of levels in each factor
+effects = 'A*B'  # this is the default signature for computing all effects
+# Other possible options are 'A' or 'B' for the corresponding main effects
+# or 'A:B' for the interaction effect only (this notation is borrowed from the
+# R formula language)
+n_frequencies = len(frequencies)
+n_times = len(times[::decim])
+
+# Now we'll assemble the data matrix and swap axes so the trial replications
+# are the first dimension and the conditions are the second dimension
+data = np.swapaxes(np.asarray(epochs_power), 1, 0)
+# reshape last two dimensions in one mass-univariate observation-vector
+data = data.reshape(n_replications, n_conditions, n_frequencies * n_times)
+
+# so we have replications * conditions * observations:
+print data.shape
+
+# The iteration scheme used above for assembling the data matrix makes sure
+# the first two dimensions are organized as expected (with A = modality
+# and B = location):
+#
+#           A1B1 A1B2 A2B1 A2B2
+# trial 1   1.34 2.53 0.97 1.74
+# trial ... .... .... .... ....
+# trial 56  2.45 7.90 3.09 4.76
+#
+# Now we're ready to run our repeated measures ANOVA.
+
+fvals, pvals = f_twoway_rm(data, factor_levels, effects=effects)
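+# with effects='A*B' one F-map (and p-map) is returned per effect, in the
+# order: main effect A, main effect B, interaction A:B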
+
+effect_labels = ['modality', 'location', 'modality by location']
+import pylab as pl
+
+# let's visualize our effects by computing f-images
+for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
+    pl.figure()
+    # show naive F-values in gray
+    pl.imshow(effect.reshape(n_frequencies, n_times), cmap=pl.cm.gray,
+              extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
+              aspect='auto', origin='lower')
+    # create mask for significant time-frequency locations
+    effect = np.ma.masked_array(effect, [sig > .05])
+    pl.imshow(effect.reshape(n_frequencies, n_times), cmap=pl.cm.jet,
+              extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
+              aspect='auto', origin='lower')
+    pl.colorbar()
+    pl.xlabel('time (ms)')
+    pl.ylabel('Frequency (Hz)')
+    pl.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
+    pl.show()
+
+# Note. As we treat trials as subjects, the test only accounts for
+# time-locked responses despite the 'induced' approach.
+# For an analysis of induced power at the group level, averaged TFRs
+# are required.
+
+
+###############################################################################
+# Account for multiple comparisons using FDR versus permutation clustering test
+
+# First we need to slightly modify the ANOVA function to be suitable for
+# the clustering procedure. We also want to set some defaults.
+# Let's first override effects to confine the analysis to the interaction
+effects = 'A:B'
+
+
+# A stat_fun must deal with a variable number of input arguments.
+def stat_fun(*args):
+    # Inside the clustering function each condition will be passed as a
+    # flattened array, necessitated by the clustering procedure. The ANOVA,
+    # however, expects an input array of dimensions:
+    # subjects X conditions X observations (optional).
+    # The following expression catches the list input, swaps the first and
+    # the second dimension, and puts the remaining observations in the third
+    # dimension.
+    data = np.swapaxes(np.asarray(args), 1, 0).reshape(n_replications,
+        n_conditions, n_times * n_frequencies)
+    return f_twoway_rm(data, factor_levels=factor_levels, effects=effects,
+                return_pvals=False)[0]
+    # The ANOVA returns a tuple of f-values and p-values; we pick the former.
+
+
+pthresh = 0.00001  # a strict p-threshold (high F cutoff) to save some time
+f_thresh = f_threshold_twoway_rm(n_replications, factor_levels, effects,
+                                 pthresh)
+tail = 1  # f-test, so tail > 0
+n_permutations = 256  # Save some time (the test won't be too sensitive ...)
+T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
+    epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
+    n_permutations=n_permutations)
+
+# Create new stats image with only significant clusters
+good_clusters = np.where(cluster_p_values < .05)[0]
+T_obs_plot = np.ma.masked_array(T_obs, np.invert(clusters[good_clusters]))
+
+pl.figure()
+for f_image, cmap in zip([T_obs, T_obs_plot], [pl.cm.gray, pl.cm.jet]):
+    pl.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
+          frequencies[0], frequencies[-1]], aspect='auto',
+          origin='lower')
+pl.xlabel('time (ms)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Time-locked response for \'modality by location\' (%s)\n'
+          ' cluster-level corrected (p <= 0.05)' % ch_name)
+pl.show()
+
+# now using FDR
+mask, _ = fdr_correction(pvals[2])
+T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))
+
+pl.figure()
+for f_image, cmap in zip([T_obs, T_obs_plot2], [pl.cm.gray, pl.cm.jet]):
+    pl.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
+          frequencies[0], frequencies[-1]], aspect='auto',
+          origin='lower')
+
+pl.xlabel('time (ms)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Time-locked response for \'modality by location\' (%s)\n'
+          ' FDR corrected (p <= 0.05)' % ch_name)
+pl.show()
+
+# Both cluster-level and FDR correction help to get rid of the putatively
+# significant spots we saw in the naive f-images.
diff --git a/examples/stats/plot_fdr_stats_evoked.py b/examples/stats/plot_fdr_stats_evoked.py
new file mode 100644
index 0000000..897c236
--- /dev/null
+++ b/examples/stats/plot_fdr_stats_evoked.py
@@ -0,0 +1,81 @@
+"""
+=======================================
+FDR correction on T-test on sensor data
+=======================================
+
+One tests if the evoked response significantly deviates from 0.
+The multiple comparisons problem is addressed with
+False Discovery Rate (FDR) correction.
+
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+from scipy import stats
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.stats import bonferroni_correction, fdr_correction
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)[:30]
+
+channel = 'MEG 1332'  # include only this channel in analysis
+include = [channel]
+
+###############################################################################
+# Read epochs for the channel of interest
+picks = fiff.pick_types(raw.info, meg=False, eog=True, include=include,
+                        exclude='bads')
+event_id = 1
+reject = dict(grad=4000e-13, eog=150e-6)
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject)
+X = epochs.get_data()  # as 3D matrix
+X = X[:, 0, :]  # take only one channel to get a 2D array
+
+###############################################################################
+# Compute statistic
+T, pval = stats.ttest_1samp(X, 0)
+alpha = 0.05
+
+n_samples, n_tests = X.shape
+threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
+
+reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
+threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
+
+reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
+threshold_fdr = np.min(np.abs(T)[reject_fdr])
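+# threshold_fdr is the smallest |T| among the tests surviving FDR, i.e. the
+# effective statistic cutoff implied by the correction (used for plotting)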
+
+###############################################################################
+# Plot
+times = 1e3 * epochs.times
+
+import pylab as pl
+pl.close('all')
+pl.plot(times, T, 'k', label='T-stat')
+xmin, xmax = pl.xlim()
+pl.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k',
+          label='p=0.05 (uncorrected)', linewidth=2)
+pl.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r',
+          label='p=0.05 (Bonferroni)', linewidth=2)
+pl.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b',
+          label='p=0.05 (FDR)', linewidth=2)
+pl.legend()
+pl.xlabel("Time (ms)")
+pl.ylabel("T-stat")
+pl.show()
diff --git a/examples/stats/plot_sensor_permutation_test.py b/examples/stats/plot_sensor_permutation_test.py
new file mode 100644
index 0000000..d244049
--- /dev/null
+++ b/examples/stats/plot_sensor_permutation_test.py
@@ -0,0 +1,90 @@
+"""
+=================================
+Permutation T-test on sensor data
+=================================
+
+One tests if the signal significantly deviates from 0
+during a fixed time window of interest. Here the computation
+is performed on the MNE sample dataset between 40 and 60 ms.
+
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+
+import mne
+from mne import fiff
+from mne.stats import permutation_t_test
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id = 1
+tmin = -0.2
+tmax = 0.5
+
+#   Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+#   Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
+include = []  # or stim channel ['STI 014']
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# pick MEG Gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
+                        include=include, exclude='bads')
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
+data = epochs.get_data()
+times = epochs.times
+
+temporal_mask = np.logical_and(0.04 <= times, times <= 0.06)
+data = np.squeeze(np.mean(data[:, :, temporal_mask], axis=2))
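+# data is now 2D (n_epochs, n_channels): one value per sensor, averaged
+# over the 40-60 ms window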
+
+n_permutations = 50000
+T0, p_values, H0 = permutation_t_test(data, n_permutations, n_jobs=2)
+
+significant_sensors = picks[p_values <= 0.05]
+significant_sensors_names = [raw.info['ch_names'][k]
+                             for k in significant_sensors]
+
+print "Number of significant sensors : %d" % len(significant_sensors)
+print "Sensors names : %s" % significant_sensors_names
+
+###############################################################################
+# View location of significantly active sensors
+import pylab as pl
+
+# load sensor layout
+from mne.layouts import read_layout
+layout = read_layout('Vectorview-grad')
+
+# Extract mask and indices of active sensors in layout
+idx_of_sensors = [layout.names.index(name)
+                  for name in significant_sensors_names
+                  if name in layout.names]
+mask_significant_sensors = np.zeros(len(layout.pos), dtype=np.bool)
+mask_significant_sensors[idx_of_sensors] = True
+mask_non_significant_sensors = ~mask_significant_sensors
+
+# plot it
+pl.figure(facecolor='k')
+pl.axis('off')
+pl.axis('tight')
+pl.scatter(layout.pos[mask_significant_sensors, 0],
+           layout.pos[mask_significant_sensors, 1], s=50, c='r')
+pl.scatter(layout.pos[mask_non_significant_sensors, 0],
+           layout.pos[mask_non_significant_sensors, 1], c='w')
+title = 'MNE sample data (Left auditory between 40 and 60 ms)'
+pl.figtext(0.03, 0.93, title, color='w', fontsize=18)
+pl.show()
diff --git a/examples/time_frequency/README.txt b/examples/time_frequency/README.txt
new file mode 100644
index 0000000..16050ac
--- /dev/null
+++ b/examples/time_frequency/README.txt
@@ -0,0 +1,6 @@
+
+Time-Frequency Examples
+-----------------------
+
+Some examples of how to explore the time-frequency content of M/EEG data with MNE.
+
diff --git a/examples/time_frequency/plot_compute_raw_data_spectrum.py b/examples/time_frequency/plot_compute_raw_data_spectrum.py
new file mode 100644
index 0000000..798074a
--- /dev/null
+++ b/examples/time_frequency/plot_compute_raw_data_spectrum.py
@@ -0,0 +1,86 @@
+"""
+==================================================
+Compute the power spectral density of raw data
+==================================================
+
+This script shows how to compute the power spectral density (PSD)
+of measurements on a raw dataset. It also shows the effect of applying SSP
+to the data to reduce ECG and EOG artifacts.
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+
+from mne import fiff, read_proj, read_selection
+from mne.time_frequency import compute_raw_psd
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif'
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname, preload=True)
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# Add SSP projection vectors to reduce EOG and ECG artifacts
+projs = read_proj(proj_fname)
+raw.add_proj(projs, remove_existing=True)
+
+# Pick MEG magnetometers in the Left-temporal region
+selection = read_selection('Left-temporal')
+picks = fiff.pick_types(raw.info, meg='mag', eeg=False, eog=False,
+                        stim=False, exclude='bads', selection=selection)
+
+# Let's just look at the first few channels for demonstration purposes
+picks = picks[:4]
+
+tmin, tmax = 0, 60  # use the first 60s of data
+fmin, fmax = 2, 300  # look at frequencies between 2 and 300Hz
+NFFT = 2048  # the FFT size (NFFT). Ideally a power of 2
+psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                              fmin=fmin, fmax=fmax, NFFT=NFFT, n_jobs=1,
+                              plot=False, proj=False)
+
+# And now do the same with SSP applied
+psds_ssp, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                  fmin=fmin, fmax=fmax, NFFT=NFFT, n_jobs=1,
+                                  plot=False, proj=True)
+
+# And now do the same with SSP + notch filtering
+raw.notch_filter(np.arange(60, 241, 60), picks=picks, n_jobs=1)
+psds_notch, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                    fmin=fmin, fmax=fmax, NFFT=NFFT, n_jobs=1,
+                                    plot=False, proj=True)
+
+# Convert PSDs to dB
+psds = 10 * np.log10(psds)
+psds_ssp = 10 * np.log10(psds_ssp)
+psds_notch = 10 * np.log10(psds_notch)
+
+###############################################################################
+# Compute mean and standard deviation across channels and then plot
+import pylab as pl
+
+
+def plot_psds(freqs, psds, fill_color):
+    psd_mean = np.mean(psds, axis=0)
+    psd_std = np.std(psds, axis=0)
+    hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
+
+    pl.plot(freqs, psd_mean)
+    pl.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1], color=fill_color,
+                    alpha=0.5)
+
+pl.figure()
+plot_psds(freqs, psds, (0, 0, 1, .3))
+plot_psds(freqs, psds_ssp, (0, 1, 0, .3))
+plot_psds(freqs, psds_notch, (0, 0.5, 0.5, .3))
+pl.xlabel('Freq (Hz)')
+pl.ylabel('Power Spectral Density (dB/Hz)')
+pl.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
+pl.show()
diff --git a/examples/time_frequency/plot_compute_source_psd_epochs.py b/examples/time_frequency/plot_compute_source_psd_epochs.py
new file mode 100644
index 0000000..9f49141
--- /dev/null
+++ b/examples/time_frequency/plot_compute_source_psd_epochs.py
@@ -0,0 +1,89 @@
+"""
+=====================================================================
+Compute Power Spectral Density of inverse solution from single epochs
+=====================================================================
+
+Compute PSD of dSPM inverse solution on single trial epochs restricted
+to a brain label. The PSD is computed using a multi-taper method with
+Discrete Prolate Spheroidal Sequence (DPSS) windows.
+
+"""
+
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+import pylab as pl
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne.minimum_norm import read_inverse_operator, compute_source_psd_epochs
+
+
+data_path = sample.data_path()
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
+fname_event = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+label_name = 'Aud-lh'
+fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+snr = 1.0  # use smaller SNR for raw data
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
+
+# Load data
+inverse_operator = read_inverse_operator(fname_inv)
+label = mne.read_label(fname_label)
+raw = Raw(fname_raw)
+events = mne.read_events(fname_event)
+
+# Set up pick list
+include = []
+raw.info['bads'] += ['EEG 053']  # bads + 1 more
+
+# pick MEG channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                   include=include, exclude='bads')
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
+                                                    eog=150e-6))
+
+# define frequencies of interest
+fmin, fmax = 0., 70.
+bandwidth = 4.  # bandwidth of the windows in Hz
+
+# compute source space psd in label
+
+# Note: By using "return_generator=True" stcs will be a generator object
+# instead of a list. This allows us to iterate without having to
+# keep everything in memory.
+
+stcs = compute_source_psd_epochs(epochs, inverse_operator, lambda2=lambda2,
+                                 method=method, fmin=fmin, fmax=fmax,
+                                 bandwidth=bandwidth, label=label,
+                                 return_generator=True)
+
+# compute average PSD over the first 10 epochs
+n_epochs = 10
+for i, stc in enumerate(stcs):
+    if i >= n_epochs:
+        break
+
+    if i == 0:
+        psd_avg = np.mean(stc.data, axis=0)
+    else:
+        psd_avg += np.mean(stc.data, axis=0)
+
+psd_avg /= n_epochs
+freqs = stc.times  # the frequencies are stored here
+
+pl.figure()
+pl.plot(freqs, psd_avg)
+pl.xlabel('Freq (Hz)')
+pl.ylabel('Power Spectral Density')
+pl.show()
diff --git a/examples/time_frequency/plot_source_label_time_frequency.py b/examples/time_frequency/plot_source_label_time_frequency.py
new file mode 100644
index 0000000..a1654e9
--- /dev/null
+++ b/examples/time_frequency/plot_source_label_time_frequency.py
@@ -0,0 +1,86 @@
+"""
+=========================================================
+Compute power and phase lock in label of the source space
+=========================================================
+
+Returns time-frequency maps of induced power and phase lock
+in the source space. The inverse method is linear, based on the dSPM
+inverse operator.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.minimum_norm import read_inverse_operator, source_induced_power
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+label_name = 'Aud-lh'
+fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
+
+tmin, tmax, event_id = -0.2, 0.5, 1
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.find_events(raw, stim_channel='STI 014')
+inverse_operator = read_inverse_operator(fname_inv)
+
+include = []
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# picks MEG channels
+picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+                                stim=False, include=include, exclude='bads')
+
+# Load condition 1
+event_id = 1
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True)
+
+# Compute a source estimate per frequency band
+frequencies = np.arange(7, 30, 2)  # define frequencies of interest
+label = mne.read_label(fname_label)
+n_cycles = frequencies / float(7)  # different number of cycle per frequency
+power, phase_lock = source_induced_power(epochs, inverse_operator, frequencies,
+                            label, baseline=(-0.1, 0), baseline_mode='percent',
+                            n_cycles=n_cycles, n_jobs=1)
+
+power = np.mean(power, axis=0)  # average over sources
+phase_lock = np.mean(phase_lock, axis=0)  # average over sources
+times = epochs.times
+
+###############################################################################
+# View time-frequency plots
+import pylab as pl
+pl.clf()
+pl.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
+pl.subplot(2, 1, 1)
+pl.imshow(20 * power, extent=[times[0], times[-1],
+                                      frequencies[0], frequencies[-1]],
+          aspect='auto', origin='lower')
+pl.xlabel('Time (s)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Induced power in %s' % label_name)
+pl.colorbar()
+
+pl.subplot(2, 1, 2)
+pl.imshow(phase_lock, extent=[times[0], times[-1],
+                              frequencies[0], frequencies[-1]],
+          aspect='auto', origin='lower')
+pl.xlabel('Time (s)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Phase-lock in %s' % label_name)
+pl.colorbar()
+pl.show()
diff --git a/examples/time_frequency/plot_source_power_spectrum.py b/examples/time_frequency/plot_source_power_spectrum.py
new file mode 100644
index 0000000..0a7d05f
--- /dev/null
+++ b/examples/time_frequency/plot_source_power_spectrum.py
@@ -0,0 +1,55 @@
+"""
+=========================================================
+Compute power spectral densities of the sources with dSPM
+=========================================================
+
+Returns an STC file containing the PSD (in dB) of each of the sources.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.minimum_norm import read_inverse_operator, compute_source_psd
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname, verbose=False)
+events = mne.find_events(raw, stim_channel='STI 014')
+inverse_operator = read_inverse_operator(fname_inv)
+raw.info['bads'] = ['MEG 2443', 'EEG 053']
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+                        stim=False, exclude='bads')
+
+tmin, tmax = 0, 120  # use the first 120s of data
+fmin, fmax = 4, 100  # look at frequencies between 4 and 100Hz
+NFFT = 2048  # the FFT size (NFFT). Ideally a power of 2
+label = mne.read_label(fname_label)
+
+stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
+                         tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
+                         pick_normal=True, NFFT=NFFT, label=label)
+
+stc.save('psd_dSPM')
+
+###############################################################################
+# View PSD of sources in label
+import pylab as pl
+pl.plot(1e3 * stc.times, stc.data.T)
+pl.xlabel('Frequency (Hz)')
+pl.ylabel('PSD (dB)')
+pl.title('Source Power Spectrum (PSD)')
+pl.show()
diff --git a/examples/time_frequency/plot_source_space_time_frequency.py b/examples/time_frequency/plot_source_space_time_frequency.py
new file mode 100644
index 0000000..81cea6a
--- /dev/null
+++ b/examples/time_frequency/plot_source_space_time_frequency.py
@@ -0,0 +1,67 @@
+"""
+===================================================
+Compute induced power in the source space with dSPM
+===================================================
+
+Returns STC files, i.e. source estimates of induced power,
+for different bands in the source space. The inverse method
+is linear, based on the dSPM inverse operator.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.minimum_norm import read_inverse_operator, source_band_induced_power
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+tmin, tmax, event_id = -0.2, 0.5, 1
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.find_events(raw, stim_channel='STI 014')
+inverse_operator = read_inverse_operator(fname_inv)
+
+include = []
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+                        stim=False, include=include, exclude='bads')
+
+# Load condition 1
+event_id = 1
+events = events[:10]  # take 10 events to keep the computation time low
+# Use linear detrend to reduce any edge artifacts
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True, detrend=1)
+
+# Compute a source estimate per frequency band
+bands = dict(alpha=[9, 11], beta=[18, 22])
+
+stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
+                                 use_fft=False, n_jobs=1)
+
+for b, stc in stcs.iteritems():
+    stc.save('induced_power_%s' % b)
+
+###############################################################################
+# plot mean power
+import pylab as pl
+pl.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
+pl.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
+pl.xlabel('Time (s)')
+pl.ylabel('Power')
+pl.legend()
+pl.title('Mean source induced power')
+pl.show()
diff --git a/examples/time_frequency/plot_temporal_whitening.py b/examples/time_frequency/plot_temporal_whitening.py
new file mode 100644
index 0000000..45ed5b4
--- /dev/null
+++ b/examples/time_frequency/plot_temporal_whitening.py
@@ -0,0 +1,63 @@
+"""
+================================
+Temporal whitening with AR model
+================================
+
+This script shows how to fit an AR model to data and use it
+to temporally whiten the signals.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+from scipy import signal
+import pylab as pl
+
+import mne
+from mne.time_frequency import ar_raw
+from mne.datasets import sample
+data_path = sample.data_path()
+
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+proj_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
+
+raw = mne.fiff.Raw(raw_fname)
+proj = mne.read_proj(proj_fname)
+raw.info['projs'] += proj
+raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels
+
+# Set up pick list: Gradiometers - bad channels
+picks = mne.fiff.pick_types(raw.info, meg='grad', exclude='bads')
+
+order = 5  # define model order
+picks = picks[:5]
+
+# Estimate AR models on raw data
+coefs = ar_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
+mean_coefs = np.mean(coefs, axis=0)  # mean model across channels
+
+filt = np.r_[1, -mean_coefs]  # filter coefficients
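+# Convolving with [1, -a_1, ..., -a_p] subtracts the AR prediction of each
+# sample from its past, leaving the (approximately white) innovation;
+# filtering the innovation with the inverse IIR filter regenerates the signal.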
+d, times = raw[0, 1e4:2e4]  # look at one channel from now on
+d = d.ravel()  # make flat vector
+innovation = signal.convolve(d, filt, 'valid')
+d_ = signal.lfilter([1], filt, innovation)  # regenerate the signal
+d_ = np.r_[d_[0] * np.ones(order), d_]  # dummy samples to keep signal length
+
+###############################################################################
+# Plot the different time series and PSDs
+pl.close('all')
+pl.figure()
+pl.plot(d[:100], label='signal')
+pl.plot(d_[:100], label='regenerated signal')
+pl.legend()
+
+pl.figure()
+pl.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
+pl.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
+pl.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
+pl.legend(('Signal', 'Innovation', 'Regenerated signal'))
+pl.show()
diff --git a/examples/time_frequency/plot_tfr_topography.py b/examples/time_frequency/plot_tfr_topography.py
new file mode 100644
index 0000000..5757485
--- /dev/null
+++ b/examples/time_frequency/plot_tfr_topography.py
@@ -0,0 +1,80 @@
+"""
+===================================================================
+Plot time-frequency representations on topographies for MEG sensors
+===================================================================
+
+Both induced power and phase locking values are displayed.
+"""
+print __doc__
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import pylab as pl
+import mne
+from mne import fiff
+from mne.time_frequency import induced_power
+from mne.viz import plot_topo_power, plot_topo_phase_lock
+from mne.datasets import sample
+
+data_path = sample.data_path()
+
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+include = []
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                        stim=False, include=include, exclude='bads')
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
+data = epochs.get_data()  # as 3D matrix
+
+layout = mne.layouts.read_layout('Vectorview-all')
+
+###############################################################################
+# Calculate power and phase locking value
+
+frequencies = np.arange(7, 30, 3)  # define frequencies of interest
+n_cycles = frequencies / float(7)  # different number of cycle per frequency
+Fs = raw.info['sfreq']  # sampling in Hz
+decim = 3
+power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
+                                  n_cycles=n_cycles, n_jobs=1, use_fft=False,
+                                  decim=decim, zero_mean=True)
+
+###############################################################################
+# Prepare topography plots, set baseline correction parameters
+
+baseline = (None, 0)  # set the baseline for induced power
+mode = 'ratio'  # set mode for baseline rescaling
+
+###############################################################################
+# Show topography of power.
+
+title = 'Induced power - MNE sample data'
+plot_topo_power(epochs, power, frequencies, layout, baseline=baseline,
+                mode=mode, decim=decim, vmin=0., vmax=14, title=title)
+pl.show()
+
+###############################################################################
+# Show topography of phase locking value (PLV)
+
+mode = None  # no baseline rescaling for PLV
+
+title = 'Phase locking value - MNE sample data'
+plot_topo_phase_lock(epochs, phase_lock, frequencies, layout,
+                     baseline=baseline, mode=mode, decim=decim, title=title)
+
+pl.show()
diff --git a/examples/time_frequency/plot_time_frequency.py b/examples/time_frequency/plot_time_frequency.py
new file mode 100644
index 0000000..4fefd0a
--- /dev/null
+++ b/examples/time_frequency/plot_time_frequency.py
@@ -0,0 +1,95 @@
+"""
+=========================================================
+Time-frequency: Induced power and inter-trial phase-lock
+=========================================================
+
+This script shows how to compute induced power and inter-trial
+phase-lock for a list of epochs read in a raw file given
+a list of events.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import numpy as np
+
+import mne
+from mne import fiff
+from mne.time_frequency import induced_power
+from mne.datasets import sample
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+include = []
+raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                        stim=False, include=include, exclude='bads')
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
+data = epochs.get_data()  # as 3D matrix
+evoked = epochs.average()  # compute evoked fields
+
+times = 1e3 * epochs.times  # change unit to ms
+evoked_data = evoked.data * 1e13  # change unit to fT / cm
+
+# Take only one channel
+data = data[:, 97:98, :]
+evoked_data = evoked_data[97:98, :]
+
+frequencies = np.arange(7, 30, 3)  # define frequencies of interest
+n_cycles = frequencies / float(7)  # different number of cycle per frequency
+Fs = raw.info['sfreq']  # sampling in Hz
+decim = 3
+power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
+                                  n_cycles=n_cycles, n_jobs=1, use_fft=False,
+                                  decim=decim, zero_mean=True)
+
+# baseline corrections with ratio
+power /= np.mean(power[:, :, times[::decim] < 0], axis=2)[:, :, None]
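+# each channel/frequency power time course is divided by its mean over the
+# pre-stimulus samples; times[::decim] matches the decimated time axis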
+
+###############################################################################
+# View time-frequency plots
+import pylab as pl
+pl.clf()
+pl.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.63)
+pl.subplot(3, 1, 1)
+pl.plot(times, evoked_data.T)
+pl.title('Evoked response (%s)' % evoked.ch_names[97])
+pl.xlabel('time (ms)')
+pl.ylabel('Magnetic Field (fT/cm)')
+pl.xlim(times[0], times[-1])
+pl.ylim(-150, 300)
+
+pl.subplot(3, 1, 2)
+pl.imshow(20 * np.log10(power[0]), extent=[times[0], times[-1],
+                                      frequencies[0], frequencies[-1]],
+          aspect='auto', origin='lower')
+pl.xlabel('Time (ms)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Induced power (%s)' % evoked.ch_names[97])
+pl.colorbar()
+
+pl.subplot(3, 1, 3)
+pl.imshow(phase_lock[0], extent=[times[0], times[-1],
+                              frequencies[0], frequencies[-1]],
+          aspect='auto', origin='lower')
+pl.xlabel('Time (ms)')
+pl.ylabel('Frequency (Hz)')
+pl.title('Phase-lock (%s)' % evoked.ch_names[97])
+pl.colorbar()
+pl.show()
diff --git a/mne/__init__.py b/mne/__init__.py
new file mode 100644
index 0000000..b2d9480
--- /dev/null
+++ b/mne/__init__.py
@@ -0,0 +1,68 @@
+"""MNE for MEG and EEG data analysis
+"""
+
+__version__ = '0.6'
+
+# have to import verbose first since it's needed by many things
+from .utils import set_log_level, set_log_file, verbose, set_config, \
+                   get_config, get_config_path
+
+from .cov import read_cov, write_cov, Covariance, \
+                 compute_covariance, compute_raw_data_covariance, \
+                 whiten_evoked
+from .event import read_events, write_events, find_events, merge_events, \
+                   pick_events, make_fixed_length_events, concatenate_events, \
+                   find_stim_steps
+from .forward import read_forward_solution, apply_forward, apply_forward_raw, \
+                     do_forward_solution, average_forward_solutions, \
+                     write_forward_solution
+from .source_estimate import read_stc, write_stc, read_w, write_w, \
+                             read_source_estimate, \
+                             SourceEstimate, morph_data, \
+                             morph_data_precomputed, compute_morph_matrix, \
+                             grade_to_tris, grade_to_vertices, \
+                             spatial_src_connectivity, \
+                             spatial_tris_connectivity, \
+                             spatial_dist_connectivity, \
+                             spatio_temporal_src_connectivity, \
+                             spatio_temporal_tris_connectivity, \
+                             spatio_temporal_dist_connectivity, \
+                             save_stc_as_volume, extract_label_time_course
+from .surface import read_bem_surfaces, read_surface, write_bem_surface, \
+                     write_surface
+from .source_space import read_source_spaces, vertex_to_mni, \
+                          write_source_spaces
+from .epochs import Epochs, read_epochs
+from .label import label_time_courses, read_label, label_sign_flip, \
+                   write_label, stc_to_label, grow_labels, Label, \
+                   BiHemiLabel, labels_from_parc
+from .misc import parse_config, read_reject_parameters
+from .transforms import transform_coordinates, read_trans, write_trans
+from .proj import read_proj, write_proj, compute_proj_epochs, \
+                  compute_proj_evoked, compute_proj_raw, sensitivity_map
+from .selection import read_selection
+from .dipole import read_dip
+from . import beamformer
+from . import connectivity
+from . import cuda
+from . import datasets
+from . import epochs
+from . import fiff
+from . import filter
+from . import layouts
+from . import minimum_norm
+from . import mixed_norm
+from . import preprocessing
+from . import simulation
+from . import stats
+from . import tests
+from . import time_frequency
+from . import viz
+
+# initialize logging
+set_log_level(None, False)
+set_log_file()
+
+# initialize CUDA
+if get_config('MNE_USE_CUDA', 'false').lower() == 'true':
+    cuda.init_cuda()
diff --git a/mne/baseline.py b/mne/baseline.py
new file mode 100644
index 0000000..d12b7a6
--- /dev/null
+++ b/mne/baseline.py
@@ -0,0 +1,87 @@
+"""Util function to baseline correct data
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import logging
+logger = logging.getLogger('mne')
+
+from . import verbose
+
+
+@verbose
+def rescale(data, times, baseline, mode, verbose=None, copy=True):
+    """Rescale aka baseline correct data
+
+    Parameters
+    ----------
+    data : array
+        It can be of any shape. The only constraint is that the last
+        dimension should be time.
+    times : 1D array
+        Time instants in seconds.
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) the whole time
+        interval is used. If None, no correction is applied.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+        Do baseline correction with: ratio (power is divided by mean
+        power during baseline), logratio (the log10 of that ratio), mean
+        (the baseline mean is subtracted), percent (the baseline mean is
+        subtracted and the result is divided by the baseline mean), or
+        zscore (power is divided by the standard deviation of power during
+        baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    copy : bool
+        Operate on a copy of the data, or in place.
+
+    Returns
+    -------
+    data_scaled : array
+        Array of same shape as data after rescaling.
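+
+    Examples
+    --------
+    A minimal illustrative sketch (shapes chosen arbitrarily):
+
+    >>> import numpy as np
+    >>> data = np.random.randn(5, 4, 100)  # e.g. epochs x channels x times
+    >>> times = np.linspace(-0.2, 0.5, 100)
+    >>> corrected = rescale(data, times, baseline=(None, 0), mode='mean')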
+    """
+    if copy:
+        data = data.copy()
+
+    valid_modes = ['logratio', 'ratio', 'zscore', 'mean', 'percent']
+    if mode not in valid_modes:
+        raise Exception('mode should be one of: %s' % valid_modes)
+
+    if baseline is not None:
+        logger.info("Applying baseline correction ... (mode: %s)" % mode)
+        bmin, bmax = baseline
+        if bmin is None:
+            imin = 0
+        else:
+            imin = int(np.where(times >= bmin)[0][0])
+        if bmax is None:
+            imax = len(times)
+        else:
+            imax = int(np.where(times <= bmax)[0][-1]) + 1
+
+        mean = np.mean(data[..., imin:imax], axis=-1)[..., None]
+        if mode == 'mean':
+            data -= mean
+        elif mode == 'logratio':
+            data /= mean
+            data = np.log10(data)  # a value of 1 means 10 times bigger
+        elif mode == 'ratio':
+            data /= mean
+        elif mode == 'zscore':
+            std = np.std(data[..., imin:imax], axis=-1)[..., None]
+            data -= mean
+            data /= std
+        elif mode == 'percent':
+            data -= mean
+            data /= mean
+
+    else:
+        logger.info("No baseline correction applied...")
+
+    return data
diff --git a/mne/beamformer/__init__.py b/mne/beamformer/__init__.py
new file mode 100644
index 0000000..b6c1ad7
--- /dev/null
+++ b/mne/beamformer/__init__.py
@@ -0,0 +1,4 @@
+"""Beamformers for source localization
+"""
+
+from ._lcmv import lcmv, lcmv_epochs, lcmv_raw
diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py
new file mode 100644
index 0000000..ffe869b
--- /dev/null
+++ b/mne/beamformer/_lcmv.py
@@ -0,0 +1,398 @@
+"""Compute Linearly constrained minimum variance (LCMV) beamformer.
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Roman Goj <roman.goj at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..fiff.constants import FIFF
+from ..fiff.proj import make_projector
+from ..fiff.pick import pick_types, pick_channels_forward, pick_channels_cov
+from ..forward import _subject_from_forward
+from ..minimum_norm.inverse import _get_vertno, combine_xyz
+from ..cov import compute_whitener
+from ..source_estimate import SourceEstimate
+from ..source_space import label_src_vertno_sel
+from .. import verbose
+
+
+@verbose
+def _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
+                label=None, picks=None, pick_ori=None, verbose=None):
+    """ LCMV beamformer for evoked data, single epochs, and raw data
+
+    Parameters
+    ----------
+    data : array or list / iterable
+        Sensor space data. If data.ndim == 2 a single observation is assumed
+        and a single stc is returned. If data.ndim == 3 or if data is
+        a list / iterable, a list of stc's is returned.
+    info : dict
+        Measurement info.
+    tmin : float
+        Time of first sample.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    picks : array of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    pick_ori : None | 'max-power'
+        If 'max-power', the source orientation that maximizes output source
+        power is chosen.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate (or list of SourceEstimate)
+        Source time courses.
+    """
+
+    is_free_ori = forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+
+    if pick_ori in ['max-power'] and not is_free_ori:
+        raise ValueError('Max-power orientation can only be picked '
+                         'when a forward operator with free orientation is '
+                         'used.')
+
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, exclude='bads')
+
+    ch_names = [info['ch_names'][k] for k in picks]
+
+    # restrict forward solution to selected channels
+    forward = pick_channels_forward(forward, include=ch_names)
+
+    # get gain matrix (forward operator)
+    if label is not None:
+        vertno, src_sel = label_src_vertno_sel(label, forward['src'])
+
+        if is_free_ori:
+            src_sel = 3 * src_sel
+            src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2]
+            src_sel = src_sel.ravel()
+
+        G = forward['sol']['data'][:, src_sel]
+    else:
+        vertno = _get_vertno(forward['src'])
+        G = forward['sol']['data']
+
+    # Handle SSPs
+    proj, ncomp, _ = make_projector(info['projs'], ch_names)
+    G = np.dot(proj, G)
+
+    # Handle whitening + data covariance
+    whitener, _ = compute_whitener(noise_cov, info, picks)
+
+    # whiten the leadfield
+    G = np.dot(whitener, G)
+
+    # Apply SSPs + whitener to data covariance
+    data_cov = pick_channels_cov(data_cov, include=ch_names)
+    Cm = data_cov['data']
+    Cm = np.dot(proj, np.dot(Cm, proj.T))
+    Cm = np.dot(whitener, np.dot(Cm, whitener.T))
+
+    # Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
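+    # Note: instead of the additive loading commented out above, `reg` is
+    # effectively passed to scipy.linalg.pinv as the cutoff for small
+    # singular values (relative to the largest), which regularizes Cm_inv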
+    Cm_inv = linalg.pinv(Cm, reg)
+
+    # Compute spatial filters
+    W = np.dot(G.T, Cm_inv)
+    n_orient = 3 if is_free_ori else 1
+    n_sources = G.shape[1] // n_orient
+    for k in range(n_sources):
+        Wk = W[n_orient * k: n_orient * k + n_orient]
+        Gk = G[:, n_orient * k: n_orient * k + n_orient]
+        Ck = np.dot(Wk, Gk)
+
+        # Find source orientation maximizing output source power
+        if pick_ori == 'max-power':
+            eig_vals, eig_vecs = linalg.eigh(Ck)
+
+            # Choosing the eigenvector associated with the middle eigenvalue.
+            # The middle and not the minimal eigenvalue is used because MEG is
+            # insensitive to one (radial) of the three dipole orientations and
+            # therefore the smallest eigenvalue reflects mostly noise.
+            for i in range(3):
+                if i != eig_vals.argmax() and i != eig_vals.argmin():
+                    idx_middle = i
+
+            # TODO: The eigenvector associated with the smallest eigenvalue
+            # should probably be used when using combined EEG and MEG data
+            max_ori = eig_vecs[:, idx_middle]
+
+            Wk[:] = np.dot(max_ori, Wk)
+            Ck = np.dot(max_ori, np.dot(Ck, max_ori))
+            is_free_ori = False
+
+        if is_free_ori:
+            # Free source orientation
+            Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
+        else:
+            # Fixed source orientation
+            Wk /= Ck
+
+    # Pick source orientation maximizing output source power
+    if pick_ori == 'max-power':
+        W = W[0::3]
+
+    # noise normalization
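+    # since the data are whitened, dividing each weight (row) by its norm
+    # makes unit-variance sensor noise project to unit variance in source
+    # space (a neural-activity-index-style normalization)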
+    noise_norm = np.sum(W ** 2, axis=1)
+    if is_free_ori:
+        noise_norm = np.sum(np.reshape(noise_norm, (-1, 3)), axis=1)
+    noise_norm = np.sqrt(noise_norm)
+
+    if not is_free_ori:
+        W /= noise_norm[:, None]
+
+    if isinstance(data, np.ndarray) and data.ndim == 2:
+        data = [data]
+        return_single = True
+    else:
+        return_single = False
+
+    subject = _subject_from_forward(forward)
+    for i, M in enumerate(data):
+        if len(M) != len(picks):
+            raise ValueError('data and picks must have the same length')
+
+        if not return_single:
+            logger.info("Processing epoch : %d" % (i + 1))
+
+        # SSP and whitening
+        M = np.dot(proj, M)
+        M = np.dot(whitener, M)
+
+        # project to source space using beamformer weights
+
+        if is_free_ori:
+            sol = np.dot(W, M)
+            logger.info('combining the current components...')
+            sol = combine_xyz(sol)
+            sol /= noise_norm[:, None]
+        else:
+            # Linear inverse: do computation here or delayed
+            if M.shape[0] < W.shape[0] and pick_ori != 'max-power':
+                sol = (W, M)
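+                # keep (W, M) as a factored pair: with fewer channels than
+                # sources, SourceEstimate can apply the product lazily
+                # instead of forming the full source-space matrix here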
+            else:
+                sol = np.dot(W, M)
+            if pick_ori == 'max-power':
+                sol = np.abs(sol)
+
+        tstep = 1.0 / info['sfreq']
+        yield SourceEstimate(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                             subject=subject)
+
+    logger.info('[done]')
+
+
+@verbose
+def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
+         pick_ori=None, verbose=None):
+    """Linearly Constrained Minimum Variance (LCMV) beamformer.
+
+    Compute Linearly Constrained Minimum Variance (LCMV) beamformer
+    on evoked data.
+
+    NOTE : This implementation has not been heavily tested, so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    evoked : Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    pick_ori : None | 'max-power'
+        If 'max-power', the source orientation that maximizes output source
+        power is chosen.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate
+        Source time courses.
+
+    Notes
+    -----
+    The original reference is:
+    Van Veen et al. Localization of brain electrical activity via linearly
+    constrained minimum variance spatial filtering.
+    Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
+
+    The reference for finding the max-power orientation is:
+    Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
+    beamformers for neuromagnetic source reconstruction.
+    Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
+    """
+
+    info = evoked.info
+    data = evoked.data
+    tmin = evoked.times[0]
+
+    stc = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
+                      label, pick_ori=pick_ori).next()
+
+    return stc
+
+
+@verbose
+def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
+                pick_ori=None, return_generator=False, verbose=None):
+    """Linearly Constrained Minimum Variance (LCMV) beamformer.
+
+    Compute Linearly Constrained Minimum Variance (LCMV) beamformer
+    on single trial data.
+
+    NOTE : This implementation has not been heavily tested, so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    epochs : Epochs
+        Single trial epochs.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    pick_ori : None | 'max-power'
+        If 'max-power', the source orientation that maximizes output source
+        power is chosen.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : list | generator of SourceEstimate
+        The source estimates for all epochs.
+
+    Notes
+    -----
+    The original reference is:
+    Van Veen et al. Localization of brain electrical activity via linearly
+    constrained minimum variance spatial filtering.
+    Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
+
+    The reference for finding the max-power orientation is:
+    Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
+    beamformers for neuromagnetic source reconstruction.
+    Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
+    """
+
+    info = epochs.info
+    tmin = epochs.times[0]
+
+    # use only the good data channels
+    picks = pick_types(info, meg=True, eeg=True, exclude='bads')
+    data = epochs.get_data()[:, picks, :]
+
+    stcs = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
+                       label, pick_ori=pick_ori)
+
+    if not return_generator:
+        stcs = [s for s in stcs]
+
+    return stcs
+
+
+@verbose
+def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
+             start=None, stop=None, picks=None, pick_ori=None, verbose=None):
+    """Linearly Constrained Minimum Variance (LCMV) beamformer.
+
+    Compute Linearly Constrained Minimum Variance (LCMV) beamformer
+    on raw data.
+
+    NOTE : This implementation has not been heavily tested, so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    raw : mne.fiff.Raw
+        Raw data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    start : int
+        Index of first time sample (index, not time in seconds).
+    stop : int
+        Index of first time sample not to include (index, not time in
+        seconds).
+    picks : array of int
+        Channel indices in raw to use for beamforming (if None all channels
+        are used except bad channels).
+    pick_ori : None | 'max-power'
+        If 'max-power', the source orientation that maximizes output source
+        power is chosen.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate
+        Source time courses.
+
+    Notes
+    -----
+    The original reference is:
+    Van Veen et al. Localization of brain electrical activity via linearly
+    constrained minimum variance spatial filtering.
+    Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
+
+    The reference for finding the max-power orientation is:
+    Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
+    beamformers for neuromagnetic source reconstruction.
+    Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
+    """
+
+    info = raw.info
+
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, exclude='bads')
+
+    data, times = raw[picks, start:stop]
+    tmin = times[0]
+
+    stc = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
+                      label, picks, pick_ori).next()
+
+    return stc
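
A hedged usage sketch for the entry points above, assuming the 0.6-era API
(file names are illustrative; any evoked/forward/covariance set with
matching channels will do):

    import mne
    from mne.beamformer import lcmv

    evoked = mne.fiff.read_evoked('sample_audvis-ave.fif', setno=0)
    forward = mne.read_forward_solution('sample_audvis-meg-oct-6-fwd.fif')
    noise_cov = mne.read_cov('sample_audvis-cov.fif')
    data_cov = mne.read_cov('sample_audvis-data-cov.fif')  # hypothetical file
    stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
               pick_ori='max-power')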
diff --git a/mne/beamformer/tests/__init__.py b/mne/beamformer/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py
new file mode 100644
index 0000000..56c41ec
--- /dev/null
+++ b/mne/beamformer/tests/test_lcmv.py
@@ -0,0 +1,153 @@
+import os.path as op
+
+from nose.tools import assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+
+import mne
+from mne.datasets import sample
+from mne.beamformer import lcmv, lcmv_epochs, lcmv_raw
+
+
+data_path = sample.data_path()
+fname_data = op.join(data_path, 'MEG', 'sample',
+                            'sample_audvis-ave.fif')
+fname_raw = op.join(data_path, 'MEG', 'sample',
+                            'sample_audvis_raw.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample',
+                            'sample_audvis-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                            'sample_audvis-meg-oct-6-fwd.fif')
+fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
+                            'sample_audvis-meg-vol-7-fwd.fif')
+fname_event = op.join(data_path, 'MEG', 'sample',
+                            'sample_audvis_raw-eve.fif')
+label = 'Aud-lh'
+fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
+
+label = mne.read_label(fname_label)
+noise_cov = mne.read_cov(fname_cov)
+# preloading raw here increases mem requirements by 400 MB for all nosetests
+# that include this file's parent directory :(
+raw = mne.fiff.Raw(fname_raw, preload=False)
+forward = mne.read_forward_solution(fname_fwd)
+forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True)
+forward_fixed = mne.read_forward_solution(fname_fwd, force_fixed=True,
+                                          surf_ori=True)
+forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
+events = mne.read_events(fname_event)
+
+
+def test_lcmv():
+    """Test LCMV with evoked data and single trials
+    """
+    event_id, tmin, tmax = 1, -0.1, 0.15
+
+    # Setup for reading the raw data
+    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+
+    # Set up pick list: EEG + MEG - bad channels (modify to your needs)
+    left_temporal_channels = mne.read_selection('Left-temporal')
+    picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False,
+                                stim=True, eog=True, exclude='bads',
+                                selection=left_temporal_channels)
+
+    # Read epochs
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                        picks=picks, baseline=(None, 0), preload=True,
+                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+    epochs.resample(200, npad=0, n_jobs=2)
+    evoked = epochs.average()
+
+    noise_cov = mne.read_cov(fname_cov)
+    noise_cov = mne.cov.regularize(noise_cov, evoked.info,
+                                   mag=0.05, grad=0.05, eeg=0.1, proj=True)
+
+    data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
+    stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01)
+
+    stc_pow = np.sum(stc.data, axis=1)
+    idx = np.argmax(stc_pow)
+    max_stc = stc.data[idx]
+    tmax = stc.times[np.argmax(max_stc)]
+
+    assert_true(0.09 < tmax < 0.1)
+    assert_true(2. < np.max(max_stc) < 3.)
+
+    # Test picking source orientation maximizing output source power
+    stc_max_power = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
+                         pick_ori="max-power")
+
+    assert_true((np.abs(stc_max_power.data) <= stc.data + 1).all())
+
+    # Test if fixed forward operator is detected when picking
+    # max-power orientation
+    assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,
+                  reg=0.01, pick_ori="max-power")
+
+    # Now test single trial using fixed orientation forward solution
+    # so we can compare it to the evoked solution
+    stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01)
+    stcs_ = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01,
+                        return_generator=True)
+    assert_array_equal(stcs[0].data, stcs_.next().data)
+
+    epochs.drop_bad_epochs()
+    assert_true(len(epochs.events) == len(stcs))
+
+    # average the single trial estimates
+    stc_avg = np.zeros_like(stc.data)
+    for this_stc in stcs:
+        stc_avg += this_stc.data
+    stc_avg /= len(stcs)
+
+    # compare it to the solution using evoked with fixed orientation
+    stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01)
+    assert_array_almost_equal(stc_avg, stc_fixed.data)
+
+    # use a label so we have few source vertices and delayed computation is
+    # not used
+    stcs_label = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov,
+                             reg=0.01, label=label)
+
+    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
+
+
+def test_lcmv_raw():
+    """Test LCMV with raw data
+    """
+    tmin, tmax = 0, 20
+    # Setup for reading the raw data
+    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+
+    # Set up pick list: EEG + MEG - bad channels (modify to your needs)
+    left_temporal_channels = mne.read_selection('Left-temporal')
+    picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
+                                eog=True, exclude='bads',
+                                selection=left_temporal_channels)
+
+    noise_cov = mne.read_cov(fname_cov)
+    noise_cov = mne.cov.regularize(noise_cov, raw.info,
+                                   mag=0.05, grad=0.05, eeg=0.1, proj=True)
+
+    start, stop = raw.time_as_index([tmin, tmax])
+
+    # use only the left-temporal MEG channels for LCMV
+    picks = mne.fiff.pick_types(raw.info, meg=True, exclude='bads',
+                                selection=left_temporal_channels)
+
+    data_cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax)
+
+    stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label,
+                   start=start, stop=stop, picks=picks)
+
+    assert_array_almost_equal(np.array([tmin, tmax]),
+                              np.array([stc.times[0], stc.times[-1]]),
+                              decimal=2)
+
+    # make sure we get an stc with vertices only in the lh
+    vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']]
+    assert_true(len(stc.vertno[0]) == len(np.intersect1d(vertno[0],
+                                                         label.vertices)))
+    assert_true(len(stc.vertno[1]) == 0)
+    # TODO: test more things
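
These tests assume the mne sample dataset is available via
mne.datasets.sample; a typical way to run just this module is, e.g.:

    nosetests mne/beamformer/tests/test_lcmv.py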
diff --git a/mne/connectivity/__init__.py b/mne/connectivity/__init__.py
new file mode 100644
index 0000000..1495fb9
--- /dev/null
+++ b/mne/connectivity/__init__.py
@@ -0,0 +1,6 @@
+""" Connectivity Analysis Tools
+"""
+
+from .utils import seed_target_indices
+from .spectral import spectral_connectivity
+from .effective import phase_slope_index
diff --git a/mne/connectivity/effective.py b/mne/connectivity/effective.py
new file mode 100644
index 0000000..12a94bf
--- /dev/null
+++ b/mne/connectivity/effective.py
@@ -0,0 +1,164 @@
+# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+import copy
+import logging
+logger = logging.getLogger('mne')
+
+import numpy as np
+
+from .. import verbose
+from .spectral import spectral_connectivity
+
+
+@verbose
+def phase_slope_index(data, indices=None, sfreq=2 * np.pi,
+                      mode='multitaper', fmin=None, fmax=np.inf,
+                      tmin=None, tmax=None, mt_bandwidth=None,
+                      mt_adaptive=False, mt_low_bias=True,
+                      cwt_frequencies=None, cwt_n_cycles=7, block_size=1000,
+                      n_jobs=1, verbose=None):
+    """
+    Compute the Phase Slope Index (PSI) connectivity measure
+
+    The PSI is an effective connectivity measure, i.e., a measure which can
+    give an indication of the direction of the information flow (causality).
+    For two time series, the PSI between the first and the second time
+    series is computed as follows:
+
+    indices = (np.array([0]), np.array([1]))
+    psi = phase_slope_index(data, indices=indices, ...)
+
+    A positive value means that time series 0 is ahead of time series 1 and
+    a negative value means the opposite.
+
+    The PSI is computed from the coherency (see spectral_connectivity);
+    details can be found in [1].
+
+    References
+    ----------
+    [1] Nolte et al. "Robustly Estimating the Flow Direction of Information in
+    Complex Physical Systems", Physical Review Letters, vol. 100, no. 23,
+    pp. 1-4, Jun. 2008.
+
+    Parameters
+    ----------
+    data : array, shape=(n_epochs, n_signals, n_times)
+           or list/generator of array, shape =(n_signals, n_times)
+           or list/generator of SourceEstimate
+           or Epochs
+        The data from which to compute connectivity. Note that it is also
+        possible to combine multiple signals by providing a list of tuples,
+        e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
+        which corresponds to 3 epochs; each arr_* should be an array with
+        the same number of time points as the corresponding stc_*.
+    indices : tuple of arrays | None
+        Two arrays with indices of connections for which to compute
+        connectivity. If None, all connections are computed.
+    sfreq : float
+        The sampling frequency.
+    mode : str
+        Spectrum estimation mode can be either: 'multitaper', 'fourier', or
+        'cwt_morlet'.
+    fmin : float | tuple of floats
+        The lower frequency of interest. Multiple bands are defined using
+        a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
+        If None the frequency corresponding to an epoch length of 5 cycles
+        is used.
+    fmax : float | tuple of floats
+        The upper frequency of interest. Multiple bands are defined using
+        a tuple, e.g. (13., 30.) for two bands with 13Hz and 30Hz upper freq.
+    tmin : float | None
+        Time to start connectivity estimation.
+    tmax : float | None
+        Time to end connectivity estimation.
+    mt_bandwidth : float | None
+        The bandwidth of the multitaper windowing function in Hz.
+        Only used in 'multitaper' mode.
+    mt_adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD.
+        Only used in 'multitaper' mode.
+    mt_low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth. Only used in 'multitaper' mode.
+    cwt_frequencies : array
+        Array of frequencies of interest. Only used in 'cwt_morlet' mode.
+    cwt_n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency. Only used in
+        'cwt_morlet' mode.
+    block_size : int
+        How many connections to compute at once (higher numbers are faster
+        but require more memory).
+    n_jobs : int
+        How many epochs to process in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    psi : array
+        Computed connectivity measure(s). The shape of each array is
+        (n_signals, n_signals, n_bands) for mode 'multitaper' or 'fourier',
+        or (n_signals, n_signals, n_bands, n_times) for mode 'cwt_morlet',
+        when "indices" is None; when "indices" is specified, the shape is
+        (n_con, n_bands) or (n_con, n_bands, n_times), respectively, with
+        "n_con = len(indices[0])".
+    freqs : array
+        Frequency points at which the connectivity was computed.
+    times : array
+        Time points for which the connectivity was computed.
+    n_epochs : int
+        Number of epochs used for computation.
+    n_tapers : int
+        The number of DPSS tapers used. Only defined in 'multitaper' mode.
+        Otherwise None is returned.
+    """
+    logger.info('Estimating phase slope index (PSI)')
+    # estimate the coherency
+    cohy, freqs_, times, n_epochs, n_tapers = spectral_connectivity(data,
+        method='cohy', indices=indices, sfreq=sfreq, mode=mode, fmin=fmin,
+        fmax=fmax, fskip=0, faverage=False, tmin=tmin, tmax=tmax,
+        mt_bandwidth=mt_bandwidth, mt_adaptive=mt_adaptive,
+        mt_low_bias=mt_low_bias, cwt_frequencies=cwt_frequencies,
+        cwt_n_cycles=cwt_n_cycles, block_size=block_size, n_jobs=n_jobs,
+        verbose=verbose)
+
+    logger.info('Computing PSI from estimated Coherency')
+    # compute PSI in the requested bands
+    if fmin is None:
+        fmin = -np.inf  # set it to -inf, so we can adjust it later
+
+    bands = zip(np.asarray((fmin,)).ravel(), np.asarray((fmax,)).ravel())
+    n_bands = len(bands)
+
+    freq_dim = -2 if mode == 'cwt_morlet' else -1
+
+    # allocate space for output
+    out_shape = list(cohy.shape)
+    out_shape[freq_dim] = n_bands
+    psi = np.zeros(out_shape, dtype=np.float)
+
+    # allocate accumulator
+    acc_shape = copy.copy(out_shape)
+    acc_shape.pop(freq_dim)
+    acc = np.empty(acc_shape, dtype=np.complex128)
+
+    freqs = list()
+    idx_fi = [Ellipsis] * cohy.ndim
+    idx_fj = [Ellipsis] * cohy.ndim
+    for band_idx, band in enumerate(bands):
+        freq_idx = np.where((freqs_ > band[0]) & (freqs_ < band[1]))[0]
+        freqs.append(freqs_[freq_idx])
+
+        acc.fill(0.)
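+        # accumulate Im(conj(C(f)) * C(f + df)) over neighboring frequency
+        # pairs within the band; the sign of the imaginary part indicates
+        # which series leads (Nolte et al. 2008)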
+        for fi, fj in zip(freq_idx, freq_idx[1:]):
+            idx_fi[freq_dim] = fi
+            idx_fj[freq_dim] = fj
+            acc += np.conj(cohy[idx_fi]) * cohy[idx_fj]
+
+        idx_fi[freq_dim] = band_idx
+        psi[idx_fi] = np.imag(acc)
+    logger.info('[PSI Estimation Done]')
+
+    return psi, freqs, times, n_epochs, n_tapers
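
A minimal sketch of the sign convention on toy data (all values are
illustrative):

    import numpy as np
    from mne.connectivity import phase_slope_index

    rng = np.random.RandomState(0)
    n_epochs, n_times, lag = 20, 500, 2
    base = rng.randn(n_epochs, 1, n_times + lag)
    # signal 0 shows the waveform `lag` samples earlier than signal 1
    data = np.concatenate([base[:, :, lag:], base[:, :, :-lag]], axis=1)
    indices = (np.array([0]), np.array([1]))
    psi, freqs, times, n_ep, _ = phase_slope_index(
        data, indices=indices, sfreq=100., fmin=5., fmax=40.,
        mode='fourier')
    # psi[0, 0] should come out positive: series 0 is ahead of series 1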
diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py
new file mode 100644
index 0000000..968e2d8
--- /dev/null
+++ b/mne/connectivity/spectral.py
@@ -0,0 +1,1054 @@
+# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from warnings import warn
+from inspect import getargspec, getmembers
+
+import numpy as np
+from scipy.fftpack import fftfreq
+
+import logging
+logger = logging.getLogger('mne')
+
+
+from .utils import check_indices
+from ..fixes import tril_indices
+from ..parallel import parallel_func
+from .. import Epochs, SourceEstimate
+from ..time_frequency.multitaper import dpss_windows, _mt_spectra,\
+                                        _psd_from_mt, _csd_from_mt,\
+                                        _psd_from_mt_adaptive
+from ..time_frequency.tfr import morlet, cwt
+from .. import verbose
+
+########################################################################
+# Various connectivity estimators
+
+
+class _AbstractConEstBase(object):
+    """Abstract base class for all connectivity estimators, specifies
+       the interface but doesn't do anything"""
+
+    def start_epoch(self):
+        raise RuntimeError('start_epoch method not implemented')
+
+    def accumulate(self, con_idx, csd_xy):
+        raise RuntimeError('accumulate method not implemented')
+
+    def combine(self, other):
+        raise RuntimeError('combine method not implemented')
+
+    def compute_con(self, con_idx, n_epochs):
+        raise RuntimeError('compute_con method not implemented')
+
+
+class _EpochMeanConEstBase(_AbstractConEstBase):
+    """Base class for methods that estimate connectivity as mean over epochs"""
+    def __init__(self, n_cons, n_freqs, n_times):
+        self.n_cons = n_cons
+        self.n_freqs = n_freqs
+        self.n_times = n_times
+
+        if n_times == 0:
+            self.csd_shape = (n_cons, n_freqs)
+        else:
+            self.csd_shape = (n_cons, n_freqs, n_times)
+
+        self.con_scores = None
+
+    def start_epoch(self):
+        """This method is called at the start of each epoch"""
+        pass  # for this type of con. method we don't do anything
+
+    def combine(self, other):
+        """Include con. accumated for some epochs in this estimate"""
+        self._acc += other._acc
+
+
+class _CohEstBase(_EpochMeanConEstBase):
+    """Base Estimator for Coherence, Coherency, Imag. Coherence"""
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_CohEstBase, self).__init__(n_cons, n_freqs, n_times)
+
+        # allocate space for accumulation of CSD
+        self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate CSD for some connections"""
+        self._acc[con_idx] += csd_xy
+
+
+class _CohEst(_CohEstBase):
+    """Coherence Estimator"""
+    name = 'Coherence'
+
+    def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        csd_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = np.abs(csd_mean) / np.sqrt(psd_xx * psd_yy)
+
+
+class _CohyEst(_CohEstBase):
+    """Coherency Estimator"""
+    name = 'Coherency'
+
+    def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape,
+                                       dtype=np.complex128)
+        csd_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = csd_mean / np.sqrt(psd_xx * psd_yy)
+
+
+class _ImCohEst(_CohEstBase):
+    """Imaginary Coherence Estimator"""
+    name = 'Imaginary Coherence'
+
+    def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        csd_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = np.imag(csd_mean) / np.sqrt(psd_xx * psd_yy)
+
+
+class _PLVEst(_EpochMeanConEstBase):
+    """PLV Estimator"""
+    name = 'PLV'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_PLVEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # allocate accumulator
+        self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        self._acc[con_idx] += csd_xy / np.abs(csd_xy)
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        plv = np.abs(self._acc / n_epochs)
+        self.con_scores[con_idx] = plv
+
+
+class _PLIEst(_EpochMeanConEstBase):
+    """PLI Estimator"""
+    name = 'PLI'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_PLIEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # allocate accumulator
+        self._acc = np.zeros(self.csd_shape)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        self._acc[con_idx] += np.sign(np.imag(csd_xy))
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        pli_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = np.abs(pli_mean)
+
+
+class _PLIUnbiasedEst(_PLIEst):
+    """Unbiased PLI Square Estimator"""
+    name = 'Unbiased PLI Square'
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        pli_mean = self._acc[con_idx] / n_epochs
+
+        # See Vinck paper Eq. (30)
+        con = (n_epochs * pli_mean ** 2 - 1) / (n_epochs - 1)
+
+        self.con_scores[con_idx] = con
+
+
+class _WPLIEst(_EpochMeanConEstBase):
+    """WPLI Estimator"""
+    name = 'WPLI'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_WPLIEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # store both imag(csd) and abs(imag(csd))
+        acc_shape = (2,) + self.csd_shape
+        self._acc = np.zeros(acc_shape)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        im_csd = np.imag(csd_xy)
+        self._acc[0, con_idx] += im_csd
+        self._acc[1, con_idx] += np.abs(im_csd)
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+
+        num = np.abs(self._acc[0, con_idx])
+        denom = self._acc[1, con_idx]
+
+        # handle zeros in denominator
+        z_denom = np.where(denom == 0.)
+        denom[z_denom] = 1.
+
+        con = num / denom
+
+        # where we had zeros in denominator, we set con to zero
+        con[z_denom] = 0.
+
+        self.con_scores[con_idx] = con
+
+
+class _WPLIDebiasedEst(_EpochMeanConEstBase):
+    """Debiased WPLI Square Estimator"""
+    name = 'Debiased WPLI Square'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_WPLIDebiasedEst, self).__init__(n_cons, n_freqs, n_times)
+        # store imag(csd), abs(imag(csd)), imag(csd)^2
+        acc_shape = (3,) + self.csd_shape
+        self._acc = np.zeros(acc_shape)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        im_csd = np.imag(csd_xy)
+        self._acc[0, con_idx] += im_csd
+        self._acc[1, con_idx] += np.abs(im_csd)
+        self._acc[2, con_idx] += im_csd ** 2
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+
+        # note: we use the trick from fieldtrip to compute the
+        # estimate over all pairwise epoch combinations
+        sum_im_csd = self._acc[0, con_idx]
+        sum_abs_im_csd = self._acc[1, con_idx]
+        sum_sq_im_csd = self._acc[2, con_idx]
+
+        denom = sum_abs_im_csd ** 2 - sum_sq_im_csd
+
+        # handle zeros in denominator
+        z_denom = np.where(denom == 0.)
+        denom[z_denom] = 1.
+
+        con = (sum_im_csd ** 2 - sum_sq_im_csd) / denom
+
+        # where we had zeros in denominator, we set con to zero
+        con[z_denom] = 0.
+
+        self.con_scores[con_idx] = con
+
+
+class _PPCEst(_EpochMeanConEstBase):
+    """Pairwise Phase Consistency (PPC) Estimator"""
+    name = 'PPC'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_PPCEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # store csd / abs(csd)
+        self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        denom = np.abs(csd_xy)
+        z_denom = np.where(denom == 0.)
+        denom[z_denom] = 1.
+        this_acc = csd_xy / denom
+        this_acc[z_denom] = 0.  # handle division by zero
+
+        self._acc[con_idx] += this_acc
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+
+        # note: we use the trick from fieldtrip to compute the
+        # estimate over all pairwise epoch combinations
+        con = ((self._acc[con_idx] * np.conj(self._acc[con_idx]) - n_epochs)
+               / (n_epochs * (n_epochs - 1.)))
+
+        self.con_scores[con_idx] = np.real(con)
+
+
+###############################################################################
+def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
+                                 mode, window_fun, eigvals, wavelets, freq_mask,
+                                 mt_adaptive, idx_map, block_size, psd,
+                                 accumulate_psd, con_method_types, con_methods,
+                                 n_signals, n_times, accumulate_inplace=True):
+    """Connectivity estimation for one epoch see spectral_connectivity"""
+
+    n_cons = len(idx_map[0])
+
+    if wavelets is not None:
+        n_times_spectrum = n_times
+        n_freqs = len(wavelets)
+    else:
+        n_times_spectrum = 0
+        n_freqs = np.sum(freq_mask)
+
+    if not accumulate_inplace:
+        # instantiate methods only for this epoch (used in parallel mode)
+        con_methods = [mtype(n_cons, n_freqs, n_times_spectrum)
+                       for mtype in con_method_types]
+
+    if len(sig_idx) == n_signals:
+        # we use all signals: use a slice for faster indexing
+        sig_idx = slice(None, None)
+
+    # compute tapered spectra
+    if mode in ['multitaper', 'fourier']:
+        x_mt = list()
+        this_psd = list()
+        sig_pos_start = 0
+        for this_data in data:
+            this_n_sig = this_data.shape[0]
+            sig_pos_end = sig_pos_start + this_n_sig
+            if not isinstance(sig_idx, slice):
+                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start)
+                                & (sig_idx < sig_pos_end)] - sig_pos_start
+            else:
+                this_sig_idx = sig_idx
+            if isinstance(this_data, SourceEstimate):
+                this_x_mt = this_data.transform_data(_mt_spectra,
+                                        fun_args=(window_fun, sfreq),
+                                        idx=this_sig_idx, tmin_idx=tmin_idx,
+                                        tmax_idx=tmax_idx)
+            else:
+                this_x_mt, _ = _mt_spectra(this_data[this_sig_idx,
+                                                     tmin_idx:tmax_idx],
+                                           window_fun, sfreq)
+
+            if mt_adaptive:
+                # compute PSD and adaptive weights
+                _this_psd, weights = _psd_from_mt_adaptive(this_x_mt, eigvals,
+                                        freq_mask, return_weights=True)
+
+                # only keep freqs of interest
+                this_x_mt = this_x_mt[:, :, freq_mask]
+            else:
+                # do not use adaptive weights
+                this_x_mt = this_x_mt[:, :, freq_mask]
+                if mode == 'multitaper':
+                    weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
+                else:
+                    # hack so we can sum over axis=-2
+                    weights = np.array([1.])[:, None, None]
+
+                if accumulate_psd:
+                    _this_psd = _psd_from_mt(this_x_mt, weights)
+
+            x_mt.append(this_x_mt)
+            if accumulate_psd:
+                this_psd.append(_this_psd)
+
+            # advance position (per data item, as in the cwt_morlet branch)
+            sig_pos_start = sig_pos_end
+
+        x_mt = np.concatenate(x_mt, axis=0)
+        if accumulate_psd:
+            this_psd = np.concatenate(this_psd, axis=0)
+
+    elif mode == 'cwt_morlet':
+        # estimate spectra using CWT
+        x_cwt = list()
+        this_psd = list()
+        sig_pos_start = 0
+        for this_data in data:
+            this_n_sig = this_data.shape[0]
+            sig_pos_end = sig_pos_start + this_n_sig
+            if not isinstance(sig_idx, slice):
+                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start)
+                    & (sig_idx < sig_pos_end)] - sig_pos_start
+            else:
+                this_sig_idx = sig_idx
+            if isinstance(this_data, SourceEstimate):
+                this_x_cwt = this_data.transform_data(cwt,
+                    fun_args=(wavelets,), idx=this_sig_idx, tmin_idx=tmin_idx,
+                    tmax_idx=tmax_idx, use_fft=True, mode='same')
+            else:
+                this_x_cwt = cwt(this_data[this_sig_idx, tmin_idx:tmax_idx],
+                                 wavelets, use_fft=True, mode='same')
+
+            if accumulate_psd:
+                this_psd.append(np.abs(this_x_cwt) ** 2)
+
+            x_cwt.append(this_x_cwt)
+
+            # advance position
+            sig_pos_start = sig_pos_end
+
+        x_cwt = np.concatenate(x_cwt, axis=0)
+        if accumulate_psd:
+            this_psd = np.concatenate(this_psd, axis=0)
+    else:
+        raise RuntimeError('invalid mode')
+
+    # accumulate or return psd
+    if accumulate_psd:
+        if accumulate_inplace:
+            psd += this_psd
+        else:
+            psd = this_psd
+    else:
+        psd = None
+
+    # tell the methods that a new epoch starts
+    for method in con_methods:
+        method.start_epoch()
+
+    # accumulate connectivity scores
+    if mode in ['multitaper', 'fourier']:
+        for i in xrange(0, n_cons, block_size):
+            con_idx = slice(i, i + block_size)
+            if mt_adaptive:
+                csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
+                                   x_mt[idx_map[1][con_idx]],
+                                   weights[idx_map[0][con_idx]],
+                                   weights[idx_map[1][con_idx]])
+            else:
+                csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
+                                   x_mt[idx_map[1][con_idx]],
+                                   weights, weights)
+
+            for method in con_methods:
+                method.accumulate(con_idx, csd)
+    else:
+        # cwt_morlet mode
+        for i in xrange(0, n_cons, block_size):
+            con_idx = slice(i, i + block_size)
+
+            csd = x_cwt[idx_map[0][con_idx]]\
+                  * np.conjugate(x_cwt[idx_map[1][con_idx]])
+            for method in con_methods:
+                method.accumulate(con_idx, csd)
+
+    return con_methods, psd
+
+
+def _get_n_epochs(epochs, n):
+    """Generator that returns lists with at most n epochs"""
+    epochs_out = []
+    for e in epochs:
+        if not isinstance(e, (list, tuple)):
+            e = (e,)
+        epochs_out.append(e)
+        if len(epochs_out) >= n:
+            yield epochs_out
+            epochs_out = []
+    yield epochs_out
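+# e.g., list(_get_n_epochs(range(5), 2)) gives
+# [[(0,), (1,)], [(2,), (3,)], [(4,)]]; note a trailing empty list is
+# yielded when n divides the number of epochs evenly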
+
+
+def _check_method(method):
+    """Test if a method implements the required interface"""
+    interface_members = [m[0] for m in getmembers(_AbstractConEstBase)
+                         if not m[0].startswith('_')]
+    method_members = [m[0] for m in getmembers(method)
+                      if not m[0].startswith('_')]
+
+    for member in interface_members:
+        if member not in method_members:
+            return False, member
+    return True, None
+
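+# A hedged sketch of a custom estimator satisfying this interface (the class
+# name is illustrative, not part of the module):
+#
+#     class _MeanCSDEst(_EpochMeanConEstBase):
+#         """Toy estimator: mean cross-spectral density over epochs"""
+#         name = 'Mean CSD'
+#
+#         def __init__(self, n_cons, n_freqs, n_times):
+#             super(_MeanCSDEst, self).__init__(n_cons, n_freqs, n_times)
+#             self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
+#
+#         def accumulate(self, con_idx, csd_xy):
+#             self._acc[con_idx] += csd_xy
+#
+#         def compute_con(self, con_idx, n_epochs):
+#             if self.con_scores is None:
+#                 self.con_scores = np.zeros(self.csd_shape,
+#                                            dtype=np.complex128)
+#             self.con_scores[con_idx] = self._acc[con_idx] / n_epochs
+#
+# The class itself (rather than a string name) would then be passed as the
+# "method" argument of spectral_connectivity.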
+
+def _get_and_verify_data_sizes(data, n_signals=None, n_times=None, times=None):
+    """Helper function to get and/or verify the data sizes and time scales"""
+    if not isinstance(data, (list, tuple)):
+        raise ValueError('data has to be a list or tuple')
+    n_signals_tot = 0
+    for this_data in data:
+        this_n_signals, this_n_times = this_data.shape
+        if n_times is not None:
+            if this_n_times != n_times:
+                raise ValueError('all input time series must have the same '
+                                 'number of time points')
+        else:
+            n_times = this_n_times
+        n_signals_tot += this_n_signals
+
+        if hasattr(this_data, 'times'):
+            this_times = this_data.times
+            if times is not None:
+                if np.any(times != this_times):
+                    warn('time scales of input time series do not match')
+            else:
+                times = this_times
+
+    if n_signals is not None:
+        if n_signals != n_signals_tot:
+            raise ValueError('the number of time series has to be the same in '
+                             'each epoch')
+    n_signals = n_signals_tot
+
+    return n_signals, n_times, times
+
+
+# map names to estimator types
+_CON_METHOD_MAP = {'coh': _CohEst, 'cohy': _CohyEst, 'imcoh': _ImCohEst,
+                   'plv': _PLVEst, 'ppc': _PPCEst, 'pli': _PLIEst,
+                   'pli2_unbiased': _PLIUnbiasedEst, 'wpli': _WPLIEst,
+                   'wpli2_debiased': _WPLIDebiasedEst}
+
+
+@verbose
+def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
+                          mode='multitaper', fmin=None, fmax=np.inf,
+                          fskip=0, faverage=False, tmin=None, tmax=None,
+                          mt_bandwidth=None, mt_adaptive=False,
+                          mt_low_bias=True, cwt_frequencies=None,
+                          cwt_n_cycles=7, block_size=1000, n_jobs=1,
+                          verbose=None):
+    """Compute various frequency-domain and time-frequency domain connectivity
+    measures.
+
+    The connectivity method(s) are specified using the "method" parameter.
+    All methods are based on estimates of the cross- and power spectral
+    densities (CSD/PSD) Sxy and Sxx, Syy.
+
+    The spectral densities can be estimated using a multitaper method with
+    digital prolate spheroidal sequence (DPSS) windows, a discrete Fourier
+    transform with Hanning windows, or a continuous wavelet transform using
+    Morlet wavelets. The spectral estimation mode is specified using the
+    "mode" parameter.
+
+    By default, the connectivity between all signals is computed (only
+    connections corresponding to the lower-triangular part of the
+    connectivity matrix). If one is only interested in the connectivity
+    between some signals, the "indices" parameter can be used. For example,
+    to compute the connectivity between the signal with index 0 and signals
+    "2, 3, 4" (a total of 3 connections) one can use the following:
+
+    indices = (np.array([0, 0, 0]),   # row indices
+               np.array([2, 3, 4]))   # col indices
+
+    con_flat = spectral_connectivity(data, method='coh', indices=indices, ...)
+
+    In this case con_flat.shape = (3, n_freqs). The connectivity scores are
+    in the same order as the defined indices.
+
+    Supported Connectivity Measures:
+
+    The following methods are supported (note: E[] denotes the average over
+    epochs). Multiple measures can be computed at once by passing a
+    list/tuple, e.g., "['coh', 'pli']" to compute coherence and PLI.
+
+    'coh' : Coherence given by
+
+                 | E[Sxy] |
+        C = ---------------------
+            sqrt(E[Sxx] * E[Syy])
+
+    'cohy' : Coherency given by
+
+                   E[Sxy]
+        C = ---------------------
+            sqrt(E[Sxx] * E[Syy])
+
+    'imcoh' : Imaginary coherence [1] given by
+
+                  Im(E[Sxy])
+        C = ----------------------
+            sqrt(E[Sxx] * E[Syy])
+
+    'plv' : Phase-Locking Value (PLV) [2] given by
+
+        PLV = |E[Sxy/|Sxy|]|
+
+    'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator of squared
+            PLV [3].
+
+    'pli' : Phase Lag Index (PLI) [4] given by
+
+        PLI = |E[sign(Im(Sxy))]|
+
+    'pli2_unbiased' : Unbiased estimator of squared PLI [5].
+
+    'wpli' : Weighted Phase Lag Index (WPLI) [5] given by
+
+                  |E[Im(Sxy)]|
+        WPLI = ------------------
+                  E[|Im(Sxy)|]
+
+    'wpli2_debiased' : Debiased estimator of squared WPLI [5].
+
+    References
+    ----------
+
+    [1] Nolte et al. "Identifying true brain interaction from EEG data using
+        the imaginary part of coherency" Clinical neurophysiology, vol. 115,
+        no. 10, pp. 2292-2307, Oct. 2004.
+
+    [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human brain
+        mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999.
+
+    [3] Vinck et al. "The pairwise phase consistency: a bias-free measure of
+        rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1,
+        pp. 112-122, May 2010.
+
+    [4] Stam et al. "Phase lag index: assessment of functional connectivity
+        from multi channel EEG and MEG with diminished bias from common
+        sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193,
+        Nov. 2007.
+
+    [5] Vinck et al. "An improved index of phase-synchronization for electro-
+        physiological data in the presence of volume-conduction, noise and
+        sample-size bias" NeuroImage, vol. 55, no. 4, pp. 1548-1565, Apr. 2011.
+
+    Parameters
+    ----------
+    data : array, shape=(n_epochs, n_signals, n_times)
+           or list/generator of array, shape =(n_signals, n_times)
+           or list/generator of SourceEstimate
+           or Epochs
+        The data from which to compute connectivity. Note that it is also
+        possible to combine multiple signals by providing a list of tuples,
+        e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
+        which corresponds to 3 epochs; each arr_* should be an array with
+        the same number of time points as the corresponding stc_*.
+    method : string | list of string
+        Connectivity measure(s) to compute.
+    indices : tuple of arrays | None
+        Two arrays with indices of connections for which to compute
+        connectivity. If None, all connections are computed.
+    sfreq : float
+        The sampling frequency.
+    mode : str
+        Spectrum estimation mode can be either: 'multitaper', 'fourier', or
+        'cwt_morlet'.
+    fmin : float | tuple of floats
+        The lower frequency of interest. Multiple bands are defined using
+        a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
+        If None the frequency corresponding to an epoch length of 5 cycles
+        is used.
+    fmax : float | tuple of floats
+        The upper frequency of interest. Multiple bands are defined using
+        a tuple, e.g. (13., 30.) for two bands with 13Hz and 30Hz upper freq.
+    fskip : int
+        Omit every "(fskip + 1)-th" frequency bin to decimate in frequency
+        domain.
+    faverage : boolean
+        Average connectivity scores for each frequency band. If True,
+        the output freqs will be a list with arrays of the frequencies
+        that were averaged.
+    tmin : float | None
+        Time to start connectivity estimation.
+    tmax : float | None
+        Time to end connectivity estimation.
+    mt_bandwidth : float | None
+        The bandwidth of the multitaper windowing function in Hz.
+        Only used in 'multitaper' mode.
+    mt_adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD.
+        Only used in 'multitaper' mode.
+    mt_low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth. Only used in 'multitaper' mode.
+    cwt_frequencies : array
+        Array of frequencies of interest. Only used in 'cwt_morlet' mode.
+    cwt_n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency. Only used in
+        'cwt_morlet' mode.
+    block_size : int
+        How many connections to compute at once (higher numbers are faster
+        but require more memory).
+    n_jobs : int
+        How many epochs to process in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    con : array | list of arrays
+        Computed connectivity measure(s). The shape of each array is
+        (n_signals, n_signals, n_frequencies) for mode 'multitaper' or
+        'fourier', or (n_signals, n_signals, n_frequencies, n_times) for
+        mode 'cwt_morlet', when "indices" is None; when "indices" is
+        specified, the shape is (n_con, n_frequencies) or
+        (n_con, n_frequencies, n_times), respectively, with
+        "n_con = len(indices[0])".
+    freqs : array
+        Frequency points at which the connectivity was computed.
+    times : array
+        Time points for which the connectivity was computed.
+    n_epochs : int
+        Number of epochs used for computation.
+    n_tapers : int
+        The number of DPSS tapers used. Only defined in 'multitaper' mode.
+        Otherwise None is returned.
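+
+    For example, a hedged sketch computing coherence and PLI averaged over
+    the alpha band from an Epochs object (parameter values illustrative):
+
+    con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
+        epochs, method=['coh', 'pli'], fmin=8., fmax=13., faverage=True)
+
+    Here con is a list of two arrays, each of shape
+    (n_signals, n_signals, 1), with scores in the lower-triangular part.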
+    """
+    if n_jobs > 1:
+        parallel, my_epoch_spectral_connectivity, _ = \
+                parallel_func(_epoch_spectral_connectivity, n_jobs,
+                              verbose=verbose)
+
+    # format fmin and fmax and check inputs
+    if fmin is None:
+        fmin = -np.inf  # set it to -inf, so we can adjust it later
+
+    fmin = np.asarray((fmin,)).ravel()
+    fmax = np.asarray((fmax,)).ravel()
+    if len(fmin) != len(fmax):
+        raise ValueError('fmin and fmax must have the same length')
+    if np.any(fmin > fmax):
+        raise ValueError('fmax must be larger than fmin')
+
+    n_bands = len(fmin)
+
+    # assign names to connectivity methods
+    if not isinstance(method, (list, tuple)):
+        method = [method]  # make it a list so we can iterate over it
+
+    n_methods = len(method)
+    con_method_types = []
+    for m in method:
+        if m in _CON_METHOD_MAP:
+            # do not rebind the "method" argument while iterating over it
+            con_method_types.append(_CON_METHOD_MAP[m])
+        elif isinstance(m, basestring):
+            raise ValueError('%s is not a valid connectivity method' % m)
+        else:
+            # add custom method
+            method_valid, msg = _check_method(m)
+            if not method_valid:
+                raise ValueError('The supplied connectivity method does '
+                                 'not have the method %s' % msg)
+            con_method_types.append(m)
+
+    # determine how many arguments the compute_con_function needs
+    n_comp_args = [len(getargspec(mtype.compute_con).args)
+                   for mtype in con_method_types]
+
+    # we only support 3 or 5 arguments
+    if any([n not in (3, 5) for n in n_comp_args]):
+        raise ValueError('The compute_con function needs to have either '
+                         '3 or 5 arguments')
+
+    # if none of the comp_con functions needs the PSD, we don't estimate it
+    accumulate_psd = any([n == 5 for n in n_comp_args])
+
+    if isinstance(data, Epochs):
+        times_in = data.times  # input times for Epochs input type
+        sfreq = data.info['sfreq']
+
+    # loop over data; it could be a generator that returns
+    # (n_signals x n_times) arrays or SourceEstimates
+    epoch_idx = 0
+    logger.info('Connectivity computation...')
+    for epoch_block in _get_n_epochs(data, n_jobs):
+
+        if epoch_idx == 0:
+            # initialize everything
+            first_epoch = epoch_block[0]
+
+            # get the data size and time scale
+            n_signals, n_times_in, times_in =\
+                    _get_and_verify_data_sizes(first_epoch)
+
+            if times_in is None:
+                # we are not using Epochs or SourceEstimate(s) as input
+                times_in = np.linspace(0.0, n_times_in / sfreq, n_times_in,
+                                       endpoint=False)
+
+            n_times_in = len(times_in)
+            tmin_idx = 0
+            tmax_idx = n_times_in
+            tmin_true = times_in[0]
+            tmax_true = times_in[-1]
+            if tmin is not None:
+                tmin_idx = np.argmin(np.abs(times_in - tmin))
+                tmin_true = times_in[tmin_idx]
+            if tmax is not None:
+                tmax_idx = np.argmin(np.abs(times_in - tmax)) + 1
+                tmax_true = times_in[tmax_idx - 1]  # time of last point used
+
+            times = times_in[tmin_idx:tmax_idx]
+            n_times = len(times)
+
+            if indices is None:
+                # only compute r for lower-triangular region
+                indices_use = tril_indices(n_signals, -1)
+            else:
+                indices_use = check_indices(indices)
+
+            # number of connectivities to compute
+            n_cons = len(indices_use[0])
+
+            logger.info('    computing connectivity for %d connections'
+                        % n_cons)
+
+            logger.info('    using t=%0.3fs..%0.3fs for estimation (%d points)'
+                        % (tmin_true, tmax_true, n_times))
+
+            # get frequencies of interest for the different modes
+            if mode in ['multitaper', 'fourier']:
+                # fmin fmax etc is only supported for these modes
+                # decide which frequencies to keep
+                freqs_all = fftfreq(n_times, 1. / sfreq)
+                freqs_all = freqs_all[freqs_all >= 0]
+            elif mode == 'cwt_morlet':
+                # cwt_morlet mode
+                if cwt_frequencies is None:
+                    raise ValueError('define frequencies of interest using '
+                                     'cwt_frequencies')
+                else:
+                    cwt_frequencies = cwt_frequencies.astype(np.float)
+                if any(cwt_frequencies > (sfreq / 2.)):
+                    raise ValueError('entries in cwt_frequencies cannot be '
+                                     'larger than Nyquist (sfreq / 2)')
+                freqs_all = cwt_frequencies
+            else:
+                raise ValueError('mode has an invalid value')
+
+            # check that fmin corresponds to at least 5 cycles
+            five_cycle_freq = 5. * sfreq / float(n_times)
+
+            if len(fmin) == 1 and fmin[0] == -np.inf:
+                # we use the 5 cycle freq. as default
+                fmin = [five_cycle_freq]
+            else:
+                if any(fmin < five_cycle_freq):
+                    warn('fmin corresponds to less than 5 cycles, '
+                         'spectrum estimate will be unreliable')
+
+            # create a frequency mask for all bands
+            freq_mask = np.zeros(len(freqs_all), dtype=np.bool)
+            for f_lower, f_upper in zip(fmin, fmax):
+                freq_mask |= ((freqs_all >= f_lower) & (freqs_all <= f_upper))
+
+            # possibly skip frequency points
+            for pos in xrange(fskip):
+                freq_mask[pos + 1::fskip + 1] = False
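+            # e.g. fskip=1 clears freq_mask[1::2] in a single pass (pos=0),
+            # keeping every other frequency point; fskip=2 keeps every third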
+
+            # the frequency points where we compute connectivity
+            freqs = freqs_all[freq_mask]
+            n_freqs = len(freqs)
+
+            # get the freq. indices and points for each band
+            freq_idx_bands = [np.where((freqs >= fl) & (freqs <= fu))[0]
+                              for fl, fu in zip(fmin, fmax)]
+            freqs_bands = [freqs[freq_idx] for freq_idx in freq_idx_bands]
+
+            # make sure we don't have empty bands
+            for i, n_f_band in enumerate([len(f) for f in freqs_bands]):
+                if n_f_band == 0:
+                    raise ValueError('There are no frequency points between '
+                        '%0.1fHz and %0.1fHz. Change the band specification '
+                        '(fmin, fmax) or the frequency resolution.'
+                        % (fmin[i], fmax[i]))
+
+            if n_bands == 1:
+                logger.info('    frequencies: %0.1fHz..%0.1fHz (%d points)'
+                            % (freqs_bands[0][0], freqs_bands[0][-1],
+                               n_freqs))
+            else:
+                logger.info('    computing connectivity for the bands:')
+                for i, bfreqs in enumerate(freqs_bands):
+                    logger.info('     band %d: %0.1fHz..%0.1fHz '
+                                '(%d points)' % (i + 1, bfreqs[0],
+                                bfreqs[-1], len(bfreqs)))
+
+            if faverage:
+                logger.info('    connectivity scores will be averaged for '
+                            'each band')
+
+            # get the window function, wavelets, etc for different modes
+            if mode == 'multitaper':
+                # compute standardized half-bandwidth
+                if mt_bandwidth is not None:
+                    half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
+                else:
+                    half_nbw = 4
+
+                # compute dpss windows
+                n_tapers_max = int(2 * half_nbw)
+                window_fun, eigvals = dpss_windows(n_times, half_nbw,
+                                                   n_tapers_max,
+                                                   low_bias=mt_low_bias)
+                n_tapers = len(eigvals)
+                logger.info('    using multitaper spectrum estimation with '
+                            '%d DPSS windows' % n_tapers)
+
+                if mt_adaptive and len(eigvals) < 3:
+                    warn('Not adaptively combining the spectral estimators '
+                         'due to a low number of tapers.')
+                    mt_adaptive = False
+
+                n_times_spectrum = 0  # this method only uses the freq. domain
+                wavelets = None
+            elif mode == 'fourier':
+                logger.info('    using FFT with a Hanning window to estimate '
+                            'spectra')
+
+                window_fun = np.hanning(n_times)
+                mt_adaptive = False
+                eigvals = 1.
+                n_tapers = None
+                n_times_spectrum = 0  # this method only uses the freq. domain
+                wavelets = None
+            elif mode == 'cwt_morlet':
+                logger.info('    using CWT with Morlet wavelets to estimate '
+                            'spectra')
+
+                # reformat cwt_n_cycles if we have removed some frequencies
+                # using fmin, fmax, fskip
+                cwt_n_cycles = np.asarray((cwt_n_cycles,)).ravel()
+                if len(cwt_n_cycles) > 1:
+                    if len(cwt_n_cycles) != len(cwt_frequencies):
+                        raise ValueError('cwt_n_cycles must be float or an '
+                            'array with the same size as cwt_frequencies')
+                    cwt_n_cycles = cwt_n_cycles[freq_mask]
+
+                # get the Morlet wavelets
+                wavelets = morlet(sfreq, freqs,
+                                  n_cycles=cwt_n_cycles, zero_mean=True)
+                eigvals = None
+                n_tapers = None
+                window_fun = None
+                n_times_spectrum = n_times
+            else:
+                raise ValueError('mode has an invalid value')
+
+            # unique signals for which we actually need to compute PSD etc.
+            sig_idx = np.unique(np.r_[indices_use[0], indices_use[1]])
+
+            # map indices to unique indices
+            idx_map = [np.searchsorted(sig_idx, ind) for ind in indices_use]
+
+            # allocate space to accumulate PSD
+            if accumulate_psd:
+                if n_times_spectrum == 0:
+                    psd_shape = (len(sig_idx), n_freqs)
+                else:
+                    psd_shape = (len(sig_idx), n_freqs, n_times_spectrum)
+                psd = np.zeros(psd_shape)
+            else:
+                psd = None
+
+            # create instances of the connectivity estimators
+            con_methods = [mtype(n_cons, n_freqs, n_times_spectrum)
+                           for mtype in con_method_types]
+
+            sep = ', '
+            metrics_str = sep.join([method.name for method in con_methods])
+            logger.info('    the following metrics will be computed: %s'
+                        % metrics_str)
+
+        # check dimensions and time scale
+        for this_epoch in epoch_block:
+            _get_and_verify_data_sizes(this_epoch, n_signals, n_times_in,
+                                       times_in)
+
+        if n_jobs == 1:
+            # no parallel processing
+            for this_epoch in epoch_block:
+                logger.info('    computing connectivity for epoch %d'
+                            % (epoch_idx + 1))
+
+                # con methods and psd are updated inplace
+                _epoch_spectral_connectivity(this_epoch, sig_idx, tmin_idx,
+                    tmax_idx, sfreq, mode, window_fun, eigvals, wavelets,
+                    freq_mask, mt_adaptive, idx_map, block_size, psd,
+                    accumulate_psd, con_method_types, con_methods,
+                    n_signals, n_times, accumulate_inplace=True)
+                epoch_idx += 1
+        else:
+            # process epochs in parallel
+            logger.info('    computing connectivity for epochs %d..%d'
+                        % (epoch_idx + 1, epoch_idx + len(epoch_block)))
+
+            out = parallel(my_epoch_spectral_connectivity(this_epoch, sig_idx,
+                    tmin_idx, tmax_idx, sfreq, mode, window_fun, eigvals,
+                    wavelets, freq_mask, mt_adaptive, idx_map, block_size, psd,
+                    accumulate_psd, con_method_types, None, n_signals, n_times,
+                    accumulate_inplace=False) for this_epoch in epoch_block)
+
+            # do the accumulation
+            for this_out in out:
+                for method, parallel_method in zip(con_methods, this_out[0]):
+                    method.combine(parallel_method)
+                if accumulate_psd:
+                    psd += this_out[1]
+
+            epoch_idx += len(epoch_block)
+
+    # normalize
+    n_epochs = epoch_idx
+    if accumulate_psd:
+        psd /= n_epochs
+
+    # compute final connectivity scores
+    con = []
+    for method, n_args in zip(con_methods, n_comp_args):
+        if n_args == 3:
+            # compute all scores at once
+            method.compute_con(slice(0, n_cons), n_epochs)
+        else:
+            # compute scores block-wise to save memory
+            for i in xrange(0, n_cons, block_size):
+                con_idx = slice(i, i + block_size)
+                psd_xx = psd[idx_map[0][con_idx]]
+                psd_yy = psd[idx_map[1][con_idx]]
+                method.compute_con(con_idx, n_epochs, psd_xx, psd_yy)
+
+        # get the connectivity scores
+        this_con = method.con_scores
+
+        if this_con.shape[0] != n_cons:
+            raise ValueError('First dimension of connectivity scores must be '
+                             'the same as the number of connections')
+        if faverage:
+            if this_con.shape[1] != n_freqs:
+                raise ValueError('2nd dimension of connectivity scores must '
+                                 'be the same as the number of frequencies')
+            con_shape = (n_cons, n_bands) + this_con.shape[2:]
+            this_con_bands = np.empty(con_shape, dtype=this_con.dtype)
+            for band_idx in xrange(n_bands):
+                this_con_bands[:, band_idx] =\
+                    np.mean(this_con[:, freq_idx_bands[band_idx]], axis=1)
+            this_con = this_con_bands
+
+        con.append(this_con)
+
+    if indices is None:
+        # return all-to-all connectivity matrices
+        logger.info('    assembling connectivity matrix')
+        con_flat = con
+        con = []
+        for this_con_flat in con_flat:
+            this_con = np.zeros((n_signals, n_signals)
+                                + this_con_flat.shape[1:],
+                                dtype=this_con_flat.dtype)
+            this_con[indices_use] = this_con_flat
+            con.append(this_con)
+
+    logger.info('[Connectivity computation done]')
+
+    if n_methods == 1:
+        # for a single method return connectivity directly
+        con = con[0]
+
+    if faverage:
+        # for each band we return the frequencies that were averaged
+        freqs = freqs_bands
+
+    return con, freqs, times, n_epochs, n_tapers
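+
+
+# Usage sketch (the random data, the 50 Hz sampling rate, and the
+# 'coh'/'multitaper' settings are assumptions for illustration):
+#
+#     import numpy as np
+#     data = np.random.randn(10, 3, 500)  # (n_epochs, n_signals, n_times)
+#     con, freqs, times, n_ep, n_tap = spectral_connectivity(
+#         data, method='coh', mode='multitaper', sfreq=50., fmin=5., fmax=15.)
+#     # with indices=None only the lower triangle of the
+#     # (n_signals, n_signals, n_freqs) array con is filled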
diff --git a/mne/connectivity/tests/__init__.py b/mne/connectivity/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/connectivity/tests/test_effective.py b/mne/connectivity/tests/test_effective.py
new file mode 100644
index 0000000..6d2731d
--- /dev/null
+++ b/mne/connectivity/tests/test_effective.py
@@ -0,0 +1,43 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne.connectivity import phase_slope_index
+
+
+sfreq = 50.
+n_signals = 3
+n_epochs = 10
+n_times = 500
+
+rng = np.random.RandomState(42)
+data = rng.randn(n_epochs, n_signals, n_times)
+
+# simulate time shifts
+for i in range(n_epochs):
+    data[i, 1, 10:] = data[i, 0, :-10]  # signal 0 is ahead
+    data[i, 2, :-10] = data[i, 0, 10:]  # signal 2 is ahead
+
+
+def test_psi():
+    """Test Phase Slope Index (PSI) estimation"""
+
+    psi, freqs, times, n_epochs, n_tapers = phase_slope_index(data,
+        mode='fourier', sfreq=sfreq)
+    assert_true(psi[1, 0, 0] < 0)
+    assert_true(psi[2, 0, 0] > 0)
+
+    indices = (np.array([0]), np.array([1]))
+    psi_2, freqs, times, n_epochs, n_tapers = phase_slope_index(data,
+        mode='fourier', sfreq=sfreq, indices=indices)
+
+    # the measure is symmetric (sign flip)
+    assert_array_almost_equal(psi_2[0, 0], -psi[1, 0, 0])
+
+    cwt_freqs = np.arange(5., 20, 0.5)
+    psi_cwt, freqs, times, n_epochs, n_tapers = phase_slope_index(data,
+        mode='cwt_morlet', sfreq=sfreq, cwt_frequencies=cwt_freqs,
+        indices=indices)
+
+    assert_true(np.all(psi_cwt > 0))
+    assert_true(psi_cwt.shape[-1] == n_times)
diff --git a/mne/connectivity/tests/test_spectral.py b/mne/connectivity/tests/test_spectral.py
new file mode 100644
index 0000000..de6162a
--- /dev/null
+++ b/mne/connectivity/tests/test_spectral.py
@@ -0,0 +1,195 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true, assert_raises
+
+from mne.fixes import tril_indices
+from mne.connectivity import spectral_connectivity
+from mne.connectivity.spectral import _CohEst
+
+from mne import SourceEstimate
+from mne.filter import band_pass_filter
+
+sfreq = 50.
+n_signals = 3
+n_epochs = 10
+n_times = 500
+
+tmin = 0.
+tmax = (n_times - 1) / sfreq
+# Use a case known to have no spurious correlations (it would be bad if
+# nosetests could randomly fail):
+np.random.seed(0)
+data = np.random.randn(n_epochs, n_signals, n_times)
+times_data = np.linspace(tmin, tmax, n_times)
+
+# simulate connectivity from 5Hz..15Hz
+fstart, fend = 5.0, 15.0
+for i in xrange(n_epochs):
+    data[i, 1, :] = band_pass_filter(data[i, 0, :], sfreq, fstart, fend)
+    # add some noise, so the spectrum is not exactly zero
+    data[i, 1, :] += 1e-2 * np.random.randn(n_times)
+
+
+def _stc_gen(data, sfreq, tmin, combo=False):
+    """Simulate a SourceEstimate generator"""
+    vertices = [np.arange(data.shape[1]), np.empty(0)]
+    for d in data:
+        if not combo:
+            stc = SourceEstimate(data=d, vertices=vertices,
+                                 tmin=tmin, tstep=1 / float(sfreq))
+            yield stc
+        else:
+            # simulate a combination of array and source estimate
+            arr = d[0]
+            stc = SourceEstimate(data=d[1:], vertices=vertices,
+                                 tmin=tmin, tstep=1 / float(sfreq))
+            yield (arr, stc)
+
+
+def test_spectral_connectivity():
+    """Test frequency-domain connectivity methods"""
+
+    # First we test some invalid parameters:
+    assert_raises(ValueError, spectral_connectivity, data, method='notamethod')
+    assert_raises(ValueError, spectral_connectivity, data,
+                  mode='notamode')
+
+    # test invalid fmin fmax settings
+    assert_raises(ValueError, spectral_connectivity, data, fmin=10,
+                  fmax=10 + 0.5 * (sfreq / float(n_times)))
+    assert_raises(ValueError, spectral_connectivity, data, fmin=10, fmax=5)
+    assert_raises(ValueError, spectral_connectivity, data, fmin=(0, 11),
+                  fmax=(5, 10))
+    assert_raises(ValueError, spectral_connectivity, data, fmin=(11,),
+                  fmax=(12, 15))
+
+    methods = ['coh', 'imcoh', 'cohy', 'plv', 'ppc', 'pli', 'pli2_unbiased',
+               'wpli', 'wpli2_debiased', 'coh']
+
+    modes = ['multitaper', 'fourier', 'cwt_morlet']
+
+    # define some frequencies for cwt
+    cwt_frequencies = np.arange(3, 24.5, 1)
+
+    for mode in modes:
+        for method in methods:
+            if method == 'coh' and mode == 'multitaper':
+                # only check adaptive estimation for coh to reduce test time
+                check_adaptive = [False, True]
+            else:
+                check_adaptive = [False]
+
+            if method == 'coh' and mode == 'cwt_morlet':
+                # so we also test using an array for num cycles
+                cwt_n_cycles = 7. * np.ones(len(cwt_frequencies))
+            else:
+                cwt_n_cycles = 7.
+
+            for adaptive in check_adaptive:
+
+                if adaptive:
+                    mt_bandwidth = 1.
+                else:
+                    mt_bandwidth = None
+
+                con, freqs, times, n, _ = spectral_connectivity(data,
+                        method=method, mode=mode,
+                        indices=None, sfreq=sfreq, mt_adaptive=adaptive,
+                        mt_low_bias=True, mt_bandwidth=mt_bandwidth,
+                        cwt_frequencies=cwt_frequencies,
+                        cwt_n_cycles=cwt_n_cycles)
+
+                assert_true(n == n_epochs)
+                assert_array_almost_equal(times_data, times)
+
+                if mode == 'multitaper':
+                    upper_t = 0.95
+                    lower_t = 0.5
+                else:
+                    # other estimates have higher variance
+                    upper_t = 0.8
+                    lower_t = 0.75
+
+                # test the simulated signal
+                if method == 'coh':
+                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
+                    # we see something for zero-lag
+                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] > upper_t))
+
+                    if mode != 'cwt_morlet':
+                        idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
+                        assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
+                        assert_true(np.all(con[1, 0, idx[1]:] < lower_t))
+                elif method == 'cohy':
+                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
+                    # imaginary coh will be zero
+                    assert_true(np.all(np.imag(con[1, 0, idx[0]:idx[1]])
+                                < lower_t))
+                    # we see something for zero-lag
+                    assert_true(np.all(np.abs(con[1, 0, idx[0]:idx[1]])
+                                > upper_t))
+
+                    idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
+                    if mode != 'cwt_morlet':
+                        assert_true(np.all(np.abs(con[1, 0, :idx[0]])
+                                    < lower_t))
+                        assert_true(np.all(np.abs(con[1, 0, idx[1]:])
+                                    < lower_t))
+                elif method == 'imcoh':
+                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
+                    # imaginary coh will be zero
+                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] < lower_t))
+                    idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
+                    assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
+                    assert_true(np.all(con[1, 0, idx[1]:] < lower_t))
+
+                # compute same connections using indices and 2 jobs,
+                # also add a second method
+                indices = tril_indices(n_signals, -1)
+
+                test_methods = (method, _CohEst)
+                combo = True if method == 'coh' else False
+                stc_data = _stc_gen(data, sfreq, tmin)
+                con2, freqs2, times2, n2, _ = spectral_connectivity(stc_data,
+                        method=test_methods, mode=mode, indices=indices,
+                        sfreq=sfreq, mt_adaptive=adaptive, mt_low_bias=True,
+                        mt_bandwidth=mt_bandwidth, tmin=tmin, tmax=tmax,
+                        cwt_frequencies=cwt_frequencies,
+                        cwt_n_cycles=cwt_n_cycles, n_jobs=2)
+
+                assert_true(isinstance(con2, list))
+                assert_true(len(con2) == 2)
+
+                if method == 'coh':
+                    assert_array_almost_equal(con2[0], con2[1])
+
+                con2 = con2[0]  # only keep the first method
+
+                # we get the same result for the probed connections
+                assert_array_almost_equal(freqs, freqs2)
+                assert_array_almost_equal(con[indices], con2)
+                assert_true(n == n2)
+                assert_array_almost_equal(times_data, times2)
+
+                # compute same connections for two bands, fskip=1, and f. avg.
+                fmin = (5., 15.)
+                fmax = (15., 30.)
+                con3, freqs3, times3, n3, _ = spectral_connectivity(data,
+                        method=method, mode=mode,
+                        indices=indices, sfreq=sfreq, fmin=fmin, fmax=fmax,
+                        fskip=1, faverage=True, mt_adaptive=adaptive,
+                        mt_low_bias=True, mt_bandwidth=mt_bandwidth,
+                        cwt_frequencies=cwt_frequencies,
+                        cwt_n_cycles=cwt_n_cycles)
+
+                assert_true(isinstance(freqs3, list))
+                assert_true(len(freqs3) == len(fmin))
+                for i in range(len(freqs3)):
+                    assert_true(np.all((freqs3[i] >= fmin[i])
+                                       & (freqs3[i] <= fmax[i])))
+
+                # average con2 "manually" and we get the same result
+                for i in range(len(freqs3)):
+                    freq_idx = np.searchsorted(freqs2, freqs3[i])
+                    con2_avg = np.mean(con2[:, freq_idx], axis=1)
+                    assert_array_almost_equal(con2_avg, con3[:, i])
diff --git a/mne/connectivity/tests/test_utils.py b/mne/connectivity/tests/test_utils.py
new file mode 100644
index 0000000..2736b1f
--- /dev/null
+++ b/mne/connectivity/tests/test_utils.py
@@ -0,0 +1,23 @@
+import numpy as np
+from nose.tools import assert_true
+
+from mne.connectivity import seed_target_indices
+
+
+def test_indices():
+    """Test connectivity indexing methods"""
+    n_seeds_test = [1, 3, 4]
+    n_targets_test = [2, 3, 200]
+    for n_seeds in n_seeds_test:
+        for n_targets in n_targets_test:
+            idx = np.random.permutation(np.arange(n_seeds + n_targets))
+            seeds = idx[:n_seeds]
+            targets = idx[n_seeds:]
+            indices = seed_target_indices(seeds, targets)
+            assert_true(len(indices) == 2)
+            assert_true(len(indices[0]) == len(indices[1]))
+            assert_true(len(indices[0]) == n_seeds * n_targets)
+            for seed in seeds:
+                assert_true(np.sum(indices[0] == seed) == n_targets)
+            for target in targets:
+                assert_true(np.sum(indices[1] == target) == n_seeds)
diff --git a/mne/connectivity/utils.py b/mne/connectivity/utils.py
new file mode 100644
index 0000000..14025b4
--- /dev/null
+++ b/mne/connectivity/utils.py
@@ -0,0 +1,45 @@
+# Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+import numpy as np
+
+
+def check_indices(indices):
+    """Check indices parameter"""
+
+    if not isinstance(indices, tuple) or len(indices) != 2:
+        raise ValueError('indices must be a tuple of length 2')
+
+    if len(indices[0]) != len(indices[1]):
+        raise ValueError('Index arrays indices[0] and indices[1] must '
+                         'have the same length')
+
+    return indices
+
+
+def seed_target_indices(seeds, targets):
+    """Generate indices parameter for seed based connectivity analysis.
+
+    Parameters
+    ----------
+    seeds : array of int | int
+        Seed indices.
+    targets : array of int | int
+        Indices of signals for which to compute connectivity.
+
+    Returns
+    -------
+    indices : tuple of arrays
+        The indices parameter used for connectivity computation.
+    """
+    # make them arrays
+    seeds = np.asarray((seeds,)).ravel()
+    targets = np.asarray((targets,)).ravel()
+
+    n_seeds = len(seeds)
+    n_targets = len(targets)
+
+    indices = (np.concatenate([np.tile(i, n_targets) for i in seeds]),
+               np.tile(targets, n_seeds))
+
+    return indices
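+
+
+# Example (a minimal sketch): seeds [0, 1] against targets [2, 3] expand
+# to all four seed/target pairs:
+#
+#     indices = seed_target_indices([0, 1], [2, 3])
+#     # -> (array([0, 0, 1, 1]), array([2, 3, 2, 3]))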
diff --git a/mne/cov.py b/mne/cov.py
new file mode 100644
index 0000000..2fa6077
--- /dev/null
+++ b/mne/cov.py
@@ -0,0 +1,741 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+import os
+from math import floor, ceil
+import warnings
+
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from . import fiff, verbose
+from .fiff.write import start_file, end_file
+from .fiff.proj import make_projector, proj_equal, activate_proj
+from .fiff import fiff_open
+from .fiff.pick import pick_types, channel_indices_by_type, pick_channels_cov,\
+                       pick_channels
+from .fiff.constants import FIFF
+from .epochs import _is_good
+
+
+def _check_covs_algebra(cov1, cov2):
+    if cov1.ch_names != cov2.ch_names:
+        raise ValueError('Both Covariance objects must have the same list '
+                         'of channels.')
+    if map(str, cov1['projs']) != map(str, cov2['projs']):
+        raise ValueError('Both Covariance objects must have the same list '
+                         'of SSP projections.')
+
+
+class Covariance(dict):
+    """Noise covariance matrix
+
+    Parameters
+    ----------
+    fname : string
+        The name of the raw file.
+
+    Attributes
+    ----------
+    data : array of shape (n_channels, n_channels)
+        The covariance.
+    `ch_names` : list of string
+        List of channels' names.
+    nfree : int
+        Number of degrees of freedom, i.e., the number of time points used.
+    """
+    def __init__(self, fname):
+        if fname is None:
+            return
+
+        # Reading
+        fid, tree, _ = fiff_open(fname)
+        self.update(fiff.read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV))
+        fid.close()
+
+    @property
+    def data(self):
+        return self['data']
+
+    @property
+    def ch_names(self):
+        return self['names']
+
+    @property
+    def nfree(self):
+        return self['nfree']
+
+    def save(self, fname):
+        """save covariance matrix in a FIF file"""
+        fid = start_file(fname)
+
+        try:
+            fiff.write_cov(fid, self)
+        except Exception as inst:
+            os.remove(fname)
+            raise inst
+
+        end_file(fid)
+
+    def as_diag(self, copy=True):
+        """Set covariance to be processed as being diagonal
+
+        Parameters
+        ----------
+        copy : bool
+            If True, return a modified copy of the covariance. If False,
+            the covariance is modified in place.
+
+        Returns
+        -------
+        cov : dict
+            The covariance.
+
+        Notes
+        -----
+        This function allows creation of inverse operators
+        equivalent to using the old "--diagnoise" mne option.
+        """
+        if self['diag'] is True:
+            return self.copy() if copy is True else self
+        if copy is True:
+            cov = cp.deepcopy(self)
+        else:
+            cov = self
+        cov['diag'] = True
+        cov['data'] = np.diag(cov['data'])
+        cov['eig'] = None
+        cov['eigvec'] = None
+        return cov
+
+    def __repr__(self):
+        s = "size : %s x %s" % self.data.shape
+        s += ", data : %s" % self.data
+        return "<Covariance  |  %s>" % s
+
+    def __add__(self, cov):
+        """Add Covariance taking into account number of degrees of freedom"""
+        _check_covs_algebra(self, cov)
+        this_cov = cp.deepcopy(cov)
+        this_cov['data'] = ((this_cov['data'] * this_cov['nfree']) +
+                            (self['data'] * self['nfree'])) / \
+                                (self['nfree'] + this_cov['nfree'])
+        this_cov['nfree'] += self['nfree']
+
+        this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
+
+        return this_cov
+
+    def __iadd__(self, cov):
+        """Add Covariance taking into account number of degrees of freedom"""
+        _check_covs_algebra(self, cov)
+        self['data'][:] = ((self['data'] * self['nfree']) + \
+                            (cov['data'] * cov['nfree'])) / \
+                                (self['nfree'] + cov['nfree'])
+        self['nfree'] += cov['nfree']
+
+        self['bads'] = list(set(self['bads']).union(cov['bads']))
+
+        return self
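+
+    # Example (a sketch; the file names are hypothetical): adding two
+    # covariances pools their data weighted by degrees of freedom, e.g.
+    # for estimates from two recording runs:
+    #
+    #     cov = read_cov('run1-cov.fif') + read_cov('run2-cov.fif')
+    #     # cov['nfree'] is the sum of the individual nfree values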
+
+
+###############################################################################
+# IO
+
+def read_cov(fname):
+    """Read a noise covariance from a FIF file.
+
+    Parameters
+    ----------
+    fname : string
+        The name of file containing the covariance matrix.
+
+    Returns
+    -------
+    cov : Covariance
+        The noise covariance matrix.
+    """
+    return Covariance(fname)
+
+
+###############################################################################
+# Estimate from data
+
+def _check_n_samples(n_samples, n_chan):
+    """Check to see if there are enough samples for reliable cov calc"""
+    n_samples_min = 10 * (n_chan + 1) / 2
+    if n_samples <= 0:
+        raise ValueError('No samples found to compute the covariance matrix')
+    if n_samples < n_samples_min:
+        text = ('Too few samples (required : %d got : %d), covariance '
+                'estimate may be unreliable' % (n_samples_min, n_samples))
+        warnings.warn(text)
+        logger.warn(text)
+
+
+@verbose
+def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
+                                reject=None, flat=None, picks=None,
+                                verbose=None):
+    """Estimate noise covariance matrix from a continuous segment of raw data
+
+    It is typically useful to estimate a noise covariance from
+    empty-room data or from time intervals recorded before the
+    stimulation starts.
+
+    Note: To speed up the computation you should consider preloading raw data
+    by setting preload=True when reading the Raw data.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw data
+    tmin : float
+        Beginning of time interval in seconds
+    tmax : float
+        End of time interval in seconds
+    tstep : float
+        Size of data chunks for artefact rejection.
+    reject : dict
+        Rejection parameters based on peak to peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+
+    flat : dict
+        Rejection parameters based on flatness of signal
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
+        If flat is None then no rejection is done.
+    picks : array of int
+        Indices of channels to include (if None, all channels
+        except bad channels are used).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    cov : instance of Covariance
+        Noise covariance matrix.
+    """
+    sfreq = raw.info['sfreq']
+
+    # Convert to samples
+    start = 0 if tmin is None else int(floor(tmin * sfreq))
+    if tmax is None:
+        stop = raw.last_samp - raw.first_samp
+    else:
+        stop = int(ceil(tmax * sfreq))
+    step = int(ceil(tstep * raw.info['sfreq']))
+
+    # don't exclude any bad channels, inverses expect all channels present
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
+                           exclude=[])
+
+    data = 0
+    n_samples = 0
+    mu = 0
+
+    info = cp.copy(raw.info)
+    info['chs'] = [info['chs'][k] for k in picks]
+    info['ch_names'] = [info['ch_names'][k] for k in picks]
+    info['nchan'] = len(picks)
+    idx_by_type = channel_indices_by_type(info)
+
+    # Read data in chunks
+    for first in range(start, stop, step):
+        last = first + step
+        if last >= stop:
+            last = stop
+        raw_segment, times = raw[picks, first:last]
+        if _is_good(raw_segment, info['ch_names'], idx_by_type, reject, flat,
+                    ignore_chs=info['bads']):
+            mu += raw_segment.sum(axis=1)
+            data += np.dot(raw_segment, raw_segment.T)
+            n_samples += raw_segment.shape[1]
+        else:
+            logger.info("Artefact detected in [%d, %d]" % (first, last))
+
+    _check_n_samples(n_samples, len(picks))
+    mu /= n_samples
+    data -= n_samples * mu[:, None] * mu[None, :]
+    data /= (n_samples - 1.0)
+    logger.info("Number of samples used : %d" % n_samples)
+    logger.info('[done]')
+
+    cov = Covariance(None)
+
+    ch_names = [raw.info['ch_names'][k] for k in picks]
+    # XXX : do not compute eig and eigvec now (think it's better...)
+    eig = None
+    eigvec = None
+
+    #   Store structure for fif
+    cov.update(kind=FIFF.FIFFV_MNE_NOISE_COV, diag=False, dim=len(data),
+               names=ch_names, data=data,
+               projs=cp.deepcopy(raw.info['projs']),
+               bads=raw.info['bads'], nfree=n_samples, eig=eig,
+               eigvec=eigvec)
+
+    return cov
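+
+
+# Usage sketch (the file name is hypothetical; empty-room recordings are
+# a typical input, as noted in the docstring above):
+#
+#     raw = fiff.Raw('empty_room_raw.fif', preload=True)
+#     cov = compute_raw_data_covariance(raw, tmin=0., tmax=30., tstep=0.2)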
+
+
+@verbose
+def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
+                       projs=None, verbose=None):
+    """Estimate noise covariance matrix from epochs
+
+    The noise covariance is typically estimated on pre-stim periods
+    when the stim onset is defined from events.
+
+    If the covariance is computed for multiple event types (events
+    with different IDs), the following two options can be used and combined.
+    A) create an Epochs object for each event type and pass a list of
+    Epochs to this function.
+    B) create a single Epochs object for multiple events and pass it
+    to this function.
+
+    Note: Baseline correction should be used when creating the Epochs.
+          Otherwise the computed covariance matrix will be inaccurate.
+
+    Note: For multiple event types, it is also possible to create a
+          single Epochs object with events obtained using
+          merge_events(). However, the resulting covariance matrix
+          will only be correct if keep_sample_mean is True.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs, or a list of Epochs objects
+        The epochs
+    keep_sample_mean : bool
+        If False, the average response over epochs is computed for
+        each event type and subtracted during the covariance
+        computation. This is useful if the evoked response from a
+        previous stimulus extends into the baseline period of the next.
+    tmin : float | None
+        Start time for baseline. If None start at first sample.
+    tmax : float | None
+        End time for baseline. If None end at last sample.
+    projs : list of Projection | None
+        List of projectors to use in covariance calculation, or None
+        to indicate that the projectors from the epochs should be
+        inherited. If None, then projectors from all epochs must match.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    cov : instance of Covariance
+        The computed covariance.
+    """
+
+    if not isinstance(epochs, list):
+        epochs = _unpack_epochs(epochs)
+    else:
+        epochs = [ep for li in [_unpack_epochs(epoch) for epoch in epochs]
+                  for ep in li]
+
+    # check for baseline correction
+    for epochs_t in epochs:
+        if epochs_t.baseline is None:
+            warnings.warn('Epochs are not baseline corrected, covariance '
+                          'matrix may be inaccurate')
+
+    bads = epochs[0].info['bads']
+    if projs is None:
+        projs = cp.deepcopy(epochs[0].info['projs'])
+        # make sure Epochs are compatible
+        for epochs_t in epochs[1:]:
+            if epochs_t.proj != epochs[0].proj:
+                raise ValueError('Epochs must agree on the use of projections')
+            for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
+                if not proj_equal(proj_a, proj_b):
+                    raise ValueError('Epochs must have same projectors')
+    else:
+        projs = cp.deepcopy(projs)
+    ch_names = epochs[0].ch_names
+
+    # make sure Epochs are compatible
+    for epochs_t in epochs[1:]:
+        if epochs_t.info['bads'] != bads:
+            raise ValueError('Epochs must have same bad channels')
+        if epochs_t.ch_names != ch_names:
+            raise ValueError('Epochs must have same channel names')
+
+    n_epoch_types = len(epochs)
+    data = 0.0
+    data_mean = list(np.zeros(n_epoch_types))
+    n_samples = np.zeros(n_epoch_types, dtype=np.int)
+    n_epochs = np.zeros(n_epoch_types, dtype=np.int)
+
+    picks_meeg = pick_types(epochs[0].info, meg=True, eeg=True, eog=False,
+                            exclude=[])
+    ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
+
+    for i, epochs_t in enumerate(epochs):
+
+        tstart, tend = None, None
+        if tmin is not None:
+            tstart = np.where(epochs_t.times >= tmin)[0][0]
+        if tmax is not None:
+            tend = np.where(epochs_t.times <= tmax)[0][-1] + 1
+        tslice = slice(tstart, tend, None)
+
+        for e in epochs_t:
+            e = e[picks_meeg][:, tslice]
+            if not keep_sample_mean:
+                data_mean[i] += e
+            data += np.dot(e, e.T)
+            n_samples[i] += e.shape[1]
+            n_epochs[i] += 1
+
+    n_samples_tot = int(np.sum(n_samples))
+
+    _check_n_samples(n_samples_tot, len(picks_meeg))
+
+    if keep_sample_mean:
+        data /= n_samples_tot
+    else:
+        n_samples_epoch = n_samples / n_epochs
+        norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
+        for i, mean in enumerate(data_mean):
+            data -= 1.0 / n_epochs[i] * np.dot(mean, mean.T)
+        data /= norm_const
+
+    cov = Covariance(None)
+
+    # XXX : do not compute eig and eigvec now (think it's better...)
+    eig = None
+    eigvec = None
+
+    cov.update(kind=1, diag=False, dim=len(data), names=ch_names,
+               data=data, projs=projs, bads=epochs[0].info['bads'],
+               nfree=n_samples_tot, eig=eig, eigvec=eigvec)
+
+    logger.info("Number of samples used : %d" % n_samples_tot)
+    logger.info('[done]')
+
+    return cov
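+
+
+# Usage sketch (Epochs construction omitted; restricting tmax to 0. is an
+# assumption matching a typical pre-stimulus baseline):
+#
+#     cov = compute_covariance(epochs, tmin=None, tmax=0.)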
+
+
+###############################################################################
+# Writing
+
+def write_cov(fname, cov):
+    """Write a noise covariance matrix
+
+    Parameters
+    ----------
+    fname : string
+        The name of the file
+
+    cov : Covariance
+        The noise covariance matrix
+    """
+    cov.save(fname)
+
+
+###############################################################################
+# Prepare for inverse modeling
+
+def rank(A, tol=1e-8):
+    s = linalg.svd(A, compute_uv=0)
+    return np.sum(np.where(s > s[0] * tol, 1, 0))
+
+
+def _unpack_epochs(epochs):
+    """ Aux Function """
+    if len(epochs.event_id) > 1:
+        epochs = [epochs[k] for k in epochs.event_id]
+    else:
+        epochs = [epochs]
+
+    return epochs
+
+
+@verbose
+def _get_whitener(A, pca, ch_type, verbose=None):
+    # whitening operator
+    rnk = rank(A)
+    eig, eigvec = linalg.eigh(A, overwrite_a=True)
+    eigvec = eigvec.T
+    eig[:-rnk] = 0.0
+    logger.info('Setting small %s eigenvalues to zero.' % ch_type)
+    if not pca:  # No PCA case.
+        logger.info('Not doing PCA for %s.' % ch_type)
+    else:
+        logger.info('Doing PCA for %s.' % ch_type)
+        # This line will reduce the actual number of variables in data
+        # and leadfield to the true rank.
+        eigvec = eigvec[:-rnk].copy()
+    return eig, eigvec
+
+
+@verbose
+def prepare_noise_cov(noise_cov, info, ch_names, verbose=None):
+    """Prepare noise covariance matrix
+
+    Parameters
+    ----------
+    noise_cov : Covariance
+        The noise covariance to process.
+    info : dict
+        The measurement info (used to get channel types and bad channels).
+    ch_names : list
+        The channel names to be considered.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
+    if noise_cov['diag'] is False:
+        C = noise_cov.data[C_ch_idx][:, C_ch_idx]
+    else:
+        C = np.diag(noise_cov.data[C_ch_idx])
+
+    # Create the projection operator
+    proj, ncomp, _ = make_projector(info['projs'], ch_names)
+    if ncomp > 0:
+        logger.info('    Created an SSP operator (subspace dimension = %d)'
+                    % ncomp)
+        C = np.dot(proj, np.dot(C, proj.T))
+
+    pick_meg = pick_types(info, meg=True, eeg=False, exclude='bads')
+    pick_eeg = pick_types(info, meg=False, eeg=True, exclude='bads')
+    meg_names = [info['chs'][k]['ch_name'] for k in pick_meg]
+    C_meg_idx = [k for k in range(len(C)) if ch_names[k] in meg_names]
+    eeg_names = [info['chs'][k]['ch_name'] for k in pick_eeg]
+    C_eeg_idx = [k for k in range(len(C)) if ch_names[k] in eeg_names]
+
+    has_meg = len(C_meg_idx) > 0
+    has_eeg = len(C_eeg_idx) > 0
+
+    if has_meg:
+        C_meg = C[C_meg_idx][:, C_meg_idx]
+        C_meg_eig, C_meg_eigvec = _get_whitener(C_meg, False, 'MEG')
+
+    if has_eeg:
+        C_eeg = C[C_eeg_idx][:, C_eeg_idx]
+        C_eeg_eig, C_eeg_eigvec = _get_whitener(C_eeg, False, 'EEG')
+
+    n_chan = len(ch_names)
+    eigvec = np.zeros((n_chan, n_chan), dtype=np.float)
+    eig = np.zeros(n_chan, dtype=np.float)
+
+    if has_meg:
+        eigvec[np.ix_(C_meg_idx, C_meg_idx)] = C_meg_eigvec
+        eig[C_meg_idx] = C_meg_eig
+    if has_eeg:
+        eigvec[np.ix_(C_eeg_idx, C_eeg_idx)] = C_eeg_eigvec
+        eig[C_eeg_idx] = C_eeg_eig
+
+    assert(len(C_meg_idx) + len(C_eeg_idx) == n_chan)
+
+    noise_cov = cp.deepcopy(noise_cov)
+    noise_cov.update(data=C, eig=eig, eigvec=eigvec, dim=len(ch_names),
+                     diag=False, names=ch_names)
+
+    return noise_cov
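+
+
+# Sketch of a typical call (noise_cov and info are assumed to come from
+# read_cov and a Raw or Epochs object):
+#
+#     noise_cov = prepare_noise_cov(noise_cov, info, ch_names)
+#     # noise_cov['eig'] and noise_cov['eigvec'] are now filled, with
+#     # small eigenvalues per channel type set to zero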
+
+
+def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude=None,
+               proj=True, verbose=None):
+    """Regularize noise covariance matrix
+
+    This method works by adding a constant to the diagonal for each
+    channel type separately. Special care is taken to keep the
+    rank of the data constant.
+
+    Parameters
+    ----------
+    cov : Covariance
+        The noise covariance matrix.
+    info : dict
+        The measurement info (used to get channel types and bad channels).
+    mag : float
+        Regularization factor for MEG magnetometers.
+    grad : float
+        Regularization factor for MEG gradiometers.
+    eeg : float
+        Regularization factor for EEG.
+    exclude : list | None
+        List of channels to mark as bad. If None, bad channels
+        are extracted from both info['bads'] and cov['bads'].
+    proj : bool
+        Whether to apply projections to keep the rank of the data.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    reg_cov : Covariance
+        The regularized covariance matrix.
+    """
+    cov = cp.deepcopy(cov)
+    if exclude is None:
+        exclude = info['bads'] + cov['bads']
+
+    sel_eeg = pick_types(info, meg=False, eeg=True, exclude=exclude)
+    sel_mag = pick_types(info, meg='mag', eeg=False, exclude=exclude)
+    sel_grad = pick_types(info, meg='grad', eeg=False, exclude=exclude)
+
+    info_ch_names = info['ch_names']
+    ch_names_eeg = [info_ch_names[i] for i in sel_eeg]
+    ch_names_mag = [info_ch_names[i] for i in sel_mag]
+    ch_names_grad = [info_ch_names[i] for i in sel_grad]
+
+    # This actually removes bad channels from the cov, which is not backward
+    # compatible, so let's leave all channels in
+    cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
+    ch_names = cov_good.ch_names
+
+    idx_eeg, idx_mag, idx_grad = [], [], []
+    for i, ch in enumerate(ch_names):
+        if ch in ch_names_eeg:
+            idx_eeg.append(i)
+        elif ch in ch_names_mag:
+            idx_mag.append(i)
+        elif ch in ch_names_grad:
+            idx_grad.append(i)
+        else:
+            raise Exception('channel is of an unknown type')
+
+    C = cov_good['data']
+
+    assert len(C) == (len(idx_eeg) + len(idx_mag) + len(idx_grad))
+
+    if proj:
+        projs = info['projs'] + cov_good['projs']
+        projs = activate_proj(projs)
+
+    for desc, idx, reg in [('EEG', idx_eeg, eeg), ('MAG', idx_mag, mag),
+                           ('GRAD', idx_grad, grad)]:
+        if len(idx) == 0 or reg == 0.0:
+            logger.info("    %s regularization : None" % desc)
+            continue
+
+        logger.info("    %s regularization : %s" % (desc, reg))
+
+        this_C = C[idx][:, idx]
+        if proj:
+            this_ch_names = [ch_names[k] for k in idx]
+            P, ncomp, _ = make_projector(projs, this_ch_names)
+            U = linalg.svd(P)[0][:, :-ncomp]
+            if ncomp > 0:
+                logger.info('    Created an SSP operator for %s '
+                            '(dimension = %d)' % (desc, ncomp))
+                this_C = np.dot(U.T, np.dot(this_C, U))
+
+        sigma = np.mean(np.diag(this_C))
+        this_C.flat[::len(this_C) + 1] += reg * sigma  # modify diag inplace
+        if proj and ncomp > 0:
+            this_C = np.dot(U, np.dot(this_C, U.T))
+
+        C[np.ix_(idx, idx)] = this_C
+
+    # Put data back in correct locations
+    idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
+    cov['data'][np.ix_(idx, idx)] = C
+
+    return cov
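+
+
+# Usage sketch (the 0.1 factors are the defaults; cov and info are
+# assumed to come from compute_covariance and the measurement info):
+#
+#     reg_cov = regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1)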
+
+
+def compute_whitener(noise_cov, info, picks=None, verbose=None):
+    """Compute whitening matrix
+
+    Parameters
+    ----------
+    noise_cov : Covariance
+        The noise covariance.
+    info : dict
+        The measurement info.
+    picks : array of int | None
+        The channel indices to include. If None the data
+        channels in info, except bad channels, are used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    W : 2d array
+        The whitening matrix.
+    ch_names : list
+        The channel names.
+    """
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, exclude='bads')
+
+    ch_names = [info['chs'][k]['ch_name'] for k in picks]
+
+    noise_cov = cp.deepcopy(noise_cov)
+    noise_cov = prepare_noise_cov(noise_cov, info, ch_names)
+    n_chan = len(ch_names)
+
+    W = np.zeros((n_chan, n_chan), dtype=np.float)
+    #
+    #   Omit the zeroes due to projection
+    #
+    eig = noise_cov['eig']
+    nzero = (eig > 0)
+    W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+    #
+    #   Rows of eigvec are the eigenvectors
+    #
+    W = np.dot(W, noise_cov['eigvec'])
+    W = np.dot(noise_cov['eigvec'].T, W)
+    return W, ch_names
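+
+
+# Usage sketch: applying the whitener to a data array (the data array and
+# its channel ordering are assumptions for illustration):
+#
+#     W, ch_names = compute_whitener(noise_cov, info)
+#     data_white = np.dot(W, data)  # data: (n_channels, n_times), picks order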
+
+
+def whiten_evoked(evoked, noise_cov, picks, diag=False):
+    """Whiten evoked data using given noise covariance
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked data
+    noise_cov : instance of Covariance
+        The noise covariance
+    picks : array of ints
+        The channel indices to whiten
+    diag : bool
+        If True, whiten using only the diagonal of the covariance
+
+    Returns
+    -------
+    evoked_white : instance of Evoked
+        The whitened evoked data.
+    """
+    ch_names = [evoked.ch_names[k] for k in picks]
+    n_chan = len(ch_names)
+    evoked = cp.deepcopy(evoked)
+
+    if diag:
+        noise_cov = cp.deepcopy(noise_cov)
+        noise_cov['data'] = np.diag(np.diag(noise_cov['data']))
+
+    noise_cov = prepare_noise_cov(noise_cov, evoked.info, ch_names)
+
+    W = np.zeros((n_chan, n_chan), dtype=np.float)
+    #
+    #   Omit the zeroes due to projection
+    #
+    eig = noise_cov['eig']
+    nzero = (eig > 0)
+    W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+    #
+    #   Rows of eigvec are the eigenvectors
+    #
+    W = np.dot(W, noise_cov['eigvec'])
+    W = np.dot(noise_cov['eigvec'].T, W)
+    evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
+    return evoked
diff --git a/mne/cuda.py b/mne/cuda.py
new file mode 100644
index 0000000..bbcef0f
--- /dev/null
+++ b/mne/cuda.py
@@ -0,0 +1,408 @@
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.fftpack import fft, ifft
+try:
+    import pycuda.gpuarray as gpuarray
+    from pycuda.driver import mem_get_info
+    from scikits.cuda import fft as cudafft
+except ImportError:
+    pass
+
+import logging
+logger = logging.getLogger('mne')
+
+from .utils import sizeof_fmt
+
+
+# Support CUDA for FFTs; requires scikits.cuda and pycuda
+cuda_capable = False
+cuda_multiply_inplace_complex128 = None
+cuda_halve_value_complex128 = None
+cuda_real_value_complex128 = None
+requires_cuda = np.testing.dec.skipif(True, 'CUDA not initialized')
+
+
+def init_cuda():
+    """Initialize CUDA functionality
+
+    This function attempts to load the necessary interfaces
+    (hardware connectivity) to run CUDA-based filtering. This
+    function should only need to be run once per session.
+
+    If the config var (set via mne.set_config or in ENV)
+    MNE_USE_CUDA == 'true', this function will be executed when
+    importing mne. If this variable is not set, this function can
+    be manually executed.
+    """
+    global cuda_capable
+    global cuda_multiply_inplace_complex128
+    global cuda_halve_value_complex128
+    global cuda_real_value_complex128
+    global requires_cuda
+    if cuda_capable is True:
+        logger.info('CUDA previously enabled, currently %s available memory'
+                    % sizeof_fmt(mem_get_info()[0]))
+        return
+    # Triage possible errors for informative messaging
+    cuda_capable = False
+    try:
+        import pycuda.gpuarray
+        import pycuda.driver
+    except ImportError:
+        logger.warn('module pycuda not found, CUDA not enabled')
+    else:
+        try:
+            # Initialize CUDA; happens with importing autoinit
+            import pycuda.autoinit
+        except ImportError:
+            logger.warn('pycuda.autoinit could not be imported, likely '
+                        'a hardware error, CUDA not enabled')
+        else:
+            # Make our multiply inplace kernel
+            try:
+                from pycuda.elementwise import ElementwiseKernel
+                # let's construct our own CUDA multiply in-place function
+                dtype = 'pycuda::complex<double>'
+                cuda_multiply_inplace_complex128 = \
+                    ElementwiseKernel(dtype + ' *a, ' + dtype + ' *b',
+                                      'b[i] *= a[i]', 'multiply_inplace')
+                cuda_halve_value_complex128 = \
+                    ElementwiseKernel(dtype + ' *a', 'a[i] /= 2.0',
+                                      'halve_value')
+                cuda_real_value_complex128 = \
+                    ElementwiseKernel(dtype + ' *a', 'a[i] = real(a[i])',
+                                      'real_value')
+            except:
+                # This should never happen
+                raise RuntimeError('pycuda ElementwiseKernel could not be '
+                                   'constructed, please report this issue '
+                                   'to mne-python developers with your '
+                                   'system information and pycuda version')
+            else:
+                # Make sure scikits.cuda is installed
+                try:
+                    from scikits.cuda import fft as cudafft
+                except ImportError:
+                    logger.warn('module scikits.cuda not found, CUDA not '
+                                'enabled')
+                else:
+                    # Make sure we can use 64-bit FFTs
+                    try:
+                        fft_plan = cudafft.Plan(16, np.float64, np.complex128)
+                        del fft_plan
+                    except:
+                        logger.warn('Device does not support 64-bit FFTs, '
+                                    'CUDA not enabled')
+                    else:
+                        cuda_capable = True
+                        # Figure out limit for CUDA FFT calculations
+                        logger.info('Enabling CUDA with %s available memory'
+                                    % sizeof_fmt(mem_get_info()[0]))
+    requires_cuda = np.testing.dec.skipif(not cuda_capable,
+                                          'CUDA not initialized')
+
+
+###############################################################################
+# Repeated FFT multiplication
+
+def setup_cuda_fft_multiply_repeated(n_jobs, h_fft):
+    """Set up repeated CUDA FFT multiplication with a given filter
+
+    Parameters
+    ----------
+    n_jobs : int | str
+        If n_jobs == 'cuda', the function will attempt to set up for CUDA
+        FFT multiplication.
+    h_fft : array
+        The filtering function that will be used repeatedly.
+        If n_jobs='cuda', this array will be shortened (since CUDA
+        assumes FFTs of real signals are half the length of the signal)
+        and turned into a gpuarray.
+
+    Returns
+    -------
+    n_jobs : int
+        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
+        original n_jobs is passed.
+    cuda_dict : dict
+        Dictionary with the following CUDA-related variables:
+            use_cuda : bool
+                Whether CUDA should be used.
+            fft_plan : instance of FFTPlan
+                FFT plan to use in calculating the FFT.
+            ifft_plan : instance of FFTPlan
+                FFT plan to use in calculating the IFFT.
+            x_fft : instance of gpuarray
+                Empty allocated GPU space for storing the result of the
+                frequency-domain multiplication.
+            x : instance of gpuarray
+                Empty allocated GPU space for the data to filter.
+    h_fft : array | instance of gpuarray
+        This will either be a gpuarray (if CUDA enabled) or np.ndarray.
+        If CUDA is enabled, h_fft will be modified appropriately for use
+        with filter.fft_multiply().
+
+    Notes
+    -----
+    This function is designed to be used with fft_multiply_repeated().
+    """
+    cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
+                     x_fft=None, x=None, fft_len=None)
+    n_fft = len(h_fft)
+    if n_jobs == 'cuda':
+        n_jobs = 1
+        if cuda_capable:
+            # set up all arrays necessary for CUDA
+            cuda_fft_len = int((n_fft - (n_fft % 2)) / 2 + 1)
+            use_cuda = False
+            # try setting up for float64
+            try:
+                fft_plan = cudafft.Plan(n_fft, np.float64, np.complex128)
+                ifft_plan = cudafft.Plan(n_fft, np.complex128, np.float64)
+                x_fft = gpuarray.empty(cuda_fft_len, np.complex128)
+                x = gpuarray.empty(int(n_fft), np.float64)
+                cuda_h_fft = h_fft[:cuda_fft_len].astype('complex128')
+                # do the IFFT normalization now so we don't have to later
+                cuda_h_fft /= len(h_fft)
+                h_fft = gpuarray.to_gpu(cuda_h_fft)
+                dtype = np.float64
+                multiply_inplace = cuda_multiply_inplace_complex128
+            except:
+                logger.info('CUDA not used, could not instantiate memory '
+                            '(arrays may be too large), falling back to '
+                            'n_jobs=1')
+            else:
+                use_cuda = True
+
+            if use_cuda is True:
+                logger.info('Using CUDA for FFT FIR filtering')
+                cuda_dict['use_cuda'] = True
+                cuda_dict['fft_plan'] = fft_plan
+                cuda_dict['ifft_plan'] = ifft_plan
+                cuda_dict['x_fft'] = x_fft
+                cuda_dict['x'] = x
+                cuda_dict['dtype'] = dtype
+                cuda_dict['multiply_inplace'] = multiply_inplace
+        else:
+            logger.info('CUDA not used, CUDA has not been initialized, '
+                        'falling back to n_jobs=1')
+    return n_jobs, cuda_dict, h_fft
+
+
+def fft_multiply_repeated(h_fft, x, cuda_dict=dict(use_cuda=False)):
+    """Do FFT multiplication by a filter function (possibly using CUDA)
+
+    Parameters
+    ----------
+    h_fft : 1-d array or gpuarray
+        The filtering array to apply.
+    x : 1-d array
+        The array to filter.
+    cuda_dict : dict
+        Dictionary constructed using setup_cuda_fft_multiply_repeated().
+
+    Returns
+    -------
+    x : 1-d array
+        Filtered version of x.
+    """
+    if not cuda_dict['use_cuda']:
+        # do the fourier-domain operations
+        x = np.real(ifft(h_fft * fft(x), overwrite_x=True)).ravel()
+    else:
+        # do the fourier-domain operations, results in second param
+        cuda_dict['x'].set(x.astype(cuda_dict['dtype']))
+        cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
+        cuda_dict['multiply_inplace'](h_fft, cuda_dict['x_fft'])
+        # If we wanted to do it locally instead of using our own kernel:
+        # cuda_seg_fft.set(cuda_seg_fft.get() * h_fft)
+        cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
+                     cuda_dict['ifft_plan'], False)
+        x = np.array(cuda_dict['x'].get(), dtype=x.dtype, subok=True,
+                     copy=False)
+    return x
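+
+
+# Usage sketch of the CPU path (no CUDA required; the window-based filter
+# and random signal are toy assumptions):
+#
+#     import numpy as np
+#     from scipy.fftpack import fft
+#     x = np.random.randn(1024)
+#     h_fft = fft(np.hanning(1024))        # frequency response of a filter
+#     y = fft_multiply_repeated(h_fft, x)  # circular convolution via FFT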
+
+
+###############################################################################
+# FFT Resampling
+
+def setup_cuda_fft_resample(n_jobs, W, new_len):
+    """Set up CUDA FFT resampling
+
+    Parameters
+    ----------
+    n_jobs : int | str
+        If n_jobs == 'cuda', the function will attempt to set up for CUDA
+        FFT resampling.
+    W : array
+        The filtering function to be used during resampling.
+        If n_jobs='cuda', this array will be shortened (since CUDA
+        assumes FFTs of real signals are half the length of the signal)
+        and turned into a gpuarray.
+    new_len : int
+        The size of the array following resampling.
+
+    Returns
+    -------
+    n_jobs : int
+        Set to 1 if n_jobs == 'cuda' was passed in; otherwise the original
+        n_jobs value is passed through.
+    cuda_dict : dict
+        Dictionary with the following CUDA-related variables:
+            use_cuda : bool
+                Whether CUDA should be used.
+            fft_plan : instance of FFTPlan
+                FFT plan to use in calculating the FFT.
+            ifft_plan : instance of FFTPlan
+                FFT plan to use in calculating the IFFT.
+            x_fft : instance of gpuarray
+                Empty allocated GPU space for storing the result of the
+                frequency-domain multiplication.
+            x : instance of gpuarray
+                Empty allocated GPU space for the data to resample.
+    W : array | instance of gpuarray
+        This will either be a gpuarray (if CUDA enabled) or np.ndarray.
+        If CUDA is enabled, W will be modified appropriately for use
+        with fft_resample().
+
+    Notes
+    -----
+    This function is designed to be used with fft_resample().
+    """
+    cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
+                     x_fft=None, x=None, y_fft=None, y=None)
+    if n_jobs == 'cuda':
+        n_jobs = 1
+        if cuda_capable:
+            use_cuda = False
+            # try setting up for float64
+            try:
+                n_fft_x = len(W)
+                cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) / 2 + 1)
+                n_fft_y = new_len
+                cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) / 2 + 1)
+                fft_plan = cudafft.Plan(n_fft_x, np.float64, np.complex128)
+                ifft_plan = cudafft.Plan(n_fft_y, np.complex128, np.float64)
+                x_fft = gpuarray.zeros(max(cuda_fft_len_x,
+                                           cuda_fft_len_y), np.complex128)
+                x = gpuarray.empty(max(int(n_fft_x),
+                                       int(n_fft_y)), np.float64)
+                cuda_W = W[:cuda_fft_len_x].astype('complex128')
+                # do the IFFT normalization now so we don't have to later
+                cuda_W /= n_fft_y
+                W = gpuarray.to_gpu(cuda_W)
+                dtype = np.float64
+                multiply_inplace = cuda_multiply_inplace_complex128
+            except:
+                logger.info('CUDA not used, could not allocate GPU memory '
+                            '(arrays may be too large), falling back to '
+                            'n_jobs=1')
+            else:
+                use_cuda = True
+
+            if use_cuda:
+                logger.info('Using CUDA for FFT resampling')
+                cuda_dict['use_cuda'] = True
+                cuda_dict['fft_plan'] = fft_plan
+                cuda_dict['ifft_plan'] = ifft_plan
+                cuda_dict['x_fft'] = x_fft
+                cuda_dict['x'] = x
+                cuda_dict['dtype'] = dtype
+                cuda_dict['multiply_inplace'] = multiply_inplace
+                cuda_dict['halve_value'] = cuda_halve_value_complex128
+                cuda_dict['real_value'] = cuda_real_value_complex128
+        else:
+            logger.info('CUDA not used, CUDA has not been initialized, '
+                        'falling back to n_jobs=1')
+    return n_jobs, cuda_dict, W
+
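+# Note: for a real signal of length n, a real-to-complex FFT stores only the
+# n // 2 + 1 unique bins, which is what the cuda_fft_len_* expressions above
+# compute; as a quick sanity check:
+#
+#     >>> n = 10
+#     >>> int((n - (n % 2)) / 2 + 1) == n // 2 + 1
+#     True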
+
+def fft_resample(x, W, new_len, npad, to_remove,
+                 cuda_dict=dict(use_cuda=False)):
+    """Do FFT resampling with a filter function (possibly using CUDA)
+
+    Parameters
+    ----------
+    x : 1-d array
+        The array to resample.
+    W : 1-d array or gpuarray
+        The filtering function to apply.
+    new_len : int
+        The size of the output array (before removing padding).
+    npad : int
+        Amount of padding to apply before resampling.
+    to_remove : int
+        Number of samples to remove after resampling.
+    cuda_dict : dict
+        Dictionary constructed using setup_cuda_fft_resample().
+
+    Returns
+    -------
+    x : 1-d array
+        Filtered version of x.
+    """
+    # add some padding at beginning and end to make this work a little cleaner
+    x = _smart_pad(x, npad)
+    old_len = len(x)
+    if not cuda_dict['use_cuda']:
+        N = int(min(new_len, old_len))
+        sl_1 = slice((N + 1) / 2)
+        y_fft = np.zeros(new_len, np.complex128)
+        x_fft = fft(x).ravel()
+        x_fft *= W
+        y_fft[sl_1] = x_fft[sl_1]
+        sl_2 = slice(-(N - 1) / 2, None)
+        y_fft[sl_2] = x_fft[sl_2]
+        y = np.real(ifft(y_fft, overwrite_x=True)).ravel()
+    else:
+        if old_len < new_len:
+            x = np.concatenate((x, np.zeros(new_len - old_len, x.dtype)))
+        cuda_dict['x'].set(x)
+        # do the fourier-domain operations, results put in second param
+        cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
+        cuda_dict['multiply_inplace'](W, cuda_dict['x_fft'])
+        # This is not straightforward, but because x_fft and y_fft share
+        # the same data (and only one half of the full DFT is stored), we
+        # don't have to transfer the slice like we do in scipy. All we
+        # need to worry about is the Nyquist component, either halving it
+        # or taking just the real component...
+        if new_len > old_len:
+            if old_len % 2 == 0:
+                nyq = int((old_len - (old_len % 2)) / 2)
+                cuda_dict['halve_value'](cuda_dict['x_fft'],
+                                         slice=slice(nyq, nyq + 1))
+        else:
+            if new_len % 2 == 0:
+                nyq = int((new_len - (new_len % 2)) / 2)
+                cuda_dict['real_value'](cuda_dict['x_fft'],
+                                        slice=slice(nyq, nyq + 1))
+        cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
+                     cuda_dict['ifft_plan'], scale=False)
+        y = cuda_dict['x'].get()
+        if new_len < old_len:
+            y = y[:new_len].copy()
+
+    # now let's trim it back to the correct size (if there was padding)
+    if to_remove > 0:
+        keep = np.ones(new_len, dtype=bool)
+        keep[:to_remove] = False
+        keep[-to_remove:] = False
+        y = np.compress(keep, y)
+
+    return y
+
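+# A short sketch of the CPU path of fft_resample(): with a flat filter and no
+# padding, downsampling keeps only the low-frequency half of the spectrum:
+#
+#     >>> x = np.random.randn(16)
+#     >>> y = fft_resample(x, np.ones(16), 8, 0, 0)  # doctest:+SKIP
+#     >>> len(y)  # doctest:+SKIP
+#     8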
+
+###############################################################################
+# Misc
+
+# this has to go in mne.cuda instead of mne.filter to avoid import errors
+def _smart_pad(x, n_pad):
+    """Pad vector x
+    """
+    # need to pad with zeros if len(x) <= n_pad
+    z_pad = np.zeros(max(n_pad - len(x) + 1, 0), dtype=x.dtype)
+    return np.r_[z_pad, 2 * x[0] - x[n_pad:0:-1], x,
+                 2 * x[-1] - x[-2:-n_pad - 2:-1], z_pad]
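+# As a sketch of what _smart_pad() produces: the padding extrapolates the
+# signal by reflecting it with odd symmetry about each endpoint, which avoids
+# edge discontinuities before FFT filtering:
+#
+#     >>> _smart_pad(np.array([1., 2., 3., 4.]), 2)  # doctest:+SKIP
+#     array([-1.,  0.,  1.,  2.,  3.,  4.,  5.,  6.])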
diff --git a/mne/data/__init__.py b/mne/data/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/data/icos.fif.gz b/mne/data/icos.fif.gz
new file mode 100644
index 0000000..99e526b
Binary files /dev/null and b/mne/data/icos.fif.gz differ
diff --git a/mne/data/mne_analyze.sel b/mne/data/mne_analyze.sel
new file mode 100644
index 0000000..b0e9034
--- /dev/null
+++ b/mne/data/mne_analyze.sel
@@ -0,0 +1,13 @@
+#
+#	All channels
+#
+Vertex:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 0631|MEG 0431|MEG 0711|MEG 0431|MEG 0741|MEG 1821|MEG 1041|MEG 1111|MEG 0721|MEG 1141|MEG 0731|MEG 2211
+Left-temporal:MEG 0223|MEG 0222|MEG 0212|MEG 0213|MEG 0133|MEG 0132|MEG 0112|MEG 0113|MEG 0233|MEG 0232|MEG 0243|MEG 0242|MEG 1512|MEG 1513|MEG 0143|MEG 0142|MEG 1623|MEG 1622|MEG 1613|MEG 1612|MEG 1523|MEG 1522|MEG 1543|MEG 1542|MEG 1533|MEG 1532|MEG 0221|MEG 0211|MEG 0131|MEG 0111|MEG 0231|MEG 0241|MEG 1511|MEG 0141|MEG 1621|MEG 1611|MEG 1521|MEG 1541|MEG 1531
+Right-temporal:MEG 1312|MEG 1313|MEG 1323|MEG 1322|MEG 1442|MEG 1443|MEG 1423|MEG 1422|MEG 1342|MEG 1343|MEG 1333|MEG 1332|MEG 2612|MEG 2613|MEG 1433|MEG 1432|MEG 2413|MEG 2412|MEG 2422|MEG 2423|MEG 2642|MEG 2643|MEG 2623|MEG 2622|MEG 2633|MEG 2632|MEG 1311|MEG 1321|MEG 1441|MEG 1421|MEG 1341|MEG 1331|MEG 2611|MEG 1431|MEG 2411|MEG 2421|MEG 2641|MEG 2621|MEG 2631
+Left-parietal:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0412|MEG 0413|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0442|MEG 0443|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1813|MEG 1812|MEG 1832|MEG 1833|MEG 1843|MEG 1842|MEG 1632|MEG 1633|MEG 2013|MEG 2012|MEG 0631|MEG 0421|MEG 0411|MEG 0711|MEG 0431|MEG 0441|MEG 0741|MEG 1821|MEG 1811|MEG 1831|MEG 1841|MEG 1631|MEG 2011
+Right-parietal:MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 1123|MEG 1122|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 1133|MEG 1132|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 2223|MEG 2222|MEG 2242|MEG 2243|MEG 2232|MEG 2233|MEG 2442|MEG 2443|MEG 2023|MEG 2022|MEG 1041|MEG 1111|MEG 1121|MEG 0721|MEG 1141|MEG 1131|MEG 0731|MEG 2211|MEG 2221|MEG 2241|MEG 2231|MEG 2441|MEG 2021
+Left-occipital:MEG 2042|MEG 2043|MEG 1913|MEG 1912|MEG 2113|MEG 2112|MEG 1922|MEG 1923|MEG 1942|MEG 1943|MEG 1642|MEG 1643|MEG 1933|MEG 1932|MEG 1733|MEG 1732|MEG 1723|MEG 1722|MEG 2143|MEG 2142|MEG 1742|MEG 1743|MEG 1712|MEG 1713|MEG 2041|MEG 1911|MEG 2111|MEG 1921|MEG 1941|MEG 1641|MEG 1931|MEG 1731|MEG 1721|MEG 2141|MEG 1741|MEG 1711
+Right-occipital:MEG 2032|MEG 2033|MEG 2313|MEG 2312|MEG 2342|MEG 2343|MEG 2322|MEG 2323|MEG 2433|MEG 2432|MEG 2122|MEG 2123|MEG 2333|MEG 2332|MEG 2513|MEG 2512|MEG 2523|MEG 2522|MEG 2133|MEG 2132|MEG 2542|MEG 2543|MEG 2532|MEG 2533|MEG 2031|MEG 2311|MEG 2341|MEG 2321|MEG 2431|MEG 2121|MEG 2331|MEG 2511|MEG 2521|MEG 2131|MEG 2541|MEG 2531
+Left-frontal:MEG 0522|MEG 0523|MEG 0512|MEG 0513|MEG 0312|MEG 0313|MEG 0342|MEG 0343|MEG 0122|MEG 0123|MEG 0822|MEG 0823|MEG 0533|MEG 0532|MEG 0543|MEG 0542|MEG 0322|MEG 0323|MEG 0612|MEG 0613|MEG 0333|MEG 0332|MEG 0622|MEG 0623|MEG 0643|MEG 0642|MEG 0521|MEG 0511|MEG 0311|MEG 0341|MEG 0121|MEG 0821|MEG 0531|MEG 0541|MEG 0321|MEG 0611|MEG 0331|MEG 0621|MEG 0641
+Right-frontal:MEG 0813|MEG 0812|MEG 0912|MEG 0913|MEG 0922|MEG 0923|MEG 1212|MEG 1213|MEG 1223|MEG 1222|MEG 1412|MEG 1413|MEG 0943|MEG 0942|MEG 0933|MEG 0932|MEG 1232|MEG 1233|MEG 1012|MEG 1013|MEG 1022|MEG 1023|MEG 1243|MEG 1242|MEG 1033|MEG 1032|MEG 0811|MEG 0911|MEG 0921|MEG 1211|MEG 1221|MEG 1411|MEG 0941|MEG 0931|MEG 1231|MEG 1011|MEG 1021|MEG 1241|MEG 1031
+
diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py
new file mode 100644
index 0000000..263dfb2
--- /dev/null
+++ b/mne/datasets/__init__.py
@@ -0,0 +1,5 @@
+"""Demo datasets
+"""
+
+from . import sample
+from . import megsim
diff --git a/mne/datasets/megsim/__init__.py b/mne/datasets/megsim/__init__.py
new file mode 100644
index 0000000..24babeb
--- /dev/null
+++ b/mne/datasets/megsim/__init__.py
@@ -0,0 +1,4 @@
+"""MEGSIM dataset
+"""
+
+from .megsim import data_path, load_data
diff --git a/mne/datasets/megsim/megsim.py b/mne/datasets/megsim/megsim.py
new file mode 100644
index 0000000..2fc0f79
--- /dev/null
+++ b/mne/datasets/megsim/megsim.py
@@ -0,0 +1,192 @@
+# Author: Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import os
+from os import path as op
+import zipfile
+from sys import stdout
+
+import logging
+logger = logging.getLogger('mne')
+
+from ...utils import _fetch_file, get_config, set_config, _url_to_local_path
+from .urls import url_match, valid_data_types, valid_data_formats, \
+                  valid_conditions
+
+
+def data_path(url, path=None, force_update=False, update_path=None):
+    """Get path to local copy of MEGSIM dataset URL
+
+    This is a low-level function useful for getting a local copy of a
+    remote MEGSIM dataset.
+
+    Parameters
+    ----------
+    url : str
+        The dataset to use.
+    path : None | str
+        Location where the MEGSIM data is stored.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_MEGSIM_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the MEGSIM dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MEGSIM"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+
+    Returns
+    -------
+    path : list of str
+        Local paths to the given data files. If URL was a .fif file, this
+        will be a list of length 1. If it was a .zip file, it may potentially
+        be many files.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> import os
+        >>> from mne.datasets import megsim
+        >>> url = 'http://cobre.mrn.org/megsim/simdata/neuromag/visual/M87174545_vis_sim1A_4mm_30na_neuro_rn.fif'
+        >>> megsim.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    And this would download the given MEGSIM data file to the 'datasets'
+    folder, and prompt the user to save the 'datasets' path to the mne-python
+    config, if it isn't there already.
+
+    The MEGSIM dataset is documented in the following publication:
+        Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
+        Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
+        (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
+        Realistic Simulated and Empirical Data. Neuroinform 10:141-158
+    """
+
+    if path is None:
+        # use an intelligent guess if it's not defined
+        def_path = op.abspath(op.join(op.dirname(__file__), '..', '..',
+                                      '..', 'examples'))
+        path = get_config('MNE_DATASETS_MEGSIM_PATH', None)
+        if path is None:
+            path = def_path
+            msg = ('No path entered, defaulting to download MEGSIM data to:\n'
+                   '    %s\nDo you want to continue ([y]/n)? '
+                   % path)
+            answer = raw_input(msg)
+            if answer.lower() == 'n':
+                raise ValueError('Please enter preferred path as '
+                                 'megsim.data_path(url, path)')
+
+    if not isinstance(path, basestring):
+        raise ValueError('path must be a string or None')
+
+    destination = _url_to_local_path(url, op.join(path, 'MEGSIM'))
+    destinations = [destination]
+
+    split = op.splitext(destination)
+    is_zip = split[1].lower() == '.zip'
+    # Fetch the file
+    do_unzip = False
+    if not op.isfile(destination) or force_update:
+        if op.isfile(destination):
+            os.remove(destination)
+        if not op.isdir(op.dirname(destination)):
+            os.makedirs(op.dirname(destination))
+        _fetch_file(url, destination, print_destination=False)
+        do_unzip = True
+
+    if is_zip:
+        z = zipfile.ZipFile(destination)
+        decomp_dir, name = op.split(destination)
+        files = z.namelist()
+        # decompress if necessary (if download was re-done)
+        if do_unzip:
+            stdout.write('Decompressing %d files from\n'
+                         '"%s" ...' % (len(files), name))
+            z.extractall(decomp_dir)
+            stdout.write(' [done]\n')
+        z.close()
+        destinations = [op.join(decomp_dir, f) for f in files]
+
+    # Offer to update the path
+    path = op.abspath(path)
+    if update_path is None:
+        if get_config('MNE_DATASETS_MEGSIM_PATH', '') != path:
+            update_path = True
+            msg = ('Do you want to set the path:\n    %s\nas the default '
+                   'MEGSIM dataset path in the mne-python config ([y]/n)? '
+                   % path)
+            answer = raw_input(msg)
+            if answer.lower() == 'n':
+                update_path = False
+        else:
+            update_path = False
+    if update_path is True:
+        set_config('MNE_DATASETS_MEGSIM_PATH', path)
+
+    return destinations
+
+
+def load_data(condition='visual', data_format='raw', data_type='experimental',
+              path=None, force_update=False, update_path=None):
+    """Get path to local copy of MEGSIM dataset type
+
+    Parameters
+    ----------
+    condition : str
+        The condition to use. Either 'visual', 'auditory', or 'somatosensory'.
+    data_format : str
+        The data format. Either 'raw', 'evoked', or 'single-trial'.
+    data_type : str
+        The type of data. Either 'experimental' or 'simulation'.
+    path : None | str
+        Location where the MEGSIM data is stored.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_MEGSIM_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the MEGSIM dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MEGSIM"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+
+    Returns
+    -------
+    paths : list
+        List of local data paths of the given type.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> import os
+        >>> from mne.datasets import megsim
+        >>> megsim.load_data('visual', 'raw', 'experimental', os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    And this would download the raw visual experimental MEGSIM dataset to the
+    'datasets' folder, and prompt the user to save the 'datasets' path to the
+    mne-python config, if it isn't there already.
+
+    The MEGSIM dataset is documented in the following publication:
+        Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
+        Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
+        (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
+        Realistic Simulated and Empirical Data. Neuroinform 10:141-158
+    """
+
+    if condition.lower() not in valid_conditions:
+        raise ValueError('Unknown condition "%s"' % condition)
+    if data_format not in valid_data_formats:
+        raise ValueError('Unknown data_format "%s"' % data_format)
+    if data_type not in valid_data_types:
+        raise ValueError('Unknown data_type "%s"' % data_type)
+    urls = url_match(condition, data_format, data_type)
+
+    data_paths = list()
+    for url in urls:
+        data_paths.extend(data_path(url, path, force_update, update_path))
+    return data_paths
diff --git a/mne/datasets/megsim/urls.py b/mne/datasets/megsim/urls.py
new file mode 100644
index 0000000..c3a23e4
--- /dev/null
+++ b/mne/datasets/megsim/urls.py
@@ -0,0 +1,160 @@
+# Author: Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import numpy as np
+
+valid_data_types = ['experimental', 'simulation']
+valid_data_formats = ['single-trial', 'evoked', 'raw']
+valid_conditions = ['visual', 'auditory', 'somatosensory']
+
+url_root = 'http://cobre.mrn.org/megsim'
+
+urls = ['/empdata/neuromag/visual/subject1_day1_vis_raw.fif',
+        '/empdata/neuromag/visual/subject1_day2_vis_raw.fif',
+        '/empdata/neuromag/visual/subject3_day1_vis_raw.fif',
+        '/empdata/neuromag/visual/subject3_day2_vis_raw.fif',
+        '/empdata/neuromag/aud/subject1_day1_aud_raw.fif',
+        '/empdata/neuromag/aud/subject1_day2_aud_raw.fif',
+        '/empdata/neuromag/aud/subject3_day1_aud_raw.fif',
+        '/empdata/neuromag/aud/subject3_day2_aud_raw.fif',
+        '/empdata/neuromag/somato/subject1_day1_median_raw.fif',
+        '/empdata/neuromag/somato/subject1_day2_median_raw.fif',
+        '/empdata/neuromag/somato/subject3_day1_median_raw.fif',
+        '/empdata/neuromag/somato/subject3_day2_median_raw.fif',
+
+        '/simdata/neuromag/visual/M87174545_vis_sim1A_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim1B_20mm_50na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim2_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim3A_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim3B_20mm_50na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim4_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim5_4mm_30na_neuro_rn.fif',
+
+        '/simdata_singleTrials/subject1_singleTrials_VisWorkingMem_fif.zip',
+        '/simdata_singleTrials/subject1_singleTrials_VisWorkingMem_withOsc_fif.zip',
+        '/simdata_singleTrials/4545_sim_oscOnly_v1_IPS_ILOG_30hzAdded.fif']
+
+data_formats = ['raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+
+                'single-trial',
+                'single-trial',
+                'single-trial',
+                ]
+subjects = ['subject_1',
+            'subject_1',
+            'subject_3',
+            'subject_3',
+            'subject_1',
+            'subject_1',
+            'subject_3',
+            'subject_3',
+            'subject_1',
+            'subject_1',
+            'subject_3',
+            'subject_3',
+
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+
+            'subject_1',
+            'subject_1',
+            'subject_1']
+
+data_types = ['experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+
+              'simulation',
+              'simulation',
+              'simulation']
+
+conditions = ['visual',
+              'visual',
+              'visual',
+              'visual',
+              'auditory',
+              'auditory',
+              'auditory',
+              'auditory',
+              'somatosensory',
+              'somatosensory',
+              'somatosensory',
+              'somatosensory',
+
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+
+              'visual',
+              'visual',
+              'visual']
+
+# turn them into arrays for ease of use
+urls = np.atleast_1d(urls)
+data_formats = np.atleast_1d(data_formats)
+subjects = np.atleast_1d(subjects)
+data_types = np.atleast_1d(data_types)
+conditions = np.atleast_1d(conditions)
+
+# Useful for testing
+#assert len(conditions) == len(data_types) == len(subjects) \
+#    == len(data_formats) == len(urls)
+
+def url_match(condition, data_format, data_type):
+    """Function to match MEGSIM data files"""
+    inds = np.logical_and(conditions == condition, data_formats == data_format)
+    inds = np.logical_and(inds, data_types == data_type)
+    good_urls = [url_root + g for g in urls[inds]]
+    if len(good_urls) == 0:
+        raise ValueError('No MEGSIM dataset found with condition="%s",\n'
+                         'data_format="%s", data_type="%s"'
+                         % (condition, data_format, data_type))
+    return good_urls
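+
+# A small usage sketch: the four raw experimental auditory recordings listed
+# above all match in a single call:
+#
+#     >>> len(url_match('auditory', 'raw', 'experimental'))
+#     4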
diff --git a/mne/datasets/sample/__init__.py b/mne/datasets/sample/__init__.py
new file mode 100644
index 0000000..c727c82
--- /dev/null
+++ b/mne/datasets/sample/__init__.py
@@ -0,0 +1,4 @@
+"""MNE sample dataset
+"""
+
+from .sample import data_path
diff --git a/mne/datasets/sample/sample.py b/mne/datasets/sample/sample.py
new file mode 100644
index 0000000..708dcec
--- /dev/null
+++ b/mne/datasets/sample/sample.py
@@ -0,0 +1,135 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import os
+import os.path as op
+import shutil
+from warnings import warn
+
+import logging
+logger = logging.getLogger('mne')
+
+from ... import __version__ as mne_version
+from ...utils import get_config, set_config, _fetch_file
+
+
+def _sample_version(path):
+    """Get the version of the Sample dataset"""
+    ver_fname = op.join(path, 'version.txt')
+    if op.exists(ver_fname):
+        with open(ver_fname, 'r') as fid:
+            version = fid.readline().strip()  # version is on first line
+    else:
+        # Sample dataset versioning was introduced after 0.3
+        version = '0.3'
+
+    return version
+
+
+def data_path(path=None, force_update=False, update_path=True):
+    """Get path to local copy of Sample dataset
+
+    Parameters
+    ----------
+    path : None | str
+        Location of where to look for the sample dataset.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_SAMPLE_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the sample dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MNE-sample-data"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the sample dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_SAMPLE_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    """
+    if path is None:
+        # use an intelligent guess if it's not defined
+        def_path = op.abspath(op.join(op.dirname(__file__), '..', '..',
+                                      '..', 'examples'))
+        path = get_config('MNE_DATASETS_SAMPLE_PATH', def_path)
+
+    if not isinstance(path, basestring):
+        raise ValueError('path must be a string or None')
+
+    archive_name = "MNE-sample-data-processed.tar.gz"
+    url = "ftp://surfer.nmr.mgh.harvard.edu/pub/data/" + archive_name
+    folder_name = "MNE-sample-data"
+    folder_path = op.join(path, folder_name)
+    rm_archive = False
+
+    martinos_path = '/cluster/fusion/sample_data/' + archive_name
+    neurospin_path = '/neurospin/tmp/gramfort/' + archive_name
+
+    if not op.exists(folder_path) or force_update:
+        logger.info('Sample data archive %s not found at:\n%s\n'
+                    'It will be downloaded and extracted at this location.'
+                    % (archive_name, folder_path))
+
+        if op.exists(martinos_path):
+            archive_name = martinos_path
+        elif op.exists(neurospin_path):
+            archive_name = neurospin_path
+        else:
+            archive_name = op.join(path, archive_name)
+            rm_archive = True
+            if op.exists(archive_name):
+                msg = ('Archive already exists at %r. Overwrite it '
+                       '(y/[n])? ' % archive_name)
+                answer = raw_input(msg)
+                if answer.lower() == 'y':
+                    os.remove(archive_name)
+                else:
+                    raise IOError('Archive file already exists at target '
+                                  'location %r.' % archive_name)
+
+            _fetch_file(url, archive_name, print_destination=False)
+
+        if op.exists(folder_path):
+            shutil.rmtree(folder_path)
+
+        import tarfile
+        logger.info('Decompressing the archive: ' + archive_name)
+        tarfile.open(archive_name, 'r:gz').extractall(path=path)
+        if rm_archive:
+            os.remove(archive_name)
+
+    path = op.abspath(path)
+    if update_path is None:
+        if get_config('MNE_DATASETS_SAMPLE_PATH', '') != path:
+            update_path = True
+            msg = ('Do you want to set the path:\n    %s\nas the default '
+                   'sample dataset path in the mne-python config [y]/n? '
+                   % path)
+            answer = raw_input(msg)
+            if answer.lower() == 'n':
+                update_path = False
+        else:
+            update_path = False
+    if update_path is True:
+        set_config('MNE_DATASETS_SAMPLE_PATH', path)
+
+    path = op.join(path, folder_name)
+
+    # compare the version of the Sample dataset and mne
+    sample_version = _sample_version(path)
+    try:
+        from distutils.version import LooseVersion
+    except ImportError:
+        warn('Could not determine sample dataset version; dataset could\n'
+             'be out of date. Please install the "distutils" package.')
+    else:
+        if LooseVersion(sample_version) < LooseVersion(mne_version):
+            warn('Sample dataset (version %s) is older than mne-python '
+                 '(version %s). If the examples fail, you may need to update '
+                 'the sample dataset by using force_update=True'
+                 % (sample_version, mne_version))
+
+    return path
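+
+# A typical call, as a sketch (the file layout follows the MNE sample
+# dataset):
+#
+#     >>> from mne.datasets import sample
+#     >>> data_path = sample.data_path()  # doctest:+SKIP
+#     >>> raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'  # doctest:+SKIP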
diff --git a/mne/dipole.py b/mne/dipole.py
new file mode 100644
index 0000000..244c865
--- /dev/null
+++ b/mne/dipole.py
@@ -0,0 +1,48 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+import numpy as np
+import logging
+logger = logging.getLogger('mne')
+
+from . import verbose
+
+
+@verbose
+def read_dip(fname, verbose=None):
+    """Read .dip file from Neuromag/xfit or MNE
+
+    Parameters
+    ----------
+    fname : str
+        The name of the .dip file.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    time : array, shape=(n_dipoles,)
+        The time instants at which each dipole was fitted.
+    pos : array, shape=(n_dipoles, 3)
+        The dipoles positions in meters
+    amplitude : array, shape=(n_dipoles,)
+        The amplitude of the dipoles in nAm
+    ori : array, shape=(n_dipoles, 3)
+        The dipolar moments. Amplitude of the moment is in nAm.
+    gof : array, shape=(n_dipoles,)
+        The goodness of fit
+    """
+    try:
+        data = np.loadtxt(fname, comments='%')
+    except Exception:
+        data = np.loadtxt(fname, comments='#')  # handle 2 types of comments...
+    if data.ndim == 1:
+        data = data[None, :]
+    logger.info("%d dipole(s) found" % len(data))
+    time = data[:, 0]
+    pos = 1e-3 * data[:, 2:5]  # put data in meters
+    amplitude = data[:, 5]
+    ori = data[:, 6:9]
+    gof = data[:, 9]
+    return time, pos, amplitude, ori, gof
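+
+# A brief usage sketch (the file name here is only illustrative):
+#
+#     >>> time, pos, amplitude, ori, gof = read_dip('set1.dip')  # doctest:+SKIP
+#     >>> # pos has shape (n_dipoles, 3) in meters; amplitude is in nAm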
diff --git a/mne/epochs.py b/mne/epochs.py
new file mode 100644
index 0000000..25b4343
--- /dev/null
+++ b/mne/epochs.py
@@ -0,0 +1,1515 @@
+"""Tools for working with epoched data"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+import warnings
+
+import numpy as np
+from copy import deepcopy
+
+import logging
+logger = logging.getLogger('mne')
+
+from .fiff.write import start_file, start_block, end_file, end_block, \
+                        write_int, write_float_matrix, write_float, \
+                        write_id, write_string
+from .fiff.meas_info import read_meas_info, write_meas_info
+from .fiff.open import fiff_open
+from .fiff.raw import _time_as_index, _index_as_time
+from .fiff.tree import dir_tree_find
+from .fiff.tag import read_tag
+from .fiff import Evoked, FIFF
+from .fiff.pick import pick_types, channel_indices_by_type, channel_type
+from .fiff.proj import setup_proj, ProjMixin
+from .fiff.evoked import aspect_rev
+from .baseline import rescale
+from .utils import check_random_state, _check_pandas_index_arguments, \
+                   _check_pandas_installed
+from .filter import resample, detrend
+from .event import _read_events_fif
+from . import verbose
+from .fixes import in1d
+from .viz import _mutable_defaults
+
+
+class Epochs(ProjMixin):
+    """List of Epochs
+
+    Parameters
+    ----------
+    raw : Raw object
+        An instance of Raw.
+    events : array, of shape [n_events, 3]
+        Returned by the read_events function.
+    event_id : int | dict | None
+        The id of the event to consider. If dict, the keys can later be
+        used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If None, all events will be used and a dict is
+        created with string integer names corresponding to the event id
+        integers.
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    name : string
+        Comment that describes the Evoked data created.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    picks : None (default) or array of int
+        Indices of channels to include (if None, all channels
+        are used).
+    preload : boolean
+        Load all epochs from disk when creating the object
+        or wait before accessing each epoch (more memory
+        efficient but can be slower).
+    reject : dict
+        Epoch rejection parameters based on peak to peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done.
+        Values are float. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+
+    flat : dict
+        Epoch rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If flat is None then no rejection is done.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. If proj is 'delayed' and reject is not
+        None the single epochs will be projected before the rejection
+        decision, but used in unprojected state if they are kept.
+        This way deciding which projection vectors are good can be postponed
+        to the evoked stage without resulting in lower epoch counts and
+        without producing results different from early SSP application
+        given comparable parameters. Note that in this case baselining,
+        detrending and temporal decimation will be postponed.
+        If proj is False no projections will be applied which is the
+        recommended value if SSPs are not used for cleaning the data.
+    decim : int
+        Factor by which to downsample the data from the raw file upon import.
+        Warning: This simply selects every nth sample, data is not filtered
+        here. If data is not properly filtered, aliasing artifacts may occur.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    detrend : int | None
+        If 0 or 1, the data channels (MEG and EEG) will be detrended when
+        loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
+        is no detrending. Note that detrending is performed before baseline
+        correction. If no DC offset is preferred (zeroth order detrending),
+        either turn off baseline correction, as this may introduce a DC
+        shift, or set baseline correction to use the entire time interval
+        (will yield equivalent results but be slower).
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    event_id : dict
+        Names of conditions corresponding to event_ids.
+    ch_names : list of string
+        List of channels' names.
+    drop_log : list of lists
+        This list (same length as events) contains the channel(s),
+        or the reasons (count equalization, not reaching minimum duration),
+        if any, that caused an event in the original event list to be dropped
+        by drop_bad_epochs(). Caveat: the drop log will only know about the
+        events passed to Epochs. If the events represent a selection, the
+        drop log can be misaligned with regard to other external logs (e.g.,
+        behavioral responses) that still refer to the complete list of events.
+    verbose : bool, str, int, or None
+        See above.
+
+    Notes
+    -----
+    For indexing and slicing:
+
+    epochs[idx] : Epochs
+        Return Epochs object with a subset of epochs (supports single
+        index and python-style slicing)
+
+    For subset selection using categorical labels:
+
+    epochs['name'] : Epochs
+        Return Epochs object with a subset of epochs corresponding to an
+        experimental condition as specified by 'name'.
+
+    epochs[['name_1', 'name_2', ... ]] : Epochs
+        Return Epochs object with a subset of epochs corresponding to multiple
+        experimental conditions as specified by 'name_1', 'name_2', ... .
+
+    See also
+    --------
+    mne.epochs.combine_event_ids
+    mne.Epochs.equalize_event_counts
+    """
+    @verbose
+    def __init__(self, raw, events, event_id, tmin, tmax, baseline=(None, 0),
+                 picks=None, name='Unknown', keep_comp=None, dest_comp=None,
+                 preload=False, reject=None, flat=None, proj=True,
+                 decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
+                 add_eeg_ref=True, verbose=None):
+        if raw is None:
+            return
+
+        self.raw = raw
+        self.verbose = raw.verbose if verbose is None else verbose
+        self.name = name
+        if isinstance(event_id, dict):
+            if not all([isinstance(v, int) for v in event_id.values()]):
+                raise ValueError('Event IDs must be of type integer')
+            if not all([isinstance(k, basestring) for k in event_id]):
+                raise ValueError('Event names must be of type str')
+            self.event_id = event_id
+        elif isinstance(event_id, int):
+            self.event_id = {str(event_id): event_id}
+        elif event_id is None:
+            self.event_id = dict((str(e), e) for e in np.unique(events[:, 2]))
+        else:
+            raise ValueError('event_id must be dict or int.')
+
+        # check reject_tmin and reject_tmax
+        if (reject_tmin is not None) and (reject_tmin < tmin):
+            raise ValueError("reject_tmin needs to be None or >= tmin")
+        if (reject_tmax is not None) and (reject_tmax > tmax):
+            raise ValueError("reject_tmax needs to be None or <= tmax")
+        if (reject_tmin is not None) and (reject_tmax is not None):
+            if reject_tmin >= reject_tmax:
+                raise ValueError('reject_tmin needs to be < reject_tmax')
+        if detrend not in [None, 0, 1]:
+            raise ValueError('detrend must be None, 0, or 1')
+
+        self.tmin = tmin
+        self.tmax = tmax
+        self.keep_comp = keep_comp
+        self.dest_comp = dest_comp
+        self.baseline = baseline
+        self.preload = preload
+        self.reject = reject
+        self.reject_tmin = reject_tmin
+        self.reject_tmax = reject_tmax
+        self.flat = flat
+
+        proj = proj or raw.proj  # proj is on when applied in Raw
+        if proj not in [True, 'delayed', False]:
+            raise ValueError(r"'proj' must either be 'True', 'False' or "
+                              "'delayed'")
+        self.proj = proj
+
+        self.decim = decim = int(decim)
+        self._bad_dropped = False
+        self.drop_log = None
+        self.detrend = detrend
+
+        # Handle measurement info
+        self.info = cp.deepcopy(raw.info)
+        # make sure projs are really copied.
+        self.info['projs'] = [cp.deepcopy(p) for p in self.info['projs']]
+        if picks is None:
+            picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                               ecg=True, eog=True, misc=True, ref_meg=True,
+                               exclude=[])
+        self.info['chs'] = [self.info['chs'][k] for k in picks]
+        self.info['ch_names'] = [self.info['ch_names'][k] for k in picks]
+        self.info['nchan'] = len(picks)
+        self.picks = picks
+
+        if len(picks) == 0:
+            raise ValueError("Picks cannot be empty.")
+
+        if self._check_delayed():
+            logger.info('Entering delayed SSP mode.')
+
+        activate = False if self._check_delayed() else self.proj
+        self._projector, self.info = setup_proj(self.info, add_eeg_ref,
+                                                activate=activate)
+
+        #   XXX : deprecate CTF compensator
+        if dest_comp is not None or keep_comp is not None:
+            raise ValueError('current_comp and keep_comp are deprecated.'
+                             ' Use the compensation parameter in Raw.')
+
+        #    Select the desired events
+        self.events = events
+        selected = in1d(events[:, 2], self.event_id.values())
+        self.events = events[selected]
+
+        n_events = len(self.events)
+
+        if n_events > 0:
+            logger.info('%d matching events found' % n_events)
+        else:
+            raise ValueError('No desired events found.')
+
+        # Handle times
+        assert tmin < tmax
+        sfreq = float(raw.info['sfreq'])
+        n_times_min = int(round(tmin * sfreq))
+        n_times_max = int(round(tmax * sfreq))
+        times = np.arange(n_times_min, n_times_max + 1, dtype=np.float) / sfreq
+        self.times = self._raw_times = times
+        self._epoch_stop = ep_len = len(self.times)
+        if decim > 1:
+            new_sfreq = sfreq / decim
+            lowpass = self.info['lowpass']
+            if new_sfreq < 2.5 * lowpass:  # Nyquist says 2 but 2.5 is safer
+                msg = ("The raw file indicates a low-pass frequency of %g Hz. "
+                       "The decim=%i parameter will result in a sampling "
+                       "frequency of %g Hz, which can cause aliasing "
+                       "artifacts." % (lowpass, decim, new_sfreq))
+                warnings.warn(msg)
+
+            i_start = n_times_min % decim
+            self._decim_idx = slice(i_start, ep_len, decim)
+            self.times = self.times[self._decim_idx]
+            self.info['sfreq'] = new_sfreq
+
+        # setup epoch rejection
+        self._reject_setup()
+
+        if self.preload:
+            self._data = self._get_data_from_disk()
+            self.raw = None
+        else:
+            self._data = None
+
+    def drop_picks(self, bad_picks):
+        """Drop some picks
+
+        Allows to discard some channels.
+        """
+        self.picks = list(self.picks)
+        idx = [k for k, p in enumerate(self.picks) if p not in bad_picks]
+        self.picks = [self.picks[k] for k in idx]
+
+        # XXX : could maybe be factorized
+        self.info['chs'] = [self.info['chs'][k] for k in idx]
+        self.info['ch_names'] = [self.info['ch_names'][k] for k in idx]
+        self.info['nchan'] = len(idx)
+
+        if self._projector is not None:
+            self._projector = self._projector[idx][:, idx]
+
+        if self.preload:
+            self._data = self._data[:, idx, :]
+
+    def drop_bad_epochs(self):
+        """Drop bad epochs without retaining the epochs data.
+
+        Should be used before slicing operations.
+
+        .. Warning:: Operation is slow since all epochs have to be read from
+            disk. To avoid reading epochs from disk multiple times, initialize
+            Epochs object with preload=True.
+
+        """
+        self._get_data_from_disk(out=False)
+
+    def _check_delayed(self):
+        """ Aux method
+        """
+        is_delayed = False
+        if self.proj == 'delayed':
+            if self.reject is None:
+                raise RuntimeError('The delayed SSP mode was requested '
+                                   'but no rejection parameters are present. '
+                                   'Please add rejection parameters before '
+                                   'using this option.')
+            is_delayed = True
+        return is_delayed
+
+    @verbose
+    def drop_epochs(self, indices, verbose=None):
+        """Drop epochs based on indices or boolean mask
+
+        Parameters
+        ----------
+        indices : array of ints or bools
+            Set epochs to remove by specifying indices to remove or a boolean
+            mask to apply (where True values get removed). Events are
+            correspondingly modified.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to raw.verbose.
+        """
+        indices = np.asarray(indices)
+        if indices.dtype == bool:
+            indices = np.where(indices)[0]
+        self.events = np.delete(self.events, indices, axis=0)
+        if self.preload:
+            self._data = np.delete(self._data, indices, axis=0)
+        count = len(indices)
+        logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
+
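+    # Both forms work, as a sketch: explicit indices, or a boolean mask with
+    # the same length as the events, e.g.:
+    #
+    #     epochs.drop_epochs([0, 1])  # drop the first two epochs
+    #     epochs.drop_epochs(np.zeros(len(epochs.events), dtype=bool))  # no-op
+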
+    @verbose
+    def _get_epoch_from_disk(self, idx, proj, verbose=None):
+        """Load one epoch from disk"""
+        if self.raw is None:
+            # This should never happen, as raw=None only if preload=True
+            raise ValueError('An error has occurred, no valid raw file found.'
+                             ' Please report this to the mne-python '
+                             'developers.')
+        sfreq = self.raw.info['sfreq']
+
+        if self.events.ndim == 1:
+            # single event
+            event_samp = self.events[0]
+        else:
+            event_samp = self.events[idx, 0]
+
+        # Read a data segment
+        first_samp = self.raw.first_samp
+        start = int(round(event_samp + self.tmin * sfreq)) - first_samp
+        stop = start + self._epoch_stop
+        if start < 0:
+            return None, None
+
+        epoch_raw, _ = self.raw[self.picks, start:stop]
+
+        # setup list of epochs to handle delayed SSP
+        epochs = []
+        # if projection is requested, the first epoch is projected here
+        if self._projector is not None and proj is True:
+            epochs += [np.dot(self._projector, epoch_raw)]
+        else:
+            epochs += [epoch_raw]
+
+        # if the proj passed in is True but self.proj is not, we are in
+        # delayed SSP mode, so append another, unprojected epoch
+        if self.proj != proj:
+            epochs += [epoch_raw.copy()]
+
+        # only preprocess the first candidate; to make delayed SSP work we
+        # postpone the preprocessing, since projection comes first
+        epochs[0] = self._preprocess(epochs[0], verbose)
+
+        # return a second None if nothing is projected
+        if len(epochs) == 1:
+            epochs += [None]
+
+        return epochs
+
+    @verbose
+    def _preprocess(self, epoch, verbose=None):
+        """ Aux Function
+        """
+        if self.detrend is not None:
+            picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                               eog=False, ecg=False, emg=False, exclude=[])
+            epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
+        # Baseline correct
+        epoch = rescale(epoch, self._raw_times, self.baseline, 'mean',
+                        copy=False, verbose=verbose)
+        # Decimate
+        if self.decim > 1:
+            epoch = epoch[:, self._decim_idx]
+        return epoch
+
+    @verbose
+    def _get_data_from_disk(self, out=True, verbose=None):
+        """Load all data from disk
+
+        Parameters
+        ----------
+        out : bool
+            Return the data. Setting this to False is used to reject bad
+            epochs without caching all the data, which saves memory.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+        """
+        n_events = len(self.events)
+        data = np.array([])
+        if self._bad_dropped:
+            proj = False if self._check_delayed() else self.proj
+            if not out:
+                return
+            for ii in xrange(n_events):
+                # faster to pre-allocate memory here
+                epoch, epoch_raw = self._get_epoch_from_disk(ii, proj=proj)
+                if ii == 0:
+                    data = np.empty((n_events, epoch.shape[0],
+                                     epoch.shape[1]), dtype=epoch.dtype)
+                if self._check_delayed():
+                    epoch = epoch_raw
+                data[ii] = epoch
+        else:
+            proj = True if self._check_delayed() else self.proj
+            good_events = []
+            drop_log = [[] for _ in range(n_events)]
+            n_out = 0
+            for idx in xrange(n_events):
+                epoch, epoch_raw = self._get_epoch_from_disk(idx, proj=proj)
+                is_good, offenders = self._is_good_epoch(epoch)
+                if is_good:
+                    good_events.append(idx)
+                    if self._check_delayed():
+                        epoch = epoch_raw
+                    if out:
+                        # faster to pre-allocate, then trim as necessary
+                        if n_out == 0:
+                            data = np.empty((n_events, epoch.shape[0],
+                                             epoch.shape[1]),
+                                            dtype=epoch.dtype, order='C')
+                        data[n_out] = epoch
+                        n_out += 1
+                else:
+                    drop_log[idx] = offenders
+
+            self.drop_log = drop_log
+            self.events = np.atleast_2d(self.events[good_events])
+            self._bad_dropped = True
+            logger.info("%d bad epochs dropped"
+                        % (n_events - len(good_events)))
+            if not out:
+                return
+            # just take the good events
+            assert len(good_events) == n_out
+            if n_out > 0:
+                # slicing won't free the space, so we resize
+                # we have ensured the C-contiguity of the array in allocation
+                # so this operation will be safe unless np is very broken
+                data.resize((n_out,) + data.shape[1:], refcheck=False)
+        return data
+
+    @verbose
+    def _is_good_epoch(self, data, verbose=None):
+        """Determine if epoch is good"""
+        if data is None:
+            return False, ['NO_DATA']
+        n_times = len(self.times)
+        if data.shape[1] < n_times:
+            # epoch is too short, i.e., at the end of the data
+            return False, ['TOO_SHORT']
+        if self.reject is None and self.flat is None:
+            return True, None
+        else:
+            if self._reject_time is not None:
+                data = data[:, self._reject_time]
+
+            return _is_good(data, self.ch_names, self._channel_type_idx,
+                            self.reject, self.flat, full_report=True,
+                            ignore_chs=self.info['bads'])
+
+    def get_data(self):
+        """Get all epochs as a 3D array
+
+        Returns
+        -------
+        data : array of shape [n_epochs, n_channels, n_times]
+            The epochs data
+        """
+        if self.preload:
+            data_ = self._data
+        else:
+            data_ = self._get_data_from_disk()
+        if self._check_delayed():
+            data = np.zeros_like(data_)
+            for ii, e in enumerate(data_):
+                data[ii] = self._preprocess(e.copy(), self.verbose)
+        else:
+            data = data_
+
+        return data
+
+    def _reject_setup(self):
+        """Sets self._reject_time and self._channel_type_idx (called from
+        __init__)
+        """
+        if self.reject is None and self.flat is None:
+            return
+
+        idx = channel_indices_by_type(self.info)
+        for key in idx.keys():
+            if (self.reject is not None and key in self.reject) \
+                    or (self.flat is not None and key in self.flat):
+                if len(idx[key]) == 0:
+                    raise ValueError("No %s channel found. Cannot reject based"
+                                     " on %s." % (key.upper(), key.upper()))
+
+        self._channel_type_idx = idx
+
+        if (self.reject_tmin is None) and (self.reject_tmax is None):
+            self._reject_time = None
+        else:
+            if self.reject_tmin is None:
+                reject_imin = None
+            else:
+                idxs = np.nonzero(self.times >= self.reject_tmin)[0]
+                reject_imin = idxs[0]
+            if self.reject_tmax is None:
+                reject_imax = None
+            else:
+                idxs = np.nonzero(self.times <= self.reject_tmax)[0]
+                reject_imax = idxs[-1]
+
+            self._reject_time = slice(reject_imin, reject_imax)
+
+    def __len__(self):
+        """Number of epochs.
+        """
+        if not self._bad_dropped:
+            err = ("Since bad epochs have not been dropped, the length of the "
+                   "Epochs is not known. Load the Epochs with preload=True, "
+                   "or call Epochs.drop_bad_epochs(). To find the number of "
+                   "events in the Epochs, use len(Epochs.events).")
+            raise RuntimeError(err)
+        return len(self.events)
+
+    def __iter__(self):
+        """To make iteration over epochs easy.
+        """
+        self._current = 0
+        return self
+
+    def next(self):
+        """To make iteration over epochs easy.
+        """
+        if self.preload:
+            if self._current >= len(self._data):
+                raise StopIteration
+            epoch = self._data[self._current]
+            if self._check_delayed():
+                epoch = self._preprocess(epoch.copy())
+            self._current += 1
+        else:
+            proj = True if self._check_delayed() else self.proj
+            is_good = False
+            while not is_good:
+                if self._current >= len(self.events):
+                    raise StopIteration
+                epoch, epoch_raw = self._get_epoch_from_disk(self._current,
+                                                             proj=proj)
+                self._current += 1
+                is_good, _ = self._is_good_epoch(epoch)
+            # If delayed-ssp mode, pass 'virgin' data after rejection decision.
+            if self._check_delayed():
+                epoch = self._preprocess(epoch_raw)
+
+        return epoch
+
+    def __repr__(self):
+        """ Build string representation
+        """
+        if not self._bad_dropped:
+            s = 'n_events : %s (good & bad)' % len(self.events)
+        else:
+            s = 'n_events : %s (all good)' % len(self.events)
+        s += ', tmin : %s (s)' % self.tmin
+        s += ', tmax : %s (s)' % self.tmax
+        s += ', baseline : %s' % str(self.baseline)
+        if len(self.event_id) > 1:
+            counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
+                      for k, v in self.event_id.items()]
+            s += ',\n %s' % ', '.join(counts)
+
+        return '<Epochs  |  %s>' % s
+
+    def _key_match(self, key):
+        """Helper function for event dict use"""
+        if key not in self.event_id:
+            raise KeyError('Event "%s" is not in Epochs.' % key)
+        return self.events[:, 2] == self.event_id[key]
+
+    def __getitem__(self, key):
+        """Return an Epochs object with a subset of epochs
+        """
+
+        data = self._data
+        del self._data
+        epochs = self.copy()
+        self._data, epochs._data = data, data
+
+        if isinstance(key, basestring):
+            key = [key]
+
+        if isinstance(key, list) and isinstance(key[0], basestring):
+            key_match = np.any(np.atleast_2d([epochs._key_match(k)
+                                              for k in key]), axis=0)
+            select = key_match
+            epochs.name = ('-'.join(key) if epochs.name == 'Unknown'
+                           else 'epochs_%s' % '-'.join(key))
+        else:
+            key_match = key
+            select = key if isinstance(key, slice) else np.atleast_1d(key)
+            if not epochs._bad_dropped:
+                # Only matters if preload is not true, since bad epochs are
+                # dropped on preload; doesn't matter for key lookup, either
+                warnings.warn("Bad epochs have not been dropped, indexing will"
+                              " be inaccurate. Use drop_bad_epochs() or"
+                              " preload=True")
+
+        epochs.events = np.atleast_2d(epochs.events[key_match])
+        if epochs.preload:
+            epochs._data = epochs._data[select]
+
+        return epochs
+
+    def average(self, picks=None):
+        """Compute average of epochs
+
+        Parameters
+        ----------
+
+        picks : None | array of int
+            If None only MEG and EEG channels are kept
+            otherwise the channels indices in picks are kept.
+
+        Returns
+        -------
+        evoked : Evoked instance
+            The averaged epochs
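+
+        Examples
+        --------
+        A minimal sketch (``epochs`` is assumed to exist):
+
+            >>> evoked = epochs.average() # doctest: +SKIP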
+        """
+
+        return self._compute_mean_or_stderr(picks, 'ave')
+
+    def standard_error(self, picks=None):
+        """Compute standard error over epochs
+
+        Parameters
+        ----------
+        picks : None | array of int
+            If None only MEG and EEG channels are kept
+            otherwise the channels indices in picks are kept.
+
+        Returns
+        -------
+        evoked : Evoked instance
+            The standard error over epochs
+        """
+        return self._compute_mean_or_stderr(picks, 'stderr')
+
+    def _compute_mean_or_stderr(self, picks, mode='ave'):
+        """Compute the mean or std over epochs and return Evoked"""
+
+        _do_std = (mode == 'stderr')
+        evoked = Evoked(None)
+        evoked.info = cp.deepcopy(self.info)
+        # make sure projs are really copied.
+        evoked.info['projs'] = [cp.deepcopy(p) for p in self.info['projs']]
+        n_channels = len(self.ch_names)
+        n_times = len(self.times)
+        if self.preload:
+            n_events = len(self.events)
+            if not _do_std:
+                data = np.mean(self._data, axis=0)
+            else:
+                data = np.std(self._data, axis=0)
+            assert len(self.events) == len(self._data)
+        else:
+            data = np.zeros((n_channels, n_times))
+            n_events = 0
+            for e in self:
+                data += e
+                n_events += 1
+            data /= n_events
+            # convert to stderr if requested, could do in one pass but do in
+            # two (slower) in case there are large numbers
+            if _do_std:
+                data_mean = cp.copy(data)
+                data.fill(0.)
+                for e in self:
+                    data += (e - data_mean) ** 2
+                data = np.sqrt(data / n_events)
+
+        evoked.data = data
+        evoked.times = self.times.copy()
+        evoked.comment = self.name
+        evoked.nave = n_events
+        evoked.first = int(self.times[0] * self.info['sfreq'])
+        evoked.last = evoked.first + len(self.times) - 1
+        if not _do_std:
+            evoked._aspect_kind = FIFF.FIFFV_ASPECT_AVERAGE
+        else:
+            evoked._aspect_kind = FIFF.FIFFV_ASPECT_STD_ERR
+            evoked.data /= np.sqrt(evoked.nave)
+        evoked.kind = aspect_rev.get(str(evoked._aspect_kind), 'Unknown')
+
+        # drop EOG, ECG and STIM channels, keeping only data channels
+        if picks is None:
+            picks = pick_types(evoked.info, meg=True, eeg=True,
+                               stim=False, eog=False, ecg=False,
+                               emg=False, ref_meg=True, exclude=[])
+            if len(picks) == 0:
+                raise ValueError('No data channel found when averaging.')
+
+        picks = np.sort(picks)  # make sure channel order does not change
+        evoked.info['chs'] = [evoked.info['chs'][k] for k in picks]
+        evoked.info['ch_names'] = [evoked.info['ch_names'][k]
+                                   for k in picks]
+        evoked.info['nchan'] = len(picks)
+        evoked.data = evoked.data[picks]
+        # otherwise the apply_proj will be confused
+        evoked.proj = True if self.proj is True else None
+        evoked.verbose = self.verbose
+
+        return evoked
+
+    def crop(self, tmin=None, tmax=None, copy=False):
+        """Crops a time interval from epochs object.
+
+        Parameters
+        ----------
+        tmin : float
+            Start time of selection in seconds.
+        tmax : float
+            End time of selection in seconds.
+        copy : bool
+            If False epochs is cropped in place.
+
+        Returns
+        -------
+        epochs : Epochs instance
+            The cropped epochs.
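+
+        Examples
+        --------
+        Keep only the 0 to 300 ms window (a sketch; requires
+        preload=True):
+
+            >>> epochs.crop(tmin=0, tmax=0.3) # doctest: +SKIP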
+        """
+        if not self.preload:
+            raise RuntimeError('Modifying data of epochs is only supported '
+                               'when preloading is used. Use preload=True '
+                               'in the constructor.')
+
+        if tmin is None:
+            tmin = self.tmin
+        elif tmin < self.tmin:
+            warnings.warn("tmin is not in epochs' time interval."
+                          "tmin is set to epochs.tmin")
+            tmin = self.tmin
+
+        if tmax is None:
+            tmax = self.tmax
+        elif tmax > self.tmax:
+            warnings.warn("tmax is not in epochs' time interval."
+                          "tmax is set to epochs.tmax")
+            tmax = self.tmax
+
+        tmask = (self.times >= tmin) & (self.times <= tmax)
+        tidx = np.where(tmask)[0]
+
+        this_epochs = self if not copy else self.copy()
+        this_epochs.tmin = this_epochs.times[tidx[0]]
+        this_epochs.tmax = this_epochs.times[tidx[-1]]
+        this_epochs.times = this_epochs.times[tmask]
+        this_epochs._data = this_epochs._data[:, :, tmask]
+        return this_epochs
+
+    @verbose
+    def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
+                 verbose=None):
+        """Resample preloaded data
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use
+        npad : int
+            Amount to pad the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        For some data, it may be more accurate to use npad=0 to reduce
+        artifacts. This is dataset dependent -- check your data!
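+
+        Examples
+        --------
+        Downsample preloaded epochs to 100 Hz (a sketch):
+
+            >>> epochs.resample(100) # doctest: +SKIP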
+        """
+        if self.preload:
+            o_sfreq = self.info['sfreq']
+            self._data = resample(self._data, sfreq, o_sfreq, npad,
+                                  n_jobs=n_jobs)
+            # adjust indirectly affected variables
+            self.info['sfreq'] = sfreq
+            self.times = (np.arange(self._data.shape[2], dtype=np.float)
+                          / sfreq + self.times[0])
+        else:
+            raise RuntimeError('Can only resample preloaded data')
+
+    def copy(self):
+        """Return copy of Epochs instance"""
+        raw = self.raw
+        del self.raw
+        new = deepcopy(self)
+        self.raw = raw
+        new.raw = raw
+
+        return new
+
+    def save(self, fname):
+        """Save epochs in a fif file
+
+        Parameters
+        ----------
+        fname : str
+            The name of the file.
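+
+        Examples
+        --------
+        A sketch; the file name is hypothetical, with '-epo.fif' as the
+        conventional suffix for epochs files:
+
+            >>> epochs.save('sample-epo.fif') # doctest: +SKIP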
+        """
+        # Create the file and save the essentials
+        fid = start_file(fname)
+
+        start_block(fid, FIFF.FIFFB_MEAS)
+        write_id(fid, FIFF.FIFF_BLOCK_ID)
+        if self.info['meas_id'] is not None:
+            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, self.info['meas_id'])
+
+        # Write measurement info
+        write_meas_info(fid, self.info)
+
+        # One or more evoked data sets
+        start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+        start_block(fid, FIFF.FIFFB_EPOCHS)
+
+        # write events out after getting data to ensure bad events are dropped
+        data = self.get_data()
+        start_block(fid, FIFF.FIFFB_MNE_EVENTS)
+        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, self.events.T)
+        mapping_ = ';'.join([k + ':' + str(v) for k, v in
+                             self.event_id.items()])
+        write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
+        end_block(fid, FIFF.FIFFB_MNE_EVENTS)
+
+        # First and last sample
+        first = int(self.times[0] * self.info['sfreq'])
+        last = first + len(self.times) - 1
+        write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
+        write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
+
+        # save baseline
+        if self.baseline is not None:
+            bmin, bmax = self.baseline
+            bmin = self.times[0] if bmin is None else bmin
+            bmax = self.times[-1] if bmax is None else bmax
+            write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
+            write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
+
+        # The epochs data itself
+        decal = np.empty(self.info['nchan'])
+        for k in range(self.info['nchan']):
+            decal[k] = 1.0 / (self.info['chs'][k]['cal']
+                              * self.info['chs'][k].get('scale', 1.0))
+
+        data *= decal[np.newaxis, :, np.newaxis]
+
+        write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
+
+        # undo modifications to data
+        data /= decal[np.newaxis, :, np.newaxis]
+        end_block(fid, FIFF.FIFFB_EPOCHS)
+
+        end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+        end_block(fid, FIFF.FIFFB_MEAS)
+        end_file(fid)
+
+    def as_data_frame(self, picks=None, index=None, scale_time=1e3,
+                      scalings=None, copy=True):
+        """Get the epochs as Pandas DataFrame
+
+        Export epochs data in tabular structure with MEG channels as columns
+        and three additional info columns 'epoch', 'condition', and 'time'.
+        The format matches a long table format commonly used to represent
+        repeated measures in within-subject designs.
+
+        Parameters
+        ----------
+        picks : None | array of int
+            If None only MEG and EEG channels are kept
+            otherwise the channels indices in picks are kept.
+        index : tuple of str | None
+            Column to be used as index for the data. Valid string options
+            are 'epoch', 'time' and 'condition'. If None, all three info
+            columns will be included in the table as categorical data.
+        scale_time : float
+            Scaling to be applied to time units.
+        scalings : dict | None
+            Scaling to be applied to the channels picked. If None, defaults to
+            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
+        copy : bool
+            If true, data will be copied. Else data may be modified in place.
+
+        Returns
+        -------
+        df : instance of pandas.core.DataFrame
+            Epochs exported into tabular data structure.
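+
+        Examples
+        --------
+        Export all channels to a long-format table (a sketch; requires
+        the optional pandas package):
+
+            >>> df = epochs.as_data_frame() # doctest: +SKIP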
+        """
+
+        pd = _check_pandas_installed()
+
+        default_index = ['condition', 'epoch', 'time']
+        if index is not None:
+            _check_pandas_index_arguments(index, default_index)
+        else:
+            index = default_index
+
+        if picks is None:
+            picks = range(self.info['nchan'])
+        else:
+            if not in1d(picks, np.arange(self.info['nchan'])).all():
+                raise ValueError('At least one picked channel is not present '
+                                 'in this epochs instance.')
+
+        data = self.get_data()[:, picks, :]
+        shape = data.shape
+        data = np.hstack(data).T
+        if copy:
+            data = data.copy()
+
+        types = [channel_type(self.info, idx) for idx in picks]
+        n_channel_types = 0
+        ch_types_used = []
+
+        scalings = _mutable_defaults(('scalings', scalings))[0]
+        for t in scalings.keys():
+            if t in types:
+                n_channel_types += 1
+                ch_types_used.append(t)
+
+        for t in ch_types_used:
+            scaling = scalings[t]
+            idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+            if len(idx) > 0:
+                data[:, idx] *= scaling
+
+        id_swapped = dict((v, k) for k, v in self.event_id.items())
+        names = [id_swapped[k] for k in self.events[:, 2]]
+
+        mindex = list()
+        mindex.append(('condition', np.repeat(names, shape[2])))
+        mindex.append(('time', np.tile(self.times, shape[0]) *
+                                scale_time))
+        mindex.append(('epoch', np.repeat(np.arange(shape[0]),
+                                shape[2])))
+
+        assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
+        col_names = [self.ch_names[k] for k in picks]
+
+        df = pd.DataFrame(data, columns=col_names)
+        [df.insert(i, k, v) for i, (k, v) in enumerate(mindex)]
+        if index is not None:
+            with warnings.catch_warnings(True):
+                df.set_index(index, inplace=True)
+            if 'time' in df.index.names and hasattr(df.index, 'levels'):
+                df.index.levels[1] = df.index.levels[1].astype(int)
+
+        return df
+
+    def to_nitime(self, picks=None, epochs_idx=None, collapse=False,
+                  copy=True, first_samp=0):
+        """ Export epochs as nitime TimeSeries
+
+        Parameters
+        ----------
+        picks : array-like | None
+            Indices for exporting subsets of the epochs channels. If None
+            all good channels will be used.
+        epochs_idx : slice | array-like | None
+            Epochs index for single or selective epochs exports. If None, all
+            epochs will be used.
+        collapse : boolean
+            If True, the epochs and time slices will be collapsed into a
+            2D array. This may be required by some nitime functions.
+        copy : boolean
+            If True, a copy of the epochs data is exported.
+        first_samp : int
+            Number of samples to offset the times by. Use raw.first_samp to
+            have the time returned relative to the session onset, or zero
+            (default) for time relative to the recording onset.
+
+        Returns
+        -------
+        epochs_ts : instance of nitime.TimeSeries
+            The Epochs as nitime TimeSeries object.
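+
+        Examples
+        --------
+        A sketch (requires the optional nitime package):
+
+            >>> epochs_ts = epochs.to_nitime(collapse=True) # doctest: +SKIP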
+        """
+        try:
+            from nitime import TimeSeries  # to avoid strong dependency
+        except ImportError:
+            raise Exception('the nitime package is missing')
+
+        if picks is None:
+            picks = pick_types(self.info, include=self.ch_names,
+                               exclude='bads')
+        if epochs_idx is None:
+            epochs_idx = slice(len(self.events))
+
+        data = self.get_data()[epochs_idx, picks]
+
+        if copy is True:
+            data = data.copy()
+
+        if collapse is True:
+            data = np.hstack(data).copy()
+
+        offset = _time_as_index(abs(self.tmin), self.info['sfreq'],
+                                first_samp, True)
+        t0 = _index_as_time(self.events[0, 0] - offset, self.info['sfreq'],
+                            first_samp, True)[0]
+        epochs_ts = TimeSeries(data, sampling_rate=self.info['sfreq'], t0=t0)
+        epochs_ts.ch_names = np.array(self.ch_names)[picks].tolist()
+
+        return epochs_ts
+
+    def equalize_event_counts(self, event_ids, method='mintime', copy=True):
+        """Equalize the number of trials in each condition
+
+        It tries to make the remaining epochs occur as close as possible in
+        time. This method works based on the idea that if there happened to be
+        some time-varying (like on the scale of minutes) noise characteristics
+        during a recording, they could be compensated for (to some extent) in
+        the equalization process. This method thus seeks to reduce any of
+        those effects by minimizing the differences in the times of the events
+        in the two sets of epochs. For example, if one had event times
+        [1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
+        it would remove the events at times [1, 2] in the first epochs and
+        not the ones at times [120, 121].
+
+        Parameters
+        ----------
+        event_ids : list
+            The event types to equalize. Each entry in the list can either be
+            a str (single event) or a list of str. In the case where one of
+            the entries is a list of str, event_ids in that list will be
+            grouped together before equalizing trial counts across conditions.
+        method : str
+            If 'truncate', events will be truncated from the end of each event
+            list. If 'mintime', timing differences between each event list will
+            be minimized.
+        copy : bool
+            If True, a copy of epochs will be returned. Otherwise, the
+            function will operate in-place.
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The modified Epochs instance.
+        indices : array of int
+            Indices from the original events list that were dropped.
+
+        Notes
+        -----
+        For example (if epochs.event_id was {'Left': 1, 'Right': 2,
+        'Nonspatial': 3}):
+
+            epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
+
+        would equalize the number of trials in the 'Nonspatial' condition with
+        the total number of trials in the 'Left' and 'Right' conditions.
+        """
+        if copy is True:
+            epochs = self.copy()
+        else:
+            epochs = self
+        if len(event_ids) == 0:
+            raise ValueError('event_ids must have at least one element')
+        if not epochs._bad_dropped:
+            epochs.drop_bad_epochs()
+        # figure out how to equalize
+        eq_inds = list()
+        for eq in event_ids:
+            eq = np.atleast_1d(eq)
+            # eq is now a list of types
+            key_match = np.zeros(epochs.events.shape[0])
+            for key in eq:
+                key_match = np.logical_or(key_match, epochs._key_match(key))
+            eq_inds.append(np.where(key_match)[0])
+
+        event_times = [epochs.events[eq, 0] for eq in eq_inds]
+        indices = _get_drop_indices(event_times, method)
+        # need to re-index indices
+        indices = np.concatenate([eq[inds]
+                                  for eq, inds in zip(eq_inds, indices)])
+        epochs = _check_add_drop_log(epochs, indices)
+        epochs.drop_epochs(indices)
+        # actually remove the indices
+        return epochs, indices
+
+    @property
+    def ch_names(self):
+        return self.info['ch_names']
+
+
+def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
+    """Collapse event_ids from an epochs instance into a new event_id
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs to operate on.
+    old_event_ids : str, or list
+        Conditions to collapse together.
+    new_event_id : dict, or int
+        A one-element dict (or a single integer) for the new
+        condition. Note that for safety, this cannot be any
+        existing id (in epochs.event_id.values()).
+    copy : bool
+        If True, a copy of epochs will be returned. Otherwise, the
+        function will operate in-place.
+
+    Notes
+    -----
+    For example (if epochs.event_id was {'Left': 1, 'Right': 2}):
+
+        combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
+
+    would create a 'Directional' entry in epochs.event_id replacing
+    'Left' and 'Right' (combining their trials).
+    """
+    if copy:
+        epochs = epochs.copy()
+    old_event_ids = np.asanyarray(old_event_ids)
+    if isinstance(new_event_id, int):
+        new_event_id = {str(new_event_id): new_event_id}
+    else:
+        if not isinstance(new_event_id, dict):
+            raise ValueError('new_event_id must be a dict or int')
+        if not len(new_event_id.keys()) == 1:
+            raise ValueError('new_event_id dict must have one entry')
+    new_event_num = new_event_id.values()[0]
+    if not isinstance(new_event_num, int):
+        raise ValueError('new_event_id value must be an integer')
+    if new_event_num in epochs.event_id.values():
+        raise ValueError('new_event_id value must not already exist')
+    # could use .pop() here, but if a latter one doesn't exist, we're
+    # in trouble, so run them all here and pop() later
+    old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
+    # find the ones to replace
+    inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
+                  old_event_nums[np.newaxis, :], axis=1)
+    # replace the event numbers in the events list
+    epochs.events[inds, 2] = new_event_num
+    # delete old entries
+    [epochs.event_id.pop(key) for key in old_event_ids]
+    # add the new entry
+    epochs.event_id.update(new_event_id)
+    return epochs
+
+
+def equalize_epoch_counts(epochs_list, method='mintime'):
+    """Equalize the number of trials in multiple Epoch instances
+
+    It tries to make the remaining epochs occur as close as possible in
+    time. This method works based on the idea that if there happened to be some
+    time-varying (like on the scale of minutes) noise characteristics during
+    a recording, they could be compensated for (to some extent) in the
+    equalization process. This method thus seeks to reduce any of those effects
+    by minimizing the differences in the times of the events in the two sets of
+    epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
+    other one had [3.5, 4.5, 120.5, 121.5], it would remove the events at
+    times [1, 2] in the first epochs and not the ones at times [120, 121].
+
+    Note that this operates on the Epochs instances in-place.
+
+    Example:
+
+        equalize_epoch_counts([epochs1, epochs2])
+
+    Parameters
+    ----------
+    epochs_list : list of Epochs instances
+        The Epochs instances to equalize trial counts for.
+    method : str
+        If 'truncate', events will be truncated from the end of each event
+        list. If 'mintime', timing differences between each event list will be
+        minimized.
+    """
+    if not all([isinstance(e, Epochs) for e in epochs_list]):
+        raise ValueError('All inputs must be Epochs instances')
+
+    # make sure bad epochs are dropped
+    [e.drop_bad_epochs() if not e._bad_dropped else None for e in epochs_list]
+    event_times = [e.events[:, 0] for e in epochs_list]
+    indices = _get_drop_indices(event_times, method)
+    for e, inds in zip(epochs_list, indices):
+        e = _check_add_drop_log(e, inds)
+        e.drop_epochs(inds)
+
+
+def _get_drop_indices(event_times, method):
+    """Helper to get indices to drop from multiple event timing lists"""
+    small_idx = np.argmin([e.shape[0] for e in event_times])
+    small_e_times = event_times[small_idx]
+    if method not in ['mintime', 'truncate']:
+        raise ValueError('method must be either mintime or truncate, not '
+                         '%s' % method)
+    indices = list()
+    for e in event_times:
+        if method == 'mintime':
+            mask = _minimize_time_diff(small_e_times, e)
+        else:
+            mask = np.ones(e.shape[0], dtype=bool)
+            mask[small_e_times.shape[0]:] = False
+        indices.append(np.where(np.logical_not(mask))[0])
+
+    return indices
+
+
+def _minimize_time_diff(t_shorter, t_longer):
+    """Find a boolean mask to minimize timing differences"""
+    keep = np.ones((len(t_longer)), dtype=bool)
+    scores = np.ones((len(t_longer)))
+    for _ in range(len(t_longer) - len(t_shorter)):
+        scores.fill(np.inf)
+        # Check every possible removal to see if it minimizes
+        for idx in np.where(keep)[0]:
+            keep[idx] = False
+            scores[idx] = _area_between_times(t_shorter, t_longer[keep])
+            keep[idx] = True
+        keep[np.argmin(scores)] = False
+    return keep
+
+
+def _area_between_times(t1, t2):
+    """Quantify the difference between two timing sets"""
+    x1 = range(len(t1))
+    x2 = range(len(t2))
+    xs = np.concatenate((x1, x2))
+    return np.sum(np.abs(np.interp(xs, x1, t1) - np.interp(xs, x2, t2)))
+
+
+@verbose
+def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
+             ignore_chs=[], verbose=None):
+    """Test if data segment e is good according to the criteria
+    defined in reject and flat. If full_report=True, it will give
+    True/False as well as a list of all offending channels.
+    """
+    bad_list = list()
+    has_printed = False
+    checkable = np.ones(len(ch_names), dtype=bool)
+    checkable[np.array([c in ignore_chs
+                        for c in ch_names], dtype=bool)] = False
+    for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
+        if refl is not None:
+            for key, thresh in refl.iteritems():
+                idx = channel_type_idx[key]
+                name = key.upper()
+                if len(idx) > 0:
+                    e_idx = e[idx]
+                    deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
+                    checkable_idx = checkable[idx]
+                    idx_deltas = np.where(np.logical_and(f(deltas, thresh),
+                                                         checkable_idx))[0]
+
+                    if len(idx_deltas) > 0:
+                        ch_name = [ch_names[idx[i]] for i in idx_deltas]
+                        if not has_printed:
+                            logger.info('    Rejecting %s epoch based on %s : '
+                                        '%s' % (t, name, ch_name))
+                            has_printed = True
+                        if not full_report:
+                            return False
+                        else:
+                            bad_list.extend(ch_name)
+
+    if not full_report:
+        return True
+    else:
+        if bad_list == []:
+            return True, None
+        else:
+            return False, bad_list
+
+
+@verbose
+def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
+    """Read epochs from a fif file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. If proj is 'delayed' and reject is not
+        None the single epochs will be projected before the rejection
+        decision, but used in unprojected state if they are kept.
+        This way deciding which projection vectors are good can be postponed
+        to the evoked stage without resulting in lower epoch counts and
+        without producing results different from early SSP application
+        given comparable parameters. Note that in this case baselining,
+        detrending and temporal decimation will be postponed.
+        If proj is False no projections will be applied which is the
+        recommended value if SSPs are not used for cleaning the data.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        The epochs
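+
+    Examples
+    --------
+    A sketch (the file name is hypothetical):
+
+        >>> epochs = read_epochs('sample-epo.fif') # doctest: +SKIP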
+    """
+    epochs = Epochs(None, None, None, None, None)
+
+    logger.info('Reading %s ...' % fname)
+    fid, tree, _ = fiff_open(fname)
+
+    #   Read the measurement info
+    info, meas = read_meas_info(fid, tree)
+    info['filename'] = fname
+
+    events, mappings = _read_events_fif(fid, tree)
+
+    #   Locate the data of interest
+    processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
+    if len(processed) == 0:
+        fid.close()
+        raise ValueError('Could not find processed data')
+
+    epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
+    if len(epochs_node) == 0:
+        fid.close()
+        raise ValueError('Could not find epochs data')
+
+    my_epochs = epochs_node[0]
+
+    # Now find the data in the block
+    comment = None
+    data = None
+    bmin, bmax = None, None
+    baseline = None
+    for k in range(my_epochs['nent']):
+        kind = my_epochs['directory'][k].kind
+        pos = my_epochs['directory'][k].pos
+        if kind == FIFF.FIFF_FIRST_SAMPLE:
+            tag = read_tag(fid, pos)
+            first = int(tag.data)
+        elif kind == FIFF.FIFF_LAST_SAMPLE:
+            tag = read_tag(fid, pos)
+            last = int(tag.data)
+        elif kind == FIFF.FIFF_COMMENT:
+            tag = read_tag(fid, pos)
+            comment = tag.data
+        elif kind == FIFF.FIFF_EPOCH:
+            tag = read_tag(fid, pos)
+            data = tag.data
+        elif kind == FIFF.FIFF_MNE_BASELINE_MIN:
+            tag = read_tag(fid, pos)
+            bmin = float(tag.data)
+        elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
+            tag = read_tag(fid, pos)
+            bmax = float(tag.data)
+
+    if bmin is not None or bmax is not None:
+        baseline = (bmin, bmax)
+
+    nsamp = last - first + 1
+    logger.info('    Found the data of interest:')
+    logger.info('        t = %10.2f ... %10.2f ms (%s)'
+                % (1000 * first / info['sfreq'],
+                   1000 * last / info['sfreq'], comment))
+    if info['comps'] is not None:
+        logger.info('        %d CTF compensation matrices available'
+                    % len(info['comps']))
+
+    # Read the data
+    if data is None:
+        raise ValueError('Epochs data not found')
+
+    if data.shape[2] != nsamp:
+        fid.close()
+        raise ValueError('Incorrect number of samples (%d instead of %d)'
+                         % (data.shape[2], nsamp))
+
+    # Calibrate
+    cals = np.array([info['chs'][k]['cal'] * info['chs'][k].get('scale', 1.0)
+                     for k in range(info['nchan'])])
+    data *= cals[np.newaxis, :, np.newaxis]
+
+    times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
+    tmin = times[0]
+    tmax = times[-1]
+
+    # Put it all together
+    epochs.preload = True
+    epochs.raw = None
+    epochs._bad_dropped = True
+    epochs.events = events
+    epochs._data = data
+    epochs.info = info
+    epochs.tmin = tmin
+    epochs.tmax = tmax
+    epochs.name = comment
+    epochs.times = times
+    epochs.proj = proj
+    activate = False if epochs._check_delayed() else proj
+    epochs._projector, epochs.info = setup_proj(info, add_eeg_ref,
+                                                activate=activate)
+
+    epochs.baseline = baseline
+    epochs.event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
+                       if mappings is None else mappings)
+    epochs.verbose = verbose
+    fid.close()
+
+    return epochs
+
+
+def bootstrap(epochs, random_state=None):
+    """Compute epochs selected by bootstrapping
+
+    Parameters
+    ----------
+    epochs : Epochs instance
+        epochs data to be bootstrapped
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state
+
+    Returns
+    -------
+    epochs : Epochs instance
+        The bootstrap samples
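+
+    Examples
+    --------
+    Draw a bootstrap sample of preloaded epochs (a sketch):
+
+        >>> epochs_bs = bootstrap(epochs, random_state=42) # doctest: +SKIP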
+    """
+    if not epochs.preload:
+        raise RuntimeError('Modifying data of epochs is only supported '
+                           'when preloading is used. Use preload=True '
+                           'in the constructor.')
+
+    rng = check_random_state(random_state)
+    epochs_bootstrap = epochs.copy()
+    n_events = len(epochs_bootstrap.events)
+    idx = rng.randint(0, n_events, n_events)
+    epochs_bootstrap = epochs_bootstrap[idx]
+    return epochs_bootstrap
+
+
+def _check_add_drop_log(epochs, inds):
+    """Aux Function"""
+    new_idx, new_drop_log = 0, []
+    for idx, log in enumerate(epochs.drop_log):
+        if not log:
+            new_idx += 1
+        if new_idx in inds:
+            new_log = ['EQUALIZED_COUNT']
+        elif log:
+            new_log = log
+        else:
+            new_log = []
+        new_drop_log.append(new_log)
+    epochs.drop_log = new_drop_log
+    return epochs
diff --git a/mne/event.py b/mne/event.py
new file mode 100644
index 0000000..0bc6c5f
--- /dev/null
+++ b/mne/event.py
@@ -0,0 +1,683 @@
+"""IO with fif files containing events
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from os.path import splitext
+
+import logging
+logger = logging.getLogger('mne')
+
+from .fiff.constants import FIFF
+from .fiff.tree import dir_tree_find
+from .fiff.tag import read_tag
+from .fiff.open import fiff_open
+from .fiff.write import write_int, start_block, start_file, end_block, end_file
+from .fiff.pick import pick_channels
+from .utils import get_config
+from . import verbose
+
+
+def pick_events(events, include=None, exclude=None):
+    """Select some events
+
+    Parameters
+    ----------
+    include : int | list | None
+        An event id to include or a list of them.
+        If None, all events are included.
+    exclude : int | list | None
+        An event id to exclude or a list of them.
+        If None, no event is excluded. If include is not None,
+        the exclude parameter is ignored.
+
+    Returns
+    -------
+    events : array, shape (n_events, 3)
+        The list of events
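+
+    Examples
+    --------
+    Keep only events with id 1 or 2 (hypothetical ids):
+
+        >>> events = pick_events(events, include=[1, 2]) # doctest: +SKIP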
+    """
+    if include is not None:
+        if not isinstance(include, list):
+            include = [include]
+        mask = np.zeros(len(events), dtype=np.bool)
+        for e in include:
+            mask = np.logical_or(mask, events[:, 2] == e)
+        events = events[mask]
+    elif exclude is not None:
+        if not isinstance(exclude, list):
+            exclude = [exclude]
+        mask = np.ones(len(events), dtype=np.bool)
+        for e in exclude:
+            mask = np.logical_and(mask, events[:, 2] != e)
+        events = events[mask]
+    else:
+        events = np.copy(events)
+
+    if len(events) == 0:
+        raise RuntimeError("No events found")
+
+    return events
+
+
+def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax,
+                         new_id=None, fill_na=None):
+    """Define new events by co-occurrence of existing events
+
+    This function can be used to evaluate events depending on the
+    temporal lag to another event. For example, this can be used to
+    analyze evoked responses which were followed by a button press within
+    a defined time window.
+
+    Parameters
+    ----------
+    events : ndarray
+        Array as returned by mne.find_events.
+    reference_id : int
+        The reference event. The event defining the epoch of interest.
+    target_id : int
+        The target event. The event co-occurring within a certain time
+        window around the reference event.
+    sfreq : float
+        The sampling frequency of the data.
+    tmin : float
+        The lower limit of the search window, in seconds, relative to the
+        reference event.
+    tmax : float
+        The upper limit of the search window, in seconds, relative to the
+        reference event.
+    new_id : int
+        The id to assign to the new events. If None, reference_id is used.
+    fill_na : int | None
+        Fill event to be inserted if target is not available within the time
+        window specified. If None, the 'null' events will be dropped.
+
+    Returns
+    -------
+    new_events : ndarray
+        The new defined events
+    lag : ndarray
+        Time lag between reference and target in milliseconds.
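+
+    Examples
+    --------
+    Keep stimulus events (id 1) that were followed by a button press
+    (id 2) within 100 to 500 ms, relabeling them as 42 (all ids here
+    are hypothetical):
+
+        >>> new_events, lag = define_target_events(events, 1, 2,
+        ...     raw.info['sfreq'], 0.1, 0.5, new_id=42) # doctest: +SKIP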
+    """
+
+    if new_id is None:
+        new_id = reference_id
+
+    tsample = 1e3 / sfreq
+    imin = int(tmin * sfreq)
+    imax = int(tmax * sfreq)
+
+    new_events = []
+    lag = []
+    for event in events.copy().astype('f8'):
+        if event[2] == reference_id:
+            lower = event[0] + imin
+            upper = event[0] + imax
+            res = events[(events[:, 0] > lower) &
+                         (events[:, 0] < upper) & (events[:, 2] == target_id)]
+            if res.any():
+                lag += [event[0] - res[0][0]]
+                event[2] = new_id
+                new_events += [event]
+            elif fill_na is not None:
+                event[2] = fill_na
+                new_events += [event]
+                lag += [fill_na]
+
+    new_events = np.array(new_events)
+
+    lag = np.abs(lag, dtype='f8')
+    if lag.any():
+        lag[lag != fill_na] *= tsample
+    else:
+        lag = np.array([])
+
+    return new_events if new_events.any() else np.array([]), lag
+
+
+def _read_events_fif(fid, tree):
+    """Aux function"""
+    #   Find the desired block
+    events = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
+
+    if len(events) == 0:
+        fid.close()
+        raise ValueError('Could not find event data')
+
+    events = events[0]
+
+    for d in events['directory']:
+        kind = d.kind
+        pos = d.pos
+        if kind == FIFF.FIFF_MNE_EVENT_LIST:
+            tag = read_tag(fid, pos)
+            event_list = tag.data
+            break
+    else:
+        raise ValueError('Could not find any events')
+
+    mappings = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
+    mappings = mappings[0]
+
+    for d in mappings['directory']:
+        kind = d.kind
+        pos = d.pos
+        if kind == FIFF.FIFF_DESCRIPTION:
+            tag = read_tag(fid, pos)
+            mappings = tag.data
+            break
+    else:
+        mappings = None
+
+    if mappings is not None:
+        m_ = (m.split(':') for m in mappings.split(';'))
+        mappings = dict((k, int(v)) for k, v in m_)
+    event_list = event_list.reshape(len(event_list) / 3, 3)
+    return event_list, mappings
+
+
+def read_events(filename, include=None, exclude=None):
+    """Reads events from fif or text file
+
+    Parameters
+    ----------
+    filename : string
+        Name of the input file.
+        If the extension is .fif, events are read assuming
+        the file is in FIF format, otherwise (e.g., .eve,
+        .lst, .txt) events are read as coming from text.
+        Note that new format event files do not contain
+        the "time" column (used to be the second column).
+    include : int | list | None
+        An event id to include or a list of them.
+        If None, all events are included.
+    exclude : int | list | None
+        An event id to exclude or a list of them.
+        If None, no event is excluded. If include is not None,
+        the exclude parameter is ignored.
+
+    Returns
+    -------
+    events : array, shape (n_events, 3)
+        The list of events
+
+    Notes
+    -----
+    This function will discard the offset line (i.e., first line with zero
+    event number) if it is present in a text file.
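+
+    Examples
+    --------
+    A sketch (the file name is hypothetical):
+
+        >>> events = read_events('sample-eve.fif') # doctest: +SKIP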
+    """
+    ext = splitext(filename)[1].lower()
+    if ext == '.fif' or ext == '.gz':
+        fid, tree, _ = fiff_open(filename)
+        event_list, _ = _read_events_fif(fid, tree)
+        fid.close()
+    else:
+        #  Have to read this in as float64 then convert because old style
+        #  eve/lst files had a second float column that will raise errors
+        lines = np.loadtxt(filename, dtype=np.float64).astype(np.uint32)
+        if len(lines) == 0:
+            raise ValueError('No text lines found')
+
+        if lines.ndim == 1:  # Special case for only one event
+            lines = lines[np.newaxis, :]
+
+        if len(lines[0]) == 4:  # Old format eve/lst
+            goods = [0, 2, 3]  # Omit "time" variable
+        elif len(lines[0]) == 3:
+            goods = [0, 1, 2]
+        else:
+            raise ValueError('Unknown number of columns in event text file')
+
+        event_list = lines[:, goods]
+        if event_list.shape[0] > 0 and event_list[0, 2] == 0:
+            event_list = event_list[1:]
+
+    event_list = pick_events(event_list, include, exclude)
+    return event_list
+
+
+def write_events(filename, event_list):
+    """Write events to file
+
+    Parameters
+    ----------
+    filename : string
+        Name of the output file.
+        If the extension is .fif, events are written in
+        binary FIF format, otherwise (e.g., .eve, .lst,
+        .txt) events are written as plain text.
+        Note that new format event files do not contain
+        the "time" column (used to be the second column).
+
+    event_list : array, shape (n_events, 3)
+        The list of events
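+
+    Examples
+    --------
+    A sketch (the file name is hypothetical):
+
+        >>> write_events('sample-eve.fif', events) # doctest: +SKIP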
+    """
+    ext = splitext(filename)[1].lower()
+    if ext == '.fif' or ext == '.gz':
+        #   Start writing...
+        fid = start_file(filename)
+
+        start_block(fid, FIFF.FIFFB_MNE_EVENTS)
+        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, event_list.T)
+        end_block(fid, FIFF.FIFFB_MNE_EVENTS)
+
+        end_file(fid)
+    else:
+        f = open(filename, 'w')
+        [f.write('%6d %6d %3d\n' % tuple(e)) for e in event_list]
+        f.close()
+
+
+def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
+                    stim_channel=None):
+    """Find all steps in data from a stim channel
+
+    Parameters
+    ----------
+    raw : Raw object
+        The raw data.
+    pad_start, pad_stop : None | int
+        Values to assume outside of the stim channel (e.g., if pad_start=0 and
+        the stim channel starts with value 5, an event of [0, 0, 5] will be
+        inserted at the beginning). With None, no steps will be inserted.
+    merge : int
+        Merge steps occurring in neighboring samples. The integer value
+        indicates over how many samples events should be merged, and the sign
+        indicates in which direction they should be merged (negative means
+        towards the earlier event, positive towards the later event).
+    stim_channel : None | string | list of string
+        Name of the stim channel or all the stim channels
+        affected by the trigger. If None, the config variables
+        'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
+        etc. are read. If these are not found, it will default to
+        'STI 014'.
+
+    Returns
+    -------
+    steps : array, shape = (n_samples, 3)
+        For each step in the stim channel the values [sample, v_from, v_to].
+        The first column contains the event time in samples (the first sample
+        with the new value). The second column contains the stim channel value
+        before the step, and the third column contains the value after the
+        step.
+
+    See Also
+    --------
+    find_events : More sophisticated options for finding events in a Raw file.
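+
+    Examples
+    --------
+    A sketch; steps at most one sample apart are merged towards the
+    earlier event:
+
+        >>> steps = find_stim_steps(raw, merge=-1) # doctest: +SKIP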
+    """
+    # pull stim channel from config if necessary
+    stim_channel = _get_stim_channel(stim_channel)
+
+    pick = pick_channels(raw.info['ch_names'], include=stim_channel)
+    if len(pick) == 0:
+        raise ValueError('No stim channel found to extract event triggers.')
+    data, _ = raw[pick, :]
+    if np.any(data < 0):
+        logger.warn('Trigger channel contains negative values. '
+                    'Taking absolute value.')
+        data = np.abs(data)  # make sure trig channel is positive
+    data = data.astype(np.int)
+
+    changed = np.diff(data, axis=1) != 0
+    idx = np.where(np.all(changed, axis=0))[0]
+    if len(idx) == 0:
+        return np.empty((0, 3), dtype='int32')
+
+    pre_step = data[0, idx]
+    idx += 1
+    post_step = data[0, idx]
+    idx += raw.first_samp
+    steps = np.c_[idx, pre_step, post_step]
+
+    if pad_start is not None:
+        v = steps[0, 1]
+        if v != pad_start:
+            steps = np.insert(steps, 0, [0, pad_start, v], axis=0)
+
+    if pad_stop is not None:
+        v = steps[-1, 2]
+        if v != pad_stop:
+            last_idx = len(data[0]) + raw.first_samp
+            steps = np.append(steps, [[last_idx, v, pad_stop]], axis=0)
+
+    if merge != 0:
+        diff = np.diff(steps[:, 0])
+        idx = (diff <= abs(merge))
+        if np.any(idx):
+            where = np.where(idx)[0]
+            keep = np.logical_not(idx)
+            if merge > 0:
+                # drop the earlier event
+                steps[where + 1, 1] = steps[where, 1]
+                keep = np.append(keep, True)
+            else:
+                # drop the later event
+                steps[where, 2] = steps[where + 1, 2]
+                keep = np.insert(keep, 0, True)
+
+            is_step = (steps[:, 1] != steps[:, 2])
+            keep = np.logical_and(keep, is_step)
+            steps = steps[keep]
+
+    return steps
+
+
+@verbose
+def find_events(raw, stim_channel=None, verbose=None, output='onset',
+                consecutive='increasing', min_duration=0):
+    """Find events from raw file
+
+    Parameters
+    ----------
+    raw : Raw object
+        The raw data.
+    stim_channel : None | string | list of string
+        Name of the stim channel or all the stim channels
+        affected by the trigger. If None, the config variables
+        'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
+        etc. are read. If these are not found, it will default to
+        'STI 014'.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    output : 'onset' | 'offset' | 'step'
+        Whether to report when events start, when events end, or both.
+    consecutive : bool | 'increasing'
+        If True, consider instances where the value of the events
+        channel changes without first returning to zero as multiple
+        events. If False, report only instances where the value of the
+        events channel changes from/to zero. If 'increasing', report
+        adjacent events only when the second event code is greater than
+        the first.
+    min_duration : float
+        The minimum duration of a change in the events channel required
+        to consider it as an event (in seconds).
+
+    Returns
+    -------
+    events : array, shape = (n_events, 3)
+        All events that were found. The first column contains the event time
+        in samples and the third column contains the event id. For output =
+        'onset' or 'step', the second column contains the value of the stim
+        channel immediately before the event/step. For output = 'offset',
+        the second column contains the value of the stim channel after the
+        event offset.
+
+    Examples
+    --------
+    Consider data with a stim channel that looks like: [0, 32, 32, 33, 32, 0]
+
+    By default, find_events returns all samples at which the value of the
+    stim channel increases:
+
+        >>> print(find_events(raw)) # doctest: +SKIP
+        [[ 1  0 32]
+         [ 3 32 33]]
+
+    If consecutive is False, find_events only returns the samples at which
+    the stim channel changes from zero to a non-zero value:
+
+        >>> print(find_events(raw, consecutive=False)) # doctest: +SKIP
+        [[ 1  0 32]]
+
+    If consecutive is True, find_events returns samples at which the
+    event changes, regardless of whether it first returns to zero:
+
+        >>> print(find_events(raw, consecutive=True)) # doctest: +SKIP
+        [[ 1  0 32]
+         [ 3 32 33]
+         [ 4 33 32]]
+
+    If output is 'offset', find_events returns the last sample of each event
+    instead of the first one:
+
+        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
+        ...                   output='offset'))
+        [[ 2 33 32]
+         [ 3 32 33]
+         [ 4  0 32]]
+
+    If output is 'step', find_events returns the samples at which an event
+    starts or ends:
+
+        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
+        ...                   output='step'))
+        [[ 1  0 32]
+         [ 3 32 33]
+         [ 4 33 32]
+         [ 5 32  0]]
+
+    To ignore spurious events, it is also possible to specify a minimum
+    event duration. Assuming our events channel has a sample rate of
+    1000 Hz:
+
+        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
+        ...                   min_duration=0.002))
+        [[ 1  0 32]]
+
+
+    See Also
+    --------
+    find_stim_steps : Find all the steps in the stim channel.
+    """
+    if min_duration > 0:
+        min_samples = min_duration * raw.info['sfreq']
+        merge = int(min_samples // 1)
+        if merge == min_samples:
+            merge -= 1
+    else:
+        merge = 0
+
+    events = find_stim_steps(raw, pad_stop=0, merge=merge,
+                             stim_channel=stim_channel)
+
+    # Determine event onsets and offsets
+    if consecutive == 'increasing':
+        onsets = (events[:, 2] > events[:, 1])
+        offsets = np.logical_and(np.logical_or(onsets, (events[:, 2] == 0)),
+                                 (events[:, 1] > 0))
+    elif consecutive:
+        onsets = (events[:, 2] > 0)
+        offsets = (events[:, 1] > 0)
+    else:
+        onsets = (events[:, 1] == 0)
+        offsets = (events[:, 2] == 0)
+
+    onset_idx = np.where(onsets)[0]
+    offset_idx = np.where(offsets)[0]
+
+    if len(onset_idx) == 0 or len(offset_idx) == 0:
+        return np.empty((0, 3), dtype='int32')
+
+    # delete orphaned onsets/offsets
+    if onset_idx[0] > offset_idx[0]:
+        logger.info("Removing orphaned offset at the beginning of the file.")
+        offset_idx = np.delete(offset_idx, 0)
+
+    if onset_idx[-1] > offset_idx[-1]:
+        logger.info("Removing orphaned onset at the end of the file.")
+        onset_idx = np.delete(onset_idx, -1)
+
+    if output == 'onset':
+        events = events[onset_idx]
+    elif output == 'step':
+        idx = np.union1d(onset_idx, offset_idx)
+        events = events[idx]
+    elif output == 'offset':
+        event_id = events[onset_idx, 2]
+        events = events[offset_idx]
+        events[:, 1] = events[:, 2]
+        events[:, 2] = event_id
+        events[:, 0] -= 1
+    else:
+        raise Exception("Invalid output parameter %r" % output)
+
+    logger.info("%s events found" % len(events))
+    logger.info("Events id: %s" % np.unique(events[:, 2]))
+    return events
+
+
+def merge_events(events, ids, new_id, replace_events=True):
+    """Merge a set of events
+
+    Parameters
+    ----------
+    events : array
+        Events.
+    ids : array of int
+        The ids of events to merge.
+    new_id : int
+        The new id.
+    replace_events : bool
+        If True (default), old event ids are replaced. Otherwise,
+        new events will be added to the old event list.
+
+    Returns
+    -------
+    new_events : array
+        The new events
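+
+    Examples
+    --------
+    Collapse events with ids 1 and 2 into a new id 12 (hypothetical
+    ids):
+
+        >>> events = merge_events(events, [1, 2], 12) # doctest: +SKIP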
+    """
+    events_out = events.copy()
+    where = np.empty(events.shape[0], dtype=bool)
+    for col in [1, 2]:
+        where.fill(False)
+        for i in ids:
+            where = (events[:, col] == i)
+            events_out[where, col] = new_id
+    if not replace_events:
+        events_out = np.concatenate((events_out, events), axis=0)
+        events_out = events_out[np.argsort(events_out[:, 0])]
+    return events_out
+
+
+def shift_time_events(events, ids, tshift, sfreq):
+    """Shift an event
+
+    Parameters
+    ----------
+    events : array, shape=(n_events, 3)
+        The events
+    ids : array int
+        The ids of events to shift.
+    tshift : float
+        Time-shift event. Use positive value tshift for forward shifting
+        the event and negative value for backward shift.
+    sfreq : float
+        The sampling frequency of the data.
+
+    Returns
+    -------
+    new_events : array
+        The new events.
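+
+    Examples
+    --------
+    Shift events with id 1 forward by 10 ms (a sketch):
+
+        >>> events = shift_time_events(events, [1], 0.01,
+        ...                            raw.info['sfreq']) # doctest: +SKIP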
+    """
+    events = events.copy()
+    for ii in ids:
+        events[events[:, 2] == ii, 0] += int(tshift * sfreq)
+    return events
+
+
+def make_fixed_length_events(raw, id, start=0, stop=None, duration=1.):
+    """Make a set of events separated by a fixed duration
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        A raw object to use the data from.
+    id : int
+        The id to use.
+    start : float
+        Time of first event.
+    stop : float | None
+        Maximum time of last event. If None, events extend to the end
+        of the recording.
+    duration : float
+        The duration to separate events by.
+
+    Returns
+    -------
+    new_events : array
+        The new events.
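+
+    Examples
+    --------
+    Segment a raw recording into contiguous 2-second pseudo-epochs
+    (a sketch):
+
+        >>> events = make_fixed_length_events(raw, 1, duration=2.) # doctest: +SKIP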
+    """
+    start = raw.time_as_index(start)
+    start = start[0] + raw.first_samp
+    if stop is not None:
+        stop = raw.time_as_index(stop)
+        stop = min([stop[0] + raw.first_samp, raw.last_samp + 1])
+    else:
+        stop = raw.last_samp + 1
+    if not isinstance(id, int):
+        raise ValueError('id must be an integer')
+    # Make sure we don't go past the end of the file:
+    stop -= int(np.ceil(raw.info['sfreq'] * duration))
+    ts = np.arange(start, stop, raw.info['sfreq'] * duration).astype(int)
+    n_events = len(ts)
+    events = np.c_[ts, np.zeros(n_events, dtype=int),
+                   id * np.ones(n_events, dtype=int)]
+    return events
+
+
+def concatenate_events(events, first_samps, last_samps):
+    """Concatenate event lists in a manner compatible with
+    concatenate_raws
+
+    This is useful, for example, if you processed and/or changed
+    events in raw files separately before combining them using
+    concatenate_raws.
+
+    Parameters
+    ----------
+    events : list of arrays
+        List of event arrays, typically each extracted from a
+        corresponding raw file that is being concatenated.
+
+    first_samps : list or array of int
+        First sample numbers of the raw files concatenated.
+
+    last_samps : list or array of int
+        Last sample numbers of the raw files concatenated.
+
+    Returns
+    -------
+    events : array
+        The concatenated events.
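+
+    Examples
+    --------
+    A sketch for events from two raw files that were combined with
+    concatenate_raws:
+
+        >>> events = concatenate_events([events1, events2],
+        ...     [raw1.first_samp, raw2.first_samp],
+        ...     [raw1.last_samp, raw2.last_samp]) # doctest: +SKIP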
+    """
+    if not isinstance(events, list):
+        raise ValueError('events must be a list of arrays')
+    if not (len(events) == len(last_samps) and
+            len(events) == len(first_samps)):
+        raise ValueError('events, first_samps, and last_samps must all have '
+                         'the same lengths')
+    first_samps = np.array(first_samps)
+    last_samps = np.array(last_samps)
+    n_samps = np.cumsum(last_samps - first_samps + 1)
+    events_out = events[0]
+    for e, f, n in zip(events[1:], first_samps[1:], n_samps[:-1]):
+        # remove any skip since it doesn't exist in concatenated files
+        e2 = e.copy()
+        e2[:, 0] -= f
+        # add offset due to previous files, plus original file offset
+        e2[:, 0] += n + first_samps[0]
+        events_out = np.concatenate((events_out, e2), axis=0)
+
+    return events_out
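+
+# A minimal example (editor's illustration) of the offset arithmetic above:
+# the second file spans samples 10..19, so its event at sample 12 is mapped
+# to 12 - 10 + 10 = 12 in the concatenated numbering.
+#
+#     >>> import numpy as np
+#     >>> e1, e2 = np.array([[2, 0, 1]]), np.array([[12, 0, 1]])
+#     >>> concatenate_events([e1, e2], [0, 10], [9, 19])
+#     array([[ 2,  0,  1],
+#            [12,  0,  1]])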
+
+
+def _get_stim_channel(stim_channel):
+    """Helper to determine the appropriate stim_channel"""
+    if stim_channel is not None:
+        if not isinstance(stim_channel, list):
+            if not isinstance(stim_channel, basestring):
+                raise ValueError('stim_channel must be a str, list, or None')
+            stim_channel = [stim_channel]
+        if not all([isinstance(s, basestring) for s in stim_channel]):
+            raise ValueError('stim_channel list must contain all strings')
+        return stim_channel
+
+    stim_channel = list()
+    ch_count = 0
+    ch = get_config('MNE_STIM_CHANNEL')
+    while ch is not None:
+        stim_channel.append(ch)
+        ch_count += 1
+        ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
+    if ch_count == 0:
+        stim_channel = ['STI 014']
+    return stim_channel
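+
+# Editor's note: the loop above reads MNE_STIM_CHANNEL, then
+# MNE_STIM_CHANNEL_1, MNE_STIM_CHANNEL_2, ... from the mne-python
+# configuration until one is unset; if none is set, the Neuromag default
+# 'STI 014' is used.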
diff --git a/mne/fiff/__init__.py b/mne/fiff/__init__.py
new file mode 100644
index 0000000..5071794
--- /dev/null
+++ b/mne/fiff/__init__.py
@@ -0,0 +1,20 @@
+"""FIF module for IO with .fif files"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from .constants import FIFF
+from .open import fiff_open, show_fiff
+from .evoked import Evoked, read_evoked, write_evoked
+from .raw import Raw, start_writing_raw, write_raw_buffer, \
+                 finish_writing_raw, concatenate_raws
+from .pick import pick_types, pick_channels, pick_types_evoked, \
+                  pick_channels_regexp, pick_channels_forward, \
+                  pick_types_forward, pick_channels_cov, \
+                  pick_channels_evoked
+
+from .proj import proj_equal, make_eeg_average_ref_proj
+from .cov import read_cov, write_cov
+from . import bti
diff --git a/mne/fiff/bti/__init__.py b/mne/fiff/bti/__init__.py
new file mode 100644
index 0000000..7e01861
--- /dev/null
+++ b/mne/fiff/bti/__init__.py
@@ -0,0 +1,5 @@
+"""Bti module for conversion to FIF"""
+
+# Author: Denis A. Engemann <d.engemann at fz-juelich.de>
+
+from .raw import read_raw_bti
diff --git a/mne/fiff/bti/constants.py b/mne/fiff/bti/constants.py
new file mode 100644
index 0000000..e283c25
--- /dev/null
+++ b/mne/fiff/bti/constants.py
@@ -0,0 +1,107 @@
+# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from ..constants import Bunch
+
+BTI = Bunch()
+
+BTI.ELEC_STATE_NOT_COLLECTED           = 0
+BTI.ELEC_STATE_COLLECTED               = 1
+BTI.ELEC_STATE_SKIPPED                 = 2
+BTI.ELEC_STATE_NOT_APPLICABLE          = 3
+#
+## Byte offsets and data sizes for different files
+#
+BTI.FILE_MASK                          = 2147483647
+BTI.FILE_CURPOS                        = 8
+BTI.FILE_END                           = -8
+
+BTI.FILE_HS_VERSION                    = 0
+BTI.FILE_HS_TIMESTAMP                  = 4
+BTI.FILE_HS_CHECKSUM                   = 8
+BTI.FILE_HS_N_DIGPOINTS                = 12
+BTI.FILE_HS_N_INDEXPOINTS              = 16
+
+BTI.FILE_PDF_H_ENTER                   = 1
+BTI.FILE_PDF_H_FTYPE                   = 5
+BTI.FILE_PDF_H_XLABEL                  = 16
+BTI.FILE_PDF_H_NEXT                    = 2
+BTI.FILE_PDF_H_EXIT                    = 20
+
+BTI.FILE_PDF_EPOCH_EXIT                = 28
+
+BTI.FILE_PDF_CH_NEXT                   = 6
+BTI.FILE_PDF_CH_LABELSIZE              = 16
+BTI.FILE_PDF_CH_YLABEL                 = 16
+BTI.FILE_PDF_CH_OFF_FLAG               = 16
+BTI.FILE_PDF_CH_EXIT                   = 12
+
+BTI.FILE_PDF_EVENT_NAME                = 16
+BTI.FILE_PDF_EVENT_EXIT                = 32
+
+BTI.FILE_PDF_PROCESS_BLOCKTYPE         = 20
+BTI.FILE_PDF_PROCESS_USER              = 32
+BTI.FILE_PDF_PROCESS_FNAME             = 256
+BTI.FILE_PDF_PROCESS_EXIT              = 32
+
+BTI.FILE_PDF_ASSOC_NEXT                = 32
+
+BTI.FILE_PDFED_NAME                    = 17
+BTI.FILE_PDFED_NEXT                    = 9
+BTI.FILE_PDFED_EXIT                    = 8
+
+#
+## General data constants
+#
+BTI.DATA_N_IDX_POINTS                   = 5
+BTI.DATA_ROT_N_ROW                      = 3
+BTI.DATA_ROT_N_COL                      = 3
+BTI.DATA_XFM_N_COL                      = 4
+BTI.DATA_XFM_N_ROW                      = 4
+BTI.FIFF_LOGNO                          = 111
+#
+## Channel Types
+#
+BTI.CHTYPE_MEG                          = 1
+BTI.CHTYPE_EEG                          = 2
+BTI.CHTYPE_REFERENCE                    = 3
+BTI.CHTYPE_EXTERNAL                     = 4
+BTI.CHTYPE_TRIGGER                      = 5
+BTI.CHTYPE_UTILITY                      = 6
+BTI.CHTYPE_DERIVED                      = 7
+BTI.CHTYPE_SHORTED                      = 8
+#
+## Processes
+#
+BTI.PROC_DEFAULTS                      = 'BTi_defaults'
+BTI.PROC_FILTER                        = 'b_filt_hp,b_filt_lp,b_filt_notch'
+BTI.PROC_BPFILTER                      = 'b_filt_b_pass,b_filt_b_reject'
+#
+## User blocks
+#
+BTI.UB_B_MAG_INFO                      = 'B_Mag_Info'
+BTI.UB_B_COH_POINTS                    = 'B_COH_Points'
+BTI.UB_B_CCP_XFM_BLOCK                 = 'b_ccp_xfm_block'
+BTI.UB_B_EEG_LOCS                      = 'b_eeg_elec_locs'
+BTI.UB_B_WHC_CHAN_MAP_VER              = 'B_WHChanMapVer'
+BTI.UB_B_WHC_CHAN_MAP                  = 'B_WHChanMap'
+BTI.UB_B_WHS_SUBSYS_VER                = 'B_WHSubsysVer'
+BTI.UB_B_WHS_SUBSYS                    = 'B_WHSubsys'
+BTI.UB_B_CH_LABELS                     = 'B_ch_labels'
+BTI.UB_B_CALIBRATION                   = 'B_Calibration'
+BTI.UB_B_SYS_CONFIG_TIME               = 'B_SysConfigTime'
+BTI.UB_B_DELTA_ENABLED                 = 'B_DELTA_ENABLED'
+BTI.UB_B_E_TABLE_USED                  = 'B_E_table_used'
+BTI.UB_B_E_TABLE                       = 'B_E_TABLE'
+BTI.UB_B_WEIGHTS_USED                  = 'B_weights_used'
+BTI.UB_B_TRIG_MASK                     = 'B_trig_mask'
+BTI.UB_B_WEIGHT_TABLE                  = 'BWT_'
+#
+## transforms
+#
+BTI.T_ROT_VV = ((0, -1, 0, 0), (1, 0, 0, 0), (0, 0, 1, 0), (1, 1, 1, 1))
+BTI.T_IDENT = ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (1, 1, 1, 1))
+BTI.T_ROT_IX = slice(0, 3), slice(0, 3)
+BTI.T_TRANS_IX = slice(0, 3), slice(3, 4)
+BTI.T_SCA_IX = slice(3, 4), slice(0, 4)
\ No newline at end of file
diff --git a/mne/fiff/bti/raw.py b/mne/fiff/bti/raw.py
new file mode 100644
index 0000000..2bf0eb1
--- /dev/null
+++ b/mne/fiff/bti/raw.py
@@ -0,0 +1,1220 @@
+
+# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Yuval Harpaz <yuvharpaz at gmail.com>
+#
+#          simplified BSD-3 license
+
+import logging
+import os.path as op
+from itertools import count
+import numpy as np
+
+from ...utils import verbose
+from .. import Raw
+from .. import FIFF
+from .constants import BTI
+from .read import (read_int32, read_int16, read_str, read_float, read_double,
+                  read_transform, read_char, read_int64, read_uint16,
+                  read_uint32, read_double_matrix, read_float_matrix,
+                  read_int16_matrix)
+from .transforms import (bti_identity_trans, bti_to_vv_trans,
+                        bti_to_vv_coil_trans, inverse_trans, merge_trans)
+
+logger = logging.getLogger('mne')
+
+
+FIFF_INFO_CHS_FIELDS = ('loc', 'ch_name', 'unit_mul', 'coil_trans',
+    'coord_frame', 'coil_type', 'range', 'unit', 'cal', 'eeg_loc',
+    'scanno', 'kind', 'logno')
+
+FIFF_INFO_CHS_DEFAULTS = (np.array([0, 0, 0, 1] * 3, dtype='f4'),
+                          None, 0, None, 0, 0, 1.0,
+                          107, 1.0, None, None, 402, None)
+
+FIFF_INFO_DIG_FIELDS = ('kind', 'ident', 'r', 'coord_frame')
+FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD)
+
+BTI_WH2500_REF_MAG = ['MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA']
+BTI_WH2500_REF_GRAD = ['GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA']
+
+dtypes = zip(range(1, 5), ('>i2', '>i4', '>f4', '>f8'))
+DTYPES = dict((i, np.dtype(t)) for i, t in dtypes)
+
+RAW_INFO_FIELDS = ['dev_head_t', 'nchan', 'bads', 'projs', 'dev_ctf_t',
+                   'meas_date', 'meas_id', 'dig', 'sfreq', 'highpass',
+                   'filenames', 'comps', 'chs', 'ch_names', 'file_id',
+                   'lowpass', 'acq_pars', 'acq_stim', 'filename',
+                   'ctf_head_t']
+
+
+def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')):
+    """Renames appropriately ordered list of channel names
+
+    Parameters
+    ----------
+    names : list of str
+        Lists of 4-D channel names in ascending order
+
+    Returns
+    -------
+    new : list
+        List of names, channel names in Neuromag style
+    """
+    new = list()
+    ref_mag, ref_grad, eog, eeg, ext = [count(1) for _ in range(5)]
+    for i, name in enumerate(names, 1):
+        if name.startswith('A'):
+            name = 'MEG %3.3d' % i
+        elif name == 'RESPONSE':
+            name = 'STI 013'
+        elif name == 'TRIGGER':
+            name = 'STI 014'
+        elif any([name == k for k in eog_ch]):
+            name = 'EOG %3.3d' % eog.next()
+        elif name == ecg_ch:
+            name = 'ECG 001'
+        elif name.startswith('E'):
+            name = 'EEG %3.3d' % eeg.next()
+        elif name == 'UACurrent':
+            name = 'UTL 001'
+        elif name.startswith('M'):
+            name = 'RFM %3.3d' % ref_mag.next()
+        elif name.startswith('G'):
+            name = 'RFG %3.3d' % ref_grad.next()
+        elif name.startswith('X'):
+            name = 'EXT %3.3d' % ext.next()
+
+        new += [name]
+
+    return new
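+
+# Editor's illustration of the renaming scheme (hypothetical input):
+#
+#     >>> _rename_channels(['A1', 'TRIGGER', 'E31', 'E12'])
+#     ['MEG 001', 'STI 014', 'ECG 001', 'EEG 001']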
+
+
+def _read_head_shape(fname):
+    """ Helper Function """
+    with open(fname, 'rb') as fid:
+        fid.seek(BTI.FILE_HS_N_DIGPOINTS)
+        _n_dig_points = read_int32(fid)
+        idx_points = read_double_matrix(fid, BTI.DATA_N_IDX_POINTS, 3)
+        dig_points = read_double_matrix(fid, _n_dig_points, 3)
+
+    return idx_points, dig_points
+
+
+def _convert_head_shape(idx_points, dig_points):
+    """ Helper function """
+
+    fp = idx_points.astype('>f8')
+    dp = np.sum(fp[2] * (fp[0] - fp[1]))
+    tmp1, tmp2 = np.sum(fp[2] ** 2), np.sum((fp[0] - fp[1]) ** 2)
+    dcos = -dp / np.sqrt(tmp1 * tmp2)
+    dsin = np.sqrt(1. - dcos * dcos)
+    dt = dp / np.sqrt(tmp2)
+
+    idx_points_nm = np.ones((len(fp), 3), dtype='>f8')
+    for idx, f in enumerate(fp):
+        idx_points_nm[idx, 0] = dcos * f[0] - dsin * f[1] + dt
+        idx_points_nm[idx, 1] = dsin * f[0] + dcos * f[1]
+        idx_points_nm[idx, 2] = f[2]
+
+    # adjust order of fiducials to Neuromag
+    idx_points_nm[[1, 2]] = idx_points_nm[[2, 1]]
+
+    t = bti_identity_trans('>f8')
+    t[0, 0] = dcos
+    t[0, 1] = -dsin
+    t[1, 0] = dsin
+    t[1, 1] = dcos
+    t[0, 3] = dt
+
+    dig_points_nm = np.dot(t[BTI.T_ROT_IX], dig_points.T).T
+    dig_points_nm += t[BTI.T_TRANS_IX].T
+
+    return idx_points_nm, dig_points_nm, t
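+
+# Editor's note (a reading of the code above, not upstream documentation):
+# dcos/dsin encode a rotation in the x-y plane derived from the three
+# fiducial points, and dt is a shift along x; together they form the 4 x 4
+# transform t that maps the digitized points into Neuromag-style head
+# coordinates.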
+
+
+def _setup_head_shape(fname, use_hpi=True):
+    """Read index points and dig points from BTi head shape file
+
+    Parameters
+    ----------
+    fname : str
+        The absolute path to the head shape file
+
+    Returns
+    -------
+    dig : list of dicts
+        The list of dig point info structures needed for the fiff info
+        structure.
+    use_hpi : bool
+        Whether to treat additional hpi coils as digitization points or not.
+        If False, hpi coils will be discarded.
+    """
+    idx_points, dig_points = _read_head_shape(fname)
+    idx_points, dig_points, t = _convert_head_shape(idx_points, dig_points)
+    all_points = np.r_[idx_points, dig_points].astype('>f4')
+
+    idx_idents = range(1, 4) + range(1, len(idx_points) - 2)
+    dig = []
+    for idx in xrange(all_points.shape[0]):
+        point_info = dict(zip(FIFF_INFO_DIG_FIELDS, FIFF_INFO_DIG_DEFAULTS))
+        point_info['r'] = all_points[idx]
+        if idx < 3:
+            point_info['kind'] = FIFF.FIFFV_POINT_CARDINAL
+            point_info['ident'] = idx_idents[idx]
+        if 2 < idx < len(idx_points) and use_hpi:
+            point_info['kind'] = FIFF.FIFFV_POINT_HPI
+            point_info['ident'] = idx_idents[idx]
+        elif idx > 4:
+            point_info['kind'] = FIFF.FIFFV_POINT_EXTRA
+            point_info['ident'] = (idx + 1) - len(idx_idents)
+
+        # skip hpi points only when they are not requested
+        if use_hpi or not (2 < idx < len(idx_points)):
+            dig += [point_info]
+
+    return dig, t
+
+
+def _convert_coil_trans(coil_trans, bti_trans, bti_to_nm):
+    """ Helper Function """
+    t = bti_to_vv_coil_trans(coil_trans, bti_trans, bti_to_nm)
+    loc = np.roll(t.copy().T, 1, 0)[:, :3].flatten()
+
+    return t, loc
+
+
+def _convert_dev_head_t(bti_trans, bti_to_nm, m_h_nm_h):
+    """ Helper Function """
+    nm_to_m_sensor = inverse_trans(bti_identity_trans(), bti_to_nm)
+    nm_sensor_m_head = merge_trans(bti_trans, nm_to_m_sensor)
+
+    nm_dev_head_t = merge_trans(m_h_nm_h, nm_sensor_m_head)
+    nm_dev_head_t[3, :3] = 0.
+
+    return nm_dev_head_t
+
+
+def _correct_offset(fid):
+    """ Align fid pointer """
+    current = fid.tell()
+    if ((current % BTI.FILE_CURPOS) != 0):
+        offset = current % BTI.FILE_CURPOS
+        fid.seek(BTI.FILE_CURPOS - (offset), 1)
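+
+# Editor's note: BTI.FILE_CURPOS is 8, so the helper above simply advances
+# the file pointer to the next 8-byte boundary after a variable-length read.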
+
+
+def _read_config(fname):
+    """Read BTi system config file
+
+    Parameters
+    ----------
+    fname : str
+        The absolute path to the config file
+
+    Returns
+    -------
+    cfg : dict
+        The config blocks found.
+
+    """
+    fid = open(fname, 'rb')
+
+    cfg = dict()
+
+    cfg['hdr'] = {'version': read_int16(fid),
+                  'site_name': read_str(fid, 32),
+                  'dap_hostname': read_str(fid, 16),
+                  'sys_type': read_int16(fid),
+                  'sys_options': read_int32(fid),
+                  'supply_freq': read_int16(fid),
+                  'total_chans': read_int16(fid),
+                  'system_fixed_gain': read_float(fid),
+                  'volts_per_bit': read_float(fid),
+                  'total_sensors': read_int16(fid),
+                  'total_user_blocks': read_int16(fid),
+                  'next_der_chan_no': read_int16(fid)}
+
+    fid.seek(2, 1)
+
+    cfg['checksum'] = read_uint32(fid)
+    cfg['reserved'] = read_char(fid, 32)
+    cfg['transforms'] = [read_transform(fid) for t in
+                         range(cfg['hdr']['total_sensors'])]
+
+    cfg['user_blocks'] = dict()
+    for block in range(cfg['hdr']['total_user_blocks']):
+        ub = dict()
+
+        ub['hdr'] = {'nbytes': read_int32(fid),
+                     'kind': read_str(fid, 20),
+                     'checksum': read_int32(fid),
+                     'username': read_str(fid, 32),
+                     'timestamp': read_int32(fid),
+                     'user_space_size': read_int32(fid),
+                     'reserved': read_char(fid, 32)}
+
+        _correct_offset(fid)
+        kind = ub['hdr'].pop('kind')
+        if not kind:  # make sure reading goes right. Should never be empty
+            raise RuntimeError('Could not read user block. Probably you'
+                               ' acquired data using a BTi version currently'
+                               ' not supported. Please contact the mne-python'
+                               ' developers.')
+        dta, cfg['user_blocks'][kind] = dict(), ub
+        if kind in [v for k, v in BTI.items() if k[:5] == 'UB_B_']:
+            if kind == BTI.UB_B_MAG_INFO:
+                dta['version'] = read_int32(fid)
+                fid.seek(20, 1)
+                dta['headers'] = list()
+                for hdr in range(6):
+                    d = {'name': read_str(fid, 16),
+                         'transform': read_transform(fid),
+                         'units_per_bit': read_float(fid)}
+                    dta['headers'] += [d]
+                    fid.seek(20, 1)
+
+            elif kind == BTI.UB_B_COH_POINTS:
+                dta['n_points'] = read_int32(fid)
+                dta['status'] = read_int32(fid)
+                dta['points'] = []
+                for pnt in xrange(16):
+                    d = {'pos': read_double_matrix(fid, 1, 3),
+                         'direction': read_double_matrix(fid, 1, 3),
+                         'error': read_double(fid)}
+                    dta['points'] += [d]
+
+            elif kind == BTI.UB_B_CCP_XFM_BLOCK:
+                dta['method'] = read_int32(fid)
+                # handle difference btw/ linux (0) and solaris (4)
+                size = 0 if ub['hdr']['user_space_size'] == 132 else 4
+                fid.seek(size, 1)
+                dta['transform'] = read_transform(fid)
+
+            elif kind == BTI.UB_B_EEG_LOCS:
+                dta['electrodes'] = []
+                while True:
+                    d = {'label': read_str(fid, 16),
+                         'location': read_double_matrix(fid, 1, 3)}
+                    if not d['label']:
+                        break
+                    dta['electrodes'] += [d]
+
+            elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER,
+                          BTI.UB_B_WHS_SUBSYS_VER]:
+                dta['version'] = read_int16(fid)
+                dta['struct_size'] = read_int16(fid)
+                dta['entries'] = read_int16(fid)
+
+                fid.seek(8, 1)
+
+            elif kind == BTI.UB_B_WHC_CHAN_MAP:
+                num_channels = None
+                for name, data in cfg['user_blocks'].items():
+                    if name == BTI.UB_B_WHC_CHAN_MAP_VER:
+                        num_channels = data['entries']
+                        break
+
+                if num_channels is None:
+                    raise ValueError('Cannot find block %s to determine number'
+                                     ' of channels'
+                                     % BTI.UB_B_WHC_CHAN_MAP_VER)
+
+                dta['channels'] = list()
+                for i in xrange(num_channels):
+                    d = {'subsys_type': read_int16(fid),
+                         'subsys_num': read_int16(fid),
+                         'card_num': read_int16(fid),
+                         'chan_num': read_int16(fid),
+                         'recdspnum': read_int16(fid)}
+                    dta['channels'] += [d]
+                    fid.seek(8, 1)
+
+            elif kind == BTI.UB_B_WHS_SUBSYS:
+                num_subsys = None
+                for name, data in cfg['user_blocks'].items():
+                    if name == BTI.UB_B_WHS_SUBSYS_VER:
+                        num_subsys = data['entries']
+                        break
+
+                if num_subsys is None:
+                    raise ValueError('Cannot find block %s to determine'
+                                     ' number of subsystems'
+                                     % BTI.UB_B_WHS_SUBSYS_VER)
+
+                dta['subsys'] = list()
+                for sub_key in range(num_subsys):
+                    d = {'subsys_type': read_int16(fid),
+                         'subsys_num': read_int16(fid),
+                         'cards_per_sys': read_int16(fid),
+                         'channels_per_card': read_int16(fid),
+                         'card_version': read_int16(fid)}
+
+                    fid.seek(2, 1)
+
+                    d.update({'offsetdacgain': read_float(fid),
+                              'squid_type': read_int32(fid),
+                              'timesliceoffset': read_int16(fid),
+                              'padding': read_int16(fid),
+                              'volts_per_bit': read_float(fid)})
+
+                    dta['subsys'] += [d]
+
+            elif kind == BTI.UB_B_CH_LABELS:
+                dta['version'] = read_int32(fid)
+                dta['entries'] = read_int32(fid)
+                fid.seek(16, 1)
+
+                dta['labels'] = list()
+                for label in xrange(dta['entries']):
+                    dta['labels'] += [read_str(fid, 16)]
+
+            elif kind == BTI.UB_B_CALIBRATION:
+                dta['sensor_no'] = read_int16(fid)
+                fid.seek(2, 1)
+                dta['timestamp'] = read_int32(fid)
+                dta['logdir'] = read_str(fid, 256)
+
+            elif kind == BTI.UB_B_SYS_CONFIG_TIME:
+                # handle difference btw/ linux (256) and solaris (512)
+                size = 256 if ub['hdr']['user_space_size'] == 260 else 512
+                dta['sysconfig_name'] = read_str(fid, size)
+                dta['timestamp'] = read_int32(fid)
+
+            elif kind == BTI.UB_B_DELTA_ENABLED:
+                dta['delta_enabled'] = read_int16(fid)
+
+            elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]:
+                dta['hdr'] = {'version': read_int32(fid),
+                              'entry_size': read_int32(fid),
+                              'n_entries': read_int32(fid),
+                              'filtername': read_str(fid, 16),
+                              'n_e_values': read_int32(fid),
+                              'reserved': read_str(fid, 28)}
+
+                if dta['hdr']['version'] == 2:
+                    size = 16
+                    dta['ch_names'] = [read_str(fid, size) for ch in
+                                       range(dta['hdr']['n_entries'])]
+                    dta['e_ch_names'] = [read_str(fid, size) for ch in
+                                         range(dta['hdr']['n_e_values'])]
+
+                    rows = dta['hdr']['n_entries']
+                    cols = dta['hdr']['n_e_values']
+                    dta['etable'] = read_float_matrix(fid, rows, cols)
+                else:  # handle MAGNES2500 naming scheme
+                    dta['ch_names'] = ['WH2500'] * dta['hdr']['n_e_values']
+                    dta['hdr']['n_e_values'] = 6
+                    dta['e_ch_names'] = BTI_WH2500_REF_MAG
+                    rows = dta['hdr']['n_entries']
+                    cols = dta['hdr']['n_e_values']
+                    dta['etable'] = read_float_matrix(fid, rows, cols)
+
+                    _correct_offset(fid)
+
+            elif any([kind == BTI.UB_B_WEIGHTS_USED,
+                      kind[:4] == BTI.UB_B_WEIGHT_TABLE]):
+                dta['hdr'] = {'version': read_int32(fid),
+                              'entry_size': read_int32(fid),
+                              'n_entries': read_int32(fid),
+                              'name': read_str(fid, 32),
+                              'description': read_str(fid, 80),
+                              'n_anlg': read_int32(fid),
+                              'n_dsp': read_int32(fid),
+                              'reserved': read_str(fid, 72)}
+
+                if dta['hdr']['version'] == 2:
+                    dta['ch_names'] = [read_str(fid, 16) for ch in
+                                       range(dta['hdr']['n_entries'])]
+                    dta['anlg_ch_names'] = [read_str(fid, 16) for ch in
+                                            range(dta['hdr']['n_anlg'])]
+
+                    dta['dsp_ch_names'] = [read_str(fid, 16) for ch in
+                                           range(dta['hdr']['n_dsp'])]
+
+                    rows = dta['hdr']['n_entries']
+                    cols = dta['hdr']['n_dsp']
+                    dta['dsp_wts'] = read_float_matrix(fid, rows, cols)
+                    cols = dta['hdr']['n_anlg']
+                    dta['anlg_wts'] = read_int16_matrix(fid, rows, cols)
+
+                else:  # handle MAGNES2500 naming scheme
+                    dta['ch_names'] = ['WH2500'] * dta['hdr']['n_entries']
+                    dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3]
+                    dta['hdr']['n_anlg'] = len(dta['anlg_ch_names'])
+                    dta['dsp_ch_names'] = BTI_WH2500_REF_GRAD
+                    dta['hdr']['n_dsp'] = len(dta['dsp_ch_names'])
+                    dta['anlg_wts'] = np.zeros((dta['hdr']['n_entries'],
+                                            dta['hdr']['n_anlg']), dtype='i2')
+                    dta['dsp_wts'] = np.zeros((dta['hdr']['n_entries'],
+                                            dta['hdr']['n_dsp']), dtype='f4')
+                    for n in range(dta['hdr']['n_entries']):
+                        dta['anlg_wts'][n] = read_int16_matrix(fid, 1,
+                                                    dta['hdr']['n_anlg'])
+                        read_int16(fid)
+                        dta['dsp_wts'][n] = read_float_matrix(fid, 1,
+                                                    dta['hdr']['n_dsp'])
+
+                    _correct_offset(fid)
+
+            elif kind == BTI.UB_B_TRIG_MASK:
+                dta['version'] = read_int32(fid)
+                dta['entries'] = read_int32(fid)
+                fid.seek(16, 1)
+
+                dta['masks'] = []
+                for entry in range(dta['entries']):
+                    d = {'name': read_str(fid, 20),
+                         'nbits': read_uint16(fid),
+                         'shift': read_uint16(fid),
+                         'mask': read_uint32(fid)}
+                    dta['masks'] += [d]
+                    fid.seek(8, 1)
+
+        else:
+            dta['unknown'] = {'hdr': read_char(fid,
+                              ub['hdr']['user_space_size'])}
+
+        ub.update(dta)  # finally update the userblock data
+        _correct_offset(fid)  # after reading.
+
+    cfg['chs'] = list()
+
+    # prepare reading channels
+    dev_header = lambda x: {'size': read_int32(x),
+                            'checksum': read_int32(x),
+                            'reserved': read_str(x, 32)}
+
+    for channel in range(cfg['hdr']['total_chans']):
+        ch = {'name': read_str(fid, 16),
+              'chan_no': read_int16(fid),
+              'ch_type': read_uint16(fid),
+              'sensor_no': read_int16(fid),
+              'data': dict()}
+
+        fid.seek(2, 1)
+        ch.update({'gain': read_float(fid),
+                   'units_per_bit': read_float(fid),
+                   'yaxis_label': read_str(fid, 16),
+                   'aar_val': read_double(fid),
+                   'checksum': read_int32(fid),
+                   'reserved': read_str(fid, 32)})
+
+        cfg['chs'] += [ch]
+        _correct_offset(fid)  # before and after
+        dta = dict()
+        if ch['ch_type'] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]:
+            dev = {'device_info': dev_header(fid),
+                   'inductance': read_float(fid),
+                   'padding': read_str(fid, 4),
+                   'transform': read_transform(fid),
+                   'xform_flag': read_int16(fid),
+                   'total_loops': read_int16(fid)}
+
+            fid.seek(4, 1)
+            dev['reserved'] = read_str(fid, 32)
+            dta.update({'dev': dev, 'loops': []})
+            for loop in range(dev['total_loops']):
+                d = {'position': read_double_matrix(fid, 1, 3),
+                     'orientation': read_double_matrix(fid, 1, 3),
+                     'radius': read_double(fid),
+                     'wire_radius': read_double(fid),
+                     'turns': read_int16(fid)}
+                fid.seek(2, 1)
+                d['checksum'] = read_int32(fid)
+                d['reserved'] = read_str(fid, 32)
+                dta['loops'] += [d]
+
+        elif ch['ch_type'] == BTI.CHTYPE_EEG:
+            dta = {'device_info': dev_header(fid),
+                   'impedance': read_float(fid),
+                   'padding': read_str(fid, 4),
+                   'transform': read_transform(fid),
+                   'reserved': read_char(fid, 32)}
+
+        elif ch['ch_type'] == BTI.CHTYPE_EXTERNAL:
+            dta = {'device_info': dev_header(fid),
+                   'user_space_size': read_int32(fid),
+                   'reserved': read_str(fid, 32)}
+
+        elif ch['ch_type'] == BTI.CHTYPE_TRIGGER:
+            dta = {'device_info': dev_header(fid),
+                   'user_space_size': read_int32(fid)}
+            fid.seek(2, 1)
+            dta['reserved'] = read_str(fid, 32)
+
+        elif ch['ch_type'] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
+            dta = {'device_info': dev_header(fid),
+                   'user_space_size': read_int32(fid),
+                   'reserved': read_str(fid, 32)}
+
+        elif ch['ch_type'] == BTI.CHTYPE_SHORTED:
+            dta = {'device_info': dev_header(fid),
+                   'reserved': read_str(fid, 32)}
+
+        ch.update(dta)  # add data collected
+        _correct_offset(fid)  # after each reading
+
+    fid.close()
+
+    return cfg
+
+
+def _read_epoch(fid):
+    """Read BTi PDF epoch"""
+    out = {'pts_in_epoch': read_int32(fid),
+           'epoch_duration': read_float(fid),
+           'expected_iti': read_float(fid),
+           'actual_iti': read_float(fid),
+           'total_var_events': read_int32(fid),
+           'checksum': read_int32(fid),
+           'epoch_timestamp': read_int32(fid)}
+
+    fid.seek(28, 1)
+
+    return out
+
+
+def _read_channel(fid):
+    """Read BTi PDF channel"""
+    out = {'chan_label': read_str(fid, 16),
+           'chan_no': read_int16(fid),
+           'attributes': read_int16(fid),
+           'scale': read_float(fid),
+           'yaxis_label': read_str(fid, 16),
+           'valid_min_max': read_int16(fid)}
+
+    fid.seek(6, 1)
+    out.update({'ymin': read_double(fid),
+                'ymax': read_double(fid),
+                'index': read_int32(fid),
+                'checksum': read_int32(fid),
+                'off_flag': read_str(fid, 16),
+                'offset': read_float(fid)})
+
+    fid.seek(12, 1)
+
+    return out
+
+
+def _read_event(fid):
+    """Read BTi PDF event"""
+    out = {'event_name': read_str(fid, 16),
+           'start_lat': read_float(fid),
+           'end_lat': read_float(fid),
+           'step_size': read_float(fid),
+           'fixed_event': read_int16(fid),
+           'checksum': read_int32(fid)}
+
+    fid.seek(32, 1)
+    _correct_offset(fid)
+
+    return out
+
+
+def _read_process(fid):
+    """Read BTi PDF process"""
+
+    out = {'nbytes': read_int32(fid),
+           'process_type': read_str(fid, 20),
+           'checksum': read_int32(fid),
+           'user': read_str(fid, 32),
+           'timestamp': read_int32(fid),
+           'filename': read_str(fid, 256),
+           'total_steps': read_int32(fid)}
+
+    fid.seek(32, 1)
+    _correct_offset(fid)
+    out['processing_steps'] = list()
+    for step in range(out['total_steps']):
+        this_step = {'nbytes': read_int32(fid),
+                     'process_type': read_str(fid, 20),
+                     'checksum': read_int32(fid)}
+        ptype = this_step['process_type']
+        if ptype == BTI.PROC_DEFAULTS:
+            this_step['scale_option'] = read_int32(fid)
+
+            fid.seek(4, 1)
+            this_step['scale'] = read_double(fid)
+            this_step['dtype'] = read_int32(fid)
+            this_step['selected'] = read_int16(fid)
+            this_step['color_display'] = read_int16(fid)
+
+            fid.seek(32, 1)
+        elif ptype in BTI.PROC_FILTER:
+            this_step['freq'] = read_float(fid)
+            fid.seek(32, 1)
+        elif ptype in BTI.PROC_BPFILTER:
+            this_step['high_freq'] = read_float(fid)
+            this_step['low_freq'] = read_float(fid)
+        else:
+            jump = this_step['user_space_size'] = read_int32(fid)
+            fid.seek(32, 1)
+            fid.seek(jump, 1)
+
+        out['processing_steps'] += [this_step]
+        _correct_offset(fid)
+
+    return out
+
+
+def _read_assoc_file(fid):
+    """Read BTi PDF assocfile"""
+
+    out = {'file_id': read_int16(fid),
+           'length': read_int16(fid)}
+
+    fid.seek(32, 1)
+    out['checksum'] = read_int32(fid)
+
+    return out
+
+
+def _read_pfid_ed(fid):
+    """Read PDF ed file"""
+
+    out = {'comment_size': read_int32(fid),
+           'name': read_str(fid, 17)}
+
+    fid.seek(9, 1)
+    out.update({'pdf_number': read_int16(fid),
+                'total_events': read_int32(fid),
+                'timestamp': read_int32(fid),
+                'flags': read_int32(fid),
+                'de_process': read_int32(fid),
+                'checksum': read_int32(fid),
+                'ed_id': read_int32(fid),
+                'win_width': read_float(fid),
+                'win_offset': read_float(fid)})
+
+    fid.seek(8, 1)
+
+    return out
+
+
+def _read_coil_def(fid):
+    """ Read coil definition """
+    coildef = {'position': read_double_matrix(fid, 1, 3),
+               'orientation': read_double_matrix(fid, 1, 3),
+               'radius': read_double(fid),
+               'wire_radius': read_double(fid),
+               'turns': read_int16(fid)}
+
+    fid.seek(2, 1)
+    coildef['checksum'] = read_int32(fid)
+    coildef['reserved'] = read_str(fid, 32)
+
+    return coildef
+
+
+def _read_ch_config(fid):
+    """Read BTi channel config"""
+
+    cfg = {'name': read_str(fid, BTI.FILE_CONF_CH_NAME),
+           'chan_no': read_int16(fid),
+           'ch_type': read_uint16(fid),
+           'sensor_no': read_int16(fid)}
+
+    fid.seek(BTI.FILE_CONF_CH_NEXT, 1)
+
+    cfg.update({'gain': read_float(fid),
+                'units_per_bit': read_float(fid),
+                'yaxis_label': read_str(fid, BTI.FILE_CONF_CH_YLABEL),
+                'aar_val': read_double(fid),
+                'checksum': read_int32(fid),
+                'reserved': read_str(fid, BTI.FILE_CONF_CH_RESERVED)})
+
+    _correct_offset(fid)
+
+    # Then the channel info
+    ch_type, chan = cfg['ch_type'], dict()
+    chan['dev'] = {'size': read_int32(fid),
+                   'checksum': read_int32(fid),
+                   'reserved': read_str(fid, 32)}
+    if ch_type in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]:
+        chan['loops'] = [_read_coil_def(fid) for d in
+                        range(chan['dev']['total_loops'])]
+
+    elif ch_type == BTI.CHTYPE_EEG:
+        chan['impedance'] = read_float(fid)
+        chan['padding'] = read_str(fid, BTI.FILE_CONF_CH_PADDING)
+        chan['transform'] = read_transform(fid)
+        chan['reserved'] = read_char(fid, BTI.FILE_CONF_CH_RESERVED)
+
+    elif ch_type in [BTI.CHTYPE_TRIGGER,  BTI.CHTYPE_EXTERNAL,
+                     BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
+        chan['user_space_size'] = read_int32(fid)
+        if ch_type == BTI.CHTYPE_TRIGGER:
+            fid.seek(2, 1)
+        chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED)
+
+    elif ch_type == BTI.CHTYPE_SHORTED:
+        chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED)
+
+    cfg['chan'] = chan
+
+    _correct_offset(fid)
+
+    return cfg
+
+
+def _read_bti_header(pdf_fname, config_fname):
+    """ Read bti PDF header
+    """
+    fid = open(pdf_fname, 'rb')
+
+    fid.seek(-8, 2)
+    start = fid.tell()
+    header_position = read_int64(fid)
+    check_value = header_position & BTI.FILE_MASK
+
+    if ((start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK):
+        header_position = check_value
+
+    # Check header position for alignment issues
+    if ((header_position % 8) != 0):
+        header_position += (8 - (header_position % 8))
+
+    fid.seek(header_position, 0)
+
+    # actual header starts here
+    info = {'version': read_int16(fid),
+            'file_type': read_str(fid, 5),
+            'hdr_size': start - header_position,  # add to info for convenience
+            'start': start}
+
+    fid.seek(1, 1)
+
+    info.update({'data_format': read_int16(fid),
+                 'acq_mode': read_int16(fid),
+                 'total_epochs': read_int32(fid),
+                 'input_epochs': read_int32(fid),
+                 'total_events': read_int32(fid),
+                 'total_fixed_events': read_int32(fid),
+                 'sample_period': read_float(fid),
+                 'xaxis_label': read_str(fid, 16),
+                 'total_processes': read_int32(fid),
+                 'total_chans': read_int16(fid)})
+
+    fid.seek(2, 1)
+    info.update({'checksum': read_int32(fid),
+                 'total_ed_classes': read_int32(fid),
+                 'total_associated_files': read_int16(fid),
+                 'last_file_index': read_int16(fid),
+                 'timestamp': read_int32(fid)})
+
+    fid.seek(20, 1)
+    _correct_offset(fid)
+
+    # actual header ends here, so far everything seems ok.
+
+    info['epochs'] = [_read_epoch(fid) for epoch in
+                       range(info['total_epochs'])]
+
+    info['chs'] = [_read_channel(fid) for ch in
+                   range(info['total_chans'])]
+
+    info['events'] = [_read_event(fid) for event in
+                      range(info['total_events'])]
+
+    info['processes'] = [_read_process(fid) for process in
+                         range(info['total_processes'])]
+
+    info['assocfiles'] = [_read_assoc_file(fid) for af in
+                          range(info['total_associated_files'])]
+
+    info['edclasses'] = [_read_pfid_ed(fid) for ed_class in
+                         range(info['total_ed_classes'])]
+
+    info['extra_data'] = fid.read(start - fid.tell())
+    info['fid'] = fid
+
+    info['total_slices'] = sum(e['pts_in_epoch'] for e in
+                               info['epochs'])
+
+    info['dtype'] = DTYPES[info['data_format']]
+    bps = info['dtype'].itemsize * info['total_chans']
+    info['bytes_per_slice'] = bps
+
+    cfg = _read_config(config_fname)
+    info['bti_transform'] = cfg['transforms']
+
+    # augment the channel list with the corresponding info from the config:
+    # get the config channels that are present in the PDF
+    chans = info['chs']
+    chans_cfg = [c for c in cfg['chs'] if c['chan_no']
+                 in [c_['chan_no'] for c_ in chans]]
+
+    # check that all PDF channels are present in the config
+    match = [c['chan_no'] for c in chans_cfg] == \
+            [c['chan_no'] for c in chans]
+
+    if not match:
+        raise RuntimeError('Could not match raw data channels with'
+                           ' config channels. Some of the channels'
+                           ' found are not described in config.')
+
+    # transfer channel info from config to channel info
+    for ch, ch_cfg in zip(chans, chans_cfg):
+        ch['upb'] = ch_cfg['units_per_bit']
+        ch['gain'] = ch_cfg['gain']
+        ch['name'] = ch_cfg['name']
+        ch['coil_trans'] = (ch_cfg['dev'].get('transform', None)
+                            if 'dev' in ch_cfg else None)
+        if info['data_format'] <= 2:
+            ch['cal'] = ch['scale'] * ch['upb'] * (ch['gain'] ** -1)
+        else:
+            ch['cal'] = ch['scale'] * ch['gain']
+
+    # sort by index first; the name-based positions below refer to the
+    # index-sorted list, so index into that list, not the original one
+    by_index = [(i, d['index']) for i, d in enumerate(chans)]
+    by_index.sort(key=lambda c: c[1])
+    by_index = [idx[0] for idx in by_index]
+    chans = [chans[pos] for pos in by_index]
+
+    by_name = [(i, d['name']) for i, d in enumerate(chans)]
+    by_name.sort(key=lambda c: int(c[1][1:]) if c[1][0] == 'A' else c[1])
+    by_name = [idx[0] for idx in by_name]
+    info['chs'] = [chans[pos] for pos in by_name]
+    info['order'] = by_name
+
+    # finally add some important fields from the config
+    info['e_table'] = cfg['user_blocks'][BTI.UB_B_E_TABLE_USED]
+    info['weights'] = cfg['user_blocks'][BTI.UB_B_WEIGHTS_USED]
+
+    return info
+
+
+def _read_data(info, start=None, stop=None):
+    """ Helper function: read Bti processed data file (PDF)
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    start : int | None
+        The number of the first time slice to read. If None, all data will
+        be read from the beginning.
+    stop : int | None
+        The number of the last time slice to read. If None, all data will
+        be read to the end.
+
+    Returns
+    -------
+    data : ndarray
+        The measurement data, a channels x time slices array.
+    """
+
+    total_slices = info['total_slices']
+    if start is None:
+        start = 0
+    if stop is None:
+        stop = total_slices
+
+    if any([start < 0, stop > total_slices, start >= stop]):
+        raise RuntimeError('Invalid data range supplied:'
+                           ' %d, %d' % (start, stop))
+
+    info['fid'].seek(info['bytes_per_slice'] * start, 0)
+
+    cnt = (stop - start) * info['total_chans']
+    shape = [stop - start, info['total_chans']]
+    data = np.fromfile(info['fid'], dtype=info['dtype'],
+                       count=cnt).astype('f4').reshape(shape)
+
+    for ch in info['chs']:
+        data[:, ch['index']] *= ch['cal']
+
+    return data[:, info['order']].T
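+
+# A usage sketch (editor's illustration; ``info`` is assumed to come from
+# _read_bti_header): read only the first 1000 time slices.
+#
+#     >>> data = _read_data(info, start=0, stop=1000)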
+
+
+class RawBTi(Raw):
+    """ Raw object from 4D Neuroimaging MagnesWH3600 data
+
+    Parameters
+    ----------
+    pdf_fname : str | None
+        absolute path to the processed data file (PDF)
+    config_fname : str | None
+        absolute path to system config file. If None, it is assumed to be in
+        the same directory.
+    head_shape_fname : str
+        absolute path to the head shape file. If None, it is assumed to be in
+        the same directory.
+    rotation_x : float | int | None
+        Degrees to tilt x-axis for sensor frame misalignment.
+        If None, no adjustment will be applied.
+    translation : array-like
+        The translation to place the origin of coordinate system
+        to the center of the head.
+    ecg_ch : str | None
+        The 4D name of the ECG channel. If None, the channel will be treated
+        as a regular EEG channel.
+    eog_ch : tuple of str | None
+        The 4D names of the EOG channels. If None, the channels will be
+        treated as regular EEG channels.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes & Methods
+    --------------------
+    See documentation for mne.fiff.Raw
+
+    """
+    @verbose
+    def __init__(self, pdf_fname, config_fname='config',
+                 head_shape_fname='hs_file', rotation_x=None,
+                 translation=(0.0, 0.02, 0.11), ecg_ch='E31',
+                 eog_ch=('E63', 'E64'), verbose=None):
+
+        if not op.isabs(pdf_fname):
+            pdf_fname = op.abspath(pdf_fname)
+
+        if not op.isabs(config_fname):
+            config_fname = op.join(op.dirname(pdf_fname), config_fname)
+
+        if not op.exists(config_fname):
+            raise ValueError('Could not find the config file %s. Please check'
+                             ' whether you are in the right directory '
+                             'or pass the full name' % config_fname)
+
+        if not op.isabs(head_shape_fname):
+            head_shape_fname = op.join(op.dirname(pdf_fname), head_shape_fname)
+
+        if not op.exists(head_shape_fname):
+            raise ValueError('Could not find the head_shape file %s. You '
+                             'should check whether you are in the right '
+                             'directory or pass the full file name.'
+                             % head_shape_fname)
+
+        logger.info('Reading 4D PDF file %s...' % pdf_fname)
+        bti_info = _read_bti_header(pdf_fname, config_fname)
+
+        # XXX index is an informed guess; normally only one transform is
+        # stored.
+        dev_ctf_t = bti_info['bti_transform'][0].astype('>f8')
+        bti_to_nm = bti_to_vv_trans(adjust=rotation_x,
+                                    translation=translation, dtype='>f8')
+
+        use_hpi = False  # hard-coded for now; may become an option later
+        logger.info('Creating Neuromag info structure ...')
+        info = dict()
+        info['bads'] = []
+        info['meas_id'] = None
+        info['file_id'] = None
+        info['projs'] = list()
+        info['comps'] = list()
+        date = bti_info['processes'][0]['timestamp']
+        info['meas_date'] = [date, 0]
+        info['sfreq'] = 1.0 / bti_info['sample_period']
+        info['nchan'] = len(bti_info['chs'])
+
+        # browse processing info for filter specs.
+        hp, lp = 0.0, info['sfreq'] * 0.4  # find better default
+        for proc in bti_info['processes']:
+            if 'filt' in proc['process_type']:
+                for step in proc['processing_steps']:
+                    if 'high_freq' in step:
+                        hp, lp = step['high_freq'], step['low_freq']
+                    elif 'hp' in step['process_type']:
+                        hp = step['freq']
+                    elif 'lp' in step['process_type']:
+                        lp = step['freq']
+
+        info['highpass'] = hp
+        info['lowpass'] = lp
+        info['acq_pars'], info['acq_stim'] = None, None
+        info['filename'] = None
+        info['filenames'] = []
+        chs = []
+
+        ch_names = [ch['name'] for ch in bti_info['chs']]
+        info['ch_names'] = _rename_channels(ch_names, ecg_ch=ecg_ch,
+                                            eog_ch=eog_ch)
+        ch_mapping = zip(ch_names, info['ch_names'])
+        logger.info('... Setting channel info structure.')
+        for idx, (chan_4d, chan_vv) in enumerate(ch_mapping):
+            chan_info = dict(zip(FIFF_INFO_CHS_FIELDS, FIFF_INFO_CHS_DEFAULTS))
+            chan_info['ch_name'] = chan_vv
+            chan_info['logno'] = idx + BTI.FIFF_LOGNO
+            chan_info['scanno'] = idx + 1
+            chan_info['cal'] = bti_info['chs'][idx]['scale']
+
+            if any([chan_vv.startswith(k) for k in ('MEG', 'RFG', 'RFM')]):
+                t, loc = bti_info['chs'][idx]['coil_trans'], None
+                if t is not None:
+                    t, loc = _convert_coil_trans(t.astype('>f8'), dev_ctf_t,
+                                                 bti_to_nm)
+                    if idx == 1:
+                        logger.info('... putting coil transforms in Neuromag '
+                                    'coordinates')
+                chan_info['coil_trans'] = t
+                if loc is not None:
+                    chan_info['loc'] = loc.astype('>f4')
+
+            if chan_vv.startswith('MEG'):
+                chan_info['kind'] = FIFF.FIFFV_MEG_CH
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG
+                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                chan_info['unit'] = FIFF.FIFF_UNIT_T
+
+            elif chan_vv.startswith('RFM'):
+                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_MAG
+                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                chan_info['unit'] = FIFF.FIFF_UNIT_T
+
+            elif chan_vv.startswith('RFG'):
+                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                chan_info['unit'] = FIFF.FIFF_UNIT_T_M
+                if chan_4d in ('GxxA', 'GyyA'):
+                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA
+                elif chan_4d in ('GyxA', 'GzxA', 'GzyA'):
+                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF
+            elif chan_vv == 'STI 013':
+                chan_info['kind'] = FIFF.FIFFV_RESP_CH
+            elif chan_vv == 'STI 014':
+                chan_info['kind'] = FIFF.FIFFV_STIM_CH
+            elif chan_vv.startswith('EOG'):
+                chan_info['kind'] = FIFF.FIFFV_EOG_CH
+            elif chan_vv == 'ECG 001':
+                chan_info['kind'] = FIFF.FIFFV_ECG_CH
+            elif chan_vv.startswith('EXT'):
+                chan_info['kind'] = FIFF.FIFFV_MISC_CH
+            elif chan_vv.startswith('UTL'):
+                chan_info['kind'] = FIFF.FIFFV_MISC_CH
+
+            chs.append(chan_info)
+
+        info['chs'] = chs
+
+        logger.info('... Reading digitization points from %s' %
+                    head_shape_fname)
+        logger.info('... putting digitization points in Neuromag '
+                    'coordinates')
+        info['dig'], ctf_head_t = _setup_head_shape(head_shape_fname, use_hpi)
+        logger.info('... Computing new device to head transform.')
+        dev_head_t = _convert_dev_head_t(dev_ctf_t, bti_to_nm,
+                                         ctf_head_t)
+
+        info['dev_head_t'] = dict()
+        info['dev_head_t']['from'] = FIFF.FIFFV_COORD_DEVICE
+        info['dev_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
+        info['dev_head_t']['trans'] = dev_head_t
+        info['dev_ctf_t'] = dict()
+        info['dev_ctf_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_DEVICE
+        info['dev_ctf_t']['to'] = FIFF.FIFFV_COORD_HEAD
+        info['dev_ctf_t']['trans'] = dev_ctf_t
+        info['ctf_head_t'] = dict()
+        info['ctf_head_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+        info['ctf_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
+        info['ctf_head_t']['trans'] = ctf_head_t
+        logger.info('Done.')
+
+        if False:  # XXX : reminds us to support this as we go
+            # include digital weights from reference channel
+            comps = info['comps'] = list()
+            weights = bti_info['weights']
+            by_name = lambda x: x[1]
+            chn = dict(ch_mapping)
+            columns = [chn[k] for k in weights['dsp_ch_names']]
+            rows = [chn[k] for k in weights['ch_names']]
+            col_order, col_names = zip(*sorted(enumerate(columns),
+                                               key=by_name))
+            row_order, row_names = zip(*sorted(enumerate(rows), key=by_name))
+            # for some reason the C code would invert the signs, so we follow.
+            mat = -weights['dsp_wts'][row_order, :][:, col_order]
+            comp_data = dict(data=mat,
+                             col_names=col_names,
+                             row_names=row_names,
+                             nrow=mat.shape[0], ncol=mat.shape[1])
+            comps += [dict(data=comp_data, ctfkind=101,
+                           #  no idea how to calibrate, just ones.
+                           rowcals=np.ones(mat.shape[0], dtype='>f4'),
+                           colcals=np.ones(mat.shape[1], dtype='>f4'),
+                           save_calibrated=0)]
+        else:
+            logger.warning('Warning. Currently direct inclusion of 4D weight '
+                           'tables is not supported. For critical use cases\n'
+                           'please consider using the MNE command '
+                           '\'mne_create_comp_data\' to include weights as\n'
+                           'printed out by the 4D \'print_table\' routine.')
+
+        # check that the info is complete
+        assert not set(RAW_INFO_FIELDS) - set(info.keys())
+
+        # check nchan is correct
+        assert len(info['ch_names']) == info['nchan']
+
+        cals = np.zeros(info['nchan'])
+        for k in range(info['nchan']):
+            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
+
+        self.verbose = verbose
+        self.cals = cals
+        self.rawdir = None
+        self.proj = None
+        self.comp = None
+        self.fids = list()
+        self._preloaded = True
+        self._projector_hashes = [None]
+        self.info = info
+
+        logger.info('Reading raw data from %s...' % pdf_fname)
+        self._data = _read_data(bti_info)
+        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
+
+        assert len(self._data) == len(self.info['ch_names'])
+        self._times = np.arange(self.first_samp,
+                                self.last_samp + 1) / info['sfreq']
+        self._projectors = [None]
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                   self.first_samp, self.last_samp,
+                   float(self.first_samp) / info['sfreq'],
+                   float(self.last_samp) / info['sfreq']))
+
+        logger.info('Ready.')
+
+
+@verbose
+def read_raw_bti(pdf_fname, config_fname='config',
+                 head_shape_fname='hs_file', rotation_x=None,
+                 translation=(0.0, 0.02, 0.11), ecg_ch='E31',
+                 eog_ch=('E63', 'E64'), verbose=None):
+    """ Raw object from 4D Neuroimaging MagnesWH3600 data
+
+    Note.
+    1) Currently direct inclusion of reference channel weights
+    is not supported. Please use 'mne_create_comp_data' to include
+    the weights or use the low level functions from this module to
+    include them yourself.
+    2) The informed guess for the 4D name is E31 for the ECG channel and
+    E63, E64 for the EOG channels. Please check and adjust if those channels
+    are present in your dataset but 'ECG 001' and 'EOG 001', 'EOG 002' don't
+    appear in the channel names of the raw object.
+
+    Parameters
+    ----------
+    pdf_fname : str | None
+        absolute path to the processed data file (PDF)
+    config_fname : str | None
+        absolute path to system config file. If None, it is assumed to be in
+        the same directory.
+    head_shape_fname : str
+        absolute path to the head shape file. If None, it is assumed to be in
+        the same directory.
+    rotation_x : float | int | None
+        Degrees to tilt x-axis for sensor frame misalignment.
+        If None, no adjustment will be applied.
+    translation : array-like
+        The translation to place the origin of coordinate system
+        to the center of the head.
+    ecg_ch : str | None
+        The 4D name of the ECG channel. If None, the channel will be treated
+        as a regular EEG channel.
+    eog_ch : tuple of str | None
+        The 4D names of the EOG channels. If None, the channels will be
+        treated as regular EEG channels.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    return RawBTi(pdf_fname, config_fname=config_fname,
+                  head_shape_fname=head_shape_fname,
+                  rotation_x=rotation_x, translation=translation,
+                  ecg_ch=ecg_ch, eog_ch=eog_ch, verbose=verbose)
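+
+# A usage sketch (editor's illustration; 'c,rfDC' is a hypothetical 4D PDF
+# file name, with config and head shape files in the same directory):
+#
+#     >>> raw = read_raw_bti('c,rfDC', config_fname='config',
+#     ...                    head_shape_fname='hs_file')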
diff --git a/mne/fiff/bti/read.py b/mne/fiff/bti/read.py
new file mode 100644
index 0000000..3f7d71e
--- /dev/null
+++ b/mne/fiff/bti/read.py
@@ -0,0 +1,126 @@
+# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+#          simplified BSD-3 license
+
+import struct
+import logging
+import numpy as np
+
+logger = logging.getLogger('mne')
+
+
+def _unpack_matrix(fid, format, rows, cols, dtype):
+    """ Aux Function """
+    out = np.zeros((rows, cols), dtype=dtype)
+    bsize = struct.calcsize(format)
+    string = fid.read(bsize)
+    data = struct.unpack(format, string)
+    iter_mat = [(r, c) for r in xrange(rows) for c in xrange(cols)]
+    for idx, (row, col) in enumerate(iter_mat):
+        out[row, col] = data[idx]
+
+    return out
+
+
+def _unpack_simple(fid, format, count):
+    """ Aux Function """
+    bsize = struct.calcsize(format)
+    string = fid.read(bsize)
+    data = list(struct.unpack(format, string))
+
+    # return a scalar for single values, the full list otherwise
+    out = data[0] if count < 2 and len(data) > 0 else data
+
+    return out
+
+
+def read_str(fid, count=1):
+    """ Read string """
+    format = '>' + ('c' * count)
+    data = list(struct.unpack(format, fid.read(struct.calcsize(format))))
+
+    return ''.join(data[0:data.index('\x00') if '\x00' in data else count])
+
+
+def read_char(fid, count=1):
+    " Read character from bti file """
+    return _unpack_simple(fid, '>' + ('c' * count), count)
+
+
+def read_bool(fid, count=1):
+    """ Read bool value from bti file """
+    return _unpack_simple(fid, '>' + ('?' * count), count)
+
+
+def read_uint8(fid, count=1):
+    """ Read unsigned 8bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('B' * count), count)
+
+
+def read_int8(fid, count=1):
+    """ Read 8bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('b' * count), count)
+
+
+def read_uint16(fid, count=1):
+    """ Read unsigned 16bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('H' * count), count)
+
+
+def read_int16(fid, count=1):
+    """ Read 16bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('h' * count), count)
+
+
+def read_uint32(fid, count=1):
+    """ Read unsigned 32bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('I' * count), count)
+
+
+def read_int32(fid, count=1):
+    """ Read 32bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('i' * count), count)
+
+
+def read_uint64(fid, count=1):
+    """ Read unsigned 64bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('Q' * count), count)
+
+
+def read_int64(fid, count=1):
+    """ Read 64bit integer from bti file """
+    return _unpack_simple(fid, '>' + ('q' * count), count)
+
+
+def read_float(fid, count=1):
+    """ Read 32bit float from bti file """
+    return _unpack_simple(fid, '>' + ('f' * count), count)
+
+
+def read_double(fid, count=1):
+    """ Read 64bit float from bti file """
+    return _unpack_simple(fid, '>' + ('d' * count), count)
+
+
+def read_int16_matrix(fid, rows, cols):
+    """ Read 16bit integer matrix from bti file """
+    format = '>' + ('h' * rows * cols)
+    return _unpack_matrix(fid, format, rows, cols, np.int16)
+
+
+def read_float_matrix(fid, rows, cols):
+    """ Read 32bit float matrix from bti file """
+    format = '>' + ('f' * rows * cols)
+    return _unpack_matrix(fid, format, rows, cols, 'f4')
+
+
+def read_double_matrix(fid, rows, cols):
+    """ Read 64bit float matrix from bti file """
+    format = '>' + ('d' * rows * cols)
+    return _unpack_matrix(fid, format, rows, cols, 'f8')
+
+
+def read_transform(fid):
+    """ Read 64bit float matrix transform from bti file """
+    format = '>' + ('d' * 4 * 4)
+    return _unpack_matrix(fid, format, 4, 4, 'f8')
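+
+# Round-trip sketch for the readers above (not part of the 4D format): any
+# big-endian buffer behind a file-like read() works, so a transform packed
+# with struct can be read straight back:
+#
+#     >>> import struct, StringIO
+#     >>> fid = StringIO.StringIO(struct.pack('>16d', *range(16)))
+#     >>> read_transform(fid)[0, 1]
+#     1.0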
diff --git a/mne/fiff/bti/tests/__init__.py b/mne/fiff/bti/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/fiff/bti/tests/data/exported4D_linux.fif b/mne/fiff/bti/tests/data/exported4D_linux.fif
new file mode 100644
index 0000000..8e344d0
Binary files /dev/null and b/mne/fiff/bti/tests/data/exported4D_linux.fif differ
diff --git a/mne/fiff/bti/tests/data/exported4D_solaris.fif b/mne/fiff/bti/tests/data/exported4D_solaris.fif
new file mode 100644
index 0000000..83fc7ee
Binary files /dev/null and b/mne/fiff/bti/tests/data/exported4D_solaris.fif differ
diff --git a/mne/fiff/bti/tests/data/test_config_linux b/mne/fiff/bti/tests/data/test_config_linux
new file mode 100755
index 0000000..1a1b701
Binary files /dev/null and b/mne/fiff/bti/tests/data/test_config_linux differ
diff --git a/mne/fiff/bti/tests/data/test_config_solaris b/mne/fiff/bti/tests/data/test_config_solaris
new file mode 100644
index 0000000..f133d86
Binary files /dev/null and b/mne/fiff/bti/tests/data/test_config_solaris differ
diff --git a/mne/fiff/bti/tests/data/test_hs_linux b/mne/fiff/bti/tests/data/test_hs_linux
new file mode 100755
index 0000000..f59a04c
Binary files /dev/null and b/mne/fiff/bti/tests/data/test_hs_linux differ
diff --git a/mne/fiff/bti/tests/data/test_hs_solaris b/mne/fiff/bti/tests/data/test_hs_solaris
new file mode 100644
index 0000000..b3a5c57
Binary files /dev/null and b/mne/fiff/bti/tests/data/test_hs_solaris differ
diff --git a/mne/fiff/bti/tests/data/test_pdf_linux b/mne/fiff/bti/tests/data/test_pdf_linux
new file mode 100755
index 0000000..44f5052
Binary files /dev/null and b/mne/fiff/bti/tests/data/test_pdf_linux differ
diff --git a/mne/fiff/bti/tests/data/test_pdf_solaris b/mne/fiff/bti/tests/data/test_pdf_solaris
new file mode 100644
index 0000000..0fc2cf7
Binary files /dev/null and b/mne/fiff/bti/tests/data/test_pdf_solaris differ
diff --git a/mne/fiff/bti/tests/test_bti.py b/mne/fiff/bti/tests/test_bti.py
new file mode 100644
index 0000000..f4b62f7
--- /dev/null
+++ b/mne/fiff/bti/tests/test_bti.py
@@ -0,0 +1,91 @@
+# Authors: Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true, assert_raises, assert_equal
+
+from mne.fiff import Raw
+from mne.fiff.bti.raw import _read_config, _setup_head_shape,\
+                             read_raw_bti, _read_data, _read_bti_header
+from mne.utils import _TempDir
+
+base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
+
+archs = 'linux', 'solaris'
+pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
+config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
+hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
+exported_fnames = [op.join(base_dir, 'exported4D_%s.fif' % a) for a in archs]
+tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
+tempdir = _TempDir()
+
+# the 4D exporter doesn't export all channels, so we confine our comparison
+NCH = 248
+
+
+def test_read_config():
+    """ Test read bti config file """
+    # for config in config_fname, config_solaris_fname:
+    for config in config_fnames:
+        cfg = _read_config(config)
+        # no user block name should be empty or of unknown kind
+        assert_true(all(block and 'unknown' not in block.lower()
+                        for block in cfg['user_blocks']))
+
+
+def test_read_pdf():
+    """ Test read bti PDF file """
+    for pdf, config in zip(pdf_fnames, config_fnames):
+        info = _read_bti_header(pdf, config)
+        data = _read_data(info)
+        shape = (info['total_chans'], info['total_slices'])
+        assert_true(data.shape == shape)
+        info['fid'].close()
+
+
+def test_raw():
+    """ Test conversion to Raw object """
+
+    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames,
+                                         hs_fnames, exported_fnames):
+        # rx = 2 if 'linux' in pdf else 0
+        assert_raises(ValueError, read_raw_bti, pdf, 'eggs')
+        assert_raises(ValueError, read_raw_bti, pdf, config, 'spam')
+        if op.exists(tmp_raw_fname):
+            os.remove(tmp_raw_fname)
+        with Raw(exported, preload=True) as ex:
+            with read_raw_bti(pdf, config, hs) as ra:
+                assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
+                assert_array_almost_equal(ex.info['dev_head_t']['trans'],
+                                          ra.info['dev_head_t']['trans'], 7)
+                dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
+                              for r_ in ra, ex]
+                assert_array_equal(dig1, dig2)
+
+                coil1, coil2 = [np.concatenate([d['coil_trans'].flatten()
+                                for d in r_.info['chs'][:NCH]])
+                                for r_ in ra, ex]
+                assert_array_almost_equal(coil1, coil2, 7)
+
+                loc1, loc2 = [np.concatenate([d['loc'].flatten()
+                              for d in r_.info['chs'][:NCH]])
+                              for r_ in ra, ex]
+                assert_array_equal(loc1, loc2)
+
+                assert_array_equal(ra._data[:NCH], ex._data[:NCH])
+                assert_array_equal(ra.cals[:NCH], ex.cals[:NCH])
+                ra.save(tmp_raw_fname)
+            with Raw(tmp_raw_fname) as r:
+                print r
+        os.remove(tmp_raw_fname)
+
+
+def test_setup_headshape():
+    """ Test reading bti headshape """
+    for hs in hs_fnames:
+        dig, t = _setup_head_shape(hs)
+        expected = set(['kind', 'ident', 'r'])
+        found = set(reduce(lambda x, y: x + y, [d.keys() for d in dig]))
+        assert_true(not expected - found)
diff --git a/mne/fiff/bti/transforms.py b/mne/fiff/bti/transforms.py
new file mode 100644
index 0000000..9627c3d
--- /dev/null
+++ b/mne/fiff/bti/transforms.py
@@ -0,0 +1,102 @@
+# Authors: Denis A. Engemann  <d.engemann at fz-juelich.de>
+#
+#          simplified BSD-3 license
+
+
+import numpy as np
+from .constants import BTI
+
+
+def bti_identity_trans(dtype='>f8'):
+    """ Get BTi identity transform
+
+    Parameters
+    ----------
+    dtype : str | dtype object
+        The data format of the transform
+
+    Returns
+    -------
+    itrans : ndarray
+        The 4 x 4 transformation matrix.
+    """
+    return np.array(BTI.T_IDENT, dtype=dtype)
+
+
+def bti_to_vv_trans(adjust=None, translation=(0.0, 0.02, 0.11), dtype='>f8'):
+    """ Get the general Magnes3600WH to Neuromag coordinate transform
+
+    Parameters
+    ----------
+    adjust : int | None
+        Degrees to tilt the x-axis for sensor frame misalignment.
+        If None, no adjustment will be applied.
+    translation : array-like
+        The translation to place the origin of the coordinate system
+        at the center of the head.
+
+    Returns
+    -------
+    m_nm_t : ndarray
+        4 x 4 rotation, translation, scaling matrix.
+    """
+    flip_t = np.array(BTI.T_ROT_VV, dtype=dtype)
+    adjust_t = bti_identity_trans(dtype=dtype)
+    adjust = 0 if adjust is None else adjust
+    deg = np.deg2rad(np.float64(adjust))
+    adjust_t[[1, 2], [1, 2]] = np.cos(deg)
+    adjust_t[[1, 2], [2, 1]] = -np.sin(deg), np.sin(deg)
+    m_nm_t = np.ones([4, 4], dtype=dtype)
+    m_nm_t[BTI.T_ROT_IX] = np.dot(flip_t[BTI.T_ROT_IX],
+                                  adjust_t[BTI.T_ROT_IX])
+    m_nm_t[BTI.T_TRANS_IX] = np.matrix(translation, dtype=dtype).T
+
+    return m_nm_t
+
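+# Shape sketch (values follow from the defaults above, shown for orientation
+# only): with adjust=None the tilt is a no-op, so the result is the fixed
+# BTI.T_ROT_VV rotation plus the default head-origin translation:
+#
+#     >>> t = bti_to_vv_trans()  # doctest: +SKIP
+#     >>> t[:3, 3]  # doctest: +SKIP
+#     array([ 0.  ,  0.02,  0.11])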
+
+def bti_to_vv_coil_trans(ch_t, bti_t, nm_t, nm_default_scale=True):
+    """ transforms 4D coil position to fiff / Neuromag
+    """
+    nm_coil_trans = apply_trans(inverse_trans(ch_t, bti_t), nm_t)
+
+    if nm_default_scale:
+        nm_coil_trans[3, :3] = 0.
+
+    return nm_coil_trans
+
+
+def inverse_trans(x, t, rot=BTI.T_ROT_IX, trans=BTI.T_TRANS_IX,
+                  scal=BTI.T_SCA_IX):
+    """ Undo a transform
+    """
+    x = x.copy()
+    x[scal] *= t[scal]
+    x[rot] = np.dot(t[rot].T, x[rot])
+    x[trans] -= t[trans]
+    x[trans] = np.dot(t[rot].T, x[trans])
+
+    return x
+
+
+def apply_trans(x, t, rot=BTI.T_ROT_IX, trans=BTI.T_TRANS_IX,
+                scal=BTI.T_SCA_IX):
+    """ Apply a transform
+    """
+    x = x.copy()
+    x[rot] = np.dot(t[rot], x[rot])
+    x[trans] = np.dot(t[rot], x[trans])
+    x[trans] += t[trans]
+    x[scal] *= t[scal]
+
+    return x
+
+
+def merge_trans(t1, t2, dtype='>f8'):
+    """ Merge two transforms
+    """
+    t = bti_identity_trans(dtype=dtype)
+    t[BTI.T_ROT_IX] = np.dot(t1[BTI.T_ROT_IX], t2[BTI.T_ROT_IX])
+    t[BTI.T_TRANS_IX] = np.dot(t1[BTI.T_ROT_IX], t2[BTI.T_TRANS_IX])
+    t[BTI.T_TRANS_IX] += t1[BTI.T_TRANS_IX]
+
+    return t
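+
+# Consistency sketch: for transforms with unit scaling, such as the one
+# produced by bti_to_vv_trans above, inverse_trans undoes apply_trans:
+#
+#     >>> t = bti_to_vv_trans()  # doctest: +SKIP
+#     >>> x = bti_identity_trans()  # doctest: +SKIP
+#     >>> np.allclose(inverse_trans(apply_trans(x, t), t), x)  # doctest: +SKIP
+#     True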
diff --git a/mne/fiff/channels.py b/mne/fiff/channels.py
new file mode 100644
index 0000000..4c1e11e
--- /dev/null
+++ b/mne/fiff/channels.py
@@ -0,0 +1,35 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from .tree import dir_tree_find
+from .tag import find_tag
+from .constants import FIFF
+
+
+def read_bad_channels(fid, node):
+    """Read bad channels
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+
+    node : dict
+        The node of the FIF tree that contains info on the bad channels.
+
+    Returns
+    -------
+    bads : list
+        A list of bad channel names.
+    """
+    nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    bads = []
+    if len(nodes) > 0:
+        for node in nodes:
+            tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)
+            if tag is not None and tag.data is not None:
+                bads = tag.data.split(':')
+    return bads
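+
+# Usage sketch (assuming `fid` and `meas_info` come from opening a FIF file
+# with this package's fiff_open; the channel name is hypothetical):
+#
+#     >>> bads = read_bad_channels(fid, meas_info)  # doctest: +SKIP
+#     >>> 'MEG 2443' in bads  # doctest: +SKIP
+#     False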
diff --git a/mne/fiff/compensator.py b/mne/fiff/compensator.py
new file mode 100644
index 0000000..1d8c452
--- /dev/null
+++ b/mne/fiff/compensator.py
@@ -0,0 +1,156 @@
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+from .constants import FIFF
+from .. import verbose
+
+
+def get_current_comp(info):
+    """Get the current compensation in effect in the data
+    """
+    comp = None
+    first_comp = -1
+    for k, chan in enumerate(info['chs']):
+        if chan['kind'] == FIFF.FIFFV_MEG_CH:
+            comp = int(chan['coil_type']) >> 16
+            if first_comp < 0:
+                first_comp = comp
+            elif comp != first_comp:
+                raise ValueError('Compensation is not set equally on '
+                                 'all MEG channels')
+
+    return comp
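+
+# Worked example: the compensation grade lives in the upper 16 bits of the
+# coil type, so a (hypothetical) coil_type of 0x30012 reports grade 3:
+#
+#     >>> 0x30012 >> 16
+#     3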
+
+
+def _make_compensator(info, kind):
+    """Auxiliary function for make_compensator
+    """
+    for k in range(len(info['comps'])):
+        if info['comps'][k]['kind'] == kind:
+            this_data = info['comps'][k]['data']
+
+            #   Create the preselector
+            presel = np.zeros((this_data['ncol'], info['nchan']))
+            for col, col_name in enumerate(this_data['col_names']):
+                ind = [k for k, ch in enumerate(info['ch_names'])
+                       if ch == col_name]
+                if len(ind) == 0:
+                    raise ValueError('Channel %s is not available in data'
+                                     % col_name)
+                elif len(ind) > 1:
+                    raise ValueError('Ambiguous channel %s' % col_name)
+                presel[col, ind] = 1.0
+
+            #   Create the postselector
+            postsel = np.zeros((info['nchan'], this_data['nrow']))
+            for c, ch_name in enumerate(info['ch_names']):
+                ind = [k for k, ch in enumerate(this_data['row_names'])
+                       if ch == ch_name]
+                if len(ind) > 1:
+                    raise ValueError('Ambiguous channel %s' % ch_name)
+                elif len(ind) == 1:
+                    postsel[c, ind[0]] = 1.0
+
+            this_comp = np.dot(postsel, np.dot(this_data['data'], presel))
+            return this_comp
+
+    raise ValueError('Desired compensation matrix (kind = %d) not'
+                     ' found' % kind)
+
+
+def make_compensator(info, from_, to, exclude_comp_chs=False):
+    """Returns compensation matrix eg. for CTF system.
+
+    Create a compensation matrix to bring the data from one compensation
+    state to another.
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    from_ : int
+        Compensation in the input data.
+    to : int
+        Desired compensation in the output.
+    exclude_comp_chs : bool
+        Exclude compensation channels from the output.
+
+    Returns
+    -------
+    comp : array | None
+        The compensation matrix. Might be None if no compensation
+        is needed (i.e. when from_ == to).
+    """
+    if from_ == to:
+        return None
+
+    if from_ == 0:
+        C1 = np.zeros((info['nchan'], info['nchan']))
+    else:
+        C1 = _make_compensator(info, from_)
+
+    if to == 0:
+        C2 = np.zeros((info['nchan'], info['nchan']))
+    else:
+        C2 = _make_compensator(info, to)
+
+    #   s_orig = s_from + C1*s_from = (I + C1)*s_from
+    #   s_to   = s_orig - C2*s_orig = (I - C2)*s_orig
+    #   s_to   = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from
+    comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1)
+
+    if exclude_comp_chs:
+        pick = [k for k, c in enumerate(info['chs'])
+                if c['kind'] != FIFF.FIFFV_REF_MEG_CH]
+
+        if len(pick) == 0:
+            raise ValueError('Nothing remains after excluding the '
+                             'compensation channels')
+
+        comp = comp[pick, :]
+
+    return comp
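+
+# Usage sketch (assuming `info` and `data` come from a CTF recording read
+# with this package): build the matrix that takes the data from its current
+# grade back to uncompensated, then apply it:
+#
+#     >>> comp = make_compensator(info, get_current_comp(info), 0)  # doctest: +SKIP
+#     >>> uncompensated = np.dot(comp, data)  # doctest: +SKIP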
+
+
+# @verbose
+# def compensate_to(data, to, verbose=None):
+#     """
+#     %
+#     % [newdata] = mne_compensate_to(data,to)
+#     %
+#     % Apply compensation to the data as desired
+#     %
+#     """
+#
+#     newdata = data.copy()
+#     now = get_current_comp(newdata['info'])
+#
+#     #   Are we there already?
+#     if now == to:
+#         logger.info('Data are already compensated as desired')
+#
+#     #   Make the compensator and apply it to all data sets
+#     comp = make_compensator(newdata['info'], now, to)
+#     for k in range(len(newdata['evoked'])):
+#         newdata['evoked'][k]['epochs'] = np.dot(comp,
+#                                               newdata['evoked'][k]['epochs'])
+#
+#     #  Update the compensation info in the channel descriptors
+#     newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to)
+#     return newdata
+
+
+# def set_current_comp(chs, value):
+#     """Set the current compensation value in the channel info structures
+#     """
+#     new_chs = chs
+#
+#     lower_half = int('FFFF', 16) # hex2dec('FFFF')
+#     for k in range(len(chs)):
+#         if chs[k]['kind'] == FIFF.FIFFV_MEG_CH:
+#             coil_type = float(chs[k]['coil_type']) & lower_half
+#             new_chs[k]['coil_type'] = int(coil_type | (value << 16))
+#
+#     return new_chs
diff --git a/mne/fiff/constants.py b/mne/fiff/constants.py
new file mode 100644
index 0000000..c2657e7
--- /dev/null
+++ b/mne/fiff/constants.py
@@ -0,0 +1,713 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+class Bunch(dict):
+    """ Container object for datasets: dictionnary-like object that
+        exposes its keys as attributes.
+    """
+
+    def __init__(self, **kwargs):
+        dict.__init__(self, kwargs)
+        self.__dict__ = self
+
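+# Minimal illustration of Bunch: keys and attributes stay in sync.
+#
+#     >>> b = Bunch(spam=1)
+#     >>> b.spam == b['spam'] == 1
+#     True
+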
+FIFF = Bunch()
+
+#
+# Blocks
+#
+FIFF.FIFFB_ROOT               = 999
+FIFF.FIFFB_MEAS               = 100
+FIFF.FIFFB_MEAS_INFO          = 101
+FIFF.FIFFB_RAW_DATA           = 102
+FIFF.FIFFB_PROCESSED_DATA     = 103
+FIFF.FIFFB_EVOKED             = 104
+FIFF.FIFFB_ASPECT             = 105
+FIFF.FIFFB_SUBJECT            = 106
+FIFF.FIFFB_ISOTRAK            = 107
+FIFF.FIFFB_HPI_MEAS           = 108
+FIFF.FIFFB_HPI_RESULT         = 109
+FIFF.FIFFB_HPI_COIL           = 110
+FIFF.FIFFB_PROJECT            = 111
+FIFF.FIFFB_CONTINUOUS_DATA    = 112
+FIFF.FIFFB_VOID               = 114
+FIFF.FIFFB_EVENTS             = 115
+FIFF.FIFFB_INDEX              = 116
+FIFF.FIFFB_DACQ_PARS          = 117
+FIFF.FIFFB_REF                = 118
+FIFF.FIFFB_SMSH_RAW_DATA      = 119
+FIFF.FIFFB_SMSH_ASPECT        = 120
+FIFF.FIFFB_HPI_SUBSYSTEM      = 121
+FIFF.FIFFB_EPOCHS             = 122
+FIFF.FIFFB_ICA                = 123
+
+FIFF.FIFFB_SPHERE             = 300	  # Concentric sphere model related
+FIFF.FIFFB_BEM                = 310	  # Boundary-element method
+FIFF.FIFFB_BEM_SURF           = 311	  # Boundary-element method surfaces
+FIFF.FIFFB_CONDUCTOR_MODEL    = 312	  # One conductor model definition
+FIFF.FIFFB_PROJ               = 313
+FIFF.FIFFB_PROJ_ITEM          = 314
+FIFF.FIFFB_MRI                = 200
+FIFF.FIFFB_MRI_SET            = 201
+FIFF.FIFFB_MRI_SLICE          = 202
+FIFF.FIFFB_MRI_SCENERY        = 203     # These are for writing unrelated 'slices'
+FIFF.FIFFB_MRI_SCENE          = 204	  # Which are actually 3D scenes...
+FIFF.FIFFB_MRI_SEG            = 205     # MRI segmentation data
+FIFF.FIFFB_MRI_SEG_REGION     = 206     # One MRI segmentation region
+FIFF.FIFFB_PROCESSING_HISTORY = 900
+FIFF.FIFFB_SSS_INFO           = 502
+FIFF.FIFFB_SSS_CAL_ADJUST     = 503
+FIFF.FIFFB_SSS_ST_INFO        = 504
+FIFF.FIFFB_SSS_BASES          = 505
+#
+# Of general interest
+#
+FIFF.FIFF_FILE_ID         = 100
+FIFF.FIFF_DIR_POINTER     = 101
+FIFF.FIFF_BLOCK_ID        = 103
+FIFF.FIFF_BLOCK_START     = 104
+FIFF.FIFF_BLOCK_END       = 105
+FIFF.FIFF_FREE_LIST       = 106
+FIFF.FIFF_FREE_BLOCK      = 107
+FIFF.FIFF_NOP             = 108
+FIFF.FIFF_PARENT_FILE_ID  = 109
+FIFF.FIFF_PARENT_BLOCK_ID = 110
+FIFF.FIFF_BLOCK_NAME      = 111
+FIFF.FIFF_BLOCK_VERSION   = 112
+FIFF.FIFF_CREATOR         = 113  # Program that created the file (string)
+FIFF.FIFF_MODIFIER        = 114  # Program that modified the file (string)
+#
+#  Megacq saves the parameters in these tags
+#
+FIFF.FIFF_DACQ_PARS      = 150
+FIFF.FIFF_DACQ_STIM      = 151
+
+FIFF.FIFF_NCHAN       = 200
+FIFF.FIFF_SFREQ       = 201
+FIFF.FIFF_DATA_PACK   = 202
+FIFF.FIFF_CH_INFO     = 203
+FIFF.FIFF_MEAS_DATE   = 204
+FIFF.FIFF_SUBJECT     = 205
+FIFF.FIFF_COMMENT     = 206
+FIFF.FIFF_NAVE        = 207
+FIFF.FIFF_FIRST_SAMPLE = 208          # The first sample of an epoch
+FIFF.FIFF_LAST_SAMPLE  = 209          # The last sample of an epoch
+FIFF.FIFF_ASPECT_KIND  = 210
+FIFF.FIFF_REF_EVENT    = 211
+FIFF.FIFF_EXPERIMENTER = 212
+FIFF.FIFF_DIG_POINT   = 213
+FIFF.FIFF_CH_POS      = 214
+FIFF.FIFF_HPI_SLOPES  = 215
+FIFF.FIFF_HPI_NCOIL   = 216
+FIFF.FIFF_REQ_EVENT   = 217
+FIFF.FIFF_REQ_LIMIT   = 218
+FIFF.FIFF_LOWPASS     = 219
+FIFF.FIFF_BAD_CHS       = 220
+FIFF.FIFF_ARTEF_REMOVAL = 221
+FIFF.FIFF_COORD_TRANS = 222
+FIFF.FIFF_HIGHPASS    = 223
+FIFF.FIFF_CH_CALS        = 224	  # This will not occur in new files
+FIFF.FIFF_HPI_BAD_CHS    = 225     # List of channels considered to be bad in hpi
+FIFF.FIFF_HPI_CORR_COEFF = 226	  # Hpi curve fit correlations
+FIFF.FIFF_EVENT_COMMENT  = 227     # Comment about the events used in averaging
+FIFF.FIFF_NO_SAMPLES     = 228     # Number of samples in an epoch
+FIFF.FIFF_FIRST_TIME     = 229     # Time scale minimum
+
+FIFF.FIFF_SUBAVE_SIZE    = 230	  # Size of a subaverage
+FIFF.FIFF_SUBAVE_FIRST   = 231	  # The first epoch # contained in the subaverage
+FIFF.FIFF_NAME           = 233          # Intended to be a short name.
+FIFF.FIFF_DESCRIPTION    = FIFF.FIFF_COMMENT # (Textual) Description of an object
+FIFF.FIFF_DIG_STRING     = 234          # String of digitized points
+#
+# HPI fitting program tags
+#
+FIFF.FIFF_HPI_COIL_MOMENTS       = 240   # Estimated moment vectors for the HPI coil magnetic dipoles
+FIFF.FIFF_HPI_FIT_GOODNESS       = 241   # Three floats indicating the goodness of fit
+FIFF.FIFF_HPI_FIT_ACCEPT         = 242   # Bitmask indicating acceptance (see below)
+FIFF.FIFF_HPI_FIT_GOOD_LIMIT     = 243   # Limit for the goodness-of-fit
+FIFF.FIFF_HPI_FIT_DIST_LIMIT     = 244   # Limit for the coil distance difference
+FIFF.FIFF_HPI_COIL_NO            = 245   # Coil number listed by HPI measurement
+FIFF.FIFF_HPI_COILS_USED         = 246   # List of coils finally used when the transformation was computed
+FIFF.FIFF_HPI_DIGITIZATION_ORDER = 247   # Which Isotrak digitization point corresponds to each of the coils energized
+#
+# Pointers
+#
+FIFF.FIFFV_NEXT_SEQ    = 0
+FIFF.FIFFV_NEXT_NONE   = -1
+#
+# Channel types
+#
+FIFF.FIFFV_MEG_CH     =   1
+FIFF.FIFFV_REF_MEG_CH = 301
+FIFF.FIFFV_EEG_CH     =   2
+FIFF.FIFFV_MCG_CH     = 201
+FIFF.FIFFV_STIM_CH    =   3
+FIFF.FIFFV_EOG_CH     = 202
+FIFF.FIFFV_EMG_CH     = 302
+FIFF.FIFFV_ECG_CH     = 402
+FIFF.FIFFV_MISC_CH    = 502
+FIFF.FIFFV_RESP_CH    = 602                # Respiration monitoring
+#
+# Quaternion channels for head position monitoring
+#
+FIFF.FIFFV_QUAT_0   = 700   # Quaternion param q0 obsolete for unit quaternion
+FIFF.FIFFV_QUAT_1   = 701   # Quaternion param q1 rotation
+FIFF.FIFFV_QUAT_2   = 702   # Quaternion param q2 rotation
+FIFF.FIFFV_QUAT_3   = 703   # Quaternion param q3 rotation
+FIFF.FIFFV_QUAT_4   = 704   # Quaternion param q4 translation
+FIFF.FIFFV_QUAT_5   = 705   # Quaternion param q5 translation
+FIFF.FIFFV_QUAT_6   = 706   # Quaternion param q6 translation
+FIFF.FIFFV_HPI_G    = 707   # Goodness-of-fit in continuous hpi
+FIFF.FIFFV_HPI_ERR  = 708   # Estimation error in continuous hpi
+FIFF.FIFFV_HPI_MOV  = 709   # Estimated head movement speed in continuous hpi
+#
+# Coordinate frames
+#
+FIFF.FIFFV_COORD_UNKNOWN        = 0
+FIFF.FIFFV_COORD_DEVICE         = 1
+FIFF.FIFFV_COORD_ISOTRAK        = 2
+FIFF.FIFFV_COORD_HPI            = 3
+FIFF.FIFFV_COORD_HEAD           = 4
+FIFF.FIFFV_COORD_MRI            = 5
+FIFF.FIFFV_COORD_MRI_SLICE      = 6
+FIFF.FIFFV_COORD_MRI_DISPLAY    = 7
+FIFF.FIFFV_COORD_DICOM_DEVICE   = 8
+FIFF.FIFFV_COORD_IMAGING_DEVICE = 9
+#
+# Needed for raw and evoked-response data
+#
+FIFF.FIFF_FIRST_SAMPLE   = 208
+FIFF.FIFF_LAST_SAMPLE    = 209
+FIFF.FIFF_ASPECT_KIND    = 210
+FIFF.FIFF_DATA_BUFFER    = 300    # Buffer containing measurement data
+FIFF.FIFF_DATA_SKIP      = 301    # Data skip in buffers
+FIFF.FIFF_EPOCH          = 302    # Buffer containing one epoch and channel
+FIFF.FIFF_DATA_SKIP_SAMP = 303    # Data skip in samples
+FIFF.FIFF_MNE_BASELINE_MIN   = 304    # Time of baseline beginning
+FIFF.FIFF_MNE_BASELINE_MAX   = 305    # Time of baseline end
+#
+# Info on subject
+#
+FIFF.FIFF_SUBJ_ID           = 400  # Subject ID
+FIFF.FIFF_SUBJ_FIRST_NAME   = 401  # First name of the subject
+FIFF.FIFF_SUBJ_MIDDLE_NAME  = 402  # Middle name of the subject
+FIFF.FIFF_SUBJ_LAST_NAME    = 403  # Last name of the subject
+FIFF.FIFF_SUBJ_BIRTH_DAY    = 404  # Birthday of the subject
+FIFF.FIFF_SUBJ_SEX          = 405  # Sex of the subject
+FIFF.FIFF_SUBJ_HAND         = 406  # Handedness of the subject
+FIFF.FIFF_SUBJ_WEIGHT       = 407  # Weight of the subject
+FIFF.FIFF_SUBJ_HEIGHT       = 408  # Height of the subject
+FIFF.FIFF_SUBJ_COMMENT      = 409  # Comment about the subject
+FIFF.FIFF_SUBJ_HIS_ID       = 410  # ID used in the Hospital Information System
+
+FIFF.FIFF_PROJ_ID           = 500
+FIFF.FIFF_PROJ_NAME         = 501
+FIFF.FIFF_PROJ_AIM          = 502
+FIFF.FIFF_PROJ_PERSONS      = 503
+FIFF.FIFF_PROJ_COMMENT      = 504
+
+FIFF.FIFF_EVENT_CHANNELS    = 600  # Event channel numbers */
+FIFF.FIFF_EVENT_LIST        = 601  # List of events (integers: <sample before after>
+#
+# Tags used in saving SQUID characteristics etc.
+#
+FIFF.FIFF_SQUID_BIAS        = 701
+FIFF.FIFF_SQUID_OFFSET      = 702
+FIFF.FIFF_SQUID_GATE        = 703
+#
+# Aspect values used to save characteristic curves of SQUIDs. (mjk)
+#
+FIFF.FIFFV_ASPECT_IFII_LOW  = 1100
+FIFF.FIFFV_ASPECT_IFII_HIGH = 1101
+FIFF.FIFFV_ASPECT_GATE      = 1102
+#
+# References
+#
+FIFF.FIFF_REF_PATH           = 1101
+
+#
+# Different aspects of data
+#
+FIFF.FIFFV_ASPECT_AVERAGE       = 100  # Normal average of epochs
+FIFF.FIFFV_ASPECT_STD_ERR       = 101  # Std. error of mean
+FIFF.FIFFV_ASPECT_SINGLE        = 102  # Single epoch cut out from the continuous data
+FIFF.FIFFV_ASPECT_SUBAVERAGE    = 103
+FIFF.FIFFV_ASPECT_ALTAVERAGE    = 104  # Alternating subaverage
+FIFF.FIFFV_ASPECT_SAMPLE        = 105  # A sample cut out by graph
+FIFF.FIFFV_ASPECT_POWER_DENSITY = 106  # Power density spectrum
+FIFF.FIFFV_ASPECT_DIPOLE_WAVE   = 200  # Dipole amplitude curve
+#
+# BEM surface IDs
+#
+FIFF.FIFFV_BEM_SURF_ID_UNKNOWN    = -1
+FIFF.FIFFV_BEM_SURF_ID_BRAIN      = 1
+FIFF.FIFFV_BEM_SURF_ID_SKULL      = 3
+FIFF.FIFFV_BEM_SURF_ID_HEAD       = 4
+#
+# More of those defined in MNE
+#
+FIFF.FIFFV_MNE_SURF_UNKNOWN       = -1
+FIFF.FIFFV_MNE_SURF_LEFT_HEMI     = 101
+FIFF.FIFFV_MNE_SURF_RIGHT_HEMI    = 102
+FIFF.FIFFV_MNE_SURF_MEG_HELMET    = 201               # Use this irrespective of the system
+#
+#   These relate to the Isotrak data
+#
+FIFF.FIFFV_POINT_CARDINAL = 1
+FIFF.FIFFV_POINT_HPI      = 2
+FIFF.FIFFV_POINT_EEG      = 3
+FIFF.FIFFV_POINT_ECG      = FIFF.FIFFV_POINT_EEG
+FIFF.FIFFV_POINT_EXTRA    = 4
+
+FIFF.FIFFV_POINT_LPA = 1
+FIFF.FIFFV_POINT_NASION = 2
+FIFF.FIFFV_POINT_RPA = 3
+#
+#   SSP
+#
+FIFF.FIFF_PROJ_ITEM_KIND         = 3411
+FIFF.FIFF_PROJ_ITEM_TIME         = 3412
+FIFF.FIFF_PROJ_ITEM_NVEC         = 3414
+FIFF.FIFF_PROJ_ITEM_VECTORS      = 3415
+FIFF.FIFF_PROJ_ITEM_DEFINITION   = 3416
+FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417
+#
+#   MRIs
+#
+FIFF.FIFF_MRI_SOURCE_PATH       = FIFF.FIFF_REF_PATH
+FIFF.FIFF_MRI_SOURCE_FORMAT     = 2002
+FIFF.FIFF_MRI_PIXEL_ENCODING    = 2003
+FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004
+FIFF.FIFF_MRI_PIXEL_SCALE       = 2005
+FIFF.FIFF_MRI_PIXEL_DATA        = 2006
+FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007
+FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA     = 2008
+FIFF.FIFF_MRI_BOUNDING_BOX           = 2009
+FIFF.FIFF_MRI_WIDTH             = 2010
+FIFF.FIFF_MRI_WIDTH_M           = 2011
+FIFF.FIFF_MRI_HEIGHT            = 2012
+FIFF.FIFF_MRI_HEIGHT_M          = 2013
+FIFF.FIFF_MRI_DEPTH             = 2014
+FIFF.FIFF_MRI_DEPTH_M           = 2015
+FIFF.FIFF_MRI_THICKNESS         = 2016
+FIFF.FIFF_MRI_SCENE_AIM         = 2017
+FIFF.FIFF_MRI_ORIG_SOURCE_PATH       = 2020
+FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT     = 2021
+FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING    = 2022
+FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023
+FIFF.FIFF_MRI_VOXEL_DATA             = 2030
+FIFF.FIFF_MRI_VOXEL_ENCODING         = 2031
+FIFF.FIFF_MRI_MRILAB_SETUP           = 2100
+FIFF.FIFF_MRI_SEG_REGION_ID          = 2200
+#
+FIFF.FIFFV_MRI_PIXEL_UNKNOWN    = 0
+FIFF.FIFFV_MRI_PIXEL_BYTE       = 1
+FIFF.FIFFV_MRI_PIXEL_WORD       = 2
+FIFF.FIFFV_MRI_PIXEL_SWAP_WORD  = 3
+FIFF.FIFFV_MRI_PIXEL_FLOAT      = 4
+FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5
+FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR     = 6
+FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7
+FIFF.FIFFV_MRI_PIXEL_BIT_RLE            = 8
+#
+#   These are the MNE fiff definitions
+#
+FIFF.FIFFB_MNE                    = 350
+FIFF.FIFFB_MNE_SOURCE_SPACE       = 351
+FIFF.FIFFB_MNE_FORWARD_SOLUTION   = 352
+FIFF.FIFFB_MNE_PARENT_MRI_FILE    = 353
+FIFF.FIFFB_MNE_PARENT_MEAS_FILE   = 354
+FIFF.FIFFB_MNE_COV                = 355
+FIFF.FIFFB_MNE_INVERSE_SOLUTION   = 356
+FIFF.FIFFB_MNE_NAMED_MATRIX       = 357
+FIFF.FIFFB_MNE_ENV                = 358
+FIFF.FIFFB_MNE_BAD_CHANNELS       = 359
+FIFF.FIFFB_MNE_VERTEX_MAP         = 360
+FIFF.FIFFB_MNE_EVENTS             = 361
+FIFF.FIFFB_MNE_MORPH_MAP          = 362
+FIFF.FIFFB_MNE_SURFACE_MAP        = 363
+FIFF.FIFFB_MNE_SURFACE_MAP_GROUP  = 364
+
+#
+# CTF compensation data
+#
+FIFF.FIFFB_MNE_CTF_COMP           = 370
+FIFF.FIFFB_MNE_CTF_COMP_DATA      = 371
+FIFF.FIFFB_MNE_DERIVATIONS        = 372
+#
+# Fiff tags associated with MNE computations (3500...)
+#
+#
+# 3500... Bookkeeping
+#
+FIFF.FIFF_MNE_ROW_NAMES              = 3502
+FIFF.FIFF_MNE_COL_NAMES              = 3503
+FIFF.FIFF_MNE_NROW                   = 3504
+FIFF.FIFF_MNE_NCOL                   = 3505
+FIFF.FIFF_MNE_COORD_FRAME            = 3506  # Coordinate frame employed. Defaults:
+                          #  FIFFB_MNE_SOURCE_SPACE       FIFFV_COORD_MRI
+                          #  FIFFB_MNE_FORWARD_SOLUTION   FIFFV_COORD_HEAD
+                          #  FIFFB_MNE_INVERSE_SOLUTION   FIFFV_COORD_HEAD
+FIFF.FIFF_MNE_CH_NAME_LIST           = 3507
+FIFF.FIFF_MNE_FILE_NAME              = 3508  # This removes the collision with fiff_file.h (used to be 3501)
+#
+# 3510... 3590... Source space or surface
+#
+FIFF.FIFF_MNE_SOURCE_SPACE_POINTS        = 3510  # The vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS       = 3511  # The vertex normals
+FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS       = 3512  # How many vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION     = 3513  # Which are selected to the source space
+FIFF.FIFF_MNE_SOURCE_SPACE_NUSE          = 3514  # How many are in use
+FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST       = 3515  # Nearest source space vertex for all vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST  = 3516  # Distance to the Nearest source space vertex for all vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_ID            = 3517  # Identifier
+FIFF.FIFF_MNE_SOURCE_SPACE_TYPE          = 3518  # Surface or volume
+FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES      = 3519  # List of vertices (zero based)
+
+FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS    = 3596  # Voxel space dimensions in a volume source space
+FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR  = 3597  # Matrix to interpolate a volume source space into a mri volume
+FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE      = 3598  # MRI file used in the interpolation
+
+FIFF.FIFF_MNE_SOURCE_SPACE_NTRI          = 3590  # Number of triangles
+FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES     = 3591  # The triangulation
+FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI      = 3592  # Number of triangles corresponding to the number of vertices in use
+FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593  # The triangulation of the used vertices in the source space
+FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS    = 3594  # Number of neighbors for each source space point (used for volume source spaces)
+FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS     = 3595  # Neighbors for each source space point (used for volume source spaces)
+
+FIFF.FIFF_MNE_SOURCE_SPACE_DIST          = 3599  # Distances between vertices in use (along the surface)
+FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT    = 3600  # If distance is above this limit (in the volume) it has not been calculated
+
+FIFF.FIFF_MNE_SURFACE_MAP_DATA           = 3610  # Surface map data
+FIFF.FIFF_MNE_SURFACE_MAP_KIND           = 3611  # Type of map
+
+#
+# 3520... Forward solution
+#
+FIFF.FIFF_MNE_FORWARD_SOLUTION       = 3520
+FIFF.FIFF_MNE_SOURCE_ORIENTATION     = 3521  # Fixed or free
+FIFF.FIFF_MNE_INCLUDED_METHODS       = 3522
+FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD  = 3523
+#
+# 3530... Covariance matrix
+#
+FIFF.FIFF_MNE_COV_KIND               = 3530  # What kind of a covariance matrix
+FIFF.FIFF_MNE_COV_DIM                = 3531  # Matrix dimension
+FIFF.FIFF_MNE_COV                    = 3532  # Full matrix in packed representation (lower triangle)
+FIFF.FIFF_MNE_COV_DIAG               = 3533  # Diagonal matrix
+FIFF.FIFF_MNE_COV_EIGENVALUES        = 3534  # Eigenvalues and eigenvectors of the above
+FIFF.FIFF_MNE_COV_EIGENVECTORS       = 3535
+FIFF.FIFF_MNE_COV_NFREE              = 3536  # Number of degrees of freedom
+#
+# 3540... Inverse operator
+#
+# We store the inverse operator as the eigenleads, eigenfields,
+# and weights
+#
+FIFF.FIFF_MNE_INVERSE_LEADS              = 3540   # The eigenleads
+FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED     = 3546   # The eigenleads (already weighted with R^0.5)
+FIFF.FIFF_MNE_INVERSE_FIELDS             = 3541   # The eigenfields
+FIFF.FIFF_MNE_INVERSE_SING               = 3542   # The singular values
+FIFF.FIFF_MNE_PRIORS_USED                = 3543   # Which kind of priors have been used for the source covariance matrix
+FIFF.FIFF_MNE_INVERSE_FULL               = 3544   # Inverse operator as one matrix
+                                                  # This matrix includes the whitening operator
+                                                  # as well; the regularization is applied
+FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = 3545  # Contains the orientation of one source per row
+                                                  # The source orientations must be expressed in
+                                                  # the coordinate system given by FIFF_MNE_COORD_FRAME
+FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT         = 3547  # Are the sources given in Am or Am/m^2 ?
+#
+# 3550... Saved environment info
+#
+FIFF.FIFF_MNE_ENV_WORKING_DIR        = 3550     # Working directory where the file was created
+FIFF.FIFF_MNE_ENV_COMMAND_LINE       = 3551     # The command used to create the file
+FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN    = 3552     # Reference to an external binary file (big-endian) */
+FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = 3553	    # Reference to an external binary file (little-endian) */
+#
+# 3560... Miscellaneous
+#
+FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE       = 3560     # Is this projection item active?
+FIFF.FIFF_MNE_EVENT_LIST             = 3561     # An event list (for STI 014)
+FIFF.FIFF_MNE_HEMI                   = 3562     # Hemisphere association for general purposes
+FIFF.FIFF_MNE_DATA_SKIP_NOP          = 3563     # A data skip turned off in the raw data
+FIFF.FIFF_MNE_ORIG_CH_INFO           = 3564     # Channel information before any changes
+FIFF.FIFF_MNE_EVENT_TRIGGER_MASK     = 3565     # Mask applied to the trigger channel values
+FIFF.FIFF_MNE_EVENT_COMMENTS         = 3566	    # Event comments merged into one long string
+#
+# 3570... Morphing maps
+#
+FIFF.FIFF_MNE_MORPH_MAP              = 3570     # Mapping of closest vertices on the sphere
+FIFF.FIFF_MNE_MORPH_MAP_FROM         = 3571     # Which subject is this map from
+FIFF.FIFF_MNE_MORPH_MAP_TO           = 3572     # Which subject is this map to
+#
+# 3580... CTF compensation data
+#
+FIFF.FIFF_MNE_CTF_COMP_KIND         = 3580     # What kind of compensation
+FIFF.FIFF_MNE_CTF_COMP_DATA         = 3581     # The compensation data itself
+FIFF.FIFF_MNE_CTF_COMP_CALIBRATED   = 3582     # Are the coefficients calibrated?
+
+FIFF.FIFF_MNE_DERIVATION_DATA       = 3585     # Used to store information about EEG and other derivations
+#
+# 3601... values associated with ICA decomposition
+#
+FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS  = 3601     # ICA interface parameters
+FIFF.FIFF_MNE_ICA_CHANNEL_NAMES     = 3602     # ICA channel names
+FIFF.FIFF_MNE_ICA_WHITENER          = 3603     # ICA whitener
+FIFF.FIFF_MNE_ICA_PCA_COMPONENTS    = 3604     # PCA components
+FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605     # PCA explained variance
+FIFF.FIFF_MNE_ICA_PCA_MEAN          = 3606     # PCA mean
+FIFF.FIFF_MNE_ICA_MATRIX            = 3607     # ICA unmixing matrix
+FIFF.FIFF_MNE_ICA_BADS              = 3608     # ICA bad sources
+#
+# Fiff values associated with MNE computations
+#
+FIFF.FIFFV_MNE_UNKNOWN_ORI          = 0
+FIFF.FIFFV_MNE_FIXED_ORI            = 1
+FIFF.FIFFV_MNE_FREE_ORI             = 2
+
+FIFF.FIFFV_MNE_MEG                  = 1
+FIFF.FIFFV_MNE_EEG                  = 2
+FIFF.FIFFV_MNE_MEG_EEG              = 3
+
+FIFF.FIFFV_MNE_PRIORS_NONE          = 0
+FIFF.FIFFV_MNE_PRIORS_DEPTH         = 1
+FIFF.FIFFV_MNE_PRIORS_LORETA        = 2
+FIFF.FIFFV_MNE_PRIORS_SULCI         = 3
+
+FIFF.FIFFV_MNE_UNKNOWN_COV          = 0
+FIFF.FIFFV_MNE_SENSOR_COV           = 1
+FIFF.FIFFV_MNE_NOISE_COV            = 1         # This is what it should have been called
+FIFF.FIFFV_MNE_SOURCE_COV           = 2
+FIFF.FIFFV_MNE_FMRI_PRIOR_COV       = 3
+FIFF.FIFFV_MNE_SIGNAL_COV           = 4         # This will be potentially employed in beamformers
+FIFF.FIFFV_MNE_DEPTH_PRIOR_COV      = 5         # The depth weighting prior
+FIFF.FIFFV_MNE_ORIENT_PRIOR_COV     = 6         # The orientation prior
+
+FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF  = 10        # Linear projection related to EEG average reference
+#
+# Output map types
+#
+FIFF.FIFFV_MNE_MAP_UNKNOWN                   = -1     # Unspecified
+FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT            =  1     # Scalar current value
+FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE       =  2     # Absolute value of the above
+FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT            =  3     # Current vector components
+FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE       =  4     # Vector current size
+FIFF.FIFFV_MNE_MAP_T_STAT                    =  5     # Student's t statistic
+FIFF.FIFFV_MNE_MAP_F_STAT                    =  6     # F statistic
+FIFF.FIFFV_MNE_MAP_F_STAT_SQRT               =  7     # Square root of the F statistic
+FIFF.FIFFV_MNE_MAP_CHI2_STAT                 =  8     # (Approximate) chi^2 statistic
+FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT            =  9     # Square root of the (approximate) chi^2 statistic
+FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE      = 10     # Current noise approximation (scalar)
+FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE      = 11     # Current noise approximation (vector)
+#
+# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE)
+#
+FIFF.FIFFV_MNE_SPACE_UNKNOWN  = -1
+FIFF.FIFFV_MNE_SPACE_SURFACE  = 1
+FIFF.FIFFV_MNE_SPACE_VOLUME   = 2
+FIFF.FIFFV_MNE_SPACE_DISCRETE = 3
+#
+# Covariance matrix channel classification
+#
+FIFF.FIFFV_MNE_COV_CH_UNKNOWN  = -1  # No idea
+FIFF.FIFFV_MNE_COV_CH_MEG_MAG  =  0  # Axial gradiometer or magnetometer [T]
+FIFF.FIFFV_MNE_COV_CH_MEG_GRAD =  1  # Planar gradiometer [T/m]
+FIFF.FIFFV_MNE_COV_CH_EEG      =  2  # EEG [V]
+#
+# Projection item kinds
+#
+FIFF.FIFFV_PROJ_ITEM_NONE           = 0
+FIFF.FIFFV_PROJ_ITEM_FIELD          = 1
+FIFF.FIFFV_PROJ_ITEM_DIP_FIX        = 2
+FIFF.FIFFV_PROJ_ITEM_DIP_ROT        = 3
+FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD     = 4
+FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD    = 5
+FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF  = 10
+#
+# Additional coordinate frames
+#
+FIFF.FIFFV_MNE_COORD_TUFTS_EEG   =  300         # For Tufts EEG data
+FIFF.FIFFV_MNE_COORD_CTF_DEVICE  = 1001         # CTF device coordinates
+FIFF.FIFFV_MNE_COORD_CTF_HEAD    = 1004         # CTF head coordinates
+FIFF.FIFFV_MNE_COORD_DIGITIZER   = FIFF.FIFFV_COORD_ISOTRAK # Original (Polhemus) digitizer coordinates
+FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI     # The surface RAS coordinates
+FIFF.FIFFV_MNE_COORD_MRI_VOXEL   = 2001         # The MRI voxel coordinates
+FIFF.FIFFV_MNE_COORD_RAS         = 2002         # Surface RAS coordinates with non-zero origin
+FIFF.FIFFV_MNE_COORD_MNI_TAL     = 2003         # MNI Talairach coordinates
+FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ  = 2004         # FreeSurfer Talairach coordinates (MNI z > 0)
+FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ  = 2005         # FreeSurfer Talairach coordinates (MNI z < 0)
+FIFF.FIFFV_MNE_COORD_FS_TAL      = 2006         # FreeSurfer Talairach coordinates
+#
+# 4D and KIT use the same head coordinate system definition as CTF
+#
+FIFF.FIFFV_MNE_COORD_4D_HEAD     = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+FIFF.FIFFV_MNE_COORD_KIT_HEAD    = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+#
+# KIT system coil types
+#
+FIFF.FIFFV_COIL_KIT_GRAD         = 6001
+FIFF.FIFFV_COIL_KIT_REF_MAG      = 6002
+#
+# CTF coil and channel types
+#
+FIFF.FIFFV_COIL_CTF_GRAD             = 5001
+FIFF.FIFFV_COIL_CTF_REF_MAG          = 5002
+FIFF.FIFFV_COIL_CTF_REF_GRAD         = 5003
+FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004
+#
+# Magnes reference sensors
+#
+FIFF.FIFFV_COIL_MAGNES_REF_MAG          = 4003
+FIFF.FIFFV_COIL_MAGNES_REF_GRAD         = 4004
+FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005
+#
+# BabySQUID sensors
+#
+FIFF.FIFFV_COIL_BABY_GRAD               = 7001
+FIFF.FIFFV_REF_MEG_CH                   = 301
+FIFF.FIFF_UNIT_AM_M2  = 203 # Am/m^2
+FIFF.FIFF_UNIT_AM_M3  = 204	 # Am/m^3
+
+#
+#   Data types
+#
+FIFF.FIFFT_VOID                  = 0
+FIFF.FIFFT_BYTE                  = 1
+FIFF.FIFFT_SHORT                 = 2
+FIFF.FIFFT_INT                   = 3
+FIFF.FIFFT_FLOAT                 = 4
+FIFF.FIFFT_DOUBLE                = 5
+FIFF.FIFFT_JULIAN                = 6
+FIFF.FIFFT_USHORT                = 7
+FIFF.FIFFT_UINT                  = 8
+FIFF.FIFFT_ULONG                 = 9
+FIFF.FIFFT_STRING                = 10
+FIFF.FIFFT_LONG                  = 11
+FIFF.FIFFT_DAU_PACK13            = 13
+FIFF.FIFFT_DAU_PACK14            = 14
+FIFF.FIFFT_DAU_PACK16            = 16
+FIFF.FIFFT_COMPLEX_FLOAT         = 20
+FIFF.FIFFT_COMPLEX_DOUBLE        = 21
+FIFF.FIFFT_OLD_PACK              = 23
+FIFF.FIFFT_CH_INFO_STRUCT        = 30
+FIFF.FIFFT_ID_STRUCT             = 31
+FIFF.FIFFT_DIR_ENTRY_STRUCT      = 32
+FIFF.FIFFT_DIG_POINT_STRUCT      = 33
+FIFF.FIFFT_CH_POS_STRUCT         = 34
+FIFF.FIFFT_COORD_TRANS_STRUCT    = 35
+FIFF.FIFFT_DIG_STRING_STRUCT     = 36
+FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37
+#
+# Units of measurement
+#
+FIFF.FIFF_UNIT_NONE = -1
+#
+# SI base units
+#
+FIFF.FIFF_UNIT_M   = 1
+FIFF.FIFF_UNIT_KG  = 2
+FIFF.FIFF_UNIT_SEC = 3
+FIFF.FIFF_UNIT_A   = 4
+FIFF.FIFF_UNIT_K   = 5
+FIFF.FIFF_UNIT_MOL = 6
+#
+# SI Supplementary units
+#
+FIFF.FIFF_UNIT_RAD = 7
+FIFF.FIFF_UNIT_SR  = 8
+#
+# SI base candela
+#
+FIFF.FIFF_UNIT_CD  = 9
+#
+# SI derived units
+#
+FIFF.FIFF_UNIT_HZ  = 101
+FIFF.FIFF_UNIT_N   = 102
+FIFF.FIFF_UNIT_PA  = 103
+FIFF.FIFF_UNIT_J   = 104
+FIFF.FIFF_UNIT_W   = 105
+FIFF.FIFF_UNIT_C   = 106
+FIFF.FIFF_UNIT_V   = 107
+FIFF.FIFF_UNIT_F   = 108
+FIFF.FIFF_UNIT_OHM = 109
+FIFF.FIFF_UNIT_MHO = 110
+FIFF.FIFF_UNIT_WB  = 111
+FIFF.FIFF_UNIT_T   = 112
+FIFF.FIFF_UNIT_H   = 113
+FIFF.FIFF_UNIT_CEL = 114
+FIFF.FIFF_UNIT_LM  = 115
+FIFF.FIFF_UNIT_LX  = 116
+#
+# Others we need
+#
+FIFF.FIFF_UNIT_T_M   = 201  # T/m
+FIFF.FIFF_UNIT_AM    = 202  # Am
+FIFF.FIFF_UNIT_AM_M2 = 203  # Am/m^2
+FIFF.FIFF_UNIT_AM_M3 = 204  # Am/m^3
+#
+# Multipliers
+#
+FIFF.FIFF_UNITM_E    = 18
+FIFF.FIFF_UNITM_PET  = 15
+FIFF.FIFF_UNITM_T    = 12
+FIFF.FIFF_UNITM_MEG  = 6
+FIFF.FIFF_UNITM_K    = 3
+FIFF.FIFF_UNITM_H    = 2
+FIFF.FIFF_UNITM_DA   = 1
+FIFF.FIFF_UNITM_NONE = 0
+FIFF.FIFF_UNITM_D    = -1
+FIFF.FIFF_UNITM_C    = -2
+FIFF.FIFF_UNITM_M    = -3
+FIFF.FIFF_UNITM_MU   = -6
+FIFF.FIFF_UNITM_N    = -9
+FIFF.FIFF_UNITM_P    = -12
+FIFF.FIFF_UNITM_F    = -15
+FIFF.FIFF_UNITM_A    = -18
+#
+# Digitization point details
+#
+FIFF.FIFFV_POINT_CARDINAL = 1
+FIFF.FIFFV_POINT_HPI      = 2
+FIFF.FIFFV_POINT_EEG      = 3
+FIFF.FIFFV_POINT_ECG      = FIFF.FIFFV_POINT_EEG
+FIFF.FIFFV_POINT_EXTRA    = 4
+
+FIFF.FIFFV_POINT_LPA      = 1
+FIFF.FIFFV_POINT_NASION   = 2
+FIFF.FIFFV_POINT_RPA      = 3
+#
+# Coil types
+#
+FIFF.FIFFV_COIL_NONE                  = 0  # The location info contains no data
+FIFF.FIFFV_COIL_EEG                   = 1  # EEG electrode position in r0
+FIFF.FIFFV_COIL_NM_122                = 2  # Neuromag 122 coils
+FIFF.FIFFV_COIL_NM_24                 = 3  # Old 24 channel system in HUT
+FIFF.FIFFV_COIL_NM_MCG_AXIAL          = 4  # The axial devices in the HUCS MCG system
+FIFF.FIFFV_COIL_EEG_BIPOLAR           = 5  # Bipolar EEG lead
+
+FIFF.FIFFV_COIL_DIPOLE             = 200  # Time-varying dipole definition
+# The coil info contains dipole location (r0) and
+# direction (ex)
+FIFF.FIFFV_COIL_MCG_42             = 1000  # For testing the MCG software
+
+FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000  # Simple point magnetometer
+FIFF.FIFFV_COIL_AXIAL_GRAD_5CM     = 2001  # Generic axial gradiometer
+
+FIFF.FIFFV_COIL_VV_PLANAR_W        = 3011  # VV prototype wirewound planar sensor
+FIFF.FIFFV_COIL_VV_PLANAR_T1       = 3012  # Vectorview SQ20483N planar gradiometer
+FIFF.FIFFV_COIL_VV_PLANAR_T2       = 3013  # Vectorview SQ20483N-A planar gradiometer
+FIFF.FIFFV_COIL_VV_PLANAR_T3       = 3014  # Vectorview SQ20950N planar gradiometer
+FIFF.FIFFV_COIL_VV_MAG_W           = 3021  # VV prototype wirewound magnetometer
+FIFF.FIFFV_COIL_VV_MAG_T1          = 3022  # Vectorview SQ20483N magnetometer
+FIFF.FIFFV_COIL_VV_MAG_T2          = 3023  # Vectorview SQ20483-A magnetometer
+FIFF.FIFFV_COIL_VV_MAG_T3          = 3024  # Vectorview SQ20950N magnetometer
+
+FIFF.FIFFV_COIL_MAGNES_MAG         = 4001  # Magnes WH magnetometer
+FIFF.FIFFV_COIL_MAGNES_GRAD        = 4002  # Magnes WH gradiometer
+FIFF.FIFFV_COIL_MAGNES_R_MAG       = 4003  # Magnes WH reference magnetometer
+FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA  = 4004  # Magnes WH reference diagonal gradiometer
+FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF  = 4005  # Magnes WH reference off-diagonal gradiometer
+FIFF.FIFFV_COIL_CTF_GRAD           = 5001  # CTF axial gradiometer
+FIFF.FIFFV_COIL_KIT_GRAD           = 6001  # KIT system axial gradiometer
diff --git a/mne/fiff/cov.py b/mne/fiff/cov.py
new file mode 100644
index 0000000..0ef3515
--- /dev/null
+++ b/mne/fiff/cov.py
@@ -0,0 +1,184 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+from .constants import FIFF
+from .write import start_block, end_block, write_int, write_name_list, \
+                       write_double, write_float_matrix
+from .tag import find_tag
+from .tree import dir_tree_find
+from .proj import read_proj, write_proj
+from .channels import read_bad_channels
+from .. import verbose
+
+
+@verbose
+def read_cov(fid, node, cov_kind, verbose=None):
+    """Read a noise covariance matrix
+
+    This is a low-level function. Consider using `mne.cov.read_cov()`
+    for most user-level purposes.
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+    node : dict
+        The node in the FIF tree.
+    cov_kind : int
+        The type of covariance. This is typically
+        FIFF.FIFFV_MNE_NOISE_COV, although it could also be
+        FIFF.FIFFV_MNE_SOURCE_COV or FIFF.FIFFV_MNE_FMRI_PRIOR_COV.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    data : dict
+        The noise covariance matrix.
+    """
+    #   Find all covariance matrices
+    covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
+    if len(covs) == 0:
+        raise ValueError('No covariance matrices found')
+
+    #   Is any of the covariance matrices a noise covariance
+    for p in range(len(covs)):
+        tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
+
+        if tag is not None and int(tag.data) == cov_kind:
+            this = covs[p]
+
+            #   Find all the necessary data
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
+            if tag is None:
+                raise ValueError('Covariance matrix dimension not found')
+            dim = int(tag.data)
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
+            if tag is None:
+                nfree = -1
+            else:
+                nfree = int(tag.data)
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
+            if tag is None:
+                names = []
+            else:
+                names = tag.data.split(':')
+                if len(names) != dim:
+                    raise ValueError('Number of names does not match '
+                                     'covariance matrix dimension')
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
+            if tag is None:
+                tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
+                if tag is None:
+                    raise ValueError('No covariance matrix data found')
+                else:
+                    #   Diagonal is stored
+                    data = tag.data
+                    diagmat = True
+                    logger.info('    %d x %d diagonal covariance (kind = '
+                                '%d) found.' % (dim, dim, cov_kind))
+
+            else:
+                from scipy import sparse
+                if not sparse.issparse(tag.data):
+                    #   Lower diagonal is stored
+                    vals = tag.data
+                    data = np.zeros((dim, dim))
+                    data[np.tril(np.ones((dim, dim))) > 0] = vals
+                    data = data + data.T
+                    data.flat[::dim + 1] /= 2.0
+                    diagmat = False
+                    logger.info('    %d x %d full covariance (kind = %d) '
+                                'found.' % (dim, dim, cov_kind))
+                else:
+                    diagmat = False
+                    data = tag.data
+                    logger.info('    %d x %d sparse covariance (kind = %d)'
+                                ' found.' % (dim, dim, cov_kind))
+
+            #   Read the possibly precomputed decomposition
+            tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
+            tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
+            if tag1 is not None and tag2 is not None:
+                eig = tag1.data
+                eigvec = tag2.data
+            else:
+                eig = None
+                eigvec = None
+
+            #   Read the projection operator
+            projs = read_proj(fid, this)
+
+            #   Read the bad channel list
+            bads = read_bad_channels(fid, this)
+
+            #   Put it together
+            cov = dict(kind=cov_kind, diag=diagmat, dim=dim, names=names,
+                       data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
+                       eigvec=eigvec)
+            return cov
+
+    logger.info('    Did not find the desired covariance matrix (kind = %d)'
+                % cov_kind)
+
+    return None
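+
+# Usage sketch (assuming the file was opened with this package's fiff_open,
+# which returns the descriptor and the FIF directory tree):
+#
+#     >>> fid, tree, _ = fiff_open('sample-cov.fif')  # doctest: +SKIP
+#     >>> cov = read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV)  # doctest: +SKIP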
+
+
+def write_cov(fid, cov):
+    """Write a noise covariance matrix
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+    cov : dict
+        The noise covariance matrix to write.
+    """
+    start_block(fid, FIFF.FIFFB_MNE_COV)
+
+    #   Dimensions etc.
+    write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
+    write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
+    if cov['nfree'] > 0:
+        write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
+
+    #   Channel names
+    if cov['names'] is not None and len(cov['names']) > 0:
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
+
+    #   Data
+    if cov['diag']:
+        write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
+    else:
+        # Store only lower part of covariance matrix
+        dim = cov['dim']
+        mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0
+        vals = cov['data'][mask].ravel()
+        write_double(fid, FIFF.FIFF_MNE_COV, vals)
+
+    #   Eigenvalues and vectors if present
+    if cov['eig'] is not None and cov['eigvec'] is not None:
+        write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
+        write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
+
+    #   Projection operator
+    write_proj(fid, cov['projs'])
+
+    #   Bad channels
+    if cov['bads'] is not None:
+        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
+        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    #   Done!
+    end_block(fid, FIFF.FIFFB_MNE_COV)
diff --git a/mne/fiff/ctf.py b/mne/fiff/ctf.py
new file mode 100644
index 0000000..ced9611
--- /dev/null
+++ b/mne/fiff/ctf.py
@@ -0,0 +1,256 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+from .constants import FIFF
+from .tag import find_tag, has_tag, read_tag
+from .tree import dir_tree_find
+from .. import verbose
+
+
+def hex2dec(s):
+    return int(s, 16)
+
+
+def _read_named_matrix(fid, node, matkind):
+    """read_named_matrix(fid,node)
+
+    Read named matrix from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor
+    node : dict
+        Node
+    matkind : mat kind
+        XXX
+    Returns
+    -------
+    mat : dict
+        The matrix with row and col names.
+    """
+
+    #   Descend one level if necessary
+    if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX:
+        for k in range(node['nchild']):
+            if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX:
+                if has_tag(node['children'][k], matkind):
+                    node = node['children'][k]
+                    break
+        else:
+            raise ValueError('Desired named matrix (kind = %d) not'
+                             ' available' % matkind)
+
+    else:
+        if not has_tag(node, matkind):
+            raise ValueError('Desired named matrix (kind = %d) not available'
+                                                                    % matkind)
+
+    #   Read everything we need
+    tag = find_tag(fid, node, matkind)
+    if tag is None:
+        raise ValueError('Matrix data missing')
+    else:
+        data = tag.data
+
+    nrow, ncol = data.shape
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
+    if tag is not None:
+        if tag.data != nrow:
+            raise ValueError('Number of rows in matrix data and '
+                             'FIFF_MNE_NROW tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
+    if tag is not None:
+        if tag.data != ncol:
+            raise ValueError('Number of columns in matrix data and '
+                             'FIFF_MNE_NCOL tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
+    if tag is not None:
+        row_names = tag.data
+    else:
+        row_names = None
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
+    if tag is not None:
+        col_names = tag.data
+    else:
+        col_names = None
+
+    #   Put it together
+    mat = dict(nrow=nrow, ncol=ncol)
+    if row_names is not None:
+        mat['row_names'] = row_names.split(':')
+    else:
+        mat['row_names'] = None
+
+    if col_names is not None:
+        mat['col_names'] = col_names.split(':')
+    else:
+        mat['col_names'] = None
+
+    mat['data'] = data
+    return mat
+
+
+@verbose
+def read_ctf_comp(fid, node, chs, verbose=None):
+    """Read the CTF software compensation data from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+    node : dict
+        The node in the FIF tree.
+    chs : list of dict
+        The measurement channel info structures; their 'range' and 'cal'
+        entries are used to calibrate the compensation data.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    compdata : list
+        The compensation data
+    """
+    compdata = []
+    comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA)
+
+    for node in comps:
+        #   Read the data we need
+        mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA)
+        for p in range(node['nent']):
+            kind = node['directory'][p].kind
+            pos = node['directory'][p].pos
+            if kind == FIFF.FIFF_MNE_CTF_COMP_KIND:
+                tag = read_tag(fid, pos)
+                break
+        else:
+            raise Exception('Compensation type not found')
+
+        #   Get the compensation kind and map it to a simple number
+        one = dict(ctfkind=tag.data)
+        del tag
+
+        if one['ctfkind'] == int('47314252', 16):  # hex for ASCII 'G1BR'
+            one['kind'] = 1
+        elif one['ctfkind'] == int('47324252', 16):  # hex for ASCII 'G2BR'
+            one['kind'] = 2
+        elif one['ctfkind'] == int('47334252', 16):  # hex for ASCII 'G3BR'
+            one['kind'] = 3
+        else:
+            one['kind'] = int(one['ctfkind'])
+
+        for p in range(node['nent']):
+            kind = node['directory'][p].kind
+            pos = node['directory'][p].pos
+            if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED:
+                tag = read_tag(fid, pos)
+                calibrated = tag.data
+                break
+        else:
+            calibrated = False
+
+        one['save_calibrated'] = calibrated
+        one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float)
+        one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float)
+
+        row_cals, col_cals = None, None  # initialize cals
+
+        if not calibrated:
+            #
+            #   Calibrate...
+            #
+            #   Do the columns first
+            #
+            ch_names = [c['ch_name'] for c in chs]
+
+            col_cals = np.zeros(mat['data'].shape[1], dtype=np.float)
+            for col in range(mat['data'].shape[1]):
+                p = ch_names.count(mat['col_names'][col])
+                if p == 0:
+                    raise Exception('Channel %s is not available in data'
+                                    % mat['col_names'][col])
+                elif p > 1:
+                    raise Exception('Ambiguous channel %s'
+                                    % mat['col_names'][col])
+                idx = ch_names.index(mat['col_names'][col])
+                col_cals[col] = 1.0 / (chs[idx]['range'] * chs[idx]['cal'])
+
+            #    Then the rows
+            row_cals = np.zeros(mat['data'].shape[0])
+            for row in range(mat['data'].shape[0]):
+                p = ch_names.count(mat['row_names'][row])
+                if p == 0:
+                    raise Exception('Channel %s is not available in data'
+                                    % mat['row_names'][row])
+                elif p > 1:
+                    raise Exception('Ambiguous channel %s'
+                                    % mat['row_names'][row])
+                idx = ch_names.index(mat['row_names'][row])
+                row_cals[row] = chs[idx]['range'] * chs[idx]['cal']
+
+            mat['data'] = np.dot(np.diag(row_cals),
+                                 np.dot(mat['data'], np.diag(col_cals)))
+            one['rowcals'] = row_cals
+            one['colcals'] = col_cals
+
+        one['data'] = mat
+        compdata.append(one)
+        if row_cals is not None:
+            del row_cals
+        if col_cals is not None:
+            del col_cals
+
+    if len(compdata) > 0:
+        logger.info('    Read %d compensation matrices' % len(compdata))
+
+    return compdata
+
+
+###############################################################################
+# Writing
+
+from .write import start_block, end_block, write_int
+from .matrix import write_named_matrix
+
+
+def write_ctf_comp(fid, comps):
+    """Write the CTF compensation data into a fif file
+
+    Parameters
+    ----------
+    fid : file
+        The open FIF file descriptor
+
+    comps : list
+        The compensation data to write
+    """
+    if len(comps) <= 0:
+        return
+
+    #  This is very simple in fact
+    start_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
+    for comp in comps:
+        start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
+        #    Write the compensation kind
+        write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp['ctfkind'])
+        write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED,
+                  comp['save_calibrated'])
+
+        #    Write an uncalibrated or calibrated matrix
+        comp['data']['data'] = (comp['rowcals'][:, None] *
+                                comp['data']['data'] *
+                                comp['colcals'][None, :])
+        write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp['data'])
+        end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
+
+    end_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
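+
+# A hedged usage sketch (file names hypothetical; info would come from
+# meas_info.read_meas_info on the same tree):
+#
+#     >>> fid, tree, _ = fiff_open('ctf_raw.fif')
+#     >>> comps = read_ctf_comp(fid, tree, info['chs'])
+#     >>> write_ctf_comp(out_fid, comps)   # out_fid from write.start_file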
diff --git a/mne/fiff/diff.py b/mne/fiff/diff.py
new file mode 100644
index 0000000..08662bd
--- /dev/null
+++ b/mne/fiff/diff.py
@@ -0,0 +1,42 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD Style.
+
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+from .. import verbose
+
+
+@verbose
+def is_equal(first, second, verbose=None):
+    """ Says if 2 python structures are the same. Designed to
+    handle dict, list, np.ndarray etc.
+    """
+    all_equal = True
+    # Check all keys in first dict
+    if type(first) != type(second):
+        all_equal = False
+    if isinstance(first, dict):
+        for key in first.keys():
+            if key not in second:
+                logger.info("Missing key %s in %s" % (key, second))
+                all_equal = False
+            else:
+                if not is_equal(first[key], second[key]):
+                    all_equal = False
+    elif isinstance(first, np.ndarray):
+        if not np.allclose(first, second):
+            all_equal = False
+    elif isinstance(first, list):
+        for a, b in zip(first, second):
+            if not is_equal(a, b):
+                logger.info('%s and\n%s are different' % (a, b))
+                all_equal = False
+    else:
+        if first != second:
+            logger.info('%s and\n%s are different' % (first, second))
+            all_equal = False
+    return all_equal
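+
+# Example sketches (dicts are compared by looking up the first dict's keys
+# in the second; arrays are compared with np.allclose):
+#
+#     >>> is_equal({'a': np.array([1., 2.])}, {'a': np.array([1., 2.])})
+#     True
+#     >>> is_equal({'a': 1}, {'b': 1})   # logs the missing key
+#     False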
diff --git a/mne/fiff/evoked.py b/mne/fiff/evoked.py
new file mode 100644
index 0000000..35eb578
--- /dev/null
+++ b/mne/fiff/evoked.py
@@ -0,0 +1,774 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+import numpy as np
+import warnings
+
+import logging
+logger = logging.getLogger('mne')
+
+from .constants import FIFF
+from .open import fiff_open
+from .tag import read_tag
+from .tree import dir_tree_find
+from .pick import channel_type, pick_types
+from .meas_info import read_meas_info, write_meas_info
+from .proj import ProjMixin
+from ..baseline import rescale
+from ..filter import resample, detrend
+from ..fixes import in1d
+from ..utils import _check_pandas_installed
+
+from .write import start_file, start_block, end_file, end_block, \
+                   write_int, write_string, write_float_matrix, \
+                   write_id
+
+from ..viz import plot_evoked, plot_evoked_topomap, _mutable_defaults
+from .. import verbose
+
+aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
+               'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
+aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
+              str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
+
+
+class Evoked(ProjMixin):
+    """Evoked data
+
+    Parameters
+    ----------
+    fname : string
+        Name of evoked/average FIF file to load.
+        If None no data is loaded.
+    setno : int, or str
+        Dataset ID number (int) or comment/name (str). Optional if there is
+        only one data set in file.
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used. If None, no correction is applied.
+    proj : bool, optional
+        Apply SSP projection vectors
+    kind : str
+        Either 'average' or 'standard_error'. The type of data to read.
+        Only used if 'setno' is a str.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    `ch_names` : list of string
+        List of channels' names.
+    nave : int
+        Number of averaged epochs.
+    kind : str
+        Type of data, either average or standard_error.
+    first : int
+        First time sample.
+    last : int
+        Last time sample.
+    comment : string
+        Comment on dataset. Can be the condition.
+    times : array
+        Array of time instants in seconds.
+    data : array of shape (n_channels, n_times)
+        Evoked response.
+    verbose : bool, str, int, or None.
+        See above.
+    """
+    @verbose
+    def __init__(self, fname, setno=None, baseline=None, proj=True,
+                 kind='average', verbose=None):
+        if fname is None:
+            return
+
+        self.verbose = verbose
+        logger.info('Reading %s ...' % fname)
+        fid, tree, _ = fiff_open(fname)
+        if not isinstance(proj, bool):
+            raise ValueError(r"'proj' must be 'True' or 'False'")
+
+        #   Read the measurement info
+        info, meas = read_meas_info(fid, tree)
+        info['filename'] = fname
+
+        #   Locate the data of interest
+        processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
+        if len(processed) == 0:
+            fid.close()
+            raise ValueError('Could not find processed data')
+
+        evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
+        if len(evoked_node) == 0:
+            fid.close()
+            raise ValueError('Could not find evoked data')
+
+        # convert setno to an integer
+        if setno is None:
+            if len(evoked_node) > 1:
+                try:
+                    _, _, t = _get_entries(fid, evoked_node)
+                except Exception:
+                    t = 'None found, must use integer'
+                fid.close()
+                raise ValueError('%d datasets present, setno parameter '
+                                 'must be set. Candidate setno names:\n%s'
+                                 % (len(evoked_node), t))
+            else:
+                setno = 0
+
+        # find string-based entry
+        elif isinstance(setno, basestring):
+            if kind not in aspect_dict:
+                fid.close()
+                raise ValueError('kind must be "average" or '
+                                 '"standard_error"')
+
+            comments, aspect_kinds, t = _get_entries(fid, evoked_node)
+            goods = np.logical_and(in1d(comments, [setno]),
+                                   in1d(aspect_kinds, [aspect_dict[kind]]))
+            found_setno = np.where(goods)[0]
+            if len(found_setno) != 1:
+                fid.close()
+                raise ValueError('setno "%s" (%s) not found, out of found '
+                                 'datasets:\n  %s' % (setno, kind, t))
+            setno = found_setno[0]
+
+        if setno >= len(evoked_node) or setno < 0:
+            fid.close()
+            raise ValueError('Data set selector out of range')
+
+        my_evoked = evoked_node[setno]
+
+        # Identify the aspects
+        aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
+        if len(aspects) > 1:
+            logger.info('Multiple aspects found. Taking first one.')
+        my_aspect = aspects[0]
+
+        # Now find the data in the evoked block
+        nchan = 0
+        sfreq = -1
+        chs = []
+        comment = None
+        for k in range(my_evoked['nent']):
+            my_kind = my_evoked['directory'][k].kind
+            pos = my_evoked['directory'][k].pos
+            if my_kind == FIFF.FIFF_COMMENT:
+                tag = read_tag(fid, pos)
+                comment = tag.data
+            elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
+                tag = read_tag(fid, pos)
+                first = int(tag.data)
+            elif my_kind == FIFF.FIFF_LAST_SAMPLE:
+                tag = read_tag(fid, pos)
+                last = int(tag.data)
+            elif my_kind == FIFF.FIFF_NCHAN:
+                tag = read_tag(fid, pos)
+                nchan = int(tag.data)
+            elif my_kind == FIFF.FIFF_SFREQ:
+                tag = read_tag(fid, pos)
+                sfreq = float(tag.data)
+            elif my_kind == FIFF.FIFF_CH_INFO:
+                tag = read_tag(fid, pos)
+                chs.append(tag.data)
+
+        if comment is None:
+            comment = 'No comment'
+
+        #   Local channel information?
+        if nchan > 0:
+            if len(chs) == 0:
+                fid.close()
+                raise ValueError('Local channel information was not found '
+                                 'when it was expected.')
+
+            if len(chs) != nchan:
+                fid.close()
+                raise ValueError('Number of channels and number of '
+                                 'channel definitions are different')
+
+            info['chs'] = chs
+            info['nchan'] = nchan
+            logger.info('    Found channel information in evoked data. '
+                        'nchan = %d' % nchan)
+            if sfreq > 0:
+                info['sfreq'] = sfreq
+
+        nsamp = last - first + 1
+        logger.info('    Found the data of interest:')
+        logger.info('        t = %10.2f ... %10.2f ms (%s)'
+                    % (1000 * first / info['sfreq'],
+                       1000 * last / info['sfreq'], comment))
+        if info['comps'] is not None:
+            logger.info('        %d CTF compensation matrices available'
+                        % len(info['comps']))
+
+        # Read the data in the aspect block
+        nave = 1
+        epoch = []
+        for k in range(my_aspect['nent']):
+            kind = my_aspect['directory'][k].kind
+            pos = my_aspect['directory'][k].pos
+            if kind == FIFF.FIFF_COMMENT:
+                tag = read_tag(fid, pos)
+                comment = tag.data
+            elif kind == FIFF.FIFF_ASPECT_KIND:
+                tag = read_tag(fid, pos)
+                aspect_kind = int(tag.data)
+            elif kind == FIFF.FIFF_NAVE:
+                tag = read_tag(fid, pos)
+                nave = int(tag.data)
+            elif kind == FIFF.FIFF_EPOCH:
+                tag = read_tag(fid, pos)
+                epoch.append(tag)
+
+        logger.info('        nave = %d - aspect type = %d'
+                    % (nave, aspect_kind))
+
+        nepoch = len(epoch)
+        if nepoch != 1 and nepoch != info['nchan']:
+            fid.close()
+            raise ValueError('Number of epoch tags is unreasonable '
+                             '(nepoch = %d nchan = %d)'
+                             % (nepoch, info['nchan']))
+
+        if nepoch == 1:
+            # Only one epoch
+            all_data = epoch[0].data
+            # May need a transpose if the number of channels is one
+            if all_data.shape[1] == 1 and info['nchan'] == 1:
+                all_data = all_data.T
+        else:
+            # Put the old style epochs together
+            all_data = np.concatenate([e.data[None, :] for e in epoch], axis=0)
+
+        if all_data.shape[1] != nsamp:
+            fid.close()
+            raise ValueError('Incorrect number of samples (%d instead of %d)'
+                              % (all_data.shape[1], nsamp))
+
+        # Calibrate
+        cals = np.array([info['chs'][k]['cal']
+                         * info['chs'][k].get('scale', 1.0)
+                         for k in range(info['nchan'])])
+        all_data *= cals[:, np.newaxis]
+
+        times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
+        self.info = info
+
+        # Put the rest together
+        self.nave = nave
+        self._aspect_kind = aspect_kind
+        self.kind = aspect_rev.get(str(self._aspect_kind), 'Unknown')
+        self.first = first
+        self.last = last
+        self.comment = comment
+        self.times = times
+        self.data = all_data
+
+        # info and data are already bound to self, so apply_proj can be used
+        self.proj = False
+        if proj:
+            self.apply_proj()
+        # Run baseline correction
+        self.data = rescale(self.data, times, baseline, 'mean', copy=False)
+
+        fid.close()
+
+    def save(self, fname):
+        """Save dataset to file.
+
+        Parameters
+        ----------
+        fname : string
+            Name of the file where to save the data.
+        """
+        write_evoked(fname, self)
+
+    def __repr__(self):
+        s = "comment : %r" % self.comment
+        s += ", time : [%f, %f]" % (self.times[0], self.times[-1])
+        s += ", n_epochs : %d" % self.nave
+        s += ", n_channels x n_times : %s x %s" % self.data.shape
+        return "<Evoked  |  %s>" % s
+
+    @property
+    def ch_names(self):
+        return self.info['ch_names']
+
+    def crop(self, tmin=None, tmax=None):
+        """Crop data to a given time interval
+        """
+        times = self.times
+        mask = np.ones(len(times), dtype=np.bool)
+        if tmin is not None:
+            mask = mask & (times >= tmin)
+        if tmax is not None:
+            mask = mask & (times <= tmax)
+        self.times = times[mask]
+        self.first = int(self.times[0] * self.info['sfreq'])
+        self.last = len(self.times) + self.first - 1
+        self.data = self.data[:, mask]
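+        # e.g. evoked.crop(0., 0.2) keeps the 0-200 ms window and updates
+        # first/last accordingly (first = int(times[0] * sfreq))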
+
+    def shift_time(self, tshift, relative=True):
+        """Shift time scale in evoked data
+
+        Parameters
+        ----------
+        tshift : float
+            The amount of time shift to be applied if relative is True,
+            else the first time point. When relative is True, a positive
+            tshift moves the data forward while a negative tshift moves it
+            backward.
+        relative : bool
+            If True, move the time backwards or forwards by the specified
+            amount. Else, set the starting time point to the value of tshift.
+
+        Notes
+        -----
+        Maximum accuracy of time shift is 1 / evoked.info['sfreq']
+        """
+        times = self.times
+        sfreq = self.info['sfreq']
+
+        offset = self.first if relative else 0
+
+        self.first = int(tshift * sfreq) + offset
+        self.last = self.first + len(times) - 1
+        self.times = np.arange(self.first, self.last + 1,
+                               dtype=np.float) / sfreq
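+        # Worked example: with sfreq = 1000.0 Hz, shift_time(0.005) adds
+        # int(0.005 * 1000) = 5 samples to `first`, a 5 ms forward shift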
+
+    def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
+             proj=False, xlim='tight', hline=None, units=None, scalings=None,
+             titles=None, axes=None):
+        """Plot evoked data
+
+        Note: If bad channels are not excluded they are shown in red.
+
+        Parameters
+        ----------
+        picks : None | array-like of int
+            The indices of channels to plot. If None show all.
+        exclude : list of str | 'bads'
+            Channels names to exclude from being shown. If 'bads', the
+            bad channels are excluded.
+        unit : bool
+            Scale plot with channel (SI) unit.
+        show : bool
+            Call pylab.show() at the end or not.
+        ylim : dict
+            ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
+            Valid keys are eeg, mag, grad
+        xlim : 'tight' | tuple | None
+            xlim for plots.
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        hline : list of floats | None
+            The values at which to show a horizontal line.
+        units : dict | None
+            The units of the channel types used for axes labels. If None,
+            defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+        scalings : dict | None
+            The scalings of the channel types to be applied for plotting.
+            If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+        titles : dict | None
+            The titles associated with the channels. If None, defaults to
+            `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of channel types. If instance of
+            Axes, there must be only one channel type plotted.
+        """
+        plot_evoked(self, picks=picks, exclude=exclude, unit=unit, show=show,
+                    ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
+                    scalings=scalings, titles=titles, axes=axes)
+
+    def plot_topomap(self, times=None, ch_type='mag', layout=None, vmax=None,
+                     cmap='RdBu_r', sensors='k,', colorbar=True, scale=None,
+                     unit=None, res=256, size=1, format="%3.1f", proj=False,
+                     show=True):
+        """Plot topographic maps of specific time points
+
+        Parameters
+        ----------
+        times : float | array of floats | None
+            The time point(s) to plot. If None, 10 topographies will be shown
+            with a regular time spacing between the first and last time
+            instant.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+        layout : None | str | Layout
+            Layout name or instance specifying sensor positions (does not need
+            to be specified for Neuromag data).
+        vmax : scalar
+            The value specifying the range of the color scale (-vmax to +vmax).
+            If None, the largest absolute value in the data is used.
+        cmap : matplotlib colormap
+            Colormap.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses).
+        colorbar : bool
+            Plot a colorbar.
+        scale : float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        unit : str | None
+            The unit of the channel type used for colorbar labels. If
+            scale is None, the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : scalar
+            Side length of the topomaps in inches (only applies when plotting
+            multiple topomaps at a time).
+        format : str
+            String format for colorbar values.
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        show : bool
+            Call pylab.show() at the end.
+        """
+        plot_evoked_topomap(self, times=times, ch_type=ch_type, layout=layout,
+                            vmax=vmax, cmap=cmap, sensors=sensors,
+                            colorbar=colorbar, scale=scale, unit=unit, res=res,
+                            proj=proj, size=size, format=format)
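+        # Usage sketch (the time points are illustrative):
+        #     evoked.plot_topomap(times=[0.08, 0.10, 0.12], ch_type='mag')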
+
+    def to_nitime(self, picks=None):
+        """Export Evoked object to NiTime
+
+        Parameters
+        ----------
+        picks : array-like | None
+            Indices of channels to export. If None, all channels will be
+            exported.
+
+        Returns
+        -------
+        evoked_ts : instance of nitime.TimeSeries
+            The TimeSeries instance
+        """
+        try:
+            from nitime import TimeSeries  # to avoid strong dependency
+        except ImportError:
+            raise ImportError('the nitime package is missing')
+
+        evoked_ts = TimeSeries(self.data if picks is None
+                               else self.data[picks],
+                               sampling_rate=self.info['sfreq'])
+        return evoked_ts
+
+    def as_data_frame(self, picks=None, scale_time=1e3, scalings=None,
+                      use_time_index=True, copy=True):
+        """Get the epochs as Pandas DataFrame
+
+        Export raw data in tabular structure with MEG channels.
+
+        Parameters
+        ----------
+        picks : None | array of int
+            If None all channels are kept, otherwise the channels indices in
+            picks are kept.
+        scale_time : float
+            Scaling to be applied to time units.
+        scalings : dict | None
+            Scaling to be applied to the channels picked. If None, defaults to
+            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
+        use_time_index : bool
+            If False, times will be included as a column in the data table,
+            else they will be used as the index object.
+        copy : bool
+            If True, evoked data will be copied. Else data may be modified
+            in place.
+
+        Returns
+        -------
+        df : instance of pandas.core.DataFrame
+            Evoked data exported into tabular data structure.
+        """
+
+        pd = _check_pandas_installed()
+
+        if picks is None:
+            picks = range(self.info['nchan'])
+        else:
+            if not in1d(picks, np.arange(len(self.ch_names))).all():
+                raise ValueError('At least one picked channel is not present '
+                                 'in this evoked instance.')
+
+        data, times = self.data, self.times
+
+        if copy is True:
+            data = data.copy()
+
+        types = [channel_type(self.info, idx) for idx in picks]
+        n_channel_types = 0
+        ch_types_used = []
+
+        scalings = _mutable_defaults(('scalings', scalings))[0]
+        for t in scalings.keys():
+            if t in types:
+                n_channel_types += 1
+                ch_types_used.append(t)
+
+        for t in ch_types_used:
+            scaling = scalings[t]
+            idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+            if len(idx) > 0:
+                data[idx] *= scaling
+
+        assert times.shape[0] == data.shape[1]
+        col_names = [self.ch_names[k] for k in picks]
+
+        df = pd.DataFrame(data.T, columns=col_names)
+        df.insert(0, 'time', times * scale_time)
+
+        if use_time_index is True:
+            with warnings.catch_warnings(True):
+                df.set_index('time', inplace=True)
+            df.index = df.index.astype(int)
+
+        return df
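+        # Usage sketch (channel names are hypothetical; requires pandas):
+        #     >>> df = evoked.as_data_frame()
+        #     >>> df[['MEG 0113', 'MEG 0112']].head()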
+
+    def resample(self, sfreq, npad=100, window='boxcar'):
+        """Resample data
+
+        This function operates in-place.
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use
+        npad : int
+            Amount to pad the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
+        """
+        o_sfreq = self.info['sfreq']
+        self.data = resample(self.data, sfreq, o_sfreq, npad, window)
+        # adjust indirectly affected variables
+        self.info['sfreq'] = sfreq
+        self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq
+                      + self.times[0])
+        self.first = int(self.times[0] * self.info['sfreq'])
+        self.last = len(self.times) + self.first - 1
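+        # Worked example: resampling 600 Hz data to 150 Hz keeps times[0]
+        # fixed and reduces the number of samples by a factor of 4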
+
+    def detrend(self, order=1, picks=None):
+        """Detrend data
+
+        This function operates in-place.
+
+        Parameters
+        ----------
+        order : int
+            Either 0 or 1, the order of the detrending. 0 is a constant
+            (DC) detrend, 1 is a linear detrend.
+        picks : None | array of int
+            If None only MEG and EEG channels are detrended.
+        """
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                               eog=False, ecg=False, emg=False, exclude='bads')
+        self.data[picks] = detrend(self.data[picks], order, axis=-1)
+
+    def copy(self):
+        """Copy the instance of evoked
+
+        Returns
+        -------
+        evoked : instance of Evoked
+        """
+        evoked = deepcopy(self)
+        return evoked
+
+    def __add__(self, evoked):
+        """Add evoked taking into account number of epochs"""
+        out = merge_evoked([self, evoked])
+        out.comment = self.comment + " + " + evoked.comment
+        return out
+
+    def __sub__(self, evoked):
+        """Add evoked taking into account number of epochs"""
+        this_evoked = deepcopy(evoked)
+        this_evoked.data *= -1.
+        out = merge_evoked([self, this_evoked])
+        out.comment = self.comment + " - " + this_evoked.comment
+        return out
+
+
+def _get_entries(fid, evoked_node):
+    """Helper to get all evoked entries"""
+    comments = list()
+    aspect_kinds = list()
+    for ev in evoked_node:
+        for k in range(ev['nent']):
+            my_kind = ev['directory'][k].kind
+            pos = ev['directory'][k].pos
+            if my_kind == FIFF.FIFF_COMMENT:
+                tag = read_tag(fid, pos)
+                comments.append(tag.data)
+        my_aspect = dir_tree_find(ev, FIFF.FIFFB_ASPECT)[0]
+        for k in range(my_aspect['nent']):
+            my_kind = my_aspect['directory'][k].kind
+            pos = my_aspect['directory'][k].pos
+            if my_kind == FIFF.FIFF_ASPECT_KIND:
+                tag = read_tag(fid, pos)
+                aspect_kinds.append(int(tag.data))
+    comments = np.atleast_1d(comments)
+    aspect_kinds = np.atleast_1d(aspect_kinds)
+    if len(comments) != len(aspect_kinds) or len(comments) == 0:
+        fid.close()
+        raise ValueError('Dataset names in FIF file '
+                         'could not be found.')
+    t = [aspect_rev.get(str(a), 'Unknown') for a in aspect_kinds]
+    t = ['"' + c + '" (' + t + ')' for t, c in zip(t, comments)]
+    t = '  ' + '\n  '.join(t)
+    return comments, aspect_kinds, t
+
+
+def merge_evoked(all_evoked):
+    """Merge/concat evoked data
+
+    Data should have the same channels and the same time instants.
+
+    Parameters
+    ----------
+    all_evoked : list of Evoked
+        The evoked datasets
+
+    Returns
+    -------
+    evoked : Evoked
+        The merged evoked data
+    """
+    evoked = deepcopy(all_evoked[0])
+
+    ch_names = evoked.ch_names
+    for e in all_evoked[1:]:
+        if e.ch_names != ch_names:
+            raise ValueError("%s and %s do not contain the same channels"
+                             % (evoked, e))
+        if np.max(np.abs(e.times - evoked.times)) >= 1e-7:
+            raise ValueError("%s and %s do not contain the same time "
+                             "instants" % (evoked, e))
+
+    # use union of bad channels
+    bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
+                                                 for ev in all_evoked[1:])))
+    evoked.info['bads'] = bads
+
+    all_nave = sum(e.nave for e in all_evoked)
+    evoked.data = sum(e.nave * e.data for e in all_evoked) / all_nave
+    evoked.nave = all_nave
+    return evoked
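+
+# Worked example of the weighting above: merging datasets with nave = 30
+# and nave = 10 weights their data by 0.75 and 0.25 respectively, and the
+# result has nave = 40.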
+
+
+def read_evoked(fname, setno=None, baseline=None, kind='average', proj=True):
+    """Read an evoked dataset
+
+    Parameters
+    ----------
+    fname : string
+        The file name.
+    setno : int or str | list of int or str | None
+        The index or list of indices of the evoked dataset to read. FIF
+        file can contain multiple datasets. If None and there is only one
+        dataset in the file, this dataset is loaded.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    kind : str
+        Either 'average' or 'standard_error', the type of data to read.
+    proj : bool
+        If False, available projectors won't be applied to the data.
+
+    Returns
+    -------
+    evoked : instance of Evoked or list of Evoked
+        The evoked datasets.
+    """
+    if isinstance(setno, list):
+        return [Evoked(fname, s, baseline=baseline, kind=kind, proj=proj)
+                for s in setno]
+    else:
+        return Evoked(fname, setno, baseline=baseline, kind=kind, proj=proj)
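+
+# Usage sketch (file and condition names are hypothetical):
+#
+#     >>> evoked = read_evoked('sample-ave.fif', setno='Left Auditory',
+#     ...                      baseline=(None, 0))
+#     >>> evokeds = read_evoked('sample-ave.fif', setno=[0, 1])  # a list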
+
+
+def write_evoked(fname, evoked):
+    """Write an evoked dataset to a file
+
+    Parameters
+    ----------
+    fname : string
+        The file name.
+
+    evoked : instance of Evoked, or list of Evoked
+        The evoked dataset to save, or a list of evoked datasets to save
+        in one file. Note that the measurement info from the first evoked
+        instance is used, so be sure that information matches.
+    """
+
+    if not isinstance(evoked, list):
+        evoked = [evoked]
+
+    # Create the file and save the essentials
+    fid = start_file(fname)
+
+    start_block(fid, FIFF.FIFFB_MEAS)
+    write_id(fid, FIFF.FIFF_BLOCK_ID)
+    if evoked[0].info['meas_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
+
+    # Write measurement info
+    write_meas_info(fid, evoked[0].info)
+
+    # One or more evoked data sets
+    start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+    for e in evoked:
+        start_block(fid, FIFF.FIFFB_EVOKED)
+
+        # Comment is optional
+        if len(e.comment) > 0:
+            write_string(fid, FIFF.FIFF_COMMENT, e.comment)
+
+        # First and last sample
+        write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
+        write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
+
+        # The epoch itself
+        start_block(fid, FIFF.FIFFB_ASPECT)
+
+        write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
+        write_int(fid, FIFF.FIFF_NAVE, e.nave)
+
+        decal = np.zeros((e.info['nchan'], 1))
+        for k in range(e.info['nchan']):
+            decal[k] = 1.0 / (e.info['chs'][k]['cal']
+                              * e.info['chs'][k].get('scale', 1.0))
+
+        write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
+        end_block(fid, FIFF.FIFFB_ASPECT)
+        end_block(fid, FIFF.FIFFB_EVOKED)
+
+    end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
diff --git a/mne/fiff/kit/__init__.py b/mne/fiff/kit/__init__.py
new file mode 100644
index 0000000..b17c036
--- /dev/null
+++ b/mne/fiff/kit/__init__.py
@@ -0,0 +1,10 @@
+"""KIT module for conversion to FIF"""
+
+# Author: Teon Brooks <teon at nyu.edu>
+#
+# License: BSD (3-clause)
+
+from .kit import read_raw_kit
+from . import kit
+from . import coreg
+from . import constants
diff --git a/mne/fiff/kit/constants.py b/mne/fiff/kit/constants.py
new file mode 100644
index 0000000..d7c287d
--- /dev/null
+++ b/mne/fiff/kit/constants.py
@@ -0,0 +1,98 @@
+"""KIT constants"""
+
+# Author: Teon Brooks <teon at nyu.edu>
+#
+# License: BSD (3-clause)
+
+from ..constants import Bunch
+
+
+KIT = Bunch()
+
+# byte values
+KIT.SHORT = 2
+KIT.INT = 4
+KIT.DOUBLE = 8
+KIT.STRING = 128
+
+# pointer locations
+KIT.AMPLIFIER_INFO = 112
+KIT.BASIC_INFO = 16
+KIT.CHAN_SENS = 80
+KIT.DATA_OFFSET = 144
+KIT.SAMPLE_INFO = 128
+KIT.MRK_INFO = 192
+
+# parameters
+KIT.VOLTAGE_RANGE = 5.
+KIT.CALIB_FACTOR = 1.0  # mne_manual p.272
+KIT.RANGE = 1.  # mne_manual p.272
+KIT.UNIT_MUL = 0  # default is 0 mne_manual p.273
+
+# gain: 0:x1, 1:x2, 2:x5, 3:x10, 4:x20, 5:x50, 6:x100, 7:x200
+KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200]
+# BEF options: 0:THRU, 1:50Hz, 2:60Hz
+KIT.BEFS = [0, 50, 60]
+
+# coreg constants
+KIT.DIG_POINTS = 10000
+
+# create system specific dicts
+KIT_NY = Bunch(**KIT)
+KIT_AD = Bunch(**KIT)
+
+# NYU-system channel information
+KIT_NY.nchan = 192
+KIT_NY.nmegchan = 157
+KIT_NY.nrefchan = 3
+KIT_NY.nmiscchan = 32
+KIT_NY.n_sens = KIT_NY.nmegchan + KIT_NY.nrefchan
+# 12-bit A-to-D converter, one bit for signed integer. range +/- 2048
+KIT_NY.DYNAMIC_RANGE = 2 ** 12 / 2
+# amplifier information
+KIT_NY.GAIN1_BIT = 11  # stored in Bit 11-12
+KIT_NY.GAIN1_MASK = 2 ** 11 + 2 ** 12
+KIT_NY.GAIN2_BIT = 0  # stored in Bit 0-2
+KIT_NY.GAIN2_MASK = 2 ** 0 + 2 ** 1 + 2 ** 2  # (0x0007)
+KIT_NY.GAIN3_BIT = None
+KIT_NY.GAIN3_MASK = None
+KIT_NY.HPF_BIT = 4  # stored in Bit 4-5
+KIT_NY.HPF_MASK = 2 ** 4 + 2 ** 5
+KIT_NY.LPF_BIT = 8  # stored in Bit 8-10
+KIT_NY.LPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
+KIT_NY.BEF_BIT = 14  # stored in Bit 14-15
+KIT_NY.BEF_MASK = 2 ** 14 + 2 ** 15
+# HPF options: 0:0, 1:1, 2:3
+KIT_NY.HPFS = [0, 1, 3]
+# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
+#              6:1,000Hz, 7:2,000Hz
+KIT_NY.LPFS = [10, 20, 50, 100, 200, 500, 1000, 2000]
+
+
+# AD-system channel information
+KIT_AD.nchan = 256
+KIT_AD.nmegchan = 208
+KIT_AD.nrefchan = 16
+KIT_AD.ntrigchan = 8
+KIT_AD.nmiscchan = 24
+KIT_AD.n_sens = KIT_AD.nmegchan + KIT_AD.nrefchan
+# 16-bit A-to-D converter, one bit for signed integer. range +/- 32768
+KIT_AD.DYNAMIC_RANGE = 2 ** 16 / 2
+# amplifier information
+KIT_AD.GAIN1_BIT = 12  # stored in Bit 12-14
+KIT_AD.GAIN1_MASK = 2 ** 12 + 2 ** 13 + 2 ** 14
+KIT_AD.GAIN2_BIT = 28  # stored in Bit 28-30
+KIT_AD.GAIN2_MASK = 2 ** 28 + 2 ** 29 + 2 ** 30
+KIT_AD.GAIN3_BIT = 24  # stored in Bit 24-26
+KIT_AD.GAIN3_MASK = 2 ** 24 + 2 ** 25 + 2 ** 26
+KIT_AD.HPF_BIT = 8  # stored in Bit 8-10
+KIT_AD.HPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
+KIT_AD.LPF_BIT = 16  # stored in Bit 16-18
+KIT_AD.LPF_MASK = 2 ** 16 + 2 ** 17 + 2 ** 18
+KIT_AD.BEF_BIT = 0  # stored in Bit 0-1
+KIT_AD.BEF_MASK = 2 ** 0 + 2 ** 1
+# HPF options: 0:0Hz, 1:0.03Hz, 2:0.1Hz, 3:0.3Hz, 4:1Hz, 5:3Hz, 6:10Hz, 7:30Hz
+KIT_AD.HPFS = [0, 0.03, 0.1, 0.3, 1, 3, 10, 30]
+# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
+#              6:1,000Hz, 7:10,000Hz
+KIT_AD.LPFS = [10, 20, 50, 100, 200, 500, 1000, 10000]
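+
+# A hedged decoding sketch: given the raw amplifier info word ``amp`` read
+# from an sqd file, each setting is recovered by masking and shifting, e.g.
+#
+#     gain1 = KIT_NY.GAINS[(amp & KIT_NY.GAIN1_MASK) >> KIT_NY.GAIN1_BIT]
+#     lpf = KIT_NY.LPFS[(amp & KIT_NY.LPF_MASK) >> KIT_NY.LPF_BIT]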
diff --git a/mne/fiff/kit/coreg.py b/mne/fiff/kit/coreg.py
new file mode 100644
index 0000000..84e7c4c
--- /dev/null
+++ b/mne/fiff/kit/coreg.py
@@ -0,0 +1,276 @@
+"""Coordinate Point Extractor for KIT system"""
+
+# Author: Teon Brooks <teon at nyu.edu>
+#
+# License: BSD (3-clause)
+
+from struct import unpack
+from os import SEEK_CUR, path
+import re
+import cPickle as pickle
+import numpy as np
+from scipy import linalg
+from ..constants import FIFF
+from ...transforms.transforms import apply_trans, rotation, translation
+from .constants import KIT
+
+
+def get_points(mrk_fname, elp_fname, hsp_fname):
+    """Extracts dig points, elp, and mrk points from files needed for coreg
+
+    Parameters
+    ----------
+    mrk_fname : str
+        Path to marker file (saved as text from MEG160).
+    elp_fname : str
+        Path to elp digitizer file.
+    hsp_fname : str
+        Path to hsp headshape file.
+
+    Returns
+    -------
+    mrk_points : numpy.array, shape = (n_points, 3)
+        Array of 5 points by coordinate (x,y,z) from marker measurement.
+    elp_points : numpy.array, shape = (n_points, 3)
+        Array of 5 points by coordinate (x,y,z) from digitizer laser point.
+    dig : list of dict
+        A list of point dicts (nasion, LPA, RPA, HPI and extra points) in
+        the format used for raw.info['dig'].
+    """
+
+    mrk_points = read_mrk(mrk_fname=mrk_fname)
+    mrk_points = transform_pts(mrk_points, unit='m')
+
+    elp_points = read_elp(elp_fname=elp_fname)
+    elp_points = transform_pts(elp_points)
+    nasion = elp_points[0, :]
+    lpa = elp_points[1, :]
+    rpa = elp_points[2, :]
+
+    trans = get_neuromag_transform(lpa, rpa, nasion)
+    elp_points = np.dot(elp_points, trans.T)
+    nasion = elp_points[0]
+    lpa = elp_points[1]
+    rpa = elp_points[2]
+    elp_points = elp_points[3:]
+
+    hsp_points = read_hsp(hsp_fname=hsp_fname)
+    hsp_points = transform_pts(hsp_points)
+    hsp_points = np.dot(hsp_points, trans.T)
+    dig = []
+
+    point_dict = {}
+    point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+    point_dict['ident'] = FIFF.FIFFV_POINT_NASION
+    point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
+    point_dict['r'] = nasion
+    dig.append(point_dict)
+
+    point_dict = {}
+    point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+    point_dict['ident'] = FIFF.FIFFV_POINT_LPA
+    point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
+    point_dict['r'] = lpa
+    dig.append(point_dict)
+
+    point_dict = {}
+    point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+    point_dict['ident'] = FIFF.FIFFV_POINT_RPA
+    point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
+    point_dict['r'] = rpa
+    dig.append(point_dict)
+
+    for idx, point in enumerate(elp_points):
+        point_dict = {}
+        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+        point_dict['ident'] = idx
+        point_dict['kind'] = FIFF.FIFFV_POINT_HPI
+        point_dict['r'] = point
+        dig.append(point_dict)
+
+    for idx, point in enumerate(hsp_points):
+        point_dict = {}
+        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+        point_dict['ident'] = idx
+        point_dict['kind'] = FIFF.FIFFV_POINT_EXTRA
+        point_dict['r'] = point
+        dig.append(point_dict)
+    return mrk_points, elp_points, dig
+
+
+def read_mrk(mrk_fname):
+    """Marker Point Extraction in MEG space directly from sqd
+
+    Parameters
+    ----------
+    mrk_fname : str
+        Absolute path to Marker file.
+        File formats allowed: *.sqd, *.hpi, *.pickled
+
+    Returns
+    -------
+    mrk_points : numpy.array, shape = (n_points, 3)
+        Marker points in MEG space [m].
+    """
+    ext = path.splitext(mrk_fname)[-1]
+    if ext == '.sqd':
+        with open(mrk_fname, 'r') as fid:
+            fid.seek(KIT.MRK_INFO)
+            mrk_offset = unpack('i', fid.read(KIT.INT))[0]
+            fid.seek(mrk_offset)
+            # skips match_done, meg_to_mri and mri_to_meg
+            fid.seek(KIT.INT + (2 * KIT.DOUBLE * 4 ** 2), SEEK_CUR)
+            mrk_count = unpack('i', fid.read(KIT.INT))[0]
+            pts = []
+            for _ in range(mrk_count):
+                # skips mri/meg mrk_type and done, mri_marker
+                fid.seek(KIT.INT * 4 + (KIT.DOUBLE * 3), SEEK_CUR)
+                pts.append(np.fromfile(fid, dtype='d', count=3))
+                mrk_points = np.array(pts)
+    elif ext == '.hpi':
+        mrk_points = np.loadtxt(mrk_fname)
+    elif ext == '.pickled':
+        with open(mrk_fname) as fid:
+            mrk = pickle.load(fid)
+        mrk_points = mrk['points']
+    else:
+        raise TypeError('File must be *.sqd, *.hpi or *.pickled.')
+    return mrk_points
+
+
+def read_elp(elp_fname):
+    """ELP point extraction in Polhemus head space
+
+    Parameters
+    ----------
+    elp_fname : str
+        Absolute path to laser point file acquired from Polhemus system.
+        File formats allowed: *.txt
+
+    Returns
+    -------
+    elp_points : numpy.array, shape = (n_points, 3)
+        Fiducial and marker points in Polhemus head space.
+    """
+    p = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
+    elp_points = p.findall(open(elp_fname).read())
+    elp_points = np.array(elp_points, dtype=float)
+    return elp_points
+
+
+def read_hsp(hsp_fname):
+    """HSP point extraction in Polhemus head space
+
+    Parameters
+    ----------
+    hsp_fname : str
+        Absolute path to headshape file acquired from Polhemus system.
+        File formats allowed: *.txt, *.pickled
+
+    Returns
+    -------
+    hsp_points : numpy.array, shape = (n_points, 3)
+        Headshape points in Polhemus head space.
+    """
+    ext = path.splitext(hsp_fname)[-1]
+    if ext == '.txt':
+        p = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
+        hsp_points = p.findall(open(hsp_fname).read())
+        hsp_points = np.array(hsp_points, dtype=float)
+        # downsample the digitizer points
+        n_pts = len(hsp_points)
+        if n_pts > KIT.DIG_POINTS:
+            space = int(n_pts / KIT.DIG_POINTS)
+            hsp_points = np.copy(hsp_points[::space])
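+            # note: stepping by int(n_pts / KIT.DIG_POINTS) only roughly
+            # enforces the limit (e.g. 25000 points -> step 2 -> 12500 kept)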
+    elif ext == '.pickled':
+        with open(hsp_fname) as fid:
+            hsp = pickle.load(fid)
+        hsp_points = hsp['points']
+    else:
+        raise TypeError('File must be either *.txt or *.pickled.')
+    return hsp_points
+
+
+def read_sns(sns_fname):
+    """Sensor coordinate extraction in MEG space
+
+    Parameters
+    ----------
+    sns_fname : str
+        Absolute path to sensor definition file.
+
+    Returns
+    -------
+    locs : numpy.array, shape = (n_points, 5)
+        Sensor coil location (x, y, z) and orientation angles (theta, phi).
+    """
+
+    p = re.compile(r'\d,[A-Za-z]*,([\.\-0-9]+),' +
+                   r'([\.\-0-9]+),([\.\-0-9]+),' +
+                   r'([\.\-0-9]+),([\.\-0-9]+)')
+    locs = np.array(p.findall(open(sns_fname).read()), dtype=float)
+    return locs
+
+
+def get_neuromag_transform(lpa, rpa, nasion):
+    """Creates a transformation matrix from RAS to Neuromag-like space
+
+    Resets the origin to the midpoint between the peri-auricular points,
+    with the nasion lying along the y-axis.
+    (mne manual, pg. 97)
+
+    Parameters
+    ----------
+    lpa : numpy.array, shape = (1, 3)
+        Left peri-auricular point coordinate.
+    rpa : numpy.array, shape = (1, 3)
+        Right peri-auricular point coordinate.
+    nasion : numpy.array, shape = (1, 3)
+        Nasion point coordinate.
+
+    Returns
+    -------
+    trans : numpy.array, shape = (3, 3)
+        Transformation matrix to Neuromag-like space.
+    """
+    origin = (lpa + rpa) / 2
+    nasion = nasion - origin
+    lpa = lpa - origin
+    rpa = rpa - origin
+    axes = np.empty((3, 3))
+    axes[1] = nasion / linalg.norm(nasion)
+    axes[2] = np.cross(axes[1], lpa - rpa)
+    axes[2] /= linalg.norm(axes[2])
+    axes[0] = np.cross(axes[1], axes[2])
+
+    trans = linalg.inv(axes)
+    return trans
+
+
+def transform_pts(pts, unit='mm'):
+    """Transform KIT and Polhemus points to RAS coordinate system
+
+    This is used to orient points in Neuromag coordinates.
+    KIT sensors are (x,y,z) in [mm].
+    KIT markers are (x,y,z) in [m].
+    Polhemus points are (x,y,z) in [mm].
+    The transformation to RAS space is -y,x,z in [m].
+
+    Parameters
+    ----------
+    pts : numpy.array, shape = (n_points, 3)
+        Points to be transformed.
+    unit : 'mm' | 'm'
+        Unit of source points to be converted.
+
+    Returns
+    -------
+    pts : numpy.array, shape = (n_points, 3)
+        Points transformed to Neuromag-like head space (RAS).
+    """
+    if unit == 'mm':
+        pts = pts / 1e3
+    elif unit != 'm':
+        raise ValueError('The unit must be either "m" or "mm".')
+    pts = np.array(pts, ndmin=2)
+    pts = pts[:, [1, 0, 2]]
+    pts[:, 0] = pts[:, 0] * -1
+    return pts
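+
+# Worked example: a Polhemus point (10., 20., 30.) in mm becomes
+# (0.01, 0.02, 0.03) in m; the (x, y, z) -> (-y, x, z) reordering then
+# yields (-0.02, 0.01, 0.03).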
diff --git a/mne/fiff/kit/kit.py b/mne/fiff/kit/kit.py
new file mode 100644
index 0000000..447a224
--- /dev/null
+++ b/mne/fiff/kit/kit.py
@@ -0,0 +1,466 @@
+"""Conversion tool from SQD to FIF
+
+RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py
+
+"""
+
+# Author: Teon Brooks <teon at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import time
+import logging
+from struct import unpack
+from os import SEEK_CUR
+import numpy as np
+from scipy.linalg import norm
+from ...fiff import pick_types
+from ...transforms.coreg import fit_matched_pts
+from ...utils import verbose
+from ..raw import Raw
+from ..constants import FIFF
+from .constants import KIT, KIT_NY, KIT_AD
+from . import coreg
+
+logger = logging.getLogger('mne')
+
+
+class RawKIT(Raw):
+    """Raw object from KIT SQD file adapted from bti/raw.py
+
+    Parameters
+    ----------
+    input_fname : str
+        Absolute path to the sqd file.
+    mrk_fname : str
+        Absolute path to marker coils file.
+    elp_fname : str
+        Absolute path to elp digitizer laser points file.
+    hsp_fname : str
+        Absolute path to elp digitizer head shape points file.
+    sns_fname : str
+        Absolute path to sensor information file.
+    stim : list of int | '<' | '>'
+        Can be submitted as a list of trigger channels.
+        If a list is not specified, the default triggers extracted from
+        misc channels will be used with the specified directionality.
+        '<' means that the largest values are assigned to the first
+        channel in sequence; '>' means that the largest values are
+        assigned to the last channel in sequence.
+    stimthresh : float
+        The threshold level for accepting voltage change as a trigger event.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+
+    See Also
+    --------
+    mne.fiff.Raw : Documentation of attribute and methods.
+    """
+    @verbose
+    def __init__(self, input_fname, mrk_fname, elp_fname, hsp_fname, sns_fname,
+                 stim='<', stimthresh=1, verbose=None, preload=False):
+
+        logger.info('Extracting SQD Parameters from %s...' % input_fname)
+        self._sqd_params = get_sqd_params(input_fname)
+        self._sqd_params['stimthresh'] = stimthresh
+        self._sqd_params['fname'] = input_fname
+        logger.info('Creating Raw.info structure...')
+
+        # Raw attributes
+        self.verbose = verbose
+        self._preloaded = preload
+        self.fids = list()
+        self._projector = None
+        self.first_samp = 0
+        self.last_samp = self._sqd_params['nsamples'] - 1
+        self.comp = None  # no compensation for KIT
+
+        # Create raw.info dict for raw fif object with SQD data
+        self.info = {}
+        self.info['meas_id'] = None
+        self.info['file_id'] = None
+        self.info['meas_date'] = int(time.time())
+        self.info['projs'] = []
+        self.info['comps'] = []
+        self.info['lowpass'] = self._sqd_params['lowpass']
+        self.info['highpass'] = self._sqd_params['highpass']
+        self.info['sfreq'] = float(self._sqd_params['sfreq'])
+        # meg channels plus synthetic channel
+        self.info['nchan'] = self._sqd_params['nchan'] + 1
+        self.info['bads'] = []
+        self.info['acq_pars'], self.info['acq_stim'] = None, None
+        self.info['filename'] = None
+        self.info['ctf_head_t'] = None
+        self.info['dev_ctf_t'] = []
+        self.info['filenames'] = []
+        self.info['dev_head_t'] = {}
+        self.info['dev_head_t']['from'] = FIFF.FIFFV_COORD_DEVICE
+        self.info['dev_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
+
+        mrk, elp, self.info['dig'] = coreg.get_points(mrk_fname=mrk_fname,
+                                                      elp_fname=elp_fname,
+                                                      hsp_fname=hsp_fname)
+        self.info['dev_head_t']['trans'] = fit_matched_pts(tgt_pts=mrk,
+                                                           src_pts=elp)
+
+        # Creates a list of dicts of meg channels for raw.info
+        logger.info('Setting channel info structure...')
+        ch_names = {}
+        ch_names['MEG'] = ['MEG %03d' % ch for ch
+                                in range(1, self._sqd_params['n_sens'] + 1)]
+        ch_names['MISC'] = ['MISC %03d' % ch for ch
+                                 in range(1, self._sqd_params['nmiscchan']
+                                          + 1)]
+        ch_names['STIM'] = ['STI 014']
+        locs = coreg.read_sns(sns_fname=sns_fname)
+        chan_locs = coreg.transform_pts(locs[:, :3])
+        chan_angles = locs[:, 3:]
+        self.info['chs'] = []
+        for idx, ch_info in enumerate(zip(ch_names['MEG'], chan_locs,
+                                          chan_angles), 1):
+            ch_name, ch_loc, ch_angles = ch_info
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = idx
+            chan_info['scanno'] = idx
+            chan_info['range'] = KIT.RANGE
+            chan_info['unit_mul'] = KIT.UNIT_MUL
+            chan_info['ch_name'] = ch_name
+            chan_info['unit'] = FIFF.FIFF_UNIT_T
+            chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+            if idx <= self._sqd_params['nmegchan']:
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_GRAD
+                chan_info['kind'] = FIFF.FIFFV_MEG_CH
+            else:
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+
+            # create three orthogonal vectors
+            # ch_angles[0]: theta, ch_angles[1]: phi
+            ch_angles = np.radians(ch_angles)
+            x = np.sin(ch_angles[0]) * np.cos(ch_angles[1])
+            y = np.sin(ch_angles[0]) * np.sin(ch_angles[1])
+            z = np.cos(ch_angles[0])
+            vec_z = np.array([x, y, z])
+            length = norm(vec_z)
+            vec_z /= length
+            vec_x = np.zeros(vec_z.size, dtype=np.float)
+            if vec_z[1] < vec_z[2]:
+                if vec_z[0] < vec_z[1]:
+                    vec_x[0] = 1.0
+                else:
+                    vec_x[1] = 1.0
+            elif vec_z[0] < vec_z[2]:
+                vec_x[0] = 1.0
+            else:
+                vec_x[2] = 1.0
+            vec_x -= np.sum(vec_x * vec_z) * vec_z
+            length = norm(vec_x)
+            vec_x /= length
+            vec_y = np.cross(vec_z, vec_x)
+            # transform to Neuromag like coordinate space
+            vecs = np.vstack((vec_x, vec_y, vec_z))
+            vecs = coreg.transform_pts(vecs, unit='m')
+            chan_info['loc'] = np.vstack((ch_loc, vecs)).ravel()
+            self.info['chs'].append(chan_info)
+
+        # label trigger and misc channels
+        for idy, ch_name in enumerate(ch_names['MISC'] + ch_names['STIM'],
+                                      self._sqd_params['n_sens']):
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = idy
+            chan_info['scanno'] = idy
+            chan_info['range'] = 1.0
+            chan_info['unit'] = FIFF.FIFF_UNIT_V
+            chan_info['unit_mul'] = 0  # default is 0 mne_manual p.273
+            chan_info['ch_name'] = ch_name
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['loc'] = np.zeros(12)
+            if ch_name.startswith('STI'):
+                chan_info['unit'] = FIFF.FIFF_UNIT_NONE
+                chan_info['kind'] = FIFF.FIFFV_STIM_CH
+            else:
+                chan_info['kind'] = FIFF.FIFFV_MISC_CH
+            self.info['chs'].append(chan_info)
+        self.info['ch_names'] = (ch_names['MEG'] + ch_names['MISC'] +
+                                 ch_names['STIM'])
+
+        # Acquire stim channels
+        if isinstance(stim, str):
+            picks = pick_types(self.info, meg=False, misc=True,
+                               exclude=[])[:8]
+            if stim == '<':
+                stim = picks[::-1]
+            elif stim == '>':
+                stim = picks
+            else:
+                raise ValueError("stim needs to be list of int, '>' or '<', "
+                                 "not %r" % stim)
+        self._sqd_params['stim'] = stim
+
+        if self._preloaded:
+            logger.info('Reading raw data from %s...' % input_fname)
+            self._data, _ = self._read_segment()
+            assert len(self._data) == self.info['nchan']
+
+            # Create a synthetic channel
+            trig_chs = self._data[stim, :]
+            trig_chs = trig_chs > stimthresh
+            trig_vals = np.array(2 ** np.arange(len(stim)), ndmin=2).T
+            trig_chs = trig_chs * trig_vals
+            stim_ch = trig_chs.sum(axis=0)
+            self._data[-1, :] = stim_ch
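+            # Worked example: with trigger channels coded as bits 2**0,
+            # 2**1, ..., a sample where the first and third channels exceed
+            # stimthresh yields 2**0 + 2**2 = 5 on the synthetic channel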
+
+            # Add time info
+            self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
+            self._times = np.arange(self.first_samp, self.last_samp + 1,
+                                    dtype=np.float64)
+            self._times /= self.info['sfreq']
+            logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
+                        % (self.first_samp, self.last_samp,
+                           float(self.first_samp) / self.info['sfreq'],
+                           float(self.last_samp) / self.info['sfreq']))
+        logger.info('Ready.')
+
+    def read_stim_ch(self, buffer_size=1e5):
+        """Read events from data
+
+        Parameters
+        ----------
+        buffer_size : int
+            The size of chunks by which the data are scanned.
+
+        Returns
+        -------
+        events : array, [samples]
+           The event vector (1 x samples).
+        """
+        buffer_size = int(buffer_size)
+        start = int(self.first_samp)
+        stop = int(self.last_samp + 1)
+
+        pick = pick_types(self.info, meg=False, stim=True, exclude=[])
+        stim_ch = np.empty((1, stop), dtype=np.int)
+        for b_start in range(start, stop, buffer_size):
+            b_stop = b_start + buffer_size
+            x, _ = self._read_segment(start=b_start, stop=b_stop, sel=pick)
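+            # the final chunk may extend past `stop`; x.shape[1] clips the
+            # write to the samples actually read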
+            stim_ch[:, b_start:b_start + x.shape[1]] = x
+
+        return stim_ch
+
+    def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
+                      projector=None):
+        """Read a chunk of raw data
+
+        Parameters
+        ----------
+        start : int, (optional)
+            First sample to include (first is 0). If omitted, defaults to
+            the first sample in the data.
+        stop : int, (optional)
+            First sample to not include.
+            If omitted, data are included to the end.
+        sel : array, optional
+            Indices of channels to select.
+        projector : array
+            SSP operator to apply to the data.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        data : array, [channels x samples]
+            The data matrix (channels x samples).
+        times : array, [samples]
+            The time values corresponding to the samples.
+        """
+        if sel is None:
+            sel = range(self.info['nchan'])
+        elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
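+            # special-cased query: return dummy values rather than read
+            # from disk (apparently used as a cheap initialization probe)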
+            return (666, 666)
+        if projector is not None:
+            raise NotImplementedError('Currently does not handle projections.')
+        if stop is None:
+            stop = self.last_samp + 1
+        elif stop > self.last_samp + 1:
+            stop = self.last_samp + 1
+
+        #  Initial checks
+        start = int(start)
+        stop = int(stop)
+
+        if start >= stop:
+            raise ValueError('No data in this range')
+
+        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
+                    (start, stop - 1, start / float(self.info['sfreq']),
+                     (stop - 1) / float(self.info['sfreq'])))
+
+        with open(self._sqd_params['fname'], 'rb') as fid:
+            # extract data
+            fid.seek(KIT.DATA_OFFSET)
+            # data offset info
+            data_offset = unpack('i', fid.read(KIT.INT))[0]
+            nchan = self._sqd_params['nchan']
+            buffer_size = stop - start
+            count = buffer_size * nchan
+            pointer = start * nchan * KIT.SHORT
+            fid.seek(data_offset + pointer)
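+            # raw samples are stored as 16-bit integers ('h'); they are
+            # scaled to physical units below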
+            data = np.fromfile(fid, dtype='h', count=count)
+            data = data.reshape((buffer_size, nchan))
+        # amplifier applies only to the sensor channels
+        n_sens = self._sqd_params['n_sens']
+        sensor_gain = np.copy(self._sqd_params['sensor_gain'])
+        sensor_gain[:n_sens] = (sensor_gain[:n_sens] /
+                                self._sqd_params['amp_gain'])
+        conv_factor = np.array((KIT.VOLTAGE_RANGE /
+                                self._sqd_params['DYNAMIC_RANGE'])
+                               * sensor_gain, ndmin=2)
+        data = conv_factor * data
+        data = data.T
+        # Create a synthetic channel
+        trig_chs = data[self._sqd_params['stim'], :]
+        trig_chs = trig_chs > self._sqd_params['stimthresh']
+        trig_vals = np.array(2 ** np.arange(len(self._sqd_params['stim'])),
+                             ndmin=2).T
+        trig_chs = trig_chs * trig_vals
+        stim_ch = np.array(trig_chs.sum(axis=0), ndmin=2)
+        data = np.vstack((data, stim_ch))
+        data = data[sel]
+
+        logger.info('[done]')
+        times = np.arange(start, stop) / self.info['sfreq']
+
+        return data, times
+
+
+def get_sqd_params(rawfile):
+    """Extracts all the information from the sqd file.
+
+    Parameters
+    ----------
+    rawfile : str
+        Raw sqd file to be read.
+
+    Returns
+    -------
+    sqd : dict
+        A dict containing all the sqd parameter settings.
+    """
+    sqd = dict()
+    sqd['rawfile'] = rawfile
+    with open(rawfile, 'rb') as fid:
+        fid.seek(KIT.BASIC_INFO)
+        basic_offset = unpack('i', fid.read(KIT.INT))[0]
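+        # each section of the file stores a byte offset to its payload:
+        # read the offset, then seek to it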
+        fid.seek(basic_offset)
+        # skips version, revision, sysid
+        fid.seek(KIT.INT * 3, SEEK_CUR)
+        # basic info
+        sysname = unpack('128s', fid.read(KIT.STRING))
+        sysname = sysname[0].split('\n')[0]
+        fid.seek(KIT.STRING, SEEK_CUR)  # skips modelname
+        sqd['nchan'] = unpack('i', fid.read(KIT.INT))[0]
+
+        if sysname == 'New York University Abu Dhabi':
+            KIT_SYS = KIT_AD
+        elif sysname == 'NYU 160ch System since Jan24 2009':
+            KIT_SYS = KIT_NY
+        else:
+            raise NotImplementedError
+
+        # amplifier gain
+        fid.seek(KIT_SYS.AMPLIFIER_INFO)
+        amp_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
+        fid.seek(amp_offset)
+        amp_data = unpack('i', fid.read(KIT_SYS.INT))[0]
+
+        gain1 = KIT_SYS.GAINS[(KIT_SYS.GAIN1_MASK & amp_data)
+                              >> KIT_SYS.GAIN1_BIT]
+        gain2 = KIT_SYS.GAINS[(KIT_SYS.GAIN2_MASK & amp_data)
+                              >> KIT_SYS.GAIN2_BIT]
+        if KIT_SYS.GAIN3_BIT:
+            gain3 = KIT_SYS.GAINS[(KIT_SYS.GAIN3_MASK & amp_data)
+                                  >> KIT_SYS.GAIN3_BIT]
+            sqd['amp_gain'] = gain1 * gain2 * gain3
+        else:
+            sqd['amp_gain'] = gain1 * gain2
+
+        # filter settings
+        sqd['lowpass'] = KIT_SYS.LPFS[(KIT_SYS.LPF_MASK & amp_data)
+                                      >> KIT_SYS.LPF_BIT]
+        sqd['highpass'] = KIT_SYS.HPFS[(KIT_SYS.HPF_MASK & amp_data)
+                                       >> KIT_SYS.HPF_BIT]
+        sqd['notch'] = KIT_SYS.BEFS[(KIT_SYS.BEF_MASK & amp_data)
+                                    >> KIT_SYS.BEF_BIT]
+
+        # only the sensor channels require gain correction; the additional
+        # misc channels (trigger, audio, and voice channels) are passed
+        # through unaffected
+
+        fid.seek(KIT_SYS.CHAN_SENS)
+        sens_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
+        fid.seek(sens_offset)
+        sens = np.fromfile(fid, dtype='d', count=sqd['nchan'] * 2)
+        sensitivities = (np.reshape(sens, (sqd['nchan'], 2))
+                         [:KIT_SYS.n_sens, 1])
+        sqd['sensor_gain'] = np.ones(KIT_SYS.nchan)
+        sqd['sensor_gain'][:KIT_SYS.n_sens] = sensitivities
+
+        fid.seek(KIT_SYS.SAMPLE_INFO)
+        acqcond_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
+        fid.seek(acqcond_offset)
+        acq_type = unpack('i', fid.read(KIT_SYS.INT))[0]
+        if acq_type == 1:
+            sqd['sfreq'] = unpack('d', fid.read(KIT_SYS.DOUBLE))[0]
+            _ = fid.read(KIT_SYS.INT)  # skip initial estimate of samples
+            sqd['nsamples'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+        else:
+            raise NotImplementedError
+        sqd['n_sens'] = KIT_SYS.n_sens
+        sqd['nmegchan'] = KIT_SYS.nmegchan
+        sqd['nmiscchan'] = KIT_SYS.nmiscchan
+        sqd['DYNAMIC_RANGE'] = KIT_SYS.DYNAMIC_RANGE
+    return sqd
+
+
+def read_raw_kit(input_fname, mrk_fname, elp_fname, hsp_fname, sns_fname,
+                 stim='<', stimthresh=1, verbose=None, preload=False):
+    """Reader function for KIT conversion to FIF
+
+    Parameters
+    ----------
+    input_fname : str
+        Absolute path to the sqd file.
+    mrk_fname : str
+        Absolute path to marker coils file.
+    elp_fname : str
+        Absolute path to elp digitizer laser points file.
+    hsp_fname : str
+        Absolute path to elp digitizer head shape points file.
+    sns_fname : str
+        Absolute path to sensor information file.
+    stim : list of int | '<' | '>'
+        Trigger channels, given as a list of channel indices.
+        If no list is given, default trigger channels are extracted from
+        the misc channels and used with the specified directionality:
+        '<' assigns the largest trigger value to the first channel in the
+        sequence, '>' assigns it to the last channel in the sequence.
+    stimthresh : float
+        The threshold level for accepting a voltage change as a trigger
+        event.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+
+    Returns
+    -------
+    raw : instance of RawKIT
+        A Raw object containing KIT data.
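+
+    Examples
+    --------
+    A minimal sketch; the file names below are placeholders:
+
+    >>> raw = read_raw_kit('data.sqd', 'mrk.sqd', 'elp.txt', 'hsp.txt',
+    ...                    'sns.txt', stim='<', preload=True)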
+    """
+    return RawKIT(input_fname=input_fname, mrk_fname=mrk_fname,
+                  elp_fname=elp_fname, hsp_fname=hsp_fname,
+                  sns_fname=sns_fname, stim=stim, stimthresh=stimthresh,
+                  verbose=verbose, preload=preload)
diff --git a/mne/fiff/kit/tests/__init__.py b/mne/fiff/kit/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/fiff/kit/tests/data/sns.txt b/mne/fiff/kit/tests/data/sns.txt
new file mode 100644
index 0000000..8b166df
--- /dev/null
+++ b/mne/fiff/kit/tests/data/sns.txt
@@ -0,0 +1,195 @@
+[Sensor Definition]
+Channel no.,Type,x,y,z,theta,phi,size,baseline
+,,[mm],[mm],[mm],[deg],[deg],[mm],[mm]
+0,AxialGradioMeter,-101.52,-68.02,20.93,81.07,222.01,15.50,50.00
+1,AxialGradioMeter,-122.64,-22.28,22.28,79.66,192.59,15.50,50.00
+2,AxialGradioMeter,-107.05,-65.31,-61.76,90.46,217.16,15.50,50.00
+3,AxialGradioMeter,-118.37,-44.33,-58.74,89.16,206.22,15.50,50.00
+4,AxialGradioMeter,-124.69,-22.17,-57.88,89.07,193.26,15.50,50.00
+5,AxialGradioMeter,-89.78,-84.53,-61.59,89.29,228.49,15.50,50.00
+6,AxialGradioMeter,-89.38,-73.82,39.07,73.39,228.04,15.50,50.00
+7,AxialGradioMeter,-104.96,-53.16,41.21,72.14,215.24,15.50,50.00
+8,AxialGradioMeter,-16.03,119.37,-65.63,87.63,95.79,15.50,50.00
+9,AxialGradioMeter,-80.87,88.26,-14.78,95.70,127.17,15.50,50.00
+10,AxialGradioMeter,-38.58,110.27,-65.95,89.53,105.43,15.50,50.00
+11,AxialGradioMeter,-63.03,100.12,-66.07,90.36,115.82,15.50,50.00
+12,AxialGradioMeter,-124.55,3.28,22.20,79.74,176.77,15.50,50.00
+13,AxialGradioMeter,-110.55,50.16,19.36,80.44,148.66,15.50,50.00
+14,AxialGradioMeter,-99.07,69.89,-9.70,91.04,137.55,15.50,50.00
+15,AxialGradioMeter,-87.38,37.06,82.65,51.93,151.83,15.50,50.00
+16,AxialGradioMeter,-58.58,-4.07,110.92,28.65,184.17,15.50,50.00
+17,AxialGradioMeter,-123.94,-21.02,-35.42,92.08,191.89,15.50,50.00
+18,AxialGradioMeter,-88.81,-84.37,-40.92,95.46,232.31,15.50,50.00
+19,AxialGradioMeter,-114.17,-29.81,41.39,72.20,198.16,15.50,50.00
+20,AxialGradioMeter,-4.18,-117.98,31.45,79.03,268.94,15.50,50.00
+21,AxialGradioMeter,-54.91,-105.44,37.81,75.48,246.23,15.50,50.00
+22,AxialGradioMeter,-109.43,-7.78,61.83,63.61,184.63,15.50,50.00
+23,AxialGradioMeter,-80.50,-74.17,58.91,65.70,230.29,15.50,50.00
+24,AxialGradioMeter,-115.11,15.80,42.04,70.78,169.68,15.50,50.00
+25,AxialGradioMeter,-109.18,38.57,42.82,71.62,154.74,15.50,50.00
+26,AxialGradioMeter,-76.22,18.26,99.72,38.50,160.84,15.50,50.00
+27,AxialGradioMeter,-72.25,58.02,81.73,53.37,134.26,15.50,50.00
+28,AxialGradioMeter,-56.04,74.97,80.73,55.31,120.54,15.50,50.00
+29,AxialGradioMeter,-34.11,83.86,78.30,56.85,110.70,15.50,50.00
+30,AxialGradioMeter,-9.43,89.24,76.81,56.03,96.48,15.50,50.00
+31,AxialGradioMeter,-123.65,28.89,-6.45,88.97,161.26,15.50,50.00
+32,AxialGradioMeter,1.09,-117.79,-7.02,85.17,269.85,15.50,50.00
+33,AxialGradioMeter,-73.27,-32.84,97.86,41.11,213.18,15.50,50.00
+34,AxialGradioMeter,-3.61,-25.36,128.05,16.37,260.34,15.50,50.00
+35,AxialGradioMeter,-105.02,-31.70,61.46,63.27,201.21,15.50,50.00
+36,AxialGradioMeter,-18.84,-120.72,-7.11,84.66,263.45,15.50,50.00
+37,AxialGradioMeter,-49.90,-109.62,-60.60,87.90,248.83,15.50,50.00
+38,AxialGradioMeter,-71.46,-99.70,-7.19,87.12,238.78,15.50,50.00
+39,AxialGradioMeter,-28.62,-116.91,-60.88,86.52,257.39,15.50,50.00
+40,AxialGradioMeter,-70.68,77.60,62.00,64.02,125.79,15.50,50.00
+41,AxialGradioMeter,-88.20,58.58,62.14,63.47,139.05,15.50,50.00
+42,AxialGradioMeter,-107.77,15.56,61.92,62.86,167.40,15.50,50.00
+43,AxialGradioMeter,-115.79,44.29,-60.88,91.85,153.90,15.50,50.00
+44,AxialGradioMeter,-103.66,65.18,-64.04,90.92,139.15,15.50,50.00
+45,AxialGradioMeter,-126.16,-0.48,-56.75,89.67,180.75,15.50,50.00
+46,AxialGradioMeter,-123.58,22.49,-58.02,88.62,166.26,15.50,50.00
+47,AxialGradioMeter,-44.35,35.38,109.86,32.47,133.19,15.50,50.00
+48,AxialGradioMeter,-4.59,-121.03,-62.10,87.80,264.83,15.50,50.00
+49,AxialGradioMeter,-87.33,-84.88,-13.24,95.60,229.72,15.50,50.00
+50,AxialGradioMeter,-118.29,-47.73,-7.15,89.55,207.65,15.50,50.00
+51,AxialGradioMeter,-50.54,-113.93,-32.53,85.55,249.85,15.50,50.00
+52,AxialGradioMeter,-48.08,-116.39,-9.09,85.89,249.42,15.50,50.00
+53,AxialGradioMeter,-15.34,-119.05,10.98,86.20,267.43,15.50,50.00
+54,AxialGradioMeter,-68.25,-99.97,16.27,82.75,238.88,15.50,50.00
+55,AxialGradioMeter,-0.80,-126.39,-31.12,81.24,269.18,15.50,50.00
+56,AxialGradioMeter,-34.15,18.93,119.59,22.02,144.71,15.50,50.00
+57,AxialGradioMeter,-32.80,-28.21,119.20,24.62,225.61,15.50,50.00
+58,AxialGradioMeter,-60.16,93.93,38.42,74.47,118.58,15.50,50.00
+59,AxialGradioMeter,-98.06,60.82,42.83,71.46,141.78,15.50,50.00
+60,AxialGradioMeter,-12.37,110.43,37.16,75.57,96.90,15.50,50.00
+61,AxialGradioMeter,-102.13,68.95,-40.47,93.88,138.71,15.50,50.00
+62,AxialGradioMeter,-128.89,3.16,-6.02,89.22,176.30,15.50,50.00
+63,AxialGradioMeter,-0.50,59.40,109.06,36.67,91.53,15.50,50.00
+64,AxialGradioMeter,-81.57,-55.03,79.87,54.71,222.29,15.50,50.00
+65,AxialGradioMeter,-79.74,-7.43,99.52,38.76,184.96,15.50,50.00
+66,AxialGradioMeter,51.38,-115.52,-12.92,88.12,289.89,15.50,50.00
+67,AxialGradioMeter,74.74,-99.45,-14.91,92.19,300.75,15.50,50.00
+68,AxialGradioMeter,22.12,-121.59,-8.92,86.20,274.14,15.50,50.00
+69,AxialGradioMeter,44.60,-87.67,72.58,60.15,288.26,15.50,50.00
+70,AxialGradioMeter,-0.48,-94.50,75.07,57.01,269.10,15.50,50.00
+71,AxialGradioMeter,23.84,-94.50,73.16,58.35,280.15,15.50,50.00
+72,AxialGradioMeter,-59.56,101.64,-7.64,86.37,115.66,15.50,50.00
+73,AxialGradioMeter,-10.46,116.08,-9.57,86.74,95.09,15.50,50.00
+74,AxialGradioMeter,-35.46,110.65,-9.73,86.02,105.92,15.50,50.00
+75,AxialGradioMeter,-4.48,17.23,128.02,11.05,110.32,15.50,50.00
+76,AxialGradioMeter,22.28,16.36,125.65,14.49,45.07,15.50,50.00
+77,AxialGradioMeter,-61.11,101.55,-37.57,89.33,115.42,15.50,50.00
+78,AxialGradioMeter,-13.80,117.71,-35.98,85.43,95.06,15.50,50.00
+79,AxialGradioMeter,64.72,102.18,-11.94,88.31,64.31,15.50,50.00
+80,AxialGradioMeter,-47.37,-83.00,76.43,57.55,245.56,15.50,50.00
+81,AxialGradioMeter,-26.04,-91.12,75.07,58.25,256.60,15.50,50.00
+82,AxialGradioMeter,50.44,-114.38,-38.12,88.90,289.50,15.50,50.00
+83,AxialGradioMeter,88.89,-87.74,-42.91,93.52,306.43,15.50,50.00
+84,AxialGradioMeter,-42.56,-46.61,108.49,35.99,236.74,15.50,50.00
+85,AxialGradioMeter,94.17,-84.20,8.46,86.45,308.62,15.50,50.00
+86,AxialGradioMeter,24.97,-119.28,10.66,86.05,276.54,15.50,50.00
+87,AxialGradioMeter,75.94,-98.27,9.94,84.91,300.74,15.50,50.00
+88,AxialGradioMeter,39.94,112.95,-36.10,86.02,71.72,15.50,50.00
+89,AxialGradioMeter,79.69,89.89,-49.30,100.09,55.10,15.50,50.00
+90,AxialGradioMeter,-47.87,58.21,95.68,43.78,124.76,15.50,50.00
+91,AxialGradioMeter,-77.96,87.32,17.90,80.23,125.96,15.50,50.00
+92,AxialGradioMeter,86.68,19.32,94.88,42.09,13.84,15.50,50.00
+93,AxialGradioMeter,14.78,40.34,119.13,25.45,77.53,15.50,50.00
+94,AxialGradioMeter,16.06,115.60,-10.59,86.39,85.15,15.50,50.00
+95,AxialGradioMeter,40.68,110.41,-11.22,86.80,74.04,15.50,50.00
+96,AxialGradioMeter,-41.48,-68.87,92.67,47.62,246.09,15.50,50.00
+97,AxialGradioMeter,-66.49,-71.53,79.41,56.48,236.61,15.50,50.00
+98,AxialGradioMeter,44.68,-110.64,33.27,76.89,286.25,15.50,50.00
+99,AxialGradioMeter,84.83,-85.22,32.16,76.77,305.43,15.50,50.00
+100,AxialGradioMeter,53.91,-112.29,11.67,84.23,291.50,15.50,50.00
+101,AxialGradioMeter,56.38,-66.94,90.28,49.24,297.67,15.50,50.00
+102,AxialGradioMeter,-61.78,-87.37,58.45,65.45,242.61,15.50,50.00
+103,AxialGradioMeter,-41.80,-99.21,58.56,66.08,251.63,15.50,50.00
+104,AxialGradioMeter,0.91,101.33,56.27,66.23,92.25,15.50,50.00
+105,AxialGradioMeter,-24.38,99.41,58.21,65.95,101.10,15.50,50.00
+106,AxialGradioMeter,25.47,100.68,56.56,66.23,82.12,15.50,50.00
+107,AxialGradioMeter,50.31,92.73,55.99,65.65,69.66,15.50,50.00
+108,AxialGradioMeter,70.73,80.59,56.57,64.50,57.89,15.50,50.00
+109,AxialGradioMeter,-33.22,109.85,13.64,84.62,105.09,15.50,50.00
+110,AxialGradioMeter,-48.21,91.31,59.86,65.16,113.01,15.50,50.00
+111,AxialGradioMeter,49.94,7.92,115.65,23.06,13.35,15.50,50.00
+112,AxialGradioMeter,-15.95,-107.49,57.73,64.58,262.88,15.50,50.00
+113,AxialGradioMeter,8.86,-108.71,56.11,66.04,273.95,15.50,50.00
+114,AxialGradioMeter,56.03,-94.00,54.55,65.74,292.70,15.50,50.00
+115,AxialGradioMeter,34.02,-103.80,55.22,65.95,282.25,15.50,50.00
+116,AxialGradioMeter,92.91,-69.58,53.66,67.26,311.27,15.50,50.00
+117,AxialGradioMeter,108.13,-50.31,55.17,65.99,324.76,15.50,50.00
+118,AxialGradioMeter,75.21,-82.79,53.97,66.46,301.54,15.50,50.00
+119,AxialGradioMeter,130.61,2.52,34.15,74.62,2.07,15.50,50.00
+120,AxialGradioMeter,18.14,114.55,13.50,83.66,84.00,15.50,50.00
+121,AxialGradioMeter,42.56,108.16,12.25,84.50,72.91,15.50,50.00
+122,AxialGradioMeter,66.19,98.40,11.00,85.46,61.62,15.50,50.00
+123,AxialGradioMeter,86.80,85.69,9.90,83.79,51.81,15.50,50.00
+124,AxialGradioMeter,42.40,85.52,76.34,56.91,69.88,15.50,50.00
+125,AxialGradioMeter,105.24,49.33,55.50,65.12,32.48,15.50,50.00
+126,AxialGradioMeter,117.75,27.82,55.61,64.10,18.64,15.50,50.00
+127,AxialGradioMeter,89.42,66.46,57.04,64.36,45.01,15.50,50.00
+128,AxialGradioMeter,117.46,-51.45,33.61,76.24,326.70,15.50,50.00
+129,AxialGradioMeter,30.54,-63.60,105.41,42.23,288.16,15.50,50.00
+130,AxialGradioMeter,3.22,-67.93,104.40,42.64,270.69,15.50,50.00
+131,AxialGradioMeter,62.47,27.21,106.70,32.35,29.10,15.50,50.00
+132,AxialGradioMeter,105.30,-24.63,76.14,54.36,340.80,15.50,50.00
+133,AxialGradioMeter,79.82,-64.83,73.71,58.23,309.27,15.50,50.00
+134,AxialGradioMeter,23.00,-23.33,124.13,16.75,305.36,15.50,50.00
+135,AxialGradioMeter,62.75,-33.81,106.76,34.25,319.76,15.50,50.00
+136,AxialGradioMeter,115.01,50.88,33.75,74.74,31.61,15.50,50.00
+137,AxialGradioMeter,19.88,90.44,76.48,56.00,79.87,15.50,50.00
+138,AxialGradioMeter,56.09,60.04,92.51,45.95,57.42,15.50,50.00
+139,AxialGradioMeter,27.17,56.29,107.60,37.65,69.78,15.50,50.00
+140,AxialGradioMeter,79.51,60.20,75.78,55.65,47.35,15.50,50.00
+141,AxialGradioMeter,104.40,25.42,75.88,54.40,20.84,15.50,50.00
+142,AxialGradioMeter,77.72,92.19,-74.17,95.17,56.33,15.50,50.00
+143,AxialGradioMeter,11.39,119.11,-65.02,89.80,85.69,15.50,50.00
+144,AxialGradioMeter,118.56,-24.78,57.56,64.34,342.15,15.50,50.00
+145,AxialGradioMeter,50.04,-14.83,115.24,23.47,336.39,15.50,50.00
+146,AxialGradioMeter,17.04,-48.20,116.89,31.08,282.70,15.50,50.00
+147,AxialGradioMeter,20.36,-118.34,-64.37,90.52,274.85,15.50,50.00
+148,AxialGradioMeter,42.77,-110.63,-65.89,91.74,286.08,15.50,50.00
+149,AxialGradioMeter,63.94,-101.32,-67.25,93.82,293.74,15.50,50.00
+150,AxialGradioMeter,85.05,-89.65,-68.16,94.79,307.40,15.50,50.00
+151,AxialGradioMeter,85.11,-32.44,92.47,45.12,326.98,15.50,50.00
+152,AxialGradioMeter,33.35,113.59,-66.96,91.67,74.38,15.50,50.00
+153,AxialGradioMeter,54.97,104.73,-71.03,95.41,66.56,15.50,50.00
+154,AxialGradioMeter,81.11,83.71,35.47,73.17,54.06,15.50,50.00
+155,AxialGradioMeter,38.61,104.83,33.91,75.45,76.35,15.50,50.00
+156,AxialGradioMeter,90.82,-8.65,93.89,43.54,348.98,15.50,50.00
+157,RefMagnetoMeter,0,0.00,0.00,0.00,0.00,0.00,4.00
+158,RefMagnetoMeter,0,0.00,0.00,0.00,0.00,0.00,4.00
+159,RefMagnetoMeter,0,0.00,0.00,0.00,0.00,0.00,4.00
+160,Null Channel
+161,Null Channel
+162,Null Channel
+163,Null Channel
+164,Null Channel
+165,Null Channel
+166,Null Channel
+167,Null Channel
+168,Null Channel
+169,Null Channel
+170,Null Channel
+171,Null Channel
+172,Null Channel
+173,Null Channel
+174,Null Channel
+175,Null Channel
+176,Null Channel
+177,Null Channel
+178,Null Channel
+179,Null Channel
+180,Null Channel
+181,Null Channel
+182,Null Channel
+183,Null Channel
+184,Null Channel
+185,Null Channel
+186,Null Channel
+187,Null Channel
+188,Null Channel
+189,Null Channel
+190,Null Channel
+191,Null Channel
diff --git a/mne/fiff/kit/tests/data/test.sqd b/mne/fiff/kit/tests/data/test.sqd
new file mode 100644
index 0000000..d6851d8
Binary files /dev/null and b/mne/fiff/kit/tests/data/test.sqd differ
diff --git a/mne/fiff/kit/tests/data/test_Ykgw.mat b/mne/fiff/kit/tests/data/test_Ykgw.mat
new file mode 100644
index 0000000..1ebbbfd
Binary files /dev/null and b/mne/fiff/kit/tests/data/test_Ykgw.mat differ
diff --git a/mne/fiff/kit/tests/data/test_bin.fif b/mne/fiff/kit/tests/data/test_bin.fif
new file mode 100644
index 0000000..ef3aed3
Binary files /dev/null and b/mne/fiff/kit/tests/data/test_bin.fif differ
diff --git a/mne/fiff/kit/tests/data/test_elp.txt b/mne/fiff/kit/tests/data/test_elp.txt
new file mode 100755
index 0000000..1c4a236
--- /dev/null
+++ b/mne/fiff/kit/tests/data/test_elp.txt
@@ -0,0 +1,11 @@
+% Ascii stylus data file created by FastSCAN V4.0.7 on Thu May 31 13:04:52 2012
+% raw surface, 8 visible points, bounding box reference
+% x y z
+    1.3930   13.1613   -4.6967
+  -62.4997  -73.7271   79.9600
+  -74.8957   87.3785   81.1943
+  -59.5004  -70.4836   75.8930
+  -64.6373   83.8228   76.2123
+  -13.5035    7.2522  -26.8405
+  -20.2967  -35.1498  -12.9305
+  -27.7519   45.2628  -22.2407
diff --git a/mne/fiff/kit/tests/data/test_hsp.txt b/mne/fiff/kit/tests/data/test_hsp.txt
new file mode 100755
index 0000000..1965f96
--- /dev/null
+++ b/mne/fiff/kit/tests/data/test_hsp.txt
@@ -0,0 +1,504 @@
+% Copyright (c) 1995-2012, Applied Research Associates NZ Ltd. All rights reserved.
+% Ascii 3D points file created by FastSCAN V4.0.7 on Thu May 31 13:04:40 2012
+% Raw(separate)Exp(b,p,CRLF)
+% 346306 3D points, x y z per line
+ -106.93    99.80    68.81
+ -106.28   100.26    67.09
+ -105.16   100.67    66.55
+ -106.53    99.70    66.04
+ -105.17   100.15    65.40
+ -106.39    98.95    64.85
+ -105.40    99.77    64.16
+ -104.02    99.87    63.67
+ -104.74    99.11    62.77
+ -103.62    99.72    62.13
+ -136.60    80.86    57.38
+ -137.22    80.32    56.79
+ -136.37    80.84    56.27
+ -139.31    78.86    57.10
+ -138.44    79.33    56.59
+ -137.63    79.78    56.11
+ -136.92    80.45    55.57
+ -135.97    80.63    55.16
+ -140.13    78.11    56.54
+ -139.40    78.74    56.01
+ -138.64    79.28    55.51
+ -137.85    79.78    55.01
+ -136.94    80.53    54.36
+ -136.01    80.76    53.95
+ -140.73    77.69    55.63
+ -139.76    78.60    54.89
+ -138.81    79.41    54.20
+ -137.61    80.12    53.47
+ -136.40    80.81    52.75
+ -134.96    80.33    52.48
+ -133.34    80.00    52.07
+ -131.81    80.37    51.39
+ -130.27    80.94    50.62
+ -129.09    81.45    49.99
+ -127.23    82.17    49.04
+ -123.71    83.50    47.27
+ -120.54    84.75    45.65
+ -111.40    88.41    40.93
+ -147.06    74.00    58.00
+ -145.41    74.70    57.11
+ -144.13    75.35    56.38
+ -142.99    75.81    55.78
+ -141.97    76.62    55.07
+ -141.19    77.78    54.28
+ -139.82    78.18    53.64
+ -138.96    79.54    52.73
+ -137.76    80.24    52.00
+ -136.66    81.11    51.23
+ -134.88    80.24    51.02
+ -133.32    79.79    50.70
+ -132.05    80.37    50.01
+ -130.73    80.86    49.34
+ -129.40    81.34    48.68
+ -128.08    81.83    48.01
+ -126.74    82.30    47.35
+ -125.39    82.74    46.70
+ -124.08    83.24    46.04
+ -122.74    83.72    45.38
+ -121.14    84.20    44.62
+ -119.74    85.01    43.79
+ -117.88    85.95    42.74
+ -115.21    86.88    41.42
+ -113.10    87.43    40.46
+ -112.12    87.60    40.05
+ -152.91    71.19    59.57
+ -151.50    71.86    58.79
+ -150.30    72.61    58.04
+ -149.06    73.27    57.32
+ -147.78    73.84    56.62
+ -146.51    74.43    55.93
+ -145.24    75.02    55.22
+ -144.00    75.64    54.51
+ -142.72    76.19    53.82
+ -141.78    77.42    52.95
+ -140.40    77.79    52.31
+ -139.44    78.95    51.46
+ -138.35    79.86    50.68
+ -137.26    80.76    49.90
+ -135.88    81.11    49.27
+ -133.67    79.93    49.05
+ -132.28    80.28    48.42
+ -130.98    80.81    47.74
+ -129.65    81.28    47.08
+ -128.31    81.73    46.43
+ -126.99    82.22    45.77
+ -125.67    82.72    45.10
+ -124.33    83.19    44.44
+ -122.97    83.59    43.80
+ -121.62    84.03    43.15
+ -120.36    84.62    42.46
+ -119.20    85.38    41.73
+ -118.00    86.07    41.02
+ -116.44    86.16    40.45
+ -114.87    86.26    39.87
+ -113.58    86.36    39.39
+ -155.81    69.77    59.31
+ -154.17    70.68    58.34
+ -152.54    71.63    57.36
+ -150.89    72.50    56.40
+ -149.22    73.34    55.45
+ -147.53    74.11    54.52
+ -145.88    74.96    53.57
+ -144.24    75.83    52.61
+ -143.15    76.75    51.82
+ -142.69    78.40    50.93
+ -140.45    77.58    50.52
+ -139.40    79.05    49.50
+ -138.20    80.24    48.56
+ -136.64    81.24    47.58
+ -134.63    80.42    47.26
+ -132.86    80.07    46.81
+ -131.10    80.72    45.92
+ -129.36    81.40    45.03
+ -127.57    81.98    44.16
+ -125.81    82.64    43.28
+ -124.07    83.32    42.39
+ -122.33    84.00    41.50
+ -120.38    84.32    40.69
+ -118.62    84.96    39.81
+ -116.52    85.06    39.05
+ -114.81    85.37    38.33
+ -159.09    67.93    59.18
+ -157.48    69.01    58.15
+ -155.86    69.97    57.17
+ -154.27    70.97    56.17
+ -152.63    71.86    55.21
+ -150.97    72.72    54.26
+ -149.31    73.56    53.30
+ -147.64    74.37    52.37
+ -146.00    75.24    51.41
+ -144.55    75.94    50.59
+ -144.40    78.20    49.53
+ -143.81    79.07    48.94
+ -141.38    77.36    48.85
+ -139.81    77.83    48.10
+ -139.05    79.86    46.94
+ -137.42    80.71    46.00
+ -135.91    81.29    45.22
+ -133.79    80.29    44.94
+ -132.01    80.39    44.29
+ -130.22    80.97    43.42
+ -128.51    81.71    42.52
+ -126.74    82.32    41.64
+ -124.95    82.93    40.77
+ -123.12    83.43    39.92
+ -121.17    83.75    39.11
+ -119.37    84.33    38.25
+ -117.38    84.60    37.44
+ -115.39    84.89    36.63
+ -113.90    85.10    36.02
+ -162.35    66.02    58.96
+ -160.79    67.16    57.92
+ -159.22    68.24    56.90
+ -157.65    69.32    55.88
+ -155.99    70.18    54.93
+ -154.39    71.15    53.94
+ -152.80    72.14    52.95
+ -151.15    73.00    52.00
+ -149.47    73.76    51.07
+ -147.81    74.58    50.13
+ -146.20    75.49    49.16
+ -145.98    79.16    47.47
+ -144.10    78.47    47.11
+ -141.83    77.07    46.93
+ -140.09    77.72    46.04
+ -139.06    79.71    44.80
+ -137.39    80.47    43.89
+ -135.61    81.04    43.03
+ -132.90    79.92    42.59
+ -131.12    80.52    41.72
+ -129.27    81.00    40.88
+ -127.65    81.86    39.94
+ -125.91    82.54    39.05
+ -124.07    83.03    38.21
+ -121.98    83.10    37.46
+ -120.32    83.92    36.53
+ -118.31    84.13    35.75
+ -116.33    84.43    34.93
+ -114.35    84.73    34.12
+ -112.62    85.00    33.40
+ -167.07    63.42    59.60
+ -165.19    64.26    58.58
+ -163.63    65.48    57.50
+ -162.08    66.64    56.46
+ -160.52    67.74    55.43
+ -158.98    68.85    54.40
+ -157.39    69.87    53.41
+ -155.76    70.78    52.44
+ -154.12    71.65    51.48
+ -152.52    72.59    50.51
+ -150.84    73.37    49.58
+ -149.15    74.09    48.66
+ -147.56    75.04    47.68
+ -147.75    78.50    46.23
+ -147.02    79.60    45.50
+ -145.50    78.61    45.39
+ -143.26    76.73    45.42
+ -141.40    77.14    44.59
+ -139.71    77.87    43.68
+ -138.74    79.96    42.42
+ -137.04    80.66    41.53
+ -135.25    81.20    40.68
+ -133.20    81.29    39.93
+ -131.09    81.27    39.21
+ -129.07    81.42    38.45
+ -127.21    81.88    37.62
+ -125.36    82.35    36.78
+ -123.46    82.74    35.95
+ -121.61    83.21    35.11
+ -119.72    83.62    34.28
+ -117.71    83.85    33.48
+ -115.72    84.13    32.67
+ -113.68    84.33    31.87
+ -111.74    84.71    31.03
+ -167.49    62.96    57.86
+ -166.25    64.50    56.76
+ -164.48    65.11    55.87
+ -162.95    66.29    54.82
+ -161.41    67.44    53.78
+ -159.86    68.54    52.76
+ -158.28    69.55    51.76
+ -156.66    70.48    50.79
+ -155.03    71.36    49.83
+ -153.43    72.28    48.87
+ -151.76    73.06    47.93
+ -150.09    73.83    47.00
+ -148.61    74.48    46.19
+ -148.24    75.80    45.49
+ -148.74    78.86    44.33
+ -147.34    79.11    43.72
+ -144.21    76.46    43.77
+ -142.33    76.81    42.95
+ -140.63    77.52    42.04
+ -139.23    78.32    41.20
+ -138.33    80.03    40.14
+ -136.64    80.75    39.25
+ -134.66    80.94    38.48
+ -132.60    81.00    37.74
+ -130.48    80.95    37.03
+ -128.55    81.28    36.22
+ -126.70    81.73    35.39
+ -124.87    82.24    34.54
+ -122.94    82.57    33.72
+ -121.07    83.01    32.88
+ -119.11    83.30    32.07
+ -117.09    83.52    31.27
+ -115.15    83.86    30.44
+ -113.12    84.09    29.64
+ -111.06    84.27    28.84
+ -172.55    60.11    58.89
+ -170.94    60.98    57.95
+ -169.57    62.62    56.76
+ -167.73    63.11    55.90
+ -166.09    64.05    54.92
+ -164.64    65.43    53.81
+ -163.12    66.62    52.76
+ -161.55    67.66    51.76
+ -160.01    68.76    50.73
+ -158.42    69.74    49.75
+ -156.77    70.58    48.80
+ -155.20    71.56    47.81
+ -153.55    72.40    46.87
+ -151.93    73.26    45.91
+ -150.25    74.00    44.99
+ -148.93    74.94    44.11
+ -148.99    78.23    42.70
+ -146.00    76.17    42.53
+ -144.23    76.31    41.84
+ -142.50    76.93    40.95
+ -140.76    77.58    40.06
+ -139.01    78.17    39.19
+ -137.84    79.87    38.04
+ -136.24    80.74    37.11
+ -134.35    81.09    36.30
+ -132.19    80.98    35.60
+ -130.32    81.38    34.78
+ -128.17    81.31    34.07
+ -126.27    81.67    33.25
+ -124.42    82.11    32.41
+ -122.48    82.43    31.60
+ -120.57    82.79    30.77
+ -118.61    83.10    29.95
+ -116.62    83.36    29.14
+ -114.62    83.60    28.34
+ -112.57    83.79    27.53
+ -110.50    83.96    26.73
+ -109.00    84.18    26.11
+ -184.83    55.65    63.23
+ -182.31    56.35    62.05
+ -177.75    57.74    59.85
+ -174.67    58.96    58.24
+ -173.27    59.96    57.32
+ -171.90    60.97    56.40
+ -170.35    62.16    55.34
+ -168.77    63.26    54.31
+ -167.11    64.14    53.34
+ -165.53    65.20    52.33
+ -163.96    66.25    51.32
+ -162.45    67.44    50.27
+ -160.89    68.48    49.27
+ -159.30    69.44    48.28
+ -157.72    70.43    47.30
+ -156.11    71.34    46.33
+ -154.45    72.12    45.40
+ -152.82    72.95    44.45
+ -151.19    73.79    43.50
+ -149.77    74.53    42.67
+ -149.01    76.60    41.51
+ -146.53    75.70    41.00
+ -144.78    76.30    40.11
+ -143.03    76.89    39.24
+ -141.29    77.51    38.36
+ -139.52    78.06    37.49
+ -138.11    78.83    36.68
+ -137.07    80.27    35.70
+ -135.40    81.01    34.80
+ -133.74    81.29    34.10
+ -132.04    81.06    33.60
+ -130.26    81.15    32.94
+ -128.44    81.64    32.09
+ -126.29    81.57    31.37
+ -124.34    81.85    30.57
+ -122.42    82.19    29.75
+ -120.46    82.46    28.94
+ -118.56    82.86    28.10
+ -116.56    83.09    27.29
+ -114.52    83.27    26.50
+ -112.41    83.37    25.71
+ -110.37    83.58    24.90
+ -108.56    83.73    24.19
+ -185.57    55.22    61.89
+ -174.89    59.14    56.44
+ -173.80    60.22    55.60
+ -172.40    61.17    54.69
+ -170.70    61.97    53.75
+ -169.16    63.13    52.70
+ -167.55    64.11    51.71
+ -165.99    65.20    50.68
+ -164.44    66.30    49.66
+ -162.90    67.41    48.64
+ -161.33    68.42    47.65
+ -159.80    69.51    46.63
+ -158.21    70.45    45.66
+ -156.59    71.32    44.70
+ -154.95    72.14    43.75
+ -153.35    73.03    42.79
+ -151.72    73.86    41.85
+ -150.46    74.90    40.94
+ -150.05    76.77    39.99
+ -147.87    75.74    39.65
+ -146.23    76.12    38.89
+ -144.49    76.71    38.02
+ -142.69    77.22    37.16
+ -140.89    77.71    36.31
+ -139.05    78.15    35.48
+ -137.75    79.10    34.62
+ -136.70    80.50    33.66
+ -134.98    81.13    32.78
+ -133.43    81.62    32.03
+ -131.59    81.15    31.59
+ -129.67    80.99    30.98
+ -128.04    81.80    30.07
+ -126.34    82.04    29.36
+ -124.68    81.91    28.83
+ -122.85    81.95    28.17
+ -120.89    82.22    27.36
+ -118.93    82.51    26.55
+ -116.95    82.79    25.73
+ -115.17    82.94    25.03
+ -113.35    83.05    24.33
+ -111.30    83.24    23.52
+ -109.47    83.36    22.82
+ -108.11    83.37    22.33
+ -186.20    54.65    60.58
+ -175.44    58.30    55.20
+ -174.50    59.78    54.24
+ -173.36    60.73    53.42
+ -172.28    61.82    52.58
+ -170.75    62.40    51.79
+ -169.28    63.08    50.96
+ -167.94    64.11    50.04
+ -166.54    64.99    49.17
+ -165.23    66.04    48.25
+ -163.69    67.13    47.23
+ -162.15    68.18    46.23
+ -160.62    69.27    45.21
+ -159.01    70.15    44.25
+ -157.42    71.09    43.28
+ -155.83    71.99    42.32
+ -154.23    72.87    41.36
+ -152.65    73.79    40.39
+ -151.78    75.11    39.50
+ -151.15    76.90    38.51
+ -148.78    75.71    38.16
+ -147.11    75.92    37.47
+ -145.38    76.54    36.59
+ -143.62    77.09    35.72
+ -141.81    77.56    34.87
+ -139.96    77.96    34.05
+ -138.11    78.37    33.22
+ -136.70    79.59    32.21
+ -135.16    80.55    31.26
+ -133.60    81.48    30.32
+ -131.81    81.54    29.67
+ -129.42    80.58    29.23
+ -127.77    81.33    28.32
+ -126.08    82.04    27.43
+ -124.25    82.06    26.77
+ -122.63    82.01    26.22
+ -120.74    81.97    25.56
+ -118.76    82.22    24.75
+ -116.79    82.50    23.93
+ -114.66    82.53    23.15
+ -112.89    82.73    22.44
+ -111.25    82.71    21.86
+ -109.42    82.82    21.15
+ -107.58    82.94    20.45
+ -186.56    54.56    58.89
+ -184.07    54.98    57.83
+ -179.56    55.72    55.91
+ -176.83    56.85    54.45
+ -175.76    58.01    53.57
+ -174.82    59.48    52.60
+ -173.79    60.69    51.72
+ -172.55    61.97    50.73
+ -170.79    62.58    49.83
+ -169.12    63.41    48.88
+ -167.59    64.55    47.84
+ -166.06    65.67    46.81
+ -164.51    66.73    45.80
+ -162.94    67.73    44.81
+ -161.45    68.88    43.78
+ -159.87    69.84    42.80
+ -158.27    70.73    41.83
+ -156.72    71.72    40.84
+ -155.12    72.59    39.88
+ -153.53    73.48    38.92
+ -152.21    74.91    37.83
+ -150.74    76.01    36.83
+ -149.72    77.01    36.04
+ -147.67    75.96    35.74
+ -145.88    76.44    34.89
+ -144.10    76.95    34.03
+ -142.34    77.51    33.17
+ -140.56    78.02    32.31
+ -138.74    78.48    31.47
+ -137.10    79.26    30.56
+ -135.23    79.63    29.74
+ -133.87    80.90    28.73
+ -132.26    81.71    27.81
+ -130.69    82.15    27.07
+ -128.22    80.61    26.84
+ -126.59    80.96    26.11
+ -124.90    81.66    25.22
+ -123.08    82.14    24.36
+ -120.85    81.97    23.64
+ -118.66    81.88    22.89
+ -116.61    82.02    22.09
+ -114.52    82.14    21.29
+ -112.43    82.23    20.50
+ -110.27    82.27    19.71
+ -108.15    82.36    18.91
+ -186.55    53.87    57.31
+ -184.10    54.37    56.22
+ -180.13    55.21    54.44
+ -177.74    56.58    53.00
+ -176.52    58.00    51.95
+ -175.08    58.83    51.08
+ -174.21    60.42    50.09
+ -172.98    61.72    49.09
+ -171.46    62.90    48.04
+ -169.57    63.19    47.23
+ -168.05    64.34    46.19
+ -166.51    65.41    45.18
+ -164.99    66.52    44.15
+ -163.42    67.51    43.16
+ -161.88    68.55    42.16
+ -160.36    69.63    41.14
+ -158.80    70.61    40.16
+ -157.21    71.50    39.19
+ -155.71    72.57    38.18
+ -154.56    73.84    37.22
+ -153.67    75.11    36.35
+ -152.29    75.86    35.53
+ -150.49    76.30    34.69
+ -148.77    76.87    33.82
+ -146.56    76.53    33.16
+ -144.62    76.72    32.38
+ -142.91    77.37    31.49
+ -141.21    78.03    30.61
+ -139.29    78.28    29.81
+ -137.50    78.79    28.96
+ -135.53    78.98    28.17
+ -133.93    79.81    27.25
+ -132.55    81.03    26.25
+ -130.99    81.91    25.32
+ -129.37    82.25    24.59
+ -126.85    80.65    24.37
+ -125.06    80.73    23.69
+ -123.35    81.39    22.79
\ No newline at end of file
diff --git a/mne/fiff/kit/tests/data/test_mrk.sqd b/mne/fiff/kit/tests/data/test_mrk.sqd
new file mode 100755
index 0000000..47ed513
Binary files /dev/null and b/mne/fiff/kit/tests/data/test_mrk.sqd differ
diff --git a/mne/fiff/kit/tests/test_kit.py b/mne/fiff/kit/tests/test_kit.py
new file mode 100644
index 0000000..3bca73b
--- /dev/null
+++ b/mne/fiff/kit/tests/test_kit.py
@@ -0,0 +1,109 @@
+"""Data and Channel Location Equivalence Tests"""
+
+# Author: Teon Brooks <teon at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import inspect
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+import scipy.io
+from mne.utils import _TempDir
+from mne.fiff import Raw, pick_types, kit
+
+FILE = inspect.getfile(inspect.currentframe())
+parent_dir = op.dirname(op.abspath(FILE))
+data_dir = op.join(parent_dir, 'data')
+tempdir = _TempDir()
+
+
+def test_data():
+    """Test reading raw kit files
+    """
+    raw_py = kit.read_raw_kit(input_fname=op.join(data_dir, 'test.sqd'),
+                              mrk_fname=op.join(data_dir, 'test_mrk.sqd'),
+                              elp_fname=op.join(data_dir, 'test_elp.txt'),
+                              hsp_fname=op.join(data_dir, 'test_hsp.txt'),
+                              sns_fname=op.join(data_dir, 'sns.txt'),
+                              stim=range(167, 159, -1), stimthresh=1,
+                              preload=True)
+    # Binary file only stores the sensor channels
+    py_picks = pick_types(raw_py.info, exclude='bads')
+    raw_bin = op.join(data_dir, 'test_bin.fif')
+    raw_bin = Raw(raw_bin, preload=True)
+    bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
+    data_bin, _ = raw_bin[bin_picks]
+    data_py, _ = raw_py[py_picks]
+
+    # this .mat was generated using the Yokogawa MEG Reader
+    data_Ykgw = op.join(data_dir, 'test_Ykgw.mat')
+    data_Ykgw = scipy.io.loadmat(data_Ykgw)['data']
+    data_Ykgw = data_Ykgw[py_picks]
+
+    assert_array_almost_equal(data_py, data_Ykgw)
+
+    py_picks = pick_types(raw_py.info, stim=True, exclude='bads')
+    data_py, _ = raw_py[py_picks]
+    assert_array_almost_equal(data_py, data_bin)
+
+
+def test_read_segment():
+    """Test writing raw kit files when preload is False
+    """
+    raw1 = kit.read_raw_kit(input_fname=op.join(data_dir, 'test.sqd'),
+                            mrk_fname=op.join(data_dir, 'test_mrk.sqd'),
+                            elp_fname=op.join(data_dir, 'test_elp.txt'),
+                            hsp_fname=op.join(data_dir, 'test_hsp.txt'),
+                            sns_fname=op.join(data_dir, 'sns.txt'),
+                            stim=range(167, 159, -1), preload=False)
+    raw1_file = op.join(tempdir, 'raw1.fif')
+    raw1.save(raw1_file, buffer_size_sec=.1, overwrite=True)
+    raw2 = kit.read_raw_kit(input_fname=op.join(data_dir, 'test.sqd'),
+                            mrk_fname=op.join(data_dir, 'test_mrk.sqd'),
+                            elp_fname=op.join(data_dir, 'test_elp.txt'),
+                            hsp_fname=op.join(data_dir, 'test_hsp.txt'),
+                            sns_fname=op.join(data_dir, 'sns.txt'),
+                            stim=range(167, 159, -1), preload=True)
+    raw2_file = op.join(tempdir, 'raw2.fif')
+    raw2.save(raw2_file, buffer_size_sec=.1, overwrite=True)
+    raw1 = Raw(raw1_file, preload=True)
+    raw2 = Raw(raw2_file, preload=True)
+    assert_array_equal(raw1._data, raw2._data)
+    raw3 = kit.read_raw_kit(input_fname=op.join(data_dir, 'test.sqd'),
+                            mrk_fname=op.join(data_dir, 'test_mrk.sqd'),
+                            elp_fname=op.join(data_dir, 'test_elp.txt'),
+                            hsp_fname=op.join(data_dir, 'test_hsp.txt'),
+                            sns_fname=op.join(data_dir, 'sns.txt'),
+                            stim=range(167, 159, -1), preload=True)
+    assert_array_almost_equal(raw1._data, raw3._data)
+
+
+def test_ch_loc():
+    """Test raw kit loc
+    """
+    raw_py = kit.read_raw_kit(input_fname=op.join(data_dir, 'test.sqd'),
+                              mrk_fname=op.join(data_dir, 'test_mrk.sqd'),
+                              elp_fname=op.join(data_dir, 'test_elp.txt'),
+                              hsp_fname=op.join(data_dir, 'test_hsp.txt'),
+                              sns_fname=op.join(data_dir, 'sns.txt'),
+                              stim=range(167, 159, -1))
+    raw_bin = Raw(op.join(data_dir, 'test_bin.fif'))
+
+    for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
+        if bin_ch['ch_name'].startswith('MEG'):
+            # the mne_kit2fiff_bin has a different representation of pi.
+            assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=5)
+
+
+def test_stim_ch():
+    """Test raw kit stim ch
+    """
+    raw = kit.read_raw_kit(input_fname=op.join(data_dir, 'test.sqd'),
+                           mrk_fname=op.join(data_dir, 'test_mrk.sqd'),
+                           elp_fname=op.join(data_dir, 'test_elp.txt'),
+                           hsp_fname=op.join(data_dir, 'test_hsp.txt'),
+                           sns_fname=op.join(data_dir, 'sns.txt'),
+                           stim=range(167, 159, -1), preload=True)
+    stim_pick = pick_types(raw.info, meg=False, stim=True, exclude='bads')
+    stim1, _ = raw[stim_pick]
+    stim2 = np.array(raw.read_stim_ch(), ndmin=2)
+    assert_array_equal(stim1, stim2)
diff --git a/mne/fiff/matrix.py b/mne/fiff/matrix.py
new file mode 100644
index 0000000..f8f472f
--- /dev/null
+++ b/mne/fiff/matrix.py
@@ -0,0 +1,133 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import logging
+logger = logging.getLogger('mne')
+
+from .constants import FIFF
+from .tag import find_tag, has_tag
+from .write import write_int, start_block, end_block, write_float_matrix, \
+                   write_name_list
+from .. import verbose
+
+
+def _transpose_named_matrix(mat, copy=True):
+    """Transpose mat inplace (no copy)
+    """
+    if copy is True:
+        mat = mat.copy()
+    mat['nrow'], mat['ncol'] = mat['ncol'], mat['nrow']
+    mat['row_names'], mat['col_names'] = mat['col_names'], mat['row_names']
+    mat['data'] = mat['data'].T
+    return mat
+
+
+ at verbose
+def _read_named_matrix(fid, node, matkind, indent='    ', verbose=None):
+    """Read named matrix from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The opened file descriptor.
+    node : dict
+        The node in the tree.
+    matkind : int
+        The type of matrix.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    mat: dict
+        The matrix data
+    """
+    #   Descend one level if necessary
+    if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX:
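+        # for/else: the else clause below runs only if no child block
+        # matched (i.e. the loop completed without a break)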
+        for k in range(node['nchild']):
+            if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX:
+                if has_tag(node['children'][k], matkind):
+                    node = node['children'][k]
+                    break
+        else:
+            logger.info(indent + 'Desired named matrix (kind = %d) not '
+                        'available' % matkind)
+            return None
+    else:
+        if not has_tag(node, matkind):
+            logger.info(indent + 'Desired named matrix (kind = %d) not '
+                        'available' % matkind)
+            return None
+
+    #   Read everything we need
+    tag = find_tag(fid, node, matkind)
+    if tag is None:
+        raise ValueError('Matrix data missing')
+    else:
+        data = tag.data
+
+    nrow, ncol = data.shape
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
+    if tag is not None and tag.data != nrow:
+        raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW '
+                         'tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
+    if tag is not None and tag.data != ncol:
+        raise ValueError('Number of columns in matrix data and '
+                         'FIFF_MNE_NCOL tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
+    row_names = tag.data.split(':') if tag is not None else []
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
+    col_names = tag.data.split(':') if tag is not None else []
+
+    mat = dict(nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names,
+               data=data)
+    return mat
+
+
+def write_named_matrix(fid, kind, mat):
+    """Write named matrix from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The opened file descriptor.
+    kind : int
+        The kind of the matrix.
+    matkind : int
+        The type of matrix.
+    """
+    # let's save ourselves from disaster
+    n_tot = mat['nrow'] * mat['ncol']
+    if mat['data'].size != n_tot:
+        ratio = n_tot / float(mat['data'].size)
+        if n_tot < mat['data'].size and ratio > 0:
+            ratio = 1 / ratio
+        raise ValueError('Cannot write matrix: row (%i) and column (%i) '
+                         'total element (%i) mismatch with data size (%i), '
+                         'appears to be off by a factor of %gx'
+                         % (mat['nrow'], mat['ncol'], n_tot,
+                            mat['data'].size, ratio))
+    start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
+    write_int(fid, FIFF.FIFF_MNE_NROW, mat['nrow'])
+    write_int(fid, FIFF.FIFF_MNE_NCOL, mat['ncol'])
+
+    if len(mat['row_names']) > 0:
+        # let's prevent unintentional stupidity
+        if len(mat['row_names']) != mat['nrow']:
+            raise ValueError('len(mat["row_names"]) != mat["nrow"]')
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat['row_names'])
+
+    if len(mat['col_names']) > 0:
+        # let's prevent unintentional stupidity
+        if len(mat['col_names']) != mat['ncol']:
+            raise ValueError('len(mat["col_names"]) != mat["ncol"]')
+        write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat['col_names'])
+
+    write_float_matrix(fid, kind, mat['data'])
+    end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
diff --git a/mne/fiff/meas_info.py b/mne/fiff/meas_info.py
new file mode 100644
index 0000000..6fb899a
--- /dev/null
+++ b/mne/fiff/meas_info.py
@@ -0,0 +1,391 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from warnings import warn
+from copy import deepcopy
+import numpy as np
+from scipy import linalg
+from StringIO import StringIO
+
+import logging
+logger = logging.getLogger('mne')
+
+from .open import fiff_open
+from .tree import dir_tree_find, copy_tree
+from .constants import FIFF
+from .tag import read_tag
+from .proj import read_proj, write_proj
+from .ctf import read_ctf_comp, write_ctf_comp
+from .channels import read_bad_channels
+
+from .write import start_block, end_block, write_string, write_dig_point, \
+                   write_float, write_int, write_coord_trans, write_ch_info, \
+                   write_name_list, start_file
+from .. import verbose
+
+
+ at verbose
+def read_meas_info(fid, tree, verbose=None):
+    """Read the measurement info
+
+    Parameters
+    ----------
+    fid : file
+        Open file descriptor.
+    tree : tree
+        FIF tree structure.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    info : dict
+       Info on dataset.
+    meas : dict
+        Node in tree that contains the info.
+    """
+    #   Find the desired blocks
+    meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
+    if len(meas) == 0:
+        raise ValueError('Could not find measurement data')
+    if len(meas) > 1:
+        raise ValueError('Cannot read more than 1 measurement data')
+    meas = meas[0]
+
+    meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
+    if len(meas_info) == 0:
+        raise ValueError('Could not find measurement info')
+    if len(meas_info) > 1:
+        raise ValueError('Cannot read more than 1 measurement info')
+    meas_info = meas_info[0]
+
+    #   Read measurement info
+    dev_head_t = None
+    ctf_head_t = None
+    meas_date = None
+    highpass = None
+    lowpass = None
+    nchan = None
+    sfreq = None
+    chs = []
+    experimenter = None
+    description = None
+    proj_id = None
+    proj_name = None
+    p = 0
+    for k in range(meas_info['nent']):
+        kind = meas_info['directory'][k].kind
+        pos = meas_info['directory'][k].pos
+        if kind == FIFF.FIFF_NCHAN:
+            tag = read_tag(fid, pos)
+            nchan = int(tag.data)
+        elif kind == FIFF.FIFF_SFREQ:
+            tag = read_tag(fid, pos)
+            sfreq = float(tag.data)
+        elif kind == FIFF.FIFF_CH_INFO:
+            tag = read_tag(fid, pos)
+            chs.append(tag.data)
+            p += 1
+        elif kind == FIFF.FIFF_LOWPASS:
+            tag = read_tag(fid, pos)
+            lowpass = float(tag.data)
+        elif kind == FIFF.FIFF_HIGHPASS:
+            tag = read_tag(fid, pos)
+            highpass = float(tag.data)
+        elif kind == FIFF.FIFF_MEAS_DATE:
+            tag = read_tag(fid, pos)
+            meas_date = tag.data
+        elif kind == FIFF.FIFF_COORD_TRANS:
+            tag = read_tag(fid, pos)
+            cand = tag.data
+            if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
+                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                dev_head_t = cand
+            elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
+                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                ctf_head_t = cand
+        elif kind == FIFF.FIFF_EXPERIMENTER:
+            tag = read_tag(fid, pos)
+            experimenter = tag.data
+        elif kind == FIFF.FIFF_DESCRIPTION:
+            tag = read_tag(fid, pos)
+            description = tag.data
+        elif kind == FIFF.FIFF_PROJ_ID:
+            tag = read_tag(fid, pos)
+            proj_id = tag.data
+        elif kind == FIFF.FIFF_PROJ_NAME:
+            tag = read_tag(fid, pos)
+            proj_name = tag.data
+
+    # Check that we have everything we need
+    if nchan is None:
+        raise ValueError('Number of channels is not defined')
+
+    if sfreq is None:
+        raise ValueError('Sampling frequency is not defined')
+
+    if len(chs) == 0:
+        raise ValueError('Channel information not defined')
+
+    if len(chs) != nchan:
+        raise ValueError('Incorrect number of channel definitions found')
+
+    if dev_head_t is None or ctf_head_t is None:
+        hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
+        if len(hpi_result) == 1:
+            hpi_result = hpi_result[0]
+            for k in range(hpi_result['nent']):
+                kind = hpi_result['directory'][k].kind
+                pos = hpi_result['directory'][k].pos
+                if kind == FIFF.FIFF_COORD_TRANS:
+                    tag = read_tag(fid, pos)
+                    cand = tag.data
+                    if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
+                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                        dev_head_t = cand
+                    elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
+                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                        ctf_head_t = cand
+
+    #   Locate the Polhemus data
+    isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
+    dig = None
+    if len(isotrak) == 0:
+        logger.info('Isotrak not found')
+    elif len(isotrak) > 1:
+        warn('Multiple Isotrak found')
+    else:
+        isotrak = isotrak[0]
+        dig = []
+        for k in range(isotrak['nent']):
+            kind = isotrak['directory'][k].kind
+            pos = isotrak['directory'][k].pos
+            if kind == FIFF.FIFF_DIG_POINT:
+                tag = read_tag(fid, pos)
+                dig.append(tag.data)
+                dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+
+    #   Locate the acquisition information
+    acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
+    acq_pars = None
+    acq_stim = None
+    if len(acqpars) == 1:
+        acqpars = acqpars[0]
+        for k in range(acqpars['nent']):
+            kind = acqpars['directory'][k].kind
+            pos = acqpars['directory'][k].pos
+            if kind == FIFF.FIFF_DACQ_PARS:
+                tag = read_tag(fid, pos)
+                acq_pars = tag.data
+            elif kind == FIFF.FIFF_DACQ_STIM:
+                tag = read_tag(fid, pos)
+                acq_stim = tag.data
+
+    #   Load the SSP data
+    projs = read_proj(fid, meas_info)
+
+    #   Load the CTF compensation data
+    comps = read_ctf_comp(fid, meas_info, chs)
+
+    #   Load the bad channel list
+    bads = read_bad_channels(fid, meas_info)
+
+    #
+    #   Put the data together
+    #
+    if tree['id'] is not None:
+        info = dict(file_id=tree['id'])
+    else:
+        info = dict(file_id=None)
+
+    #   Load extra information blocks
+    read_extra_meas_info(fid, tree, info)
+
+    #  Make the most appropriate selection for the measurement id
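+    #  Preference: meas_info parent_id, then meas_info id, then meas id,
+    #  then meas parent_id, falling back to the file id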
+    if meas_info['parent_id'] is None:
+        if meas_info['id'] is None:
+            if meas['id'] is None:
+                if meas['parent_id'] is None:
+                    info['meas_id'] = info['file_id']
+                else:
+                    info['meas_id'] = meas['parent_id']
+            else:
+                info['meas_id'] = meas['id']
+        else:
+            info['meas_id'] = meas_info['id']
+    else:
+        info['meas_id'] = meas_info['parent_id']
+
+    info['experimenter'] = experimenter
+    info['description'] = description
+    info['proj_id'] = proj_id
+    info['proj_name'] = proj_name
+
+    if meas_date is None:
+        info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]
+    else:
+        info['meas_date'] = meas_date
+
+    info['nchan'] = nchan
+    info['sfreq'] = sfreq
+    info['highpass'] = highpass if highpass is not None else 0
+    info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0
+
+    #   Add the channel information and make a list of channel names
+    #   for convenience
+    info['chs'] = chs
+    info['ch_names'] = [ch['ch_name'] for ch in chs]
+
+    #
+    #  Add the coordinate transformations
+    #
+    info['dev_head_t'] = dev_head_t
+    info['ctf_head_t'] = ctf_head_t
+    if dev_head_t is not None and ctf_head_t is not None:
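+        # dev->CTF = (head->CTF) o (dev->head), where head->CTF is the
+        # inverse of ctf_head_t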
+        head_ctf_trans = linalg.inv(ctf_head_t['trans'])
+        dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
+        info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE,
+                             'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD,
+                             'trans': dev_ctf_trans}
+    else:
+        info['dev_ctf_t'] = None
+
+    #   All kinds of auxiliary stuff
+    info['dig'] = dig
+    info['bads'] = bads
+    info['projs'] = projs
+    info['comps'] = comps
+    info['acq_pars'] = acq_pars
+    info['acq_stim'] = acq_stim
+
+    return info, meas
+
+
+def read_extra_meas_info(fid, tree, info):
+    """Read extra blocks from fid"""
+    # the current method saves them into a StringIO file instance for
+    # simplicity; this and its partner, write_extra_meas_info, could be
+    # made more comprehensive (i.e., actually parse and read the data
+    # instead of just storing it for later)
+    blocks = [FIFF.FIFFB_SUBJECT, FIFF.FIFFB_EVENTS,
+              FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS,
+              FIFF.FIFFB_PROCESSING_HISTORY]
+    info['orig_blocks'] = blocks
+
+    fid_str = StringIO()
+    fid_str = start_file(fid_str)
+    start_block(fid_str, FIFF.FIFFB_MEAS_INFO)
+    for block in blocks:
+        nodes = dir_tree_find(tree, block)
+        copy_tree(fid, tree['id'], nodes, fid_str)
+    info['orig_fid_str'] = fid_str
+
+
+def write_extra_meas_info(fid, info):
+    """Write otherwise left out blocks of data"""
+    # uses cStringIO fake file to read the appropriate blocks
+    if 'orig_blocks' in info:
+        # Blocks from the original
+        blocks = info['orig_blocks']
+        fid_str, tree, _ = fiff_open(info['orig_fid_str'])
+        for block in blocks:
+            nodes = dir_tree_find(tree, block)
+            copy_tree(fid_str, tree['id'], nodes, fid)
+
+
+def write_meas_info(fid, info, data_type=None, reset_range=True):
+    """Write measurement info in fif file.
+
+    Parameters
+    ----------
+    fid : file
+        Open file descriptor
+    info : dict
+        The measurement info structure
+    data_type : int
+        The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
+        5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for raw data.
+    reset_range : bool
+        If True, info['chs'][k]['range'] will be set to unity.
+
+    Note
+    ----
+    Tags are written in a particular order for compatibility with maxfilter
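+
+    Examples
+    --------
+    A minimal round-trip sketch (file names are illustrative; assumes
+    ``start_file`` and ``end_file`` from ``mne.fiff.write``)::
+
+        fid, tree, _ = fiff_open('raw.fif')
+        info, meas = read_meas_info(fid, tree)
+        out = start_file('info_copy.fif')
+        write_meas_info(out, info)
+        end_file(out)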
+    """
+
+    # Measurement info
+    start_block(fid, FIFF.FIFFB_MEAS_INFO)
+
+    #   Extra measurement info
+    write_extra_meas_info(fid, info)
+
+    #   Polhemus data
+    if info['dig'] is not None:
+        start_block(fid, FIFF.FIFFB_ISOTRAK)
+        for d in info['dig']:
+            write_dig_point(fid, d)
+
+        end_block(fid, FIFF.FIFFB_ISOTRAK)
+
+    #   megacq parameters
+    if info['acq_pars'] is not None or info['acq_stim'] is not None:
+        start_block(fid, FIFF.FIFFB_DACQ_PARS)
+        if info['acq_pars'] is not None:
+            write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
+
+        if info['acq_stim'] is not None:
+            write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
+
+        end_block(fid, FIFF.FIFFB_DACQ_PARS)
+
+    #   Coordinate transformations if the HPI result block was not there
+    if info['dev_head_t'] is not None:
+        write_coord_trans(fid, info['dev_head_t'])
+
+    if info['ctf_head_t'] is not None:
+        write_coord_trans(fid, info['ctf_head_t'])
+
+    #   Projectors
+    write_proj(fid, info['projs'])
+
+    #   CTF compensation info
+    write_ctf_comp(fid, info['comps'])
+
+    #   Bad channels
+    if len(info['bads']) > 0:
+        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
+        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    #   General
+    if info.get('experimenter') is not None:
+        write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
+    if info.get('description') is not None:
+        write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
+    if info.get('proj_id') is not None:
+        write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
+    if info.get('proj_name') is not None:
+        write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
+    if info.get('meas_date') is not None:
+        write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])
+    write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
+    write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
+    write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
+    write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
+    if data_type is not None:
+        write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
+
+    #  Channel information
+    for k, c in enumerate(info['chs']):
+        #   Scan numbers may have been messed up
+        c = deepcopy(c)
+        c['scanno'] = k + 1
+        # for float/double, the "range" param is unnecessary
+        if reset_range is True:
+            c['range'] = 1.0
+        write_ch_info(fid, c)
+
+    end_block(fid, FIFF.FIFFB_MEAS_INFO)
diff --git a/mne/fiff/open.py b/mne/fiff/open.py
new file mode 100644
index 0000000..8c7aabf
--- /dev/null
+++ b/mne/fiff/open.py
@@ -0,0 +1,204 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import os.path as op
+import gzip
+import cStringIO
+import logging
+logger = logging.getLogger('mne')
+
+from .tag import read_tag_info, read_tag, read_big, Tag
+from .tree import make_dir_tree
+from .constants import FIFF
+from .. import verbose
+
+
+@verbose
+def fiff_open(fname, preload=False, verbose=None):
+    """Open a FIF file.
+
+    Parameters
+    ----------
+    fname : string | fid
+        Name of the fif file, or an opened file (will seek back to 0).
+    preload : bool
+        If True, all data from the file is read into a memory buffer. This
+        requires more memory, but can be faster for I/O operations that require
+        frequent seeks.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fid : file
+        The file descriptor of the open file
+    tree : fif tree
+        The tree is a complex structure filled with dictionaries,
+        lists and tags.
+    directory : list
+        list of nodes.
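+
+    Examples
+    --------
+    A minimal sketch (the file name is illustrative)::
+
+        fid, tree, directory = fiff_open('sample_raw.fif')
+        n_tags = len(directory)  # number of tags found in the file
+        fid.close()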
+    """
+    if isinstance(fname, basestring):
+        if op.splitext(fname)[1].lower() == '.gz':
+            logger.debug('Using gzip')
+            fid = gzip.open(fname, "rb")  # Open in binary mode
+        else:
+            logger.debug('Using normal I/O')
+            fid = open(fname, "rb")  # Open in binary mode
+    else:
+        fid = fname
+        fid.seek(0)
+
+    # do preloading of entire file
+    if preload:
+        # note that cStringIO objects instantiated this way are read-only,
+        # but that's okay here since we are using mode "rb" anyway
+        fid_old = fid
+        fid = cStringIO.StringIO(read_big(fid_old))
+        fid_old.close()
+
+    tag = read_tag_info(fid)
+
+    #   Check that this looks like a fif file
+    if tag.kind != FIFF.FIFF_FILE_ID:
+        raise ValueError('file does not start with a file id tag')
+
+    if tag.type != FIFF.FIFFT_ID_STRUCT:
+        raise ValueError('file does not start with a file id tag')
+
+    if tag.size != 20:
+        raise ValueError('file does not start with a file id tag')
+
+    tag = read_tag(fid)
+
+    if tag.kind != FIFF.FIFF_DIR_POINTER:
+        raise ValueError('file does not have a directory pointer')
+
+    #   Read or create the directory tree
+    logger.debug('    Creating tag directory for %s...' % fname)
+
+    dirpos = int(tag.data)
+    if dirpos > 0:
+        tag = read_tag(fid, dirpos)
+        directory = tag.data
+    else:
+        fid.seek(0, 0)
+        directory = list()
+        while tag.next >= 0:
+            pos = fid.tell()
+            tag = read_tag_info(fid)
+            if tag is None:
+                break  # HACK : to fix file ending with empty tag...
+            else:
+                tag.pos = pos
+                directory.append(tag)
+
+    tree, _ = make_dir_tree(fid, directory)
+
+    logger.debug('[done]')
+
+    #   Back to the beginning
+    fid.seek(0)
+
+    return fid, tree, directory
+
+
+def show_fiff(fname, indent='    ', read_limit=np.inf, max_str=30,
+              output=str, verbose=None):
+    """Show FIFF information
+
+    This function is similar to mne_show_fiff.
+
+    Parameters
+    ----------
+    fname : str
+        Filename to evaluate.
+    indent : str
+        How to indent the lines.
+    read_limit : int
+        Max number of bytes of data to read from a tag. Can be np.inf
+        to always read all data (helps test read completion).
+    max_str : int
+        Max number of characters of string representation to print for
+        each tag's data.
+    output : type
+        Either str or list. str is equivalent to ``"\n".join(list)``,
+        which is more convenient for using ``print show_fiff(...)``.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    if output not in [list, str]:
+        raise ValueError('output must be list or str')
+    f, tree, directory = fiff_open(fname)
+    with f as fid:
+        out = _show_tree(fid, tree['children'][0], indent=indent, level=0,
+                         read_limit=read_limit, max_str=max_str)
+    if output == str:
+        out = '\n'.join(out)
+    return out
+
+
+def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']):
+    """Helper to find matching values"""
+    vals = [k for k, v in FIFF.iteritems()
+            if v == value and any([fmt in k for fmt in fmts])
+            and not any(exc in k for exc in exclude)]
+    return vals
+
+
+def _show_tree(fid, tree, indent, level, read_limit, max_str):
+    """Helper for showing FIFF"""
+    this_idt = indent * level
+    next_idt = indent * (level + 1)
+    # print block-level information
+    out = [this_idt + str(tree['block'][0]) + ' = '
+           + '/'.join(_find_type(tree['block'], fmts=['FIFFB_']))]
+    if tree['directory'] is not None:
+        kinds = [ent.kind for ent in tree['directory']] + [-1]
+        sizes = [ent.size for ent in tree['directory']]
+        poss = [ent.pos for ent in tree['directory']]
+        counter = 0
+        good = True
+        for k, kn, size, pos in zip(kinds[:-1], kinds[1:], sizes, poss):
+            tag = Tag(k, size, 0, pos)
+            if read_limit is None or size <= read_limit:
+                try:
+                    tag = read_tag(fid, pos)
+                except Exception:
+                    good = False
+
+            if kn == k:
+                # don't print if the next item is the same type (count 'em)
+                counter += 1
+            else:
+                # find the tag type
+                this_type = _find_type(k, fmts=['FIFF_'])
+                # prepend a count if necessary
+                prepend = 'x' + str(counter + 1) + ': ' if counter > 0 else ''
+                postpend = ''
+                # print tag data nicely
+                if tag.data is not None:
+                    postpend = ' = ' + str(tag.data)[:max_str]
+                    if isinstance(tag.data, np.ndarray):
+                        if tag.data.size > 1:
+                            postpend += ' ... array size=' + str(tag.data.size)
+                    elif isinstance(tag.data, dict):
+                        postpend += ' ... dict len=' + str(len(tag.data))
+                    elif isinstance(tag.data, basestring):
+                        postpend += ' ... str len=' + str(len(tag.data))
+                    else:
+                        postpend += ' ... (unknown type)'
+                postpend = ('>' * 20 + 'BAD') if not good else postpend
+                out += [next_idt + prepend + str(k) + ' = '
+                        + '/'.join(this_type) + ' (' + str(size) + ')'
+                        + postpend]
+                counter = 0
+                good = True
+
+    # deal with children
+    for branch in tree['children']:
+        out += _show_tree(fid, branch, indent, level + 1, read_limit, max_str)
+    return out
diff --git a/mne/fiff/pick.py b/mne/fiff/pick.py
new file mode 100644
index 0000000..c54e037
--- /dev/null
+++ b/mne/fiff/pick.py
@@ -0,0 +1,494 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+import re
+from warnings import warn
+
+import logging
+logger = logging.getLogger('mne')
+
+import numpy as np
+from .constants import FIFF
+from .. import verbose
+
+
+def channel_type(info, idx):
+    """Get channel type
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info
+    idx : int
+        Index of channel
+
+    Returns
+    -------
+    type : 'grad' | 'mag' | 'eeg' | 'stim' | 'eog' | 'emg' | 'ecg'
+           'ref_meg' | 'resp'
+        Type of channel
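+
+    Examples
+    --------
+    A minimal sketch (``raw`` is assumed to be a loaded Raw instance)::
+
+        kind = channel_type(raw.info, 0)  # e.g. 'grad', 'mag' or 'eeg'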
+    """
+    kind = info['chs'][idx]['kind']
+    if kind == FIFF.FIFFV_MEG_CH:
+        if info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T_M:
+            return 'grad'
+        elif info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T:
+            return 'mag'
+    elif kind == FIFF.FIFFV_REF_MEG_CH:
+        return 'ref_meg'
+    elif kind == FIFF.FIFFV_EEG_CH:
+        return 'eeg'
+    elif kind == FIFF.FIFFV_STIM_CH:
+        return 'stim'
+    elif kind == FIFF.FIFFV_EOG_CH:
+        return 'eog'
+    elif kind == FIFF.FIFFV_EMG_CH:
+        return 'emg'
+    elif kind == FIFF.FIFFV_ECG_CH:
+        return 'ecg'
+    elif kind == FIFF.FIFFV_RESP_CH:
+        return 'resp'
+    elif kind == FIFF.FIFFV_MISC_CH:
+        return 'misc'
+    elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
+                  FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
+                  FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
+                  FIFF.FIFFV_HPI_MOV]:
+        return 'chpi'  # channels relative to head position monitoring
+    raise ValueError('Unknown channel type')
+
+
+def pick_channels(ch_names, include, exclude=[]):
+    """Pick channels by names
+
+    Returns the indices of the good channels in ch_names.
+
+    Parameters
+    ----------
+    ch_names : list of string
+        List of channels.
+    include : list of string
+        List of channels to include (if empty include all available).
+    exclude : list of string
+        List of channels to exclude (if empty do not exclude any channel).
+
+    Returns
+    -------
+    sel : array of int
+        Indices of good channels.
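+
+    Examples
+    --------
+    A minimal illustration with made-up channel names:
+
+    >>> pick_channels(['MEG 0111', 'MEG 0112', 'EEG 001'],
+    ...               include=[], exclude=['MEG 0112'])
+    array([0, 2])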
+    """
+    sel = []
+    for k, name in enumerate(ch_names):
+        if (len(include) == 0 or name in include) and name not in exclude:
+            sel.append(k)
+    sel = np.unique(sel)  # np.unique also returns the indices sorted
+    return sel
+
+
+def pick_channels_regexp(ch_names, regexp):
+    """Pick channels using regular expression
+
+    Returns the indices of the good channels in ch_names.
+
+    Parameters
+    ----------
+    ch_names : list of string
+        List of channels
+
+    regexp : string
+        The regular expression. See python standard module for regular
+        expressions.
+
+    Returns
+    -------
+    sel : array of int
+        Indices of good channels.
+
+    Examples
+    --------
+    >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1')
+    [0]
+    >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *')
+    [0, 1, 2]
+    """
+    r = re.compile(regexp)
+    return [k for k, name in enumerate(ch_names) if r.match(name)]
+
+
+def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
+               emg=False, ref_meg=False, misc=False, resp=False, chpi=False,
+               include=[], exclude=None, selection=None):
+    """Pick channels by type and names
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    meg : bool or string
+        If True include all MEG channels. If False include none.
+        If string, it can be 'mag', 'grad', 'planar1' or 'planar2' to select
+        only magnetometers, all gradiometers, or a specific type of
+        gradiometer.
+    eeg : bool
+        If True include EEG channels.
+    eog : bool
+        If True include EOG channels.
+    ecg : bool
+        If True include ECG channels.
+    emg : bool
+        If True include EMG channels.
+    stim : bool
+        If True include stimulus channels.
+    ref_meg: bool
+        If True include CTF / 4D reference channels.
+    misc : bool
+        If True include miscellaneous analog channels.
+    resp : bool
+        If True include response-trigger channel. For some MEG systems this
+        is separate from the stim channel.
+    chpi : bool
+        If True include continuous HPI coil channels.
+    include : list of string
+        List of additional channels to include. If empty do not include any.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in info['bads'].
+    selection : list of string
+        Restrict sensor channels (MEG, EEG) to this list of channel names.
+
+    Returns
+    -------
+    sel : array of int
+        Indices of good channels.
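+
+    Examples
+    --------
+    A typical call (``raw`` is assumed to be a loaded Raw instance)::
+
+        picks = pick_types(raw.info, meg='grad', eeg=False, stim=True,
+                           exclude='bads')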
+    """
+    nchan = info['nchan']
+    pick = np.zeros(nchan, dtype=np.bool)
+
+    if exclude is None:
+        msg = ('In pick_types, the parameter "exclude" must be specified as '
+               'either "bads" or a list of channels to exclude. In 0.7, the '
+               'default will be changed from [] (current behavior) to "bads".')
+        warn(msg, category=DeprecationWarning)
+        logger.warn(msg)
+        exclude = []
+    elif exclude == 'bads':
+        exclude = info.get('bads', [])
+    elif not isinstance(exclude, list):
+        raise ValueError('exclude must either be "bads" or a list of strings.'
+                         ' If only one channel is to be excluded, use '
+                         '[ch_name] instead of passing ch_name.')
+
+    for k in range(nchan):
+        kind = info['chs'][k]['kind']
+        if kind == FIFF.FIFFV_MEG_CH:
+            if meg is True:
+                pick[k] = True
+            elif info['chs'][k]['unit'] == FIFF.FIFF_UNIT_T_M:
+                if meg == 'grad':
+                    pick[k] = True
+                elif meg == 'planar1' and info['ch_names'][k].endswith('2'):
+                    pick[k] = True
+                elif meg == 'planar2' and info['ch_names'][k].endswith('3'):
+                    pick[k] = True
+            elif (meg == 'mag'
+                    and info['chs'][k]['unit'] == FIFF.FIFF_UNIT_T):
+                pick[k] = True
+        elif kind == FIFF.FIFFV_EEG_CH and eeg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_STIM_CH and stim:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_EOG_CH and eog:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_ECG_CH and ecg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_EMG_CH and emg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_MISC_CH and misc:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_REF_MEG_CH and ref_meg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_RESP_CH and resp:
+            pick[k] = True
+        elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
+                      FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
+                      FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
+                      FIFF.FIFFV_HPI_MOV] and chpi:
+            pick[k] = True
+
+    # restrict channels to selection if provided
+    if selection is not None:
+        # the selection only restricts these types of channels
+        sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,
+                    FIFF.FIFFV_EEG_CH]
+        for k in np.where(pick)[0]:
+            if (info['chs'][k]['kind'] in sel_kind
+                    and info['ch_names'][k] not in selection):
+                pick[k] = False
+
+    myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]
+    myinclude += include
+
+    if len(myinclude) == 0:
+        sel = []
+    else:
+        sel = pick_channels(info['ch_names'], myinclude, exclude)
+
+    return sel
+
+
+def pick_info(info, sel=[]):
+    """Restrict an info structure to a selection of channels
+
+    Parameters
+    ----------
+    info : dict
+        Info structure from evoked or raw data.
+    sel : list of int
+        Indices of channels to include.
+
+    Returns
+    -------
+    res : dict
+        Info structure restricted to a selection of channels.
+    """
+
+    res = deepcopy(info)
+    if len(sel) == 0:
+        raise ValueError('No channels match the selection.')
+
+    res['chs'] = [res['chs'][k] for k in sel]
+    res['ch_names'] = [res['ch_names'][k] for k in sel]
+    res['nchan'] = len(sel)
+    return res
+
+
+def pick_channels_evoked(orig, include=[], exclude=[]):
+    """Pick channels from evoked data
+
+    Parameters
+    ----------
+    orig : Evoked object
+        One evoked dataset.
+    include : list of string, (optional)
+        List of channels to include (if empty, include all available).
+    exclude : list of string, (optional)
+        Channels to exclude (if empty, do not exclude any).
+
+    Returns
+    -------
+    res : instance of Evoked
+        Evoked data restricted to selected channels. If include and
+        exclude are empty it returns orig without copy.
+    """
+
+    if len(include) == 0 and len(exclude) == 0:
+        return orig
+
+    sel = pick_channels(orig.info['ch_names'], include=include,
+                        exclude=exclude)
+
+    if len(sel) == 0:
+        raise ValueError('No channels match the selection.')
+
+    res = deepcopy(orig)
+    #
+    #   Modify the measurement info
+    #
+    res.info = pick_info(res.info, sel)
+    #
+    #   Create the reduced data set
+    #
+    res.data = res.data[sel, :]
+
+    return res
+
+
+def pick_types_evoked(orig, meg=True, eeg=False, stim=False, eog=False,
+                      ecg=False, emg=False, ref_meg=False, misc=False,
+                      resp=False, chpi=False, include=[], exclude=None):
+    """Pick by channel type and names from evoked data
+
+    Parameters
+    ----------
+    orig : instance of Evoked
+        The evoked dataset to pick from.
+    meg : bool or string
+        If True include all MEG channels. If False include none.
+        If string, it can be 'mag' or 'grad' to select only magnetometers
+        or gradiometers.
+    eeg : bool
+        If True include EEG channels
+    eog : bool
+        If True include EOG channels
+    ecg : bool
+        If True include ECG channels
+    emg : bool
+        If True include EMG channels
+    stim : bool
+        If True include stimulus channels
+    ref_meg : bool
+        If True include CTF / 4D reference channels
+    misc : bool
+        If True include miscellaneous analog channels
+    resp : bool
+        If True include response-trigger channel. For some MEG systems this
+        is separate from the stim channel.
+    chpi : bool
+        If True include continuous HPI coil channels.
+    include : list of string
+        List of additional channels to include. If empty do not include any.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in info['bads'].
+
+    Returns
+    -------
+    res : instance of Evoked
+        Evoked data restricted to selected channels. If include and
+        exclude are empty it returns orig without copy.
+    """
+    sel = pick_types(info=orig.info, meg=meg, eeg=eeg, stim=stim, eog=eog,
+                     ecg=ecg, emg=emg, ref_meg=ref_meg, misc=misc,
+                     resp=resp, chpi=chpi, include=include, exclude=exclude)
+    include_ch_names = [orig.ch_names[k] for k in sel]
+    return pick_channels_evoked(orig, include_ch_names)
+
+
+ at verbose
+def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
+    """Pick channels from forward operator
+
+    Parameters
+    ----------
+    orig : dict
+        A forward solution.
+    include : list of string (optional)
+        List of channels to include (if empty, include all available).
+    exclude : list of string (optional)
+        Channels to exclude (if empty, do not exclude any).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    res : dict
+        Forward solution restricted to selected channels. If include and
+        exclude are empty it returns orig without copy.
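+
+    Examples
+    --------
+    A typical call (``fwd`` assumed to come from
+    ``mne.read_forward_solution``; channel names are illustrative)::
+
+        fwd_sub = pick_channels_forward(fwd, include=['MEG 0111',
+                                                      'MEG 0112'])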
+    """
+
+    if len(include) == 0 and len(exclude) == 0:
+        return orig
+
+    sel = pick_channels(orig['sol']['row_names'], include=include,
+                        exclude=exclude)
+
+    fwd = deepcopy(orig)
+
+    #   Do we have something?
+    nuse = len(sel)
+    if nuse == 0:
+        raise ValueError('Nothing remains after picking')
+
+    logger.info('    %d out of %d channels remain after picking'
+                % (nuse, fwd['nchan']))
+
+    #   Pick the correct rows of the forward operator
+    fwd['sol']['data'] = fwd['sol']['data'][sel, :]
+    fwd['sol']['nrow'] = nuse
+
+    ch_names = [fwd['sol']['row_names'][k] for k in sel]
+    fwd['nchan'] = nuse
+    fwd['sol']['row_names'] = ch_names
+
+    fwd['info']['ch_names'] = [fwd['info']['ch_names'][k] for k in sel]
+    fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel]
+    fwd['info']['nchan'] = nuse
+    fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]
+
+    if fwd['sol_grad'] is not None:
+        fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel, :]
+        fwd['sol_grad']['nrow'] = nuse
+        fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]
+                                        for k in sel]
+
+    return fwd
+
+
+def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, include=[],
+                       exclude=[]):
+    """Pick by channel type and names from a forward operator
+
+    Parameters
+    ----------
+    orig : dict
+        A forward solution
+    meg : bool or string
+        If True include all MEG channels. If False include none.
+        If string, it can be 'mag' or 'grad' to select only magnetometers
+        or gradiometers.
+    eeg : bool
+        If True include EEG channels
+    ref_meg : bool
+        If True include CTF / 4D reference channels
+    include : list of string
+        List of additional channels to include. If empty do not include any.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in orig['info']['bads'].
+
+    Returns
+    -------
+    res : dict
+        Forward solution restricted to selected channel types.
+    """
+    info = orig['info']
+    sel = pick_types(info, meg, eeg, ref_meg=ref_meg, include=include,
+                     exclude=exclude)
+    if len(sel) == 0:
+        raise ValueError('No valid channels found')
+    include_ch_names = [info['ch_names'][k] for k in sel]
+    return pick_channels_forward(orig, include_ch_names)
+
+
+def channel_indices_by_type(info):
+    """Get indices of channels by type
+    """
+    idx = dict(grad=[], mag=[], eeg=[], eog=[], ecg=[])
+    for k in range(len(info['chs'])):
+        ch_type = channel_type(info, k)
+        if ch_type in idx:
+            idx[ch_type].append(k)
+
+    return idx
+
+
+def pick_channels_cov(orig, include=[], exclude=[]):
+    """Pick channels from covariance matrix
+
+    Parameters
+    ----------
+    orig : Covariance
+        A covariance.
+    include : list of string, (optional)
+        List of channels to include (if empty, include all available).
+    exclude : list of string, (optional)
+        Channels to exclude (if empty, do not exclude any).
+
+    Returns
+    -------
+    res : dict
+        Covariance solution restricted to selected channels.
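+
+    Examples
+    --------
+    A typical call (``cov`` assumed to come from ``mne.read_cov``)::
+
+        eeg_names = [name for name in cov['names'] if name.startswith('EEG')]
+        cov_eeg = pick_channels_cov(cov, include=eeg_names)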
+    """
+    sel = pick_channels(orig['names'], include=include, exclude=exclude)
+    res = deepcopy(orig)
+    res['dim'] = len(sel)
+    res['data'] = orig['data'][sel][:, sel]
+    res['names'] = [orig['names'][k] for k in sel]
+    res['bads'] = [name for name in orig['bads'] if name in res['names']]
+    res['eig'] = None
+    res['eigvec'] = None
+    return res
diff --git a/mne/fiff/proj.py b/mne/fiff/proj.py
new file mode 100644
index 0000000..98a1ef4
--- /dev/null
+++ b/mne/fiff/proj.py
@@ -0,0 +1,645 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+from math import sqrt
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from .tree import dir_tree_find
+from .constants import FIFF
+from .tag import find_tag
+from .pick import pick_types
+from .. import verbose
+from ..utils import deprecated
+
+
+class Projection(dict):
+    """Projection vector
+
+    A basic class providing a meaningful repr for projection vectors.
+    """
+    def __repr__(self):
+        s = "%s" % self['desc']
+        s += ", active : %s" % self['active']
+        s += ", n_channels : %s" % self['data']['ncol']
+        return "<Projection  |  %s>" % s
+
+
+class ProjMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+    def add_proj(self, projs, remove_existing=False):
+        """Add SSP projection vectors
+
+        Parameters
+        ----------
+        projs : list
+            List with projection vectors.
+        remove_existing : bool
+            Remove the projection vectors currently in the file.
+
+        Returns
+        -------
+        self : instance of Raw | Epochs | Evoked
+            The data container.
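+
+        Examples
+        --------
+        A minimal sketch (``raw`` assumed loaded; the projection file
+        name is illustrative)::
+
+            projs = mne.read_proj('ecg_proj.fif')
+            raw.add_proj(projs, remove_existing=True)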
+        """
+        if isinstance(projs, Projection):
+            projs = [projs]
+
+        # 'or' (not 'and') so that a non-list input fails fast
+        if (not isinstance(projs, list) or
+                not all([isinstance(p, Projection) for p in projs])):
+            raise ValueError('Only projs can be added. You supplied '
+                             'something else.')
+
+        # mark proj as inactive, as they have not been applied
+        projs = deactivate_proj(projs, copy=True, verbose=self.verbose)
+        if remove_existing:
+            # we cannot remove the proj if they are active
+            if any(p['active'] for p in self.info['projs']):
+                raise ValueError('Cannot remove projectors that have '
+                                 'already been applied')
+            self.info['projs'] = projs
+        else:
+            self.info['projs'].extend(projs)
+
+        return self
+
+    @deprecated(r"'apply_projector' is deprecated and will be removed in "
+                "version 0.7. Please use apply_proj instead")
+    def apply_projector(self):
+        """Apply the signal space projection (SSP) operators to the data.
+
+        Notes
+        -----
+        Once the projectors have been applied, they can no longer be
+        removed. It is usually not recommended to apply the projectors at
+        too early stages, as they are applied automatically later on
+        (e.g. when computing inverse solutions).
+        Hint: using the copy method individual projection vectors
+        can be tested without affecting the original data.
+        With evoked data, consider the following example::
+
+            projs_a = mne.read_proj('proj_a.fif')
+            projs_b = mne.read_proj('proj_b.fif')
+            # add the first, copy, apply and see ...
+            evoked.add_proj(projs_a).copy().apply_proj().plot()
+            # add the second, copy, apply and see ...
+            evoked.add_proj(projs_b).copy().apply_proj().plot()
+            # drop the first and see again
+            evoked.copy().del_proj(0).apply_proj().plot()
+            evoked.apply_proj()  # finally keep both
+
+        Returns
+        -------
+        self : instance of Raw | Epochs | Evoked
+            The instance.
+        """
+        return self.apply_proj()
+
+    def apply_proj(self):
+        """Apply the signal space projection (SSP) operators to the data.
+
+        Notes
+        -----
+        Once the projectors have been applied, they can no longer be
+        removed. It is usually not recommended to apply the projectors at
+        too early stages, as they are applied automatically later on
+        (e.g. when computing inverse solutions).
+        Hint: using the copy method individual projection vectors
+        can be tested without affecting the original data.
+        With evoked data, consider the following example::
+
+            projs_a = mne.read_proj('proj_a.fif')
+            projs_b = mne.read_proj('proj_b.fif')
+            # add the first, copy, apply and see ...
+            evoked.add_proj(projs_a).copy().apply_proj().plot()
+            # add the second, copy, apply and see ...
+            evoked.add_proj(projs_b).copy().apply_proj().plot()
+            # drop the first and see again
+            evoked.copy().del_proj(0).apply_proj().plot()
+            evoked.apply_proj()  # finally keep both
+
+        Returns
+        -------
+        self : instance of Raw | Epochs | Evoked
+            The instance.
+        """
+        if self.info['projs'] is None:
+            logger.info('No projector specified for this dataset. '
+                        'Please consider the method self.add_proj.')
+            return self
+
+        if all([p['active'] for p in self.info['projs']]):
+            logger.info('Projections have already been applied. Doing '
+                        'nothing.')
+            return self
+
+        _projector, info = setup_proj(deepcopy(self.info), activate=True,
+                                      verbose=self.verbose)
+        # let's not raise a RuntimeError here: that would make interactive
+        # plotting painful
+        if _projector is None:
+            logger.info('The projections don\'t apply to these data.'
+                        ' Doing nothing.')
+            return self
+
+        self._projector, self.info = _projector, info
+        self.proj = True  # track that proj were applied
+        # handle different data / preload attrs and create reference
+        # this also helps avoiding circular imports
+        for attr in ('get_data', '_data', 'data'):
+            data = getattr(self, attr, None)
+            if data is None:
+                continue
+            elif callable(data):
+                if self.preload:
+                    data = np.empty_like(self._data)
+                    for ii, e in enumerate(self._data):
+                        data[ii] = self._preprocess(np.dot(self._projector, e),
+                            self.verbose)
+                else:  # get data knows what to do.
+                    data = data()
+            else:
+                data = np.dot(self._projector, data)
+            break
+        logger.info('SSP projectors applied...')
+        if hasattr(self, '_data'):
+            self._data = data
+        else:
+            self.data = data
+
+        return self
+
+    def del_proj(self, idx):
+        """Remove SSP projection vector
+
+        Note: The projection vector can only be removed if it is inactive
+              (has not been applied to the data).
+
+        Parameters
+        ----------
+        idx : int
+            Index of the projector to remove.
+
+        Returns
+        -------
+        self : instance of Raw | Epochs | Evoked
+        """
+        if self.info['projs'][idx]['active']:
+            raise ValueError('Cannot remove projectors that have already '
+                             'been applied')
+
+        self.info['projs'].pop(idx)
+
+        return self
+
+
+def proj_equal(a, b):
+    """ Test if two projectors are equal """
+
+    equal = (a['active'] == b['active']
+             and a['kind'] == b['kind']
+             and a['desc'] == b['desc']
+             and a['data']['col_names'] == b['data']['col_names']
+             and a['data']['row_names'] == b['data']['row_names']
+             and a['data']['ncol'] == b['data']['ncol']
+             and a['data']['nrow'] == b['data']['nrow']
+             and np.all(a['data']['data'] == b['data']['data']))
+
+    return equal
+
+
+@verbose
+def read_proj(fid, node, verbose=None):
+    """Read spatial projections from a FIF file.
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor of the open file.
+    node : tree node
+        The node of the tree where to look.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        The list of projections.
+    """
+    projs = list()
+
+    #   Locate the projection data
+    nodes = dir_tree_find(node, FIFF.FIFFB_PROJ)
+    if len(nodes) == 0:
+        return projs
+
+    tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN)
+    if tag is not None:
+        global_nchan = int(tag.data)
+
+    items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM)
+    for i in range(len(items)):
+
+        #   Find all desired tags in one item
+        item = items[i]
+        tag = find_tag(fid, item, FIFF.FIFF_NCHAN)
+        if tag is not None:
+            nchan = int(tag.data)
+        else:
+            nchan = global_nchan
+
+        tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION)
+        if tag is not None:
+            desc = tag.data
+        else:
+            tag = find_tag(fid, item, FIFF.FIFF_NAME)
+            if tag is not None:
+                desc = tag.data
+            else:
+                raise ValueError('Projection item description missing')
+
+        # XXX : is this useful ?
+        # tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
+        # if tag is not None:
+        #     namelist = tag.data
+        # else:
+        #     raise ValueError('Projection item channel list missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)
+        if tag is not None:
+            kind = int(tag.data)
+        else:
+            raise ValueError('Projection item kind missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)
+        if tag is not None:
+            nvec = int(tag.data)
+        else:
+            raise ValueError('Number of projection vectors not specified')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
+        if tag is not None:
+            names = tag.data.split(':')
+        else:
+            raise ValueError('Projection item channel list missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)
+        if tag is not None:
+            data = tag.data
+        else:
+            raise ValueError('Projection item data missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)
+        if tag is not None:
+            active = bool(tag.data)
+        else:
+            active = False
+
+        if data.shape[1] != len(names):
+            raise ValueError('Number of channel names does not match the '
+                             'size of data matrix')
+
+        #   Use exactly the same fields in data as in a named matrix
+        one = Projection(kind=kind, active=active, desc=desc,
+                         data=dict(nrow=nvec, ncol=nchan, row_names=None,
+                                   col_names=names, data=data))
+
+        projs.append(one)
+
+    if len(projs) > 0:
+        logger.info('    Read a total of %d projection items:' % len(projs))
+        for k in range(len(projs)):
+            if projs[k]['active']:
+                misc = 'active'
+            else:
+                misc = ' idle'
+            logger.info('        %s (%d x %d) %s'
+                        % (projs[k]['desc'], projs[k]['data']['nrow'],
+                           projs[k]['data']['ncol'], misc))
+
+    return projs
+
+###############################################################################
+# Write
+
+from .write import write_int, write_float, write_string, write_name_list, \
+                   write_float_matrix, end_block, start_block
+
+
+def write_proj(fid, projs):
+    """Write a projection operator to a file.
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor of the open file.
+    projs : list
+        The list of projection vectors to write.
+    """
+    start_block(fid, FIFF.FIFFB_PROJ)
+
+    for proj in projs:
+        start_block(fid, FIFF.FIFFB_PROJ_ITEM)
+        write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])
+        write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
+                        proj['data']['col_names'])
+        write_string(fid, FIFF.FIFF_NAME, proj['desc'])
+        write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])
+        if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:
+            write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0)
+
+        write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow'])
+        write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active'])
+        write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS,
+                           proj['data']['data'])
+        end_block(fid, FIFF.FIFFB_PROJ_ITEM)
+
+    end_block(fid, FIFF.FIFFB_PROJ)
+
+
+###############################################################################
+# Utils
+
+def make_projector(projs, ch_names, bads=[], include_active=True):
+    """Create an SSP operator from SSP projection vectors
+
+    Parameters
+    ----------
+    projs : list
+        List of projection vectors.
+    ch_names : list of strings
+        List of channels to include in the projection matrix.
+    bads : list of strings
+        Some bad channels to exclude. If bad channels were marked
+        in the raw file when projs were calculated using mne-python,
+        they should not need to be included here as they will
+        have been automatically omitted from the projectors.
+    include_active : bool
+        Also include projectors that are already active.
+
+    Returns
+    -------
+    proj : array of shape [n_channels, n_channels]
+        The projection operator to apply to the data.
+    nproj : int
+        How many items in the projector.
+    U : array
+        The orthogonal basis of the projection vectors (optional).
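+
+    Examples
+    --------
+    A typical call (``info`` assumed to come from ``read_meas_info`` and
+    ``data`` to be an [n_channels, n_times] array)::
+
+        proj, nproj, U = make_projector(info['projs'], info['ch_names'],
+                                        bads=info['bads'])
+        data_clean = np.dot(proj, data)  # apply the SSP operator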
+    """
+    nchan = len(ch_names)
+    if nchan == 0:
+        raise ValueError('No channel names specified')
+
+    default_return = (np.eye(nchan, nchan), 0, [])
+
+    #   Check trivial cases first
+    if projs is None:
+        return default_return
+
+    nvec = 0
+    nproj = 0
+    for p in projs:
+        if not p['active'] or include_active:
+            nproj += 1
+            nvec += p['data']['nrow']
+
+    if nproj == 0:
+        return default_return
+
+    #   Pick the appropriate entries
+    vecs = np.zeros((nchan, nvec))
+    nvec = 0
+    nonzero = 0
+    for k, p in enumerate(projs):
+        if not p['active'] or include_active:
+            if (len(p['data']['col_names']) !=
+                    len(np.unique(p['data']['col_names']))):
+                raise ValueError('Channel name list in projection item %d'
+                                 ' contains duplicate items' % k)
+
+            # Get the two selection vectors to pick correct elements from
+            # the projection vectors omitting bad channels
+            sel = []
+            vecsel = []
+            for c, name in enumerate(ch_names):
+                if name in p['data']['col_names'] and name not in bads:
+                    sel.append(c)
+                    vecsel.append(p['data']['col_names'].index(name))
+
+            # If there is something to pick, pick it
+            if len(sel) > 0:
+                for v in range(p['data']['nrow']):
+                    vecs[sel, nvec + v] = p['data']['data'][v, vecsel].T
+
+            # Rescale for better detection of small singular values
+            for v in range(p['data']['nrow']):
+                psize = sqrt(np.sum(vecs[:, nvec + v] * vecs[:, nvec + v]))
+                if psize > 0:
+                    vecs[:, nvec + v] /= psize
+                    nonzero += 1
+
+            nvec += p['data']['nrow']
+
+    #   Check whether all of the vectors are exactly zero
+    if nonzero == 0:
+        return default_return
+
+    # Reorthogonalize the vectors
+    U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)
+
+    # Throw away the linearly dependent vectors (small singular values)
+    nproj = np.sum((S / S[0]) > 1e-2)
+    U = U[:, :nproj]
+
+    # Here is the celebrated result: P = I - U * U.T removes the span of U
+    proj = np.eye(nchan, nchan) - np.dot(U, U.T)
+
+    return proj, nproj, U
+
+
+def make_projector_info(info, include_active=True):
+    """Make an SSP operator using the measurement info
+
+    Calls make_projector on good channels.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    include_active : bool
+        Also include projectors that are already active.
+
+    Returns
+    -------
+    proj : array of shape [n_channels, n_channels]
+        The projection operator to apply to the data.
+    nproj : int
+        How many items in the projector.
+    """
+    proj, nproj, _ = make_projector(info['projs'], info['ch_names'],
+                                    info['bads'], include_active)
+    return proj, nproj
+
+
+@verbose
+def activate_proj(projs, copy=True, verbose=None):
+    """Set all projections to active
+
+    Useful before passing them to make_projector.
+
+    Parameters
+    ----------
+    projs : list
+        The projectors.
+    copy : bool
+        Modify projs in place or operate on a copy.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        The projectors.
+    """
+    if copy:
+        projs = deepcopy(projs)
+
+    #   Activate the projection items
+    for proj in projs:
+        proj['active'] = True
+
+    logger.info('%d projection items activated' % len(projs))
+
+    return projs
+
+
+@verbose
+def deactivate_proj(projs, copy=True, verbose=None):
+    """Set all projections to inactive
+
+    Useful before saving raw data without projectors applied.
+
+    Parameters
+    ----------
+    projs : list
+        The projectors.
+    copy : bool
+        Modify projs in place or operate on a copy.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        The projectors.
+    """
+    if copy:
+        projs = deepcopy(projs)
+
+    #   Deactivate the projection items
+    for proj in projs:
+        proj['active'] = False
+
+    logger.info('%d projection items deactivated' % len(projs))
+
+    return projs
+
+
+@verbose
+def make_eeg_average_ref_proj(info, activate=True, verbose=None):
+    """Create an EEG average reference SSP projection vector
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    activate : bool
+        If True projections are activated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eeg_proj : instance of Projection
+        The SSP/PCA projector.
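+
+    Examples
+    --------
+    A minimal sketch (``raw`` is assumed to be a loaded Raw instance)::
+
+        eeg_proj = make_eeg_average_ref_proj(raw.info)
+        raw.add_proj([eeg_proj])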
+    """
+    logger.info("Adding average EEG reference projection.")
+    eeg_sel = pick_types(info, meg=False, eeg=True, exclude='bads')
+    ch_names = info['ch_names']
+    eeg_names = [ch_names[k] for k in eeg_sel]
+    n_eeg = len(eeg_sel)
+    if n_eeg == 0:
+        raise ValueError('Cannot create EEG average reference projector '
+                         '(no EEG data found)')
+    vec = np.ones((1, n_eeg)) / n_eeg
+    eeg_proj_data = dict(col_names=eeg_names, row_names=None,
+                         data=vec, nrow=1, ncol=n_eeg)
+    eeg_proj = Projection(active=activate, data=eeg_proj_data,
+                          desc='Average EEG reference',
+                          kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF)
+    return eeg_proj
+
+
+def _has_eeg_average_ref_proj(projs):
+    """Determine if a list of projectors has an average EEG ref"""
+    for proj in projs:
+        if proj['desc'] == 'Average EEG reference' or \
+                proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF:
+            return True
+    return False
+
+
+@verbose
+def setup_proj(info, add_eeg_ref=True, activate=True, verbose=None):
+    """Set up projection for Raw and Epochs
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    activate : bool
+        If True projections are activated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projector : array of shape [n_channels, n_channels]
+        The projection operator to apply to the data.
+    info : dict
+        The modified measurement info (Warning: info is modified in place).
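+
+    Examples
+    --------
+    A typical call (``raw`` assumed loaded; note that ``raw.info`` is
+    modified in place)::
+
+        projector, info = setup_proj(raw.info, add_eeg_ref=True,
+                                     activate=False)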
+    """
+    # Add an average EEG reference projector if necessary
+    eeg_sel = pick_types(info, meg=False, eeg=True, exclude='bads')
+    if (len(eeg_sel) > 0 and add_eeg_ref
+            and not _has_eeg_average_ref_proj(info['projs'])):
+        eeg_proj = make_eeg_average_ref_proj(info, activate=activate)
+        info['projs'].append(eeg_proj)
+
+    #   Create the projector
+    projector, nproj = make_projector_info(info)
+    if nproj == 0:
+        if verbose:
+            logger.info('The projection vectors do not apply to these '
+                        'channels')
+        projector = None
+    else:
+        logger.info('Created an SSP operator (subspace dimension = %d)'
+                    % nproj)
+
+    #   The projection items have been activated
+    if activate:
+        info['projs'] = activate_proj(info['projs'], copy=False)
+
+    return projector, info
diff --git a/mne/fiff/raw.py b/mne/fiff/raw.py
new file mode 100644
index 0000000..e42bcb6
--- /dev/null
+++ b/mne/fiff/raw.py
@@ -0,0 +1,1890 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from math import floor, ceil
+import copy
+from copy import deepcopy
+import warnings
+import os
+import os.path as op
+
+import numpy as np
+from scipy.signal import hilbert
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from .constants import FIFF
+from .open import fiff_open
+from .meas_info import read_meas_info, write_meas_info
+from .tree import dir_tree_find
+from .tag import read_tag
+from .pick import pick_types, channel_type
+from .proj import setup_proj, activate_proj, proj_equal, ProjMixin
+from .compensator import get_current_comp, make_compensator
+
+from ..filter import low_pass_filter, high_pass_filter, band_pass_filter, \
+                     notch_filter, band_stop_filter, resample
+from ..parallel import parallel_func
+from ..utils import deprecated, _check_fname, estimate_rank, \
+                    _check_pandas_installed
+from ..viz import plot_raw, _mutable_defaults
+from .. import verbose
+
+
+class Raw(ProjMixin):
+    """Raw data
+
+    Parameters
+    ----------
+    fnames : list, or string
+        A list of the raw files to treat as a Raw instance, or a single
+        raw file.
+    allow_maxshield : bool, (default False)
+        If True, allow loading of data that has been processed with
+        Maxshield. Maxshield-processed data should generally not be loaded
+        directly, but should be processed using SSS first.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    proj : bool
+        Apply the signal space projection (SSP) operators present in
+        the file to the data. Note: Once the projectors have been
+        applied, they can no longer be removed. It is usually not
+        recommended to apply the projectors at this point as they are
+        applied automatically later on (e.g. when computing inverse
+        solutions).
+    compensation : None | int
+        If None the compensation in the data is not modified.
+        If set to n, e.g. 3, apply gradient compensation of grade n as
+        for CTF systems.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    `ch_names` : list of string
+        List of channels' names.
+    `n_times` : int
+        Total number of time points in the raw file.
+    verbose : bool, str, int, or None
+        See above.
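+
+    Examples
+    --------
+    A minimal sketch (the file name is illustrative)::
+
+        raw = Raw('sample_raw.fif', preload=True)
+        data, times = raw[:, :1000]  # first 1000 samples, all channels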
+    """
+    @verbose
+    def __init__(self, fnames, allow_maxshield=False, preload=False,
+                 proj=False, proj_active=None, compensation=None,
+                 verbose=None):
+
+        if proj_active is not None:
+            warnings.warn('proj_active param in Raw is deprecated and will be'
+                          ' removed in version 0.7. Please use proj instead.')
+            proj = proj_active
+
+        if not isinstance(fnames, list):
+            fnames = [op.abspath(fnames)] if not op.isabs(fnames) else [fnames]
+        else:
+            fnames = [op.abspath(f) if not op.isabs(f) else f for f in fnames]
+
+        raws = [self._read_raw_file(fname, allow_maxshield, preload,
+                                    compensation) for fname in fnames]
+
+        _check_raw_compatibility(raws)
+
+        # combine information from each raw file to construct self
+        self.first_samp = raws[0].first_samp  # meta first sample
+        self._first_samps = np.array([r.first_samp for r in raws])
+        self._last_samps = np.array([r.last_samp for r in raws])
+        self._raw_lengths = np.array([r.n_times for r in raws])
+        self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
+        self.cals = raws[0].cals
+        self.rawdirs = [r.rawdir for r in raws]
+        self.comp = copy.deepcopy(raws[0].comp)
+        self.fids = [r.fid for r in raws]
+        self.info = copy.deepcopy(raws[0].info)
+        self.verbose = verbose
+        self.info['filenames'] = fnames
+        self.orig_format = raws[0].orig_format
+        self.proj = False
+
+        if preload:
+            self._preload_data(preload)
+        else:
+            self._preloaded = False
+
+        self._projector = None
+        # setup the SSP projector
+        self.proj = proj
+        if proj:
+            self.apply_proj()
+
+    def __del__(self):
+        # remove file for memmap
+        if hasattr(self, '_data') and hasattr(self._data, 'filename'):
+            # First, close the file out; happens automatically on del
+            filename = self._data.filename
+            del self._data
+            # Now file can be removed
+            os.remove(filename)
+
+    def __enter__(self):
+        """ Entering with block """
+        return self
+
+    def __exit__(self, exception_type, exception_val, trace):
+        """ Exiting with block """
+        try:
+            self.close()
+        except Exception:
+            return exception_type, exception_val, trace
+
+    def _preload_data(self, preload):
+        """This function actually preloads the data"""
+        if isinstance(preload, basestring):
+            # we will use a memmap: preload is a filename
+            data_buffer = preload
+        else:
+            data_buffer = None
+
+        self._data, self._times = self._read_segment(data_buffer=data_buffer)
+        self._preloaded = True
+        # close files once data are preloaded
+        self.close()
+
+    @verbose
+    def _read_raw_file(self, fname, allow_maxshield, preload, compensation,
+                       verbose=None):
+        """Read in header information from a raw file"""
+        logger.info('Opening raw data file %s...' % fname)
+
+        #   Read in the whole file if preload is on and .fif.gz (saves time)
+        ext = os.path.splitext(fname)[1].lower()
+        whole_file = preload if '.gz' in ext else False
+        fid, tree, _ = fiff_open(fname, preload=whole_file)
+
+        #   Read the measurement info
+        info, meas = read_meas_info(fid, tree)
+
+        #   Locate the data of interest
+        raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
+        if len(raw_node) == 0:
+            raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
+            if len(raw_node) == 0 and allow_maxshield:
+                raw_node = dir_tree_find(meas, FIFF.FIFFB_SMSH_RAW_DATA)
+            if len(raw_node) == 0:
+                raise ValueError('No raw data in %s' % fname)
+
+        if len(raw_node) == 1:
+            raw_node = raw_node[0]
+
+        #   Set up the output structure
+        info['filename'] = fname
+
+        #   Process the directory
+        directory = raw_node['directory']
+        nent = raw_node['nent']
+        nchan = int(info['nchan'])
+        first = 0
+        first_samp = 0
+        first_skip = 0
+
+        #   Get first sample tag if it is there
+        if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
+            tag = read_tag(fid, directory[first].pos)
+            first_samp = int(tag.data)
+            first += 1
+
+        #   Omit initial skip
+        if directory[first].kind == FIFF.FIFF_DATA_SKIP:
+            # This first skip can be applied only after we know the buffer size
+            tag = read_tag(fid, directory[first].pos)
+            first_skip = int(tag.data)
+            first += 1
+
+        #  Get first sample tag if it is there (it may follow the skip)
+        if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
+            tag = read_tag(fid, directory[first].pos)
+            first_samp += int(tag.data)
+            first += 1
+
+        raw = _RawShell()
+        raw.first_samp = first_samp
+
+        #   Go through the remaining tags in the directory
+        rawdir = list()
+        nskip = 0
+        orig_format = None
+        #   Bytes per value and original format for each known buffer type
+        bytes_per_value = {FIFF.FIFFT_DAU_PACK16: 2,
+                           FIFF.FIFFT_SHORT: 2,
+                           FIFF.FIFFT_FLOAT: 4,
+                           FIFF.FIFFT_DOUBLE: 8,
+                           FIFF.FIFFT_INT: 4,
+                           FIFF.FIFFT_COMPLEX_FLOAT: 8,
+                           FIFF.FIFFT_COMPLEX_DOUBLE: 16}
+        format_for_type = {FIFF.FIFFT_DAU_PACK16: 'short',
+                           FIFF.FIFFT_SHORT: 'short',
+                           FIFF.FIFFT_FLOAT: 'single',
+                           FIFF.FIFFT_DOUBLE: 'double',
+                           FIFF.FIFFT_INT: 'int',
+                           FIFF.FIFFT_COMPLEX_FLOAT: 'single',
+                           FIFF.FIFFT_COMPLEX_DOUBLE: 'double'}
+        for k in range(first, nent):
+            ent = directory[k]
+            if ent.kind == FIFF.FIFF_DATA_SKIP:
+                tag = read_tag(fid, ent.pos)
+                nskip = int(tag.data)
+            elif ent.kind == FIFF.FIFF_DATA_BUFFER:
+                #   Figure out the number of samples in this buffer
+                if ent.type not in bytes_per_value:
+                    fid.close()
+                    raise ValueError('Cannot handle data buffers of type %d' %
+                                     ent.type)
+                nsamp = ent.size / (bytes_per_value[ent.type] * nchan)
+                if orig_format is None:
+                    orig_format = format_for_type[ent.type]
+
+                #  Do we have an initial skip pending?
+                if first_skip > 0:
+                    first_samp += nsamp * first_skip
+                    raw.first_samp = first_samp
+                    first_skip = 0
+
+                #  Do we have a skip pending?
+                if nskip > 0:
+                    rawdir.append(dict(ent=None, first=first_samp,
+                                       last=first_samp + nskip * nsamp - 1,
+                                       nsamp=nskip * nsamp))
+                    first_samp += nskip * nsamp
+                    nskip = 0
+
+                #  Add a data buffer
+                rawdir.append(dict(ent=ent, first=first_samp,
+                                   last=first_samp + nsamp - 1,
+                                   nsamp=nsamp))
+                first_samp += nsamp
+
+        raw.last_samp = first_samp - 1
+        raw.orig_format = orig_format
+
+        #   Add the calibration factors
+        cals = np.zeros(info['nchan'])
+        for k in range(info['nchan']):
+            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
+
+        raw.cals = cals
+        raw.rawdir = rawdir
+        raw.comp = None
+
+        #   Set up the CTF compensator
+        current_comp = get_current_comp(info)
+        if current_comp is not None:
+            logger.info('Current compensation grade : %d' % current_comp)
+
+        if compensation is not None:
+            raw.comp = make_compensator(info, current_comp, compensation)
+            if raw.comp is not None:
+                logger.info('Appropriate compensator added to change to '
+                            'grade %d.' % (compensation))
+
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                    raw.first_samp, raw.last_samp,
+                    float(raw.first_samp) / info['sfreq'],
+                    float(raw.last_samp) / info['sfreq']))
+
+        # store the original buffer size
+        info['buffer_size_sec'] = (np.median([r['nsamp'] for r in rawdir])
+                                   / info['sfreq'])
+
+        raw.fid = fid
+        raw.info = info
+        raw.verbose = verbose
+
+        logger.info('Ready.')
+
+        return raw
+
+    def _parse_get_set_params(self, item):
+        # make sure item is a tuple
+        if not isinstance(item, tuple):  # only channel selection passed
+            item = (item, slice(None, None, None))
+
+        if len(item) != 2:  # should be channels and time instants
+            raise RuntimeError("Unable to access raw data (need both channels "
+                               "and time)")
+
+        if isinstance(item[0], slice):
+            start = item[0].start if item[0].start is not None else 0
+            nchan = self.info['nchan']
+            stop = item[0].stop if item[0].stop is not None else nchan
+            step = item[0].step if item[0].step is not None else 1
+            sel = range(start, stop, step)
+        else:
+            sel = item[0]
+
+        if isinstance(item[1], slice):
+            time_slice = item[1]
+            start, stop, step = time_slice.start, time_slice.stop, \
+                                time_slice.step
+        elif isinstance(item[1], int):
+            start, stop, step = item[1], item[1] + 1, 1
+        else:
+            raise ValueError('Must pass int or slice to __getitem__')
+
+        if start is None:
+            start = 0
+        if (step is not None) and (step != 1):
+            raise ValueError('step needs to be 1 : %d given' % step)
+
+        if isinstance(sel, int):
+            sel = np.array([sel])
+
+        if sel is not None and len(sel) == 0:
+            raise ValueError("Empty channel list")
+
+        return sel, start, stop
+
+    def __getitem__(self, item):
+        """getting raw data content with python slicing"""
+        sel, start, stop = self._parse_get_set_params(item)
+        if self._preloaded:
+            data, times = self._data[sel, start:stop], self._times[start:stop]
+        else:
+            data, times = self._read_segment(start=start, stop=stop, sel=sel,
+                                            projector=self._projector,
+                                            verbose=self.verbose)
+        return data, times
+
+    def __setitem__(self, item, value):
+        """setting raw data content with python slicing"""
+        if not self._preloaded:
+            raise RuntimeError('Modifying data of Raw is only supported '
+                               'when preloading is used. Use preload=True '
+                               '(or string) in the constructor.')
+        sel, start, stop = self._parse_get_set_params(item)
+        # set the data
+        self._data[sel, start:stop] = value
+
+    @verbose
+    def apply_function(self, fun, picks, dtype, n_jobs, verbose=None, *args,
+                       **kwargs):
+        """ Apply a function to a subset of channels.
+
+        The function "fun" is applied to the channels defined in "picks". The
+        data of the Raw object is modified inplace. If the function returns
+        a different data type (e.g. numpy.complex) it must be specified using
+        the dtype parameter, which causes the data type used for representing
+        the raw data to change.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
+              additional time points need to be temporarily stored in memory.
+
+        Note: If the data type changes (dtype != None), more memory is
+              required since the original and the converted data need to be
+              stored in memory.
+
+        Parameters
+        ----------
+        fun : function
+            A function to be applied to the channels. The first argument of
+            fun has to be a time series (numpy.ndarray). The function must
+            return a numpy.ndarray with the same size as the input.
+        picks : list of int
+            Indices of channels to apply the function to.
+        dtype : numpy.dtype
+            Data type to use for raw data after applying the function. If None
+            the data type is not modified.
+        n_jobs: int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+        *args :
+            Additional positional arguments to pass to fun (the first
+            positional argument of fun is the time series of a channel).
+        **kwargs :
+            Keyword arguments to pass to fun.
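+
+        Examples
+        --------
+        A minimal sketch (assumes ``raw`` was created with ``preload=True``;
+        rectifying with ``np.abs`` is an arbitrary choice)::
+
+            picks = pick_types(raw.info, meg=True, eeg=False, exclude=[])
+            raw.apply_function(np.abs, picks, None, 1)  # dtype=None, n_jobs=1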
+        """
+        if not self._preloaded:
+            raise RuntimeError('Raw data needs to be preloaded. Use '
+                               'preload=True (or string) in the constructor.')
+
+        if not callable(fun):
+            raise ValueError('fun needs to be a function')
+
+        data_in = self._data
+        if dtype is not None and dtype != self._data.dtype:
+            self._data = self._data.astype(dtype)
+
+        if n_jobs == 1:
+            # modify data inplace to save memory
+            for idx in picks:
+                self._data[idx, :] = fun(data_in[idx, :], *args, **kwargs)
+        else:
+            # use parallel function
+            parallel, p_fun, _ = parallel_func(fun, n_jobs)
+            data_picks_new = parallel(p_fun(data_in[p], *args, **kwargs)
+                                      for p in picks)
+            for pp, p in enumerate(picks):
+                self._data[p, :] = data_picks_new[pp]
+
+    @verbose
+    def apply_hilbert(self, picks, envelope=False, n_jobs=1, verbose=None):
+        """ Compute analytic signal or envelope for a subset of channels.
+
+        If envelope=False, the analytic signal for the channels defined in
+        "picks" is computed and the data of the Raw object is converted to
+        a complex representation (the analytic signal is complex valued).
+
+        If envelope=True, the absolute value of the analytic signal for the
+        channels defined in "picks" is computed, resulting in the envelope
+        signal.
+
+        Note: DO NOT use envelope=True if you intend to compute an inverse
+              solution from the raw data. If you want to compute the
+              envelope in source space, use envelope=False and compute the
+              envelope after the inverse solution has been obtained.
+
+        Note: If envelope=False, more memory is required since the original
+              raw data as well as the analytic signal must temporarily be
+              stored in memory.
+
+        Note: If n_jobs > 1 and envelope=True, more memory is required as
+              "len(picks) * n_times" additional time points need to be
+              temporarily stored in memory.
+
+        Parameters
+        ----------
+        picks : list of int
+            Indices of channels to apply the function to.
+        envelope : bool (default: False)
+            Compute the envelope signal of each channel.
+        n_jobs: int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        The analytic signal "x_a(t)" of "x(t)" is::
+
+            x_a = F^{-1}(F(x) 2U) = x + i y
+
+        where "F" is the Fourier transform, "U" the unit step function,
+        and "y" the Hilbert transform of "x". One usage of the analytic
+        signal is the computation of the envelope signal, which is given by
+        "e(t) = abs(x_a(t))". Due to the linearity of Hilbert transform and the
+        MNE inverse solution, the enevlope in source space can be obtained
+        by computing the analytic signal in sensor space, applying the MNE
+        inverse, and computing the envelope in source space.
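+
+        Examples
+        --------
+        A minimal sketch (assumes ``raw`` was created with ``preload=True``)::
+
+            picks = pick_types(raw.info, meg=True, eeg=False, exclude=[])
+            raw.apply_hilbert(picks, envelope=True)  # in-place envelope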
+        """
+        if envelope:
+            self.apply_function(_envelope, picks, None, n_jobs)
+        else:
+            self.apply_function(hilbert, picks, np.complex64, n_jobs)
+
+    @verbose
+    def filter(self, l_freq, h_freq, picks=None, filter_length='10s',
+               l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
+               method='fft', iir_params=dict(order=4, ftype='butter'),
+               verbose=None):
+        """Filter a subset of channels.
+
+        Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
+        filter to the channels selected by "picks". The data of the Raw
+        object is modified inplace.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        l_freq and h_freq are the frequencies below which and above which,
+        respectively, to filter out of the data. Thus the uses are:
+            l_freq < h_freq: band-pass filter
+            l_freq > h_freq: band-stop filter
+            l_freq is not None, h_freq is None: high-pass filter
+            l_freq is None, h_freq is not None: low-pass filter
+
+        Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
+              additional time points need to be temporarily stored in memory.
+
+        Note: self.info['lowpass'] and self.info['highpass'] are only updated
+              when picks=None.
+
+        Parameters
+        ----------
+        l_freq : float | None
+            Low cut-off frequency in Hz. If None the data are only low-passed.
+        h_freq : float | None
+            High cut-off frequency in Hz. If None the data are only
+            high-passed.
+        picks : list of int | None
+            Indices of channels to filter. If None only the data (MEG/EEG)
+            channels will be filtered.
+        filter_length : str (Default: '10s') | int | None
+            Length of the filter to use. If None or "len(x) < filter_length",
+            the filter length used is len(x). Otherwise, if int, overlap-add
+            filtering with a filter of the specified length (in samples) is
+            used (faster for long signals). If str, a human-readable time in
+            units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+            to the shortest power-of-two length at least that duration.
+        l_trans_bandwidth : float
+            Width of the transition band at the low cut-off frequency in Hz.
+        h_trans_bandwidth : float
+            Width of the transition band at the high cut-off frequency in Hz.
+        n_jobs : int | str
+            Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+            is installed properly, CUDA is initialized, and method='fft'.
+        method : str
+            'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+            forward-backward filtering (via filtfilt).
+        iir_params : dict
+            Dictionary of parameters to use for IIR filtering.
+            See mne.filter.construct_iir_filter for details.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
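+
+        Examples
+        --------
+        A minimal sketch (assumes ``raw`` was created with ``preload=True``;
+        the cut-off frequencies are arbitrary)::
+
+            raw.filter(1., 40.)    # band-pass between 1 and 40 Hz
+            raw.filter(None, 50.)  # low-pass at 50 Hz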
+        """
+        if verbose is None:
+            verbose = self.verbose
+        fs = float(self.info['sfreq'])
+        if l_freq == 0:
+            l_freq = None
+        if h_freq is not None and h_freq > (fs / 2.):
+            h_freq = None
+        if not self._preloaded:
+            raise RuntimeError('Raw data needs to be preloaded to filter. Use '
+                               'preload=True (or string) in the constructor.')
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, exclude=[])
+
+            # update info if filter is applied to all data channels,
+            # and it's not a band-stop filter
+            if h_freq is not None and (l_freq is None or l_freq < h_freq) and \
+                    h_freq < self.info['lowpass']:
+                self.info['lowpass'] = h_freq
+            if l_freq is not None and (h_freq is None or l_freq < h_freq) and \
+                    l_freq > self.info['highpass']:
+                self.info['highpass'] = l_freq
+        if l_freq is None and h_freq is not None:
+            logger.info('Low-pass filtering at %0.2g Hz' % h_freq)
+            low_pass_filter(self._data, fs, h_freq,
+                            filter_length=filter_length,
+                            trans_bandwidth=l_trans_bandwidth, method=method,
+                            iir_params=iir_params, picks=picks, n_jobs=n_jobs,
+                            copy=False)
+        if l_freq is not None and h_freq is None:
+            logger.info('High-pass filtering at %0.2g Hz' % l_freq)
+            high_pass_filter(self._data, fs, l_freq,
+                             filter_length=filter_length,
+                             trans_bandwidth=h_trans_bandwidth, method=method,
+                             iir_params=iir_params, picks=picks, n_jobs=n_jobs,
+                             copy=False)
+        if l_freq is not None and h_freq is not None:
+            if l_freq < h_freq:
+                logger.info('Band-pass filtering from %0.2g - %0.2g Hz'
+                            % (l_freq, h_freq))
+                self._data = band_pass_filter(self._data, fs, l_freq, h_freq,
+                    filter_length=filter_length,
+                    l_trans_bandwidth=l_trans_bandwidth,
+                    h_trans_bandwidth=h_trans_bandwidth,
+                    method=method, iir_params=iir_params, picks=picks,
+                    n_jobs=n_jobs, copy=False)
+            else:
+                logger.info('Band-stop filtering from %0.2g - %0.2g Hz'
+                            % (h_freq, l_freq))
+                self._data = band_stop_filter(self._data, fs, h_freq, l_freq,
+                    filter_length=filter_length,
+                    l_trans_bandwidth=h_trans_bandwidth,
+                    h_trans_bandwidth=l_trans_bandwidth, method=method,
+                    iir_params=iir_params, picks=picks, n_jobs=n_jobs,
+                    copy=False)
+
+    @verbose
+    def notch_filter(self, freqs, picks=None, filter_length='10s',
+                     notch_widths=None, trans_bandwidth=1.0, n_jobs=1,
+                     method='fft', iir_params=dict(order=4, ftype='butter'),
+                     mt_bandwidth=None, p_value=0.05, verbose=None):
+        """Notch filter a subset of channels.
+
+        Applies a zero-phase notch filter to the channels selected by
+        "picks". The data of the Raw object is modified inplace.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
+              additional time points need to be temporarily stored in memory.
+
+        Parameters
+        ----------
+        freqs : float | array of float | None
+            Specific frequencies to filter out from data, e.g.,
+            np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in
+            Europe. None can only be used with the mode 'spectrum_fit',
+            where an F test is used to find sinusoidal components.
+        picks : list of int | None
+            Indices of channels to filter. If None only the data (MEG/EEG)
+            channels will be filtered.
+        filter_length : str (Default: '10s') | int | None
+            Length of the filter to use. If None or "len(x) < filter_length",
+            the filter length used is len(x). Otherwise, if int, overlap-add
+            filtering with a filter of the specified length (in samples) is
+            used (faster for long signals). If str, a human-readable time in
+            units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+            to the shortest power-of-two length at least that duration.
+        notch_widths : float | array of float | None
+            Width of each stop band (centred at each freq in freqs) in Hz.
+            If None, freqs / 200 is used.
+        trans_bandwidth : float
+            Width of the transition band in Hz.
+        n_jobs : int | str
+            Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+            is installed properly, CUDA is initialized, and method='fft'.
+        method : str
+            'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+            forward-backward filtering (via filtfilt). 'spectrum_fit' will
+            use multi-taper estimation of sinusoidal components.
+        iir_params : dict
+            Dictionary of parameters to use for IIR filtering.
+            See mne.filter.construct_iir_filter for details.
+        mt_bandwidth : float | None
+            The bandwidth of the multitaper windowing function in Hz.
+            Only used in 'spectrum_fit' mode.
+        p_value : float
+            p-value to use in F-test thresholding to determine significant
+            sinusoidal components to remove when method='spectrum_fit' and
+            freqs=None. Note that this will be Bonferroni corrected for the
+            number of frequencies, so large p-values may be justified.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        For details, see mne.filter.notch_filter.
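+
+        Examples
+        --------
+        A minimal sketch (assumes ``raw`` was created with ``preload=True``;
+        60 Hz and harmonics correspond to US power-line noise)::
+
+            raw.notch_filter(np.arange(60, 241, 60))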
+        """
+        if verbose is None:
+            verbose = self.verbose
+        fs = float(self.info['sfreq'])
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, exclude=[])
+        if not self._preloaded:
+            raise RuntimeError('Raw data needs to be preloaded to filter. Use '
+                               'preload=True (or string) in the constructor.')
+
+        self._data = notch_filter(self._data, fs, freqs,
+                                  filter_length=filter_length,
+                                  notch_widths=notch_widths,
+                                  trans_bandwidth=trans_bandwidth,
+                                  method=method, iir_params=iir_params,
+                                  mt_bandwidth=mt_bandwidth, p_value=p_value,
+                                  picks=picks, n_jobs=n_jobs, copy=False)
+
+    @verbose
+    def resample(self, sfreq, npad=100, window='boxcar',
+                 stim_picks=None, n_jobs=1, verbose=None):
+        """Resample data channels.
+
+        Resamples all channels. The data of the Raw object is modified inplace.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        WARNING: The intended purpose of this function is primarily to speed
+        up computations (e.g., projection calculation) when precise timing
+        of events is not required, as downsampling raw data effectively
+        jitters trigger timings. For this reason it is generally recommended
+        to epoch the data first and then downsample, rather than epoching
+        already-downsampled data.
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use.
+        npad : int
+            Amount to pad the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
+        stim_picks : array of int | None
+            Stim channels. These channels are simply subsampled or
+            supersampled (without applying any filtering). This reduces
+            resampling artifacts in stim channels, but may lead to missing
+            triggers. If None, stim channels are automatically chosen using
+            mne.fiff.pick_types(raw.info, meg=False, stim=True, exclude=[]).
+        n_jobs : int | str
+            Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+            is installed properly and CUDA is initialized.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        For some data, it may be more accurate to use npad=0 to reduce
+        artifacts. This is dataset dependent -- check your data!
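+
+        Examples
+        --------
+        A minimal sketch (assumes ``raw`` was created with ``preload=True``;
+        the target rate of 100 Hz is arbitrary)::
+
+            raw.resample(100.)  # in-place downsampling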
+        """
+        if not self._preloaded:
+            raise RuntimeError('Can only resample preloaded data')
+
+        o_sfreq = self.info['sfreq']
+        offsets = np.concatenate(([0], np.cumsum(self._raw_lengths)))
+        new_data = list()
+        # set up stim channel processing
+        if stim_picks is None:
+            stim_picks = pick_types(self.info, meg=False, stim=True,
+                                    exclude=[])
+        stim_picks = np.asanyarray(stim_picks)
+        ratio = sfreq / float(o_sfreq)
+        for ri in range(len(self._raw_lengths)):
+            data_chunk = self._data[:, offsets[ri]:offsets[ri + 1]]
+            new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
+                                     n_jobs=n_jobs))
+            new_ntimes = new_data[ri].shape[1]
+
+            # Now deal with the stim channels. In empirical testing, it was
+            # faster to resample all channels (above) and then replace the
+            # stim channels than it was to only resample the proper subset
+            # of channels and then use np.insert() to restore the stims
+
+            # figure out which points in old data to subsample
+            stim_inds = np.floor(np.arange(new_ntimes) / ratio).astype(int)
+            for sp in stim_picks:
+                new_data[ri][sp] = data_chunk[sp][:, stim_inds]
+
+            self._first_samps[ri] = int(self._first_samps[ri] * ratio)
+            self._last_samps[ri] = self._first_samps[ri] + new_ntimes - 1
+            self._raw_lengths[ri] = new_ntimes
+
+        # adjust affected variables
+        self._data = np.concatenate(new_data, axis=1)
+        self.first_samp = self._first_samps[0]
+        self.last_samp = self.first_samp + self._data.shape[1] - 1
+        self.info['sfreq'] = sfreq
+
+    def crop(self, tmin=0.0, tmax=None, copy=True):
+        """Crop raw data file.
+
+        Limit the data from the raw file to go between specific times. Note
+        that the new tmin is assumed to be t=0 for all subsequently called
+        functions (e.g., time_as_index, or Epochs). New first_samp and
+        last_samp are set accordingly. Data are modified in-place when
+        called with copy=False.
+
+        Parameters
+        ----------
+        tmin : float
+            New start time (must be >= 0).
+        tmax : float | None
+            New end time of the data (cannot exceed data duration).
+        copy : bool
+            If False Raw is cropped in place.
+
+        Returns
+        -------
+        raw : instance of Raw
+            The cropped raw object.
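+
+        Examples
+        --------
+        A minimal sketch (the time range is arbitrary)::
+
+            raw_segment = raw.crop(tmin=10., tmax=20.)  # copy of 10-20 s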
+        """
+        raw = self.copy() if copy is True else self
+        max_time = (raw.n_times - 1) / raw.info['sfreq']
+        if tmax is None:
+            tmax = max_time
+
+        if tmin > tmax:
+            raise ValueError('tmin must be less than tmax')
+        if tmin < 0.0:
+            raise ValueError('tmin must be >= 0')
+        if tmax > max_time:
+            raise ValueError('tmax must be less than or equal to the max raw '
+                             'time (%0.4f sec)' % max_time)
+
+        smin = raw.time_as_index(tmin)[0]
+        smax = raw.time_as_index(tmax)[0]
+        cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
+                                     dtype='int')))
+        cumul_lens = np.cumsum(cumul_lens)
+        keepers = np.logical_and(np.less(smin, cumul_lens[1:]),
+                                 np.greater_equal(smax, cumul_lens[:-1]))
+        keepers = np.where(keepers)[0]
+        raw._first_samps = np.atleast_1d(raw._first_samps[keepers])
+        # Adjust first_samp of first used file!
+        raw._first_samps[0] += smin - cumul_lens[keepers[0]]
+        raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
+        raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
+        raw._raw_lengths = raw._last_samps - raw._first_samps + 1
+        raw.fids = [f for fi, f in enumerate(raw.fids) if fi in keepers]
+        raw.rawdirs = [r for ri, r in enumerate(raw.rawdirs)
+                       if ri in keepers]
+        if raw._preloaded:
+            raw._data = raw._data[:, smin:smax + 1]
+        raw.first_samp = raw._first_samps[0]
+        raw.last_samp = raw.first_samp + (smax - smin)
+        return raw
+
+    @verbose
+    def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
+             drop_small_buffer=False, proj=False, format='single',
+             overwrite=False, verbose=None, proj_active=None):
+        """Save raw data to file
+
+        Parameters
+        ----------
+        fname : string
+            File name of the new dataset. This has to be a new filename
+            unless data have been preloaded.
+        picks : list of int
+            Indices of channels to include.
+        tmin : float
+            Time in seconds of first sample to save.
+        tmax : float
+            Time in seconds of last sample to save.
+        buffer_size_sec : float | None
+            Size of data chunks in seconds. If None, the buffer size of
+            the original file is used.
+        drop_small_buffer : bool
+            Whether to drop the last buffer. This is required by maxfilter
+            (SSS), which only accepts raw files with buffers of the same size.
+        proj : bool
+            If True the data is saved with the projections applied (active).
+            Note: If apply_proj() was used to apply the projections,
+            the projections will be active even if proj is False.
+        format : str
+            Format to use to save raw data. Valid options are 'double',
+            'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
+            16-bit integers, respectively. It is STRONGLY recommended to use
+            'single', as it is backward-compatible and the standard for
+            maintaining precision. Note that using 'short' or 'int' may result
+            in loss of precision, complex data cannot be saved as 'short',
+            and neither complex data types nor real data stored as 'double'
+            can be loaded with the MNE command-line tools. See raw.orig_format
+            to determine the format the original data were stored in.
+        overwrite : bool
+            If True, the destination file (if it exists) will be overwritten.
+            If False (default), an error will be raised if the file exists.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        If Raw is a concatenation of several raw files, *be warned* that only
+        the measurement information from the first raw file is stored. This
+        likely means that certain operations with external tools may not
+        work properly on a saved concatenated file (e.g., probably some
+        or all forms of SSS). It is recommended not to concatenate and
+        then save raw files for this reason.
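+
+        Examples
+        --------
+        A minimal sketch (the output file name is hypothetical)::
+
+            raw.save('processed_raw.fif', overwrite=True)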
+        """
+        if proj_active is not None:
+            warnings.warn('proj_active param in Raw is deprecated and will be'
+                          ' removed in version 0.7. Please use proj instead.')
+            proj = proj_active
+
+        fname = op.abspath(fname)
+        if not self._preloaded and fname in self.info['filenames']:
+            raise ValueError('You cannot save data to the same file.'
+                             ' Please use a different filename.')
+
+        if self._preloaded:
+            if np.iscomplexobj(self._data):
+                warnings.warn('Saving raw file with complex data. Loading '
+                              'with command-line MNE tools will not work.')
+
+        type_dict = dict(short=FIFF.FIFFT_DAU_PACK16,
+                         int=FIFF.FIFFT_INT,
+                         single=FIFF.FIFFT_FLOAT,
+                         double=FIFF.FIFFT_DOUBLE)
+        if format not in type_dict:
+            raise ValueError('format must be "short", "int", "single", '
+                             'or "double"')
+        reset_dict = dict(short=False, int=False, single=True, double=True)
+
+        data_test = self[0, 0][0]
+        if format == 'short' and np.iscomplexobj(data_test):
+            raise ValueError('Complex data must be saved as "single" or '
+                             '"double", not "short"')
+
+        # check for file existence
+        _check_fname(fname, overwrite)
+
+        if proj:
+            info = copy.deepcopy(self.info)
+            projector, info = setup_proj(info)
+            activate_proj(info['projs'], copy=False)
+        else:
+            info = self.info
+            projector = None
+
+        outfid, cals = start_writing_raw(fname, info, picks, type_dict[format],
+                                         reset_range=reset_dict[format])
+        #
+        #   Set up the reading parameters
+        #
+
+        #   Convert to samples
+        start = int(floor(tmin * self.info['sfreq']))
+        first_samp = self.first_samp + start
+
+        if tmax is None:
+            stop = self.last_samp + 1 - self.first_samp
+        else:
+            stop = int(floor(tmax * self.info['sfreq']))
+
+        if buffer_size_sec is None:
+            if 'buffer_size_sec' in self.info:
+                buffer_size_sec = self.info['buffer_size_sec']
+            else:
+                buffer_size_sec = 10.0
+        buffer_size = int(ceil(buffer_size_sec * self.info['sfreq']))
+        #
+        #   Read and write all the data
+        #
+
+        # Take care of CTF compensation
+        inv_comp = None
+        if self.comp is not None:
+            inv_comp = linalg.inv(self.comp)
+
+        write_int(outfid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
+        for first in range(start, stop, buffer_size):
+            last = first + buffer_size
+            if last >= stop:
+                last = stop + 1
+
+            if picks is None:
+                data, times = self[:, first:last]
+            else:
+                data, times = self[picks, first:last]
+
+            if projector is not None:
+                data = np.dot(projector, data)
+
+            if (drop_small_buffer and (first > start)
+                    and (len(times) < buffer_size)):
+                logger.info('Skipping data chunk due to small buffer ... '
+                            '[done]')
+                break
+            logger.info('Writing ...')
+            write_raw_buffer(outfid, data, cals, format, inv_comp)
+            logger.info('[done]')
+
+        finish_writing_raw(outfid)
+
+    def plot(raw, events=None, duration=10.0, start=0.0, n_channels=20,
+             bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
+             event_color='cyan', scalings=None, remove_dc=True, order='type',
+             show_options=False, title=None, show=True):
+        """Plot raw data
+
+        Parameters
+        ----------
+        raw : instance of Raw
+            The raw data to plot.
+        events : array | None
+            Events to show with vertical bars.
+        duration : float
+            Time window (in seconds) to plot at a time.
+        start : float
+            Initial time to show (can be changed dynamically once plotted).
+        n_channels : int
+            Number of channels to plot at once.
+        bgcolor : color object
+            Color of the background.
+        color : dict | color object | None
+            Color for the data traces. If None, defaults to:
+            `dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r', emg='k',
+                 ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k')`
+        bad_color : color object
+            Color to make bad channels.
+        event_color : color object
+            Color to use for events.
+        scalings : dict | None
+            Scale factors for the traces. If None, defaults to:
+            `dict(mag=1e-12, grad=4e-11, eeg=20e-6,
+                  eog=150e-6, ecg=5e-4, emg=1e-3,
+                  ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
+        remove_dc : bool
+            If True remove DC component when plotting data.
+        order : 'type' | 'original' | array
+            Order in which to plot data. 'type' groups by channel type,
+            'original' plots in the order of ch_names, array gives the
+            indices to use in plotting.
+        show_options : bool
+            If True, a dialog for options related to projection is shown.
+        title : str | None
+            The title of the window. If None, either the filename of the
+            raw object or '<unknown>' will be displayed as the title.
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            Raw traces.
+
+        Notes
+        -----
+        The arrow keys (up/down/left/right) can typically be used to navigate
+        between channels and time ranges, but this depends on the backend
+        matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
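+
+        Examples
+        --------
+        A minimal sketch (window length and channel count are arbitrary)::
+
+            fig = raw.plot(duration=5., n_channels=30)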
+        """
+        return plot_raw(raw, events, duration, start, n_channels, bgcolor,
+                        color, bad_color, event_color, scalings, remove_dc,
+                        order, show_options, title, show)
+
+    @deprecated('time_to_index is deprecated, please use time_as_index '
+                'instead. Will be removed in v0.7.')
+    def time_to_index(self, *args):
+        """Convert time to indices"""
+        indices = []
+        for time in args:
+            ind = int(time * self.info['sfreq'])
+            indices.append(ind)
+        return indices
+
+    def time_as_index(self, times, use_first_samp=False):
+        """Convert time to indices
+
+        Parameters
+        ----------
+        times : list-like | float | int
+            List of numbers or a number representing points in time.
+        use_first_samp : boolean
+            If True, time is treated as relative to the session onset, else
+            as relative to the recording onset.
+
+        Returns
+        -------
+        index : ndarray
+            Indices corresponding to the times supplied.
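+
+        Examples
+        --------
+        A minimal sketch (the times are arbitrary)::
+
+            idx = raw.time_as_index([0.5, 1.0])  # samples at 0.5 s and 1 s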
+        """
+        return _time_as_index(times, self.info['sfreq'], self.first_samp,
+                              use_first_samp)
+
+    def index_as_time(self, index, use_first_samp=False):
+        """Convert time to indices
+
+        Parameters
+        ----------
+        index : list-like | int
+            List of ints or int representing points in time.
+        use_first_samp : boolean
+            If True, the time returned is relative to the session onset, else
+            relative to the recording onset.
+
+        Returns
+        -------
+        times : ndarray
+            Times corresponding to the index supplied.
+        """
+        return _index_as_time(index, self.info['sfreq'], self.first_samp,
+                              use_first_samp)
+
+    def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
+                      return_singular=False):
+        """Estimate rank of the raw data
+
+        This function is meant to provide a reasonable estimate of the rank.
+        The true rank of the data depends on many factors, so use at your
+        own risk.
+
+        Parameters
+        ----------
+        tstart : float
+            Start time to use for rank estimation. Default is 0.0.
+        tstop : float | None
+            End time to use for rank estimation. Default is 30.0.
+            If None, the end time of the raw file is used.
+        tol : float
+            Tolerance for singular values to consider non-zero in
+            calculating the rank. The singular values are calculated
+            in this method such that independent data are expected to
+            have singular values around one.
+        return_singular : bool
+            If True, also return the singular values that were used
+            to determine the rank.
+
+        Returns
+        -------
+        rank : int
+            Estimated rank of the data.
+        s : array
+            If return_singular is True, the singular values that were
+            thresholded to determine the rank are also returned.
+
+        Notes
+        -----
+        If data are not pre-loaded, the appropriate data will be loaded
+        by this function (can be memory intensive).
+
+        Projectors are not taken into account unless they have been applied
+        to the data using apply_proj(), since it is not always possible
+        to tell whether or not projectors have been applied previously.
+
+        Bad channels will be excluded from calculations.
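+
+        Examples
+        --------
+        A minimal sketch (the default 30 s window is used)::
+
+            rank = raw.estimate_rank(tstart=0., tstop=30.)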
+        """
+        start = max(0, self.time_as_index(tstart)[0])
+        if tstop is None:
+            stop = self.n_times - 1
+        else:
+            stop = min(self.n_times - 1, self.time_as_index(tstop)[0])
+        tslice = slice(start, stop + 1)
+        picks = pick_types(self.info, meg=True, eeg=True, exclude='bads')
+        # ensure we don't get a view of data
+        if len(picks) == 1:
+            return 1.0, 1.0
+        # this should already be a copy, so we can overwrite it
+        data = self[picks, tslice][0]
+        return estimate_rank(data, tol, return_singular, copy=False)
+
+    @property
+    def ch_names(self):
+        return self.info['ch_names']
+
+    @property
+    def n_times(self):
+        return self.last_samp - self.first_samp + 1
+
+    def __len__(self):
+        return self.n_times
+
+    def load_bad_channels(self, bad_file=None, force=False):
+        """
+        Mark channels as bad from a text file, in the style
+        (mostly) of the C function mne_mark_bad_channels.
+
+        Parameters
+        ----------
+        bad_file : string
+            File name of the text file containing bad channels.
+            If bad_file = None, bad channels are cleared, but this
+            is more easily done directly as raw.info['bads'] = [].
+
+        force : boolean
+            Whether or not to force bad channel marking (of those
+            that exist) if channels are not found, instead of
+            raising an error.
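+
+        Examples
+        --------
+        A minimal sketch (the file name is hypothetical; the file lists one
+        channel name per line)::
+
+            raw.load_bad_channels('bad_channels.txt')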
+        """
+
+        if bad_file is not None:
+            # Check to make sure bad channels are there
+            names = frozenset(self.info['ch_names'])
+            bad_names = filter(None, open(bad_file).read().splitlines())
+            names_there = [ci for ci in bad_names if ci in names]
+            count_diff = len(bad_names) - len(names_there)
+
+            if count_diff > 0:
+                if not force:
+                    raise ValueError('Bad channels from:\n%s\n not found '
+                                     'in:\n%s' % (bad_file,
+                                                  self.info['filenames'][0]))
+                else:
+                    warnings.warn('%d bad channels from:\n%s\nnot found '
+                                  'in:\n%s' % (count_diff, bad_file,
+                                               self.info['filenames'][0]))
+            self.info['bads'] = names_there
+        else:
+            self.info['bads'] = []
+
+    def append(self, raws, preload=None):
+        """Concatenate raw instances as if they were continuous
+
+        Parameters
+        ----------
+        raws : list, or Raw instance
+            list of Raw instances to concatenate to the current instance
+            (in order), or a single raw instance to concatenate.
+
+        preload : bool, str, or None (default None)
+            Preload data into memory for data manipulation and faster indexing.
+            If True, the data will be preloaded into memory (fast, requires
+            large amount of memory). If preload is a string, preload is the
+            file name of a memory-mapped file which is used to store the data
+            on the hard drive (slower, requires less memory). If preload is
+            None, preload=True or False is inferred using the preload status
+            of the raw files passed in.
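+
+        Examples
+        --------
+        A minimal sketch (the file name is hypothetical)::
+
+            raw.append(Raw('run2_raw.fif'))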
+        """
+        if not isinstance(raws, list):
+            raws = [raws]
+
+        # make sure the raws are compatible
+        all_raws = [self]
+        all_raws += raws
+        _check_raw_compatibility(all_raws)
+
+        # deal with preloading data first (while files are separate)
+        all_preloaded = self._preloaded and all(r._preloaded for r in raws)
+        if preload is None:
+            if all_preloaded:
+                preload = True
+            else:
+                preload = False
+
+        if preload is False:
+            if self._preloaded:
+                self._data = None
+                self._times = None
+            self._preloaded = False
+        else:
+            # do the concatenation ourselves since preload might be a string
+            nchan = self.info['nchan']
+            c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])
+            nsamp = c_ns[-1]
+
+            if not self._preloaded:
+                this_data = self._read_segment()[0]
+            else:
+                this_data = self._data
+
+            # allocate the buffer
+            if isinstance(preload, basestring):
+                _data = np.memmap(preload, mode='w+', dtype=this_data.dtype,
+                                  shape=(nchan, nsamp))
+            else:
+                _data = np.empty((nchan, nsamp), dtype=this_data.dtype)
+
+            _data[:, 0:c_ns[0]] = this_data
+
+            for ri in range(len(raws)):
+                if not raws[ri]._preloaded:
+                    # read the data directly into the buffer
+                    data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]
+                    raws[ri]._read_segment(data_buffer=data_buffer)
+                else:
+                    _data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data
+            self._data = _data
+            self._preloaded = True
+
+        # now combine information from each raw file to construct new self
+        for r in raws:
+            self._first_samps = np.r_[self._first_samps, r._first_samps]
+            self._last_samps = np.r_[self._last_samps, r._last_samps]
+            self._raw_lengths = np.r_[self._raw_lengths, r._raw_lengths]
+            self.rawdirs += r.rawdirs
+            self.info['filenames'] += r.info['filenames']
+        # reconstruct fids in case some were preloaded and others weren't
+        self._initialize_fids()
+        self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
+
+        # this has to be done after first and last sample are set appropriately
+        if self._preloaded:
+            self._times = np.arange(self.n_times) / self.info['sfreq']
+
+    def close(self):
+        """Close the files on disk."""
+        [f.close() for f in self.fids]
+        self.fids = []
+
+    def copy(self):
+        """ Return copy of Raw instance
+        """
+        new = deepcopy(self)
+        new._initialize_fids()
+        return new
+
+    def _initialize_fids(self):
+        """Initialize self.fids based on self.info['filenames']
+        """
+        if not self._preloaded:
+            self.fids = [open(fname, "rb") for fname in self.info['filenames']]
+            [fid.seek(0, 0) for fid in self.fids]
+        else:
+            self.fids = []
+
+    def as_data_frame(self, picks=None, start=None, stop=None, scale_time=1e3,
+                      scalings=None, use_time_index=True, copy=True):
+        """Get the epochs as Pandas DataFrame
+
+        Export raw data in tabular structure with MEG channels.
+
+        Caveat! To save memory, depending on selected data size consider
+        setting copy to False.
+
+        Parameters
+        ----------
+        picks : None | array of int
+            If None, only MEG and EEG channels are kept;
+            otherwise the channel indices in picks are kept.
+        start : int | None
+            Data-extraction start index. If None, data will be exported from
+            the first sample.
+        stop : int | None
+            Data-extraction stop index. If None, data will be exported to the
+            last index.
+        scale_time : float
+            Scaling to be applied to time units.
+        scalings : dict | None
+            Scaling to be applied to the channels picked. If None, defaults to
+            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
+        use_time_index : bool
+            If False, times will be included as a column in the data table;
+            otherwise they will be used as the index object.
+        copy : bool
+            If True, data will be copied; otherwise it may be modified in place.
+
+        Returns
+        -------
+        df : instance of pandas.core.DataFrame
+            Raw data exported into tabular data structure.
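+
+        Examples
+        --------
+        A minimal sketch (the sample range is arbitrary)::
+
+            df = raw.as_data_frame(start=0, stop=1000)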
+        """
+
+        pd = _check_pandas_installed()
+        if picks is None:
+            picks = range(self.info['nchan'])
+
+        data, times = self[picks, start:stop]
+
+        if copy:
+            data = data.copy()
+
+        types = [channel_type(self.info, idx) for idx in picks]
+        n_channel_types = 0
+        ch_types_used = []
+
+        scalings = _mutable_defaults(('scalings', scalings))[0]
+        for t in scalings.keys():
+            if t in types:
+                n_channel_types += 1
+                ch_types_used.append(t)
+
+        for t in ch_types_used:
+            scaling = scalings[t]
+            idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+            if len(idx) > 0:
+                data[idx] *= scaling
+
+        assert times.shape[0] == data.shape[1]
+        col_names = [self.ch_names[k] for k in picks]
+
+        df = pd.DataFrame(data.T, columns=col_names)
+        df.insert(0, 'time', times * scale_time)
+
+        if use_time_index is True:
+            with warnings.catch_warnings(True):
+                df.set_index('time', inplace=True)
+            df.index = df.index.astype(int)
+
+        return df
+
+    def to_nitime(self, picks=None, start=None, stop=None,
+                  use_first_samp=False, copy=True):
+        """ Raw data as nitime TimeSeries
+
+        Parameters
+        ----------
+        picks : array-like | None
+            Indices of channels to apply. If None, all channels will be
+            exported.
+        start : int | None
+            Data-extraction start index. If None, data will be exported from
+            the first sample.
+        stop : int | None
+            Data-extraction stop index. If None, data will be exported to the
+            last index.
+        use_first_samp : bool
+            If True, the time returned is relative to the session onset, else
+            relative to the recording onset.
+        copy : bool
+            Whether to copy the raw data or not.
+
+        Returns
+        -------
+        raw_ts : instance of nitime.TimeSeries
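+
+        Examples
+        --------
+        A minimal sketch (the channel selection is arbitrary)::
+
+            picks = pick_types(raw.info, meg=True, eeg=False, exclude=[])
+            raw_ts = raw.to_nitime(picks=picks)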
+        """
+        try:
+            from nitime import TimeSeries  # to avoid strong dependency
+        except ImportError:
+            raise Exception('the nitime package is missing')
+
+        if picks is None:
+            picks = range(self.info['nchan'])
+
+        data, _ = self[picks, start:stop]
+        if copy:
+            data = data.copy()
+
+        start_time = self.index_as_time(start if start else 0, use_first_samp)
+        raw_ts = TimeSeries(data, sampling_rate=self.info['sfreq'],
+                            t0=start_time)
+
+        raw_ts.ch_names = [self.ch_names[k] for k in picks]
+
+        return raw_ts
+
+    @verbose
+    def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
+                      verbose=None, projector=None):
+        """Read a chunk of raw data
+
+        Parameters
+        ----------
+        start : int, (optional)
+            first sample to include (first is 0). If omitted, defaults to the
+            first sample in data.
+        stop : int, (optional)
+            First sample to not include.
+            If omitted, data is included to the end.
+        sel : array, optional
+            Indices of channels to select.
+        data_buffer : array or str, optional
+            numpy array to fill with data read, must have the correct shape.
+            If str, a np.memmap with the correct data type will be used
+            to store the data.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        projector : array
+            SSP operator to apply to the data.
+
+        Returns
+        -------
+        data : array, [channels x samples]
+           the data matrix (channels x samples).
+        times : array, [samples]
+            returns the time values corresponding to the samples.
+        """
+        #  Initial checks
+        start = int(start)
+        stop = self.n_times if stop is None else min([int(stop), self.n_times])
+
+        if start >= stop:
+            raise ValueError('No data in this range')
+
+        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
+                    (start, stop - 1, start / float(self.info['sfreq']),
+                     (stop - 1) / float(self.info['sfreq'])))
+
+        #  Initialize the data and calibration vector
+        nchan = self.info['nchan']
+
+        n_sel_channels = nchan if sel is None else len(sel)
+        # convert sel to a slice if possible for efficiency
+        if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
+            sel = slice(sel[0], sel[-1] + 1)
+        idx = slice(None, None, None) if sel is None else sel
+        data_shape = (n_sel_channels, stop - start)
+        if isinstance(data_buffer, np.ndarray):
+            if data_buffer.shape != data_shape:
+                raise ValueError('data_buffer has incorrect shape')
+            data = data_buffer
+        else:
+            data = None  # we will allocate it later, once we know the type
+
+        mult = list()
+        for ri in range(len(self._raw_lengths)):
+            mult.append(np.diag(self.cals.ravel()))
+            if self.comp is not None:
+                mult[ri] = np.dot(self.comp[idx, :], mult[ri])
+            if projector is not None:
+                mult[ri] = np.dot(projector, mult[ri])
+
+        # deal with having multiple files accessed by the raw object
+        cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
+                                                   dtype='int')))
+        cumul_lens = np.cumsum(cumul_lens)
+        files_used = np.logical_and(np.less(start, cumul_lens[1:]),
+                                    np.greater_equal(stop - 1,
+                                                     cumul_lens[:-1]))
+
+        first_file_used = False
+        s_off = 0
+        dest = 0
+        if isinstance(idx, slice):
+            cals = self.cals.ravel()[idx][:, np.newaxis]
+        else:
+            cals = self.cals.ravel()[:, np.newaxis]
+
+        for fi in np.nonzero(files_used)[0]:
+            start_loc = self._first_samps[fi]
+            # first iteration (only) could start in the middle somewhere
+            if not first_file_used:
+                first_file_used = True
+                start_loc += start - cumul_lens[fi]
+            stop_loc = np.min([stop - 1 - cumul_lens[fi] +
+                               self._first_samps[fi], self._last_samps[fi]])
+            if start_loc < self._first_samps[fi]:
+                raise ValueError('Bad array indexing, could be a bug')
+            if stop_loc > self._last_samps[fi]:
+                raise ValueError('Bad array indexing, could be a bug')
+            if stop_loc < start_loc:
+                raise ValueError('Bad array indexing, could be a bug')
+            len_loc = stop_loc - start_loc + 1
+
+            for this in self.rawdirs[fi]:
+
+                #  Do we need this buffer
+                if this['last'] >= start_loc:
+                    #  The picking logic is a bit complicated
+                    if stop_loc > this['last'] and start_loc < this['first']:
+                        #    We need the whole buffer
+                        first_pick = 0
+                        last_pick = this['nsamp']
+                        logger.debug('W')
+
+                    elif start_loc >= this['first']:
+                        first_pick = start_loc - this['first']
+                        if stop_loc <= this['last']:
+                            #   Something from the middle
+                            last_pick = this['nsamp'] + stop_loc - this['last']
+                            logger.debug('M')
+                        else:
+                            #   From the middle to the end
+                            last_pick = this['nsamp']
+                            logger.debug('E')
+                    else:
+                        #    From the beginning to the middle
+                        first_pick = 0
+                        last_pick = stop_loc - this['first'] + 1
+                        logger.debug('B')
+
+                    #   Now we are ready to pick
+                    picksamp = last_pick - first_pick
+                    if picksamp > 0:
+                        # only read data if it exists
+                        if this['ent'] is not None:
+                            one = read_tag(self.fids[fi], this['ent'].pos,
+                                           shape=(this['nsamp'], nchan),
+                                           rlims=(first_pick, last_pick)).data
+                            if np.isrealobj(one):
+                                dtype = np.float
+                            else:
+                                dtype = np.complex128
+                            one.shape = (picksamp, nchan)
+                            one = one.T.astype(dtype)
+                            # use proj + cal factors in mult
+                            if mult is not None:
+                                one = np.dot(mult[fi], one)
+                            else:  # apply just the calibration factors
+                                # this logic is designed to limit memory copies
+                                if isinstance(idx, slice):
+                                    # This is a view operation, so it's fast
+                                    one[idx] *= cals
+                                else:
+                                    # Extra operations are actually faster here
+                                    # than creating a new array
+                                    # (fancy indexing)
+                                    one *= cals
+
+                            # if not already done, allocate array with
+                            # right type
+                            data = _allocate_data(data, data_buffer,
+                                                  data_shape, dtype)
+                            if isinstance(idx, slice):
+                                # faster to slice in data than doing
+                                # one = one[idx] sooner
+                                data[:, dest:(dest + picksamp)] = one[idx]
+                            else:
+                                # faster than doing one = one[idx]
+                                data_view = data[:, dest:(dest + picksamp)]
+                                for ii, ix in enumerate(idx):
+                                    data_view[ii] = one[ix]
+                        dest += picksamp
+
+                #   Done?
+                if this['last'] >= stop_loc:
+                    # if not already done, allocate array with float dtype
+                    data = _allocate_data(data, data_buffer, data_shape,
+                                          np.float)
+                    break
+
+            self.fids[fi].seek(0, 0)  # Go back to beginning of the file
+            s_off += len_loc
+            # double-check our math
+            if s_off != dest:
+                raise ValueError('Incorrect file reading')
+
+        logger.info('[done]')
+        times = np.arange(start, stop) / self.info['sfreq']
+
+        return data, times
+
+    def __repr__(self):
+        s = "n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
+                                                self.n_times)
+        return "<Raw  |  %s>" % s
+
+
+def _allocate_data(data, data_buffer, data_shape, dtype):
+    if data is None:
+        # if not already done, allocate array with right type
+        if isinstance(data_buffer, basestring):
+            # use a memmap
+            data = np.memmap(data_buffer, mode='w+',
+                             dtype=dtype, shape=data_shape)
+        else:
+            data = np.zeros(data_shape, dtype=dtype)
+    return data
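+
+# A minimal usage sketch (hypothetical path '/tmp/buf.dat'): passing a
+# file name as data_buffer memory-maps the output rather than holding
+# it in RAM, while data_buffer=None falls through to np.zeros:
+#
+#     data = _allocate_data(None, '/tmp/buf.dat', (306, 10000), np.float64)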
+
+
+def _time_as_index(times, sfreq, first_samp=0, use_first_samp=False):
+    """Convert time to indices
+
+    Parameters
+    ----------
+    times : list-like | float | int
+        List of numbers or a number representing points in time.
+    sfreq : float
+        Sampling frequency of the recording.
+    first_samp : int
+        First sample of the recording (default 0).
+    use_first_samp : bool
+        If True, times are treated as relative to the session onset, else
+        as relative to the recording onset.
+
+    Returns
+    -------
+    index : ndarray
+        Indices corresponding to the times supplied.
+    """
+    index = np.atleast_1d(times) * sfreq
+    index -= (first_samp if use_first_samp else 0)
+    return index.astype(int)
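+
+# A minimal sketch with illustrative values (sfreq = 1000 Hz):
+#
+#     _time_as_index(0.5, 1000.)                           # array([500])
+#     _time_as_index([0.5, 1.0], 1000., first_samp=100,
+#                    use_first_samp=True)                  # array([400, 900])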
+
+
+def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
+    """Convert time to indices
+
+    Parameters
+    ----------
+    index : list-like | int
+        List of ints or an int representing points in time.
+    sfreq : float
+        Sampling frequency of the recording.
+    first_samp : int
+        First sample of the recording (default 0).
+    use_first_samp : bool
+        If True, the times returned are relative to the session onset, else
+        relative to the recording onset.
+
+    Returns
+    -------
+    times : ndarray
+        Times corresponding to the index supplied.
+    """
+    times = np.atleast_1d(index) + (first_samp if use_first_samp else 0)
+    return times / sfreq
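+
+# The inverse mapping, with the same illustrative values:
+#
+#     _index_as_time(500, 1000.)                           # array([ 0.5])
+#     _index_as_time(400, 1000., first_samp=100,
+#                    use_first_samp=True)                  # array([ 0.5])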
+
+
+class _RawShell():
+    """Used for creating a temporary raw object"""
+    def __init__(self):
+        self.first_samp = None
+        self.last_samp = None
+        self.cals = None
+        self.rawdir = None
+        self._projector = None
+
+    @property
+    def n_times(self):
+        return self.last_samp - self.first_samp + 1
+
+
+###############################################################################
+# Writing
+
+from .write import start_file, end_file, start_block, end_block, \
+                   write_dau_pack16, write_float, write_double, \
+                   write_complex64, write_complex128, write_int, write_id
+
+
+def start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
+                      reset_range=True):
+    """Start write raw data in file
+
+    Data will be written in float
+
+    Parameters
+    ----------
+    name : string
+        Name of the file to create.
+    info : dict
+        Measurement info.
+    sel : array of int, optional
+        Indices of channels to include. By default all channels are included.
+    data_type : int
+        The data type to use. Should be 4 (FIFFT_FLOAT),
+        5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data.
+    reset_range : bool
+        If True, the info['chs'][k]['range'] parameter will be set to unity.
+
+    Returns
+    -------
+    fid : file
+        The file descriptor.
+    cals : list
+        Calibration factors (one per channel).
+    """
+    #
+    #  Create the file and save the essentials
+    #
+    fid = start_file(name)
+    start_block(fid, FIFF.FIFFB_MEAS)
+    write_id(fid, FIFF.FIFF_BLOCK_ID)
+    if info['meas_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
+    #
+    #    Measurement info
+    #
+    info = copy.deepcopy(info)
+    if sel is not None:
+        info['chs'] = [info['chs'][k] for k in sel]
+        info['nchan'] = len(sel)
+
+        ch_names = [c['ch_name'] for c in info['chs']]  # name of good channels
+        comps = copy.deepcopy(info['comps'])
+        for c in comps:
+            row_idx = [k for k, n in enumerate(c['data']['row_names'])
+                       if n in ch_names]
+            row_names = [c['data']['row_names'][i] for i in row_idx]
+            rowcals = c['rowcals'][row_idx]
+            c['rowcals'] = rowcals
+            c['data']['nrow'] = len(row_names)
+            c['data']['row_names'] = row_names
+            c['data']['data'] = c['data']['data'][row_idx]
+        info['comps'] = comps
+
+    cals = []
+    for k in range(info['nchan']):
+        #
+        #   Scan numbers may have been messed up
+        #
+        info['chs'][k]['scanno'] = k + 1  # scanno starts at 1 in FIF format
+        if reset_range is True:
+            info['chs'][k]['range'] = 1.0
+        cals.append(info['chs'][k]['cal'] * info['chs'][k]['range'])
+
+    write_meas_info(fid, info, data_type=data_type, reset_range=reset_range)
+
+    #
+    # Start the raw data
+    #
+    start_block(fid, FIFF.FIFFB_RAW_DATA)
+
+    return fid, cals
+
+
+def write_raw_buffer(fid, buf, cals, format, inv_comp):
+    """Write raw buffer
+
+    Parameters
+    ----------
+    fid : file descriptor
+        An open raw data file.
+    buf : array
+        The buffer to write.
+    cals : array
+        Calibration factors.
+    format : str
+        'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
+        float for each item. This will be doubled for complex datatypes. Note
+        that short and int formats cannot be used for complex data.
+    inv_comp : array | None
+        The CTF compensation matrix used to revert compensation
+        change when reading.
+    """
+    if buf.shape[0] != len(cals):
+        raise ValueError('buffer and calibration sizes do not match')
+
+    if format not in ['short', 'int', 'single', 'double']:
+        raise ValueError('format must be "short", "int", "single", or '
+                         '"double"')
+
+    if np.isrealobj(buf):
+        if format == 'short':
+            write_function = write_dau_pack16
+        elif format == 'int':
+            write_function = write_int
+        elif format == 'single':
+            write_function = write_float
+        else:
+            write_function = write_double
+    else:
+        if format == 'single':
+            write_function = write_complex64
+        elif format == 'double':
+            write_function = write_complex128
+        else:
+            raise ValueError('only "single" and "double" supported for '
+                             'writing complex data')
+
+    if inv_comp is not None:
+        buf = np.dot(inv_comp / np.ravel(cals)[:, None], buf)
+    else:
+        buf = buf / np.ravel(cals)[:, None]
+
+    write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
+
+
+def finish_writing_raw(fid):
+    """Finish writing raw FIF file
+
+    Parameters
+    ----------
+    fid : file descriptor
+        An open raw data file.
+    """
+    end_block(fid, FIFF.FIFFB_RAW_DATA)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
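+
+# A minimal end-to-end sketch of the three writer functions above,
+# assuming `raw` is an already-loaded Raw instance and 'raw_copy.fif' a
+# hypothetical output name:
+#
+#     fid, cals = start_writing_raw('raw_copy.fif', raw.info)
+#     data, times = raw[:, 0:1000]  # first 1000 samples, all channels
+#     write_raw_buffer(fid, data, cals, 'single', inv_comp=None)
+#     finish_writing_raw(fid)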
+
+
+def _envelope(x):
+    """ Compute envelope signal """
+    return np.abs(hilbert(x))
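+
+# Illustrative check: for a pure sinusoid the envelope is its amplitude
+# (up to edge effects of the Hilbert transform):
+#
+#     t = np.linspace(0., 1., 1000)
+#     env = _envelope(2. * np.sin(2. * np.pi * 10. * t))  # ~2 except edges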
+
+
+def _check_raw_compatibility(raw):
+    """Check to make sure all instances of Raw
+    in the input list raw have compatible parameters"""
+    for ri in range(1, len(raw)):
+        if not raw[ri].info['nchan'] == raw[0].info['nchan']:
+            raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
+        if not raw[ri].info['bads'] == raw[0].info['bads']:
+            raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
+        if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
+            raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
+        if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
+            raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
+        if not all(raw[ri].cals == raw[0].cals):
+            raise ValueError('raw[%d].cals must match' % ri)
+        if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
+            raise ValueError('SSP projectors in raw files must be the same')
+        if not all(proj_equal(p1, p2) for p1, p2 in
+                   zip(raw[0].info['projs'], raw[ri].info['projs'])):
+            raise ValueError('SSP projectors in raw files must be the same')
+    if not all([r.orig_format == raw[0].orig_format for r in raw]):
+        warnings.warn('raw files do not all have the same data format, '
+                      'could result in precision mismatch. Setting '
+                      'raw.orig_format="unknown"')
+        raw[0].orig_format = 'unknown'
+
+
+def concatenate_raws(raws, preload=None):
+    """Concatenate raw instances as if they were continuous. Note that raws[0]
+    is modified in-place to achieve the concatenation.
+
+    Parameters
+    ----------
+    raws : list
+        List of Raw instances to concatenate (in order).
+    preload : bool | None
+        If None, preload status is inferred using the preload status of the
+        raw files passed in. True or False sets the resulting raw file to
+        have or not have data preloaded.
+
+    Returns
+    -------
+    raw : instance of Raw
+        The result of the concatenation (first Raw instance passed in).
+    """
+    raws[0].append(raws[1:], preload)
+    return raws[0]
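+
+# A minimal usage sketch, assuming raw1 and raw2 are compatible Raw
+# instances (e.g. two runs of the same acquisition):
+#
+#     raw = concatenate_raws([raw1, raw2])  # raw1 is modified in place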
diff --git a/mne/fiff/tag.py b/mne/fiff/tag.py
new file mode 100644
index 0000000..1039bbb
--- /dev/null
+++ b/mne/fiff/tag.py
@@ -0,0 +1,472 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import struct
+import numpy as np
+from scipy import linalg
+import os
+import gzip
+
+from .constants import FIFF
+
+
+class Tag(object):
+    """Tag in FIF tree structure
+
+    Parameters
+    ----------
+    kind : int
+        Kind of Tag.
+    type_ : int
+        Type of Tag.
+    size : int
+        Size in bytes.
+    next : int
+        Position of the next Tag.
+    pos : int
+        Position of the Tag in the original file.
+    """
+
+    def __init__(self, kind, type_, size, next, pos=None):
+        self.kind = int(kind)
+        self.type = int(type_)
+        self.size = int(size)
+        self.next = int(next)
+        self.pos = pos if pos is not None else next
+        self.pos = int(self.pos)
+        self.data = None
+
+    def __repr__(self):
+        out = ("kind: %s - type: %s - size: %s - next: %s - pos: %s"
+               % (self.kind, self.type, self.size, self.next, self.pos))
+        if hasattr(self, 'data'):
+            out += " - data: %s" % self.data
+        out += "\n"
+        return out
+
+    def __cmp__(self, tag):
+        is_equal = (self.kind == tag.kind and
+                    self.type == tag.type and
+                    self.size == tag.size and
+                    self.next == tag.next and
+                    self.pos == tag.pos and
+                    self.data == tag.data)
+        if is_equal:
+            return 0
+        else:
+            return 1
+
+
+def read_big(fid, size=None):
+    """Function to read large chunks of data (>16MB) Windows-friendly
+
+    Parameters
+    ----------
+    fid : file
+        Open file to read from.
+    size : int or None
+        Number of bytes to read. If None, the whole file is read.
+
+    Returns
+    -------
+    buf : str
+        The data.
+
+    Notes
+    -----
+    Windows (argh) can't handle reading large chunks of data, so we
+    have to do it piece-wise, possibly related to:
+       http://stackoverflow.com/questions/4226941
+
+    Examples
+    --------
+    This code should work for normal files and .gz files:
+
+        >>> import numpy as np
+        >>> import gzip, os, tempfile, shutil
+        >>> fname = tempfile.mkdtemp()
+        >>> fname_gz = os.path.join(fname, 'temp.gz')
+        >>> fname = os.path.join(fname, 'temp.bin')
+        >>> randgen = np.random.RandomState(9)
+        >>> x = randgen.randn(3000000)  # > 16MB data
+        >>> with open(fname, 'wb') as fid: x.tofile(fid)
+        >>> with open(fname, 'rb') as fid: y = np.fromstring(read_big(fid))
+        >>> assert np.all(x == y)
+        >>> with gzip.open(fname_gz, 'wb') as fid: fid.write(x.tostring())
+        24000000
+        >>> with gzip.open(fname_gz, 'rb') as fid: y = np.fromstring(read_big(fid))
+        >>> assert np.all(x == y)
+        >>> shutil.rmtree(os.path.dirname(fname))
+
+    """
+    # buf_size is chosen as the largest working power of 2 (16 MB):
+    buf_size = 16777216
+    if size is None:
+        # it's not possible to get .gz uncompressed file size
+        if not isinstance(fid, gzip.GzipFile):
+            size = os.fstat(fid.fileno()).st_size - fid.tell()
+
+    if size is not None:
+        # Use pre-buffering method
+        segments = np.r_[np.arange(0, size, buf_size), size]
+        buf = bytearray(' ' * size)
+        for start, end in zip(segments[:-1], segments[1:]):
+            data = fid.read(end - start)
+            if len(data) != end - start:
+                raise ValueError('Read error')
+            buf[start:end] = data
+        buf = str(buf)
+    else:
+        # Use presumably less efficient concatenating method
+        buf = ['']
+        new = fid.read(buf_size)
+        while len(new) > 0:
+            buf.append(new)
+            new = fid.read(buf_size)
+        buf = ''.join(buf)
+
+    return buf
+
+
+def read_tag_info(fid):
+    """Read Tag info (or header)
+    """
+    s = fid.read(4 * 4)
+    if len(s) == 0:
+        return None
+    tag = Tag(*struct.unpack(">iiii", s))
+    if tag.next == 0:
+        fid.seek(tag.size, 1)
+    elif tag.next > 0:
+        fid.seek(tag.next, 0)
+    return tag
+
+
+def _fromstring_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
+    """Helper for getting a range of rows from a large tag"""
+    if shape is not None:
+        item_size = np.dtype(dtype).itemsize
+        if not len(shape) == 2:
+            raise ValueError('Only implemented for 2D matrices')
+        if not np.prod(shape) == tag_size / item_size:
+            raise ValueError('Wrong shape specified')
+        if not len(rlims) == 2:
+            raise ValueError('rlims must have two elements')
+        n_row_out = rlims[1] - rlims[0]
+        if n_row_out <= 0:
+            raise ValueError('rlims must yield at least one output')
+        row_size = item_size * shape[1]
+        # # of bytes to skip at the beginning, # to read, where to end
+        start_skip = rlims[0] * row_size
+        read_size = n_row_out * row_size
+        end_pos = fid.tell() + tag_size
+        # Move the pointer ahead to the read point
+        fid.seek(start_skip, 1)
+        # Do the reading
+        out = np.fromstring(fid.read(read_size), dtype=dtype)
+        # Move the pointer ahead to the end of the tag
+        fid.seek(end_pos)
+    else:
+        out = np.fromstring(fid.read(tag_size), dtype=dtype)
+    return out
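+
+# Worked example of the byte arithmetic above: for a tag holding a
+# (1000, 306) float32 matrix (item_size = 4, so row_size = 1224 bytes),
+# rlims=(10, 20) gives start_skip = 10 * 1224 = 12240 bytes to skip and
+# read_size = 10 * 1224 = 12240 bytes to read, i.e. rows 10..19 only.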
+
+
+def read_tag(fid, pos=None, shape=None, rlims=None):
+    """Read a Tag from a file at a given position
+
+    Parameters
+    ----------
+    fid : file
+        The open FIF file descriptor.
+    pos : int
+        The position of the Tag in the file.
+    shape : tuple | None
+        If tuple, the shape of the stored matrix. Only to be used with
+        data stored as a vector (not implemented for matrices yet).
+    rlims : tuple | None
+        If tuple, the first and last rows to retrieve. Note that data are
+        assumed to be stored row-major in the file. Only to be used with
+        data stored as a vector (not implemented for matrices yet).
+
+    Returns
+    -------
+    tag : Tag
+        The Tag read.
+    """
+    if pos is not None:
+        fid.seek(pos, 0)
+
+    s = fid.read(4 * 4)
+    tag = Tag(*struct.unpack(">iIii", s))
+
+    #
+    #   The magic hexadecimal values
+    #
+    is_matrix = 4294901760  # ffff0000
+    matrix_coding_dense = 16384      # 4000
+    matrix_coding_CCS = 16400      # 4010
+    matrix_coding_RCS = 16416      # 4020
+    data_type = 65535      # ffff
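+    #   e.g. a dense float matrix is stored with tag.type 0x40000004:
+    #   is_matrix & tag.type = 0x40000000, shifted right by 16 bits this
+    #   gives 0x4000 (matrix_coding_dense), while data_type & tag.type = 4,
+    #   i.e. FIFFT_FLOAT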
+    #
+    if tag.size > 0:
+        matrix_coding = is_matrix & tag.type
+        if matrix_coding != 0:
+            matrix_coding = matrix_coding >> 16
+
+            # This should be easy to implement (see _fromstring_rows)
+            # if we need it, but for now, it's not...
+            if shape is not None:
+                raise ValueError('Row reading not implemented for matrices '
+                                 'yet')
+
+            #   Matrices
+            if matrix_coding == matrix_coding_dense:
+                # Find dimensions and return to the beginning of tag data
+                pos = fid.tell()
+                fid.seek(tag.size - 4, 1)
+                ndim = int(np.fromstring(fid.read(4), dtype='>i4'))
+                fid.seek(-(ndim + 1) * 4, 1)
+                dims = np.fromstring(fid.read(4 * ndim), dtype='>i4')[::-1]
+                #
+                # Back to where the data start
+                #
+                fid.seek(pos, 0)
+
+                if ndim > 3:
+                    raise Exception('Only 2 or 3-dimensional matrices are '
+                                    'supported at this time')
+
+                matrix_type = data_type & tag.type
+
+                if matrix_type == FIFF.FIFFT_INT:
+                    tag.data = np.fromstring(read_big(fid, 4 * dims.prod()),
+                                             dtype='>i4').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_JULIAN:
+                    tag.data = np.fromstring(read_big(fid, 4 * dims.prod()),
+                                             dtype='>i4').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_FLOAT:
+                    tag.data = np.fromstring(read_big(fid, 4 * dims.prod()),
+                                             dtype='>f4').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_DOUBLE:
+                    tag.data = np.fromstring(read_big(fid, 8 * dims.prod()),
+                                             dtype='>f8').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_COMPLEX_FLOAT:
+                    data = np.fromstring(read_big(fid, 4 * 2 * dims.prod()),
+                                         dtype='>f4')
+                    # Note: we need the non-conjugate transpose here
+                    tag.data = (data[::2] + 1j * data[1::2]).reshape(dims)
+                elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                    data = np.fromstring(read_big(fid, 8 * 2 * dims.prod()),
+                                         dtype='>f8')
+                    # Note: we need the non-conjugate transpose here
+                    tag.data = (data[::2] + 1j * data[1::2]).reshape(dims)
+                else:
+                    raise Exception('Cannot handle matrix of type %d yet'
+                                    % matrix_type)
+
+            elif matrix_coding == matrix_coding_CCS or \
+                                    matrix_coding == matrix_coding_RCS:
+                from scipy import sparse
+                # Find dimensions and return to the beginning of tag data
+                pos = fid.tell()
+                fid.seek(tag.size - 4, 1)
+                ndim = int(np.fromstring(fid.read(4), dtype='>i4'))
+                fid.seek(-(ndim + 2) * 4, 1)
+                dims = np.fromstring(fid.read(4 * (ndim + 1)), dtype='>i4')
+                if ndim != 2:
+                    raise Exception('Only two-dimensional matrices are '
+                                    'supported at this time')
+
+                # Back to where the data start
+                fid.seek(pos, 0)
+                nnz = dims[0]
+                nrow = dims[1]
+                ncol = dims[2]
+                sparse_data = np.fromstring(fid.read(4 * nnz), dtype='>f4')
+                shape = (dims[1], dims[2])
+                if matrix_coding == matrix_coding_CCS:
+                    #    CCS
+                    sparse_indices = np.fromstring(fid.read(4 * nnz),
+                                                   dtype='>i4')
+                    sparse_ptrs = np.fromstring(fid.read(4 * (ncol + 1)),
+                                                dtype='>i4')
+                    tag.data = sparse.csc_matrix((sparse_data, sparse_indices,
+                                                 sparse_ptrs), shape=shape)
+                else:
+                    #    RCS
+                    sparse_indices = np.fromstring(fid.read(4 * nnz),
+                                                   dtype='>i4')
+                    sparse_ptrs = np.fromstring(fid.read(4 * (nrow + 1)),
+                                                dtype='>i4')
+                    tag.data = sparse.csr_matrix((sparse_data, sparse_indices,
+                                                 sparse_ptrs), shape=shape)
+            else:
+                raise Exception('Cannot handle other than dense or sparse '
+                                'matrices yet')
+        else:
+            #   All other data types
+
+            #   Simple types
+            if tag.type == FIFF.FIFFT_BYTE:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">B1",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_SHORT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">i2",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_INT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">i4",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_USHORT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">u2",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_UINT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">u4",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_FLOAT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f4",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_DOUBLE:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f8",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_STRING:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">c",
+                                            shape=shape, rlims=rlims)
+                tag.data = ''.join(tag.data)
+            elif tag.type == FIFF.FIFFT_DAU_PACK16:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">i2",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                # data gets stored twice as large
+                if shape is not None:
+                    shape = (shape[0], shape[1] * 2)
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f4",
+                                            shape=shape, rlims=rlims)
+                tag.data = tag.data[::2] + 1j * tag.data[1::2]
+            elif tag.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                # data gets stored twice as large
+                if shape is not None:
+                    shape = (shape[0], shape[1] * 2)
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f8",
+                                            shape=shape, rlims=rlims)
+                tag.data = tag.data[::2] + 1j * tag.data[1::2]
+            #
+            #   Structures
+            #
+            elif tag.type == FIFF.FIFFT_ID_STRUCT:
+                tag.data = dict()
+                tag.data['version'] = int(np.fromstring(fid.read(4),
+                                                        dtype=">i4"))
+                tag.data['machid'] = np.fromstring(fid.read(8), dtype=">i4")
+                tag.data['secs'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data['usecs'] = int(np.fromstring(fid.read(4),
+                                                      dtype=">i4"))
+            elif tag.type == FIFF.FIFFT_DIG_POINT_STRUCT:
+                tag.data = dict()
+                tag.data['kind'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data['ident'] = int(np.fromstring(fid.read(4),
+                                                      dtype=">i4"))
+                tag.data['r'] = np.fromstring(fid.read(12), dtype=">f4")
+                tag.data['coord_frame'] = 0
+            elif tag.type == FIFF.FIFFT_COORD_TRANS_STRUCT:
+                tag.data = dict()
+                tag.data['from'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data['to'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                rot = np.fromstring(fid.read(36), dtype=">f4").reshape(3, 3)
+                move = np.fromstring(fid.read(12), dtype=">f4")
+                tag.data['trans'] = np.r_[np.c_[rot, move],
+                                          np.array([[0], [0], [0], [1]]).T]
+                #
+                # Skip over the inverse transformation
+                # It is easier to just use inverse of trans in Matlab
+                #
+                fid.seek(12 * 4, 1)
+            elif tag.type == FIFF.FIFFT_CH_INFO_STRUCT:
+                d = dict()
+                d['scanno'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['logno'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['kind'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['range'] = float(np.fromstring(fid.read(4), dtype=">f4"))
+                d['cal'] = float(np.fromstring(fid.read(4), dtype=">f4"))
+                d['coil_type'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                #
+                #   Read the coil coordinate system definition
+                #
+                d['loc'] = np.fromstring(fid.read(48), dtype=">f4")
+                d['coil_trans'] = None
+                d['eeg_loc'] = None
+                d['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
+                tag.data = d
+                #
+                #   Convert loc into a more useful format
+                #
+                loc = tag.data['loc']
+                kind = tag.data['kind']
+                if kind == FIFF.FIFFV_MEG_CH or kind == FIFF.FIFFV_REF_MEG_CH:
+                    tag.data['coil_trans'] = np.concatenate(
+                            [loc.reshape(4, 3).T[:, [1, 2, 3, 0]],
+                             np.array([0, 0, 0, 1]).reshape(1, 4)])
+                    tag.data['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                elif tag.data['kind'] == FIFF.FIFFV_EEG_CH:
+                    if linalg.norm(loc[3:6]) > 0.:
+                        tag.data['eeg_loc'] = np.c_[loc[0:3], loc[3:6]]
+                    else:
+                        tag.data['eeg_loc'] = loc[0:3]
+                    tag.data['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+                #
+                #   Unit and exponent
+                #
+                tag.data['unit'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data['unit_mul'] = int(np.fromstring(fid.read(4),
+                                                         dtype=">i4"))
+                #
+                #   Handle the channel name
+                #
+                ch_name = np.fromstring(fid.read(16), dtype=">c")
+                #
+                # Omit nulls
+                #
+                tag.data['ch_name'] = \
+                    ''.join(ch_name[:np.where(ch_name == '')[0][0]])
+
+            elif tag.type == FIFF.FIFFT_OLD_PACK:
+                offset = float(np.fromstring(fid.read(4), dtype=">f4"))
+                scale = float(np.fromstring(fid.read(4), dtype=">f4"))
+                tag.data = np.fromstring(fid.read(tag.size - 8), dtype=">h2")
+                tag.data = scale * tag.data + offset
+            elif tag.type == FIFF.FIFFT_DIR_ENTRY_STRUCT:
+                tag.data = list()
+                for _ in range(tag.size / 16 - 1):
+                    s = fid.read(4 * 4)
+                    tag.data.append(Tag(*struct.unpack(">iIii", s)))
+            else:
+                raise Exception('Unimplemented tag data type %s' % tag.type)
+
+    if tag.next != FIFF.FIFFV_NEXT_SEQ:
+        # f.seek(tag.next,0)
+        fid.seek(tag.next, 1)  # XXX : fix? pb when tag.next < 0
+
+    return tag
+
+
+def find_tag(fid, node, findkind):
+    """Find Tag in an open FIF file descriptor
+    """
+    for p in range(node['nent']):
+        if node['directory'][p].kind == findkind:
+            return read_tag(fid, node['directory'][p].pos)
+    tag = None
+    return tag
+
+
+def has_tag(node, kind):
+    """Does the node contains a Tag of a given kind?
+    """
+    for d in node['directory']:
+        if d.kind == kind:
+            return True
+    return False
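+
+# A minimal usage sketch, assuming `fid` is an open FIF file and `node`
+# is a tree-node dict carrying 'nent' and 'directory' entries (as
+# consumed by find_tag above); e.g. to fetch the measurement date tag:
+#
+#     if has_tag(node, FIFF.FIFF_MEAS_DATE):
+#         tag = find_tag(fid, node, FIFF.FIFF_MEAS_DATE)
+#         print(tag.data)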
diff --git a/mne/fiff/tests/__init__.py b/mne/fiff/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/fiff/tests/data/process_raw.sh b/mne/fiff/tests/data/process_raw.sh
new file mode 100755
index 0000000..a13a078
--- /dev/null
+++ b/mne/fiff/tests/data/process_raw.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Generate events
+mne_process_raw --raw test_raw.fif --eventsout test-eve.fif
+
+# Averaging with no filter
+mne_process_raw --raw test_raw.fif --projon --filteroff \
+        --saveavetag -nf-ave --ave test-no-reject.ave
+
+# Averaging with a 40 Hz low-pass
+mne_process_raw --raw test_raw.fif --lowpass 40 --projoff \
+        --saveavetag -ave --ave test.ave
+
+# Compute the noise covariance matrix
+mne_process_raw --raw test_raw.fif --filteroff --projon \
+        --savecovtag -cov --cov test.cov
+
+# Compute the noise covariance matrix with keepsamplemean
+mne_process_raw --raw test_raw.fif --filteroff --projon \
+        --savecovtag -km-cov --cov test_keepmean.cov
+
+# Compute projection
+mne_process_raw --raw test_raw.fif --events test-eve.fif --makeproj \
+           --projtmin -0.2 --projtmax 0.3 --saveprojtag _proj \
+           --projnmag 1 --projngrad 1 --projevent 1 \
+           --projmagrej 600000 --projgradrej 500000  --filteroff
diff --git a/mne/fiff/tests/data/test-ave-2.log b/mne/fiff/tests/data/test-ave-2.log
new file mode 100644
index 0000000..3d3f21c
--- /dev/null
+++ b/mne/fiff/tests/data/test-ave-2.log
@@ -0,0 +1,28 @@
+Reading mne/fiff/tests/data/test-ave.fif ...
+    Read a total of 4 projection items:
+        PCA-v1 (1 x 102)  idle
+        PCA-v2 (1 x 102)  idle
+        PCA-v3 (1 x 102)  idle
+        Average EEG reference (1 x 60)  idle
+    Found the data of interest:
+        t =    -199.80 ...     499.49 ms (Right Auditory)
+        0 CTF compensation matrices available
+        nave = 6 - aspect type = 100
+Created an SSP operator (subspace dimension = 4)
+4 projection items activated
+SSP projectors applied...
+No baseline correction applied...
+Reading mne/fiff/tests/data/test-ave.fif ...
+    Read a total of 4 projection items:
+        PCA-v1 (1 x 102)  idle
+        PCA-v2 (1 x 102)  idle
+        PCA-v3 (1 x 102)  idle
+        Average EEG reference (1 x 60)  idle
+    Found the data of interest:
+        t =    -199.80 ...     499.49 ms (Right Auditory)
+        0 CTF compensation matrices available
+        nave = 6 - aspect type = 100
+Created an SSP operator (subspace dimension = 4)
+4 projection items activated
+SSP projectors applied...
+No baseline correction applied...
diff --git a/mne/fiff/tests/data/test-ave.fif b/mne/fiff/tests/data/test-ave.fif
new file mode 100755
index 0000000..67a3db6
Binary files /dev/null and b/mne/fiff/tests/data/test-ave.fif differ
diff --git a/mne/fiff/tests/data/test-ave.fif.gz b/mne/fiff/tests/data/test-ave.fif.gz
new file mode 100644
index 0000000..ca4b283
Binary files /dev/null and b/mne/fiff/tests/data/test-ave.fif.gz differ
diff --git a/mne/fiff/tests/data/test-ave.log b/mne/fiff/tests/data/test-ave.log
new file mode 100644
index 0000000..d663417
--- /dev/null
+++ b/mne/fiff/tests/data/test-ave.log
@@ -0,0 +1,14 @@
+Reading mne/fiff/tests/data/test-ave.fif ...
+    Read a total of 4 projection items:
+        PCA-v1 (1 x 102)  idle
+        PCA-v2 (1 x 102)  idle
+        PCA-v3 (1 x 102)  idle
+        Average EEG reference (1 x 60)  idle
+    Found the data of interest:
+        t =    -199.80 ...     499.49 ms (Right Auditory)
+        0 CTF compensation matrices available
+        nave = 6 - aspect type = 100
+Created an SSP operator (subspace dimension = 4)
+4 projection items activated
+SSP projectors applied...
+No baseline correction applied...
diff --git a/mne/fiff/tests/data/test-cov.fif b/mne/fiff/tests/data/test-cov.fif
new file mode 100755
index 0000000..3806071
Binary files /dev/null and b/mne/fiff/tests/data/test-cov.fif differ
diff --git a/mne/fiff/tests/data/test-cov.fif.gz b/mne/fiff/tests/data/test-cov.fif.gz
new file mode 100644
index 0000000..8017b4a
Binary files /dev/null and b/mne/fiff/tests/data/test-cov.fif.gz differ
diff --git a/mne/fiff/tests/data/test-eve-1.eve b/mne/fiff/tests/data/test-eve-1.eve
new file mode 100644
index 0000000..238e645
--- /dev/null
+++ b/mne/fiff/tests/data/test-eve-1.eve
@@ -0,0 +1 @@
+ 27977       0   2
diff --git a/mne/fiff/tests/data/test-eve-1.fif b/mne/fiff/tests/data/test-eve-1.fif
new file mode 100644
index 0000000..a16a420
Binary files /dev/null and b/mne/fiff/tests/data/test-eve-1.fif differ
diff --git a/mne/fiff/tests/data/test-eve-old-style.eve b/mne/fiff/tests/data/test-eve-old-style.eve
new file mode 100644
index 0000000..803a7d7
--- /dev/null
+++ b/mne/fiff/tests/data/test-eve-old-style.eve
@@ -0,0 +1,31 @@
+ 27977 46.581       0   2
+ 28345 47.193       0   3
+ 28771 47.903       0   1
+ 29219 48.648       0   4
+ 29652 49.369       0   2
+ 30025 49.990       0   3
+ 30450 50.698       0   1
+ 30839 51.346       0   4
+ 31240 52.013       0   2
+ 31665 52.721       0   3
+ 32101 53.447       0   1
+ 32519 54.143       0   4
+ 32935 54.835       0   2
+ 33325 55.485       0   3
+ 33712 56.129       0   1
+ 34089 56.757       0   5
+ 34532 57.494       0   2
+ 34649 57.689       0  32
+ 34956 58.200       0   3
+ 35428 58.986       0   1
+ 35850 59.689       0   4
+ 36211 60.290       0   2
+ 36576 60.898       0   3
+ 37007 61.615       0   1
+ 37460 62.369       0   4
+ 37910 63.119       0   2
+ 38326 63.811       0   3
+ 38711 64.452       0   1
+ 39130 65.150       0   4
+ 39563 65.871       0   2
+ 39926 66.475       0   3
diff --git a/mne/fiff/tests/data/test-eve.eve b/mne/fiff/tests/data/test-eve.eve
new file mode 100644
index 0000000..31dedda
--- /dev/null
+++ b/mne/fiff/tests/data/test-eve.eve
@@ -0,0 +1,31 @@
+ 27977       0   2
+ 28345       0   3
+ 28771       0   1
+ 29219       0   4
+ 29652       0   2
+ 30025       0   3
+ 30450       0   1
+ 30839       0   4
+ 31240       0   2
+ 31665       0   3
+ 32101       0   1
+ 32519       0   4
+ 32935       0   2
+ 33325       0   3
+ 33712       0   1
+ 34089       0   5
+ 34532       0   2
+ 34649       0  32
+ 34956       0   3
+ 35428       0   1
+ 35850       0   4
+ 36211       0   2
+ 36576       0   3
+ 37007       0   1
+ 37460       0   4
+ 37910       0   2
+ 38326       0   3
+ 38711       0   1
+ 39130       0   4
+ 39563       0   2
+ 39926       0   3
diff --git a/mne/fiff/tests/data/test-eve.fif b/mne/fiff/tests/data/test-eve.fif
new file mode 100755
index 0000000..d3cc74d
Binary files /dev/null and b/mne/fiff/tests/data/test-eve.fif differ
diff --git a/mne/fiff/tests/data/test-eve.fif.gz b/mne/fiff/tests/data/test-eve.fif.gz
new file mode 100644
index 0000000..e2ea818
Binary files /dev/null and b/mne/fiff/tests/data/test-eve.fif.gz differ
diff --git a/mne/fiff/tests/data/test-km-cov.fif b/mne/fiff/tests/data/test-km-cov.fif
new file mode 100644
index 0000000..3cd3d2a
Binary files /dev/null and b/mne/fiff/tests/data/test-km-cov.fif differ
diff --git a/mne/fiff/tests/data/test-mpr-eve.eve b/mne/fiff/tests/data/test-mpr-eve.eve
new file mode 100644
index 0000000..62145d9
--- /dev/null
+++ b/mne/fiff/tests/data/test-mpr-eve.eve
@@ -0,0 +1,32 @@
+ 25800 42.956       0   0
+ 27977 46.581       0   2
+ 28345 47.193       0   3
+ 28771 47.903       0   1
+ 29219 48.648       0   4
+ 29652 49.369       0   2
+ 30025 49.990       0   3
+ 30450 50.698       0   1
+ 30839 51.346       0   4
+ 31240 52.013       0   2
+ 31665 52.721       0   3
+ 32101 53.447       0   1
+ 32519 54.143       0   4
+ 32935 54.835       0   2
+ 33325 55.485       0   3
+ 33712 56.129       0   1
+ 34089 56.757       0   5
+ 34532 57.494       0   2
+ 34649 57.689       0  32
+ 34956 58.200       0   3
+ 35428 58.986       0   1
+ 35850 59.689       0   4
+ 36211 60.290       0   2
+ 36576 60.898       0   3
+ 37007 61.615       0   1
+ 37460 62.369       0   4
+ 37910 63.119       0   2
+ 38326 63.811       0   3
+ 38711 64.452       0   1
+ 39130 65.150       0   4
+ 39563 65.871       0   2
+ 39926 66.475       0   3
diff --git a/mne/fiff/tests/data/test-nf-ave.fif b/mne/fiff/tests/data/test-nf-ave.fif
new file mode 100644
index 0000000..0ea99b2
Binary files /dev/null and b/mne/fiff/tests/data/test-nf-ave.fif differ
diff --git a/mne/fiff/tests/data/test-no-reject.ave b/mne/fiff/tests/data/test-no-reject.ave
new file mode 100755
index 0000000..cfabf40
--- /dev/null
+++ b/mne/fiff/tests/data/test-no-reject.ave
@@ -0,0 +1,49 @@
+#
+#	These are comments
+#
+average {
+#
+#	Output files
+#
+#	outfile         test-ave.fif
+#	logfile         test-ave.log
+#	eventfile	test.eve
+#
+#	Rejection values
+#
+    # gradReject    4000e-13
+    # magReject 4e-12
+    # eegReject 40e-6
+    # eogReject 150e-6
+#
+#	Category specifications
+#
+	category {
+		name	"Left Auditory"
+		event	1
+		tmin	-0.2
+		tmax	0.5
+		color	1 1 0
+	}
+	category {
+		name	"Right Auditory"
+		event	2
+		tmin	-0.2
+		tmax	0.5
+		color	1 0 0
+	}
+	category {
+		name	"Left visual"
+		event	3
+		tmin	-0.2
+		tmax	0.5
+		color	0 1 0
+	}
+	category {
+		name	"Right visual"
+		event	4
+		tmin	-0.2
+		tmax	0.5
+		color   0.5 0.8 1
+	}
+}
diff --git a/mne/fiff/tests/data/test.ave b/mne/fiff/tests/data/test.ave
new file mode 100755
index 0000000..2ee6dab
--- /dev/null
+++ b/mne/fiff/tests/data/test.ave
@@ -0,0 +1,49 @@
+#
+#	These are comments
+#
+average {
+#
+#	Output files
+#
+#	outfile         test-ave.fif
+#	logfile         test-ave.log
+#	eventfile	test.eve
+#
+#	Rejection values
+#
+    gradReject    4000e-13
+    magReject 4e-12
+    eegReject 40e-6
+    eogReject 150e-6
+#
+#	Category specifications
+#
+	category {
+		name	"Left Auditory"
+		event	1
+		tmin	-0.2
+		tmax	0.5
+		color	1 1 0
+	}
+	category {
+		name	"Right Auditory"
+		event	2
+		tmin	-0.2
+		tmax	0.5
+		color	1 0 0
+	}
+	category {
+		name	"Left visual"
+		event	3
+		tmin	-0.2
+		tmax	0.5
+		color	0 1 0
+	}
+	category {
+		name	"Right visual"
+		event	4
+		tmin	-0.2
+		tmax	0.5
+		color   0.5 0.8 1
+	}
+}
diff --git a/mne/fiff/tests/data/test.cov b/mne/fiff/tests/data/test.cov
new file mode 100755
index 0000000..9ba2b75
--- /dev/null
+++ b/mne/fiff/tests/data/test.cov
@@ -0,0 +1,55 @@
+#
+#	These are comments
+#
+cov {
+#
+#	Output files
+#
+	outfile         test-cov.fif
+	logfile         test-cov.log
+#
+#	Rejection values
+#
+    gradReject    10000e-13
+    magReject 4e-12
+    eegReject 80e-6
+    eogReject 150e-6
+#
+#	What to include in the covariance matrix?
+#
+	def {
+		name	"Left Auditory"
+		event	1
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+	def {
+		name	"Right Auditory"
+		event	2
+		ignore	0
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+	def {
+		name	"Left visual"
+		event	3
+		ignore	0
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+	def {
+		name	"Right visual"
+		event	4
+		ignore	0
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+}
diff --git a/mne/fiff/tests/data/test_bads.txt b/mne/fiff/tests/data/test_bads.txt
new file mode 100644
index 0000000..9b69461
--- /dev/null
+++ b/mne/fiff/tests/data/test_bads.txt
@@ -0,0 +1,2 @@
+MEG 0422
+MEG 0433
diff --git a/mne/fiff/tests/data/test_ctf_comp_raw.fif b/mne/fiff/tests/data/test_ctf_comp_raw.fif
new file mode 100644
index 0000000..4259791
Binary files /dev/null and b/mne/fiff/tests/data/test_ctf_comp_raw.fif differ
diff --git a/mne/fiff/tests/data/test_ctf_raw.fif b/mne/fiff/tests/data/test_ctf_raw.fif
new file mode 100644
index 0000000..37a6f32
Binary files /dev/null and b/mne/fiff/tests/data/test_ctf_raw.fif differ
diff --git a/mne/fiff/tests/data/test_empty_room.cov b/mne/fiff/tests/data/test_empty_room.cov
new file mode 100644
index 0000000..c038c76
--- /dev/null
+++ b/mne/fiff/tests/data/test_empty_room.cov
@@ -0,0 +1,44 @@
+cov {
+#    name         "Empty Room"
+#
+#    Output files
+#    The log file is useful for debugging and
+#    selection of interesting events using 'eventfile'
+#
+    outfile         test_erm-cov.fif
+    logfile         test_erm-cov.log
+#
+#    Rejection limits
+#
+#    stimIgnore is optional to omit a stimulus artefact from
+#    the rejection
+#
+#    fixSkew
+#    logfile          erm-ave.log
+    # gradReject    10000e-13
+    # magReject    3e-12
+    # magFlat         1e-14
+    # gradflat    1000e-15
+
+#    Additional rejection parameters
+#
+#    eegReject       20e-6
+#    ecgReject    10e-3
+#
+#    The first definition follows
+#
+    def {
+#
+#        The name of the category (condition) is irrelevant
+#        but useful as a comment
+#
+#        'event' can be left out to compute covariance matrix
+#        from continuous data
+#
+#        'ignore' is a mask to apply to the trigger line
+#        before searching for 'event' (default = 0)
+#
+        tmin    0
+        tmax    99999
+    }
+}
\ No newline at end of file
diff --git a/mne/fiff/tests/data/test_erm-cov.fif b/mne/fiff/tests/data/test_erm-cov.fif
new file mode 100644
index 0000000..cd637f3
Binary files /dev/null and b/mne/fiff/tests/data/test_erm-cov.fif differ
diff --git a/mne/fiff/tests/data/test_ica.lout b/mne/fiff/tests/data/test_ica.lout
new file mode 100644
index 0000000..75a63bc
--- /dev/null
+++ b/mne/fiff/tests/data/test_ica.lout
@@ -0,0 +1,3 @@
+   -0.03     0.63    -0.03     0.33
+000     0.00     0.00     0.30     0.30 ICA 001
+001     0.30     0.00     0.30     0.30 ICA 002
diff --git a/mne/fiff/tests/data/test_keepmean.cov b/mne/fiff/tests/data/test_keepmean.cov
new file mode 100755
index 0000000..c94cda8
--- /dev/null
+++ b/mne/fiff/tests/data/test_keepmean.cov
@@ -0,0 +1,56 @@
+#
+#	These are comments
+#
+cov {
+#
+#	Output files
+#
+	outfile         test-cov.fif
+	logfile         test-cov.log
+#
+#	Rejection values
+#
+    gradReject    10000e-13
+    magReject 4e-12
+    eegReject 80e-6
+    eogReject 150e-6
+    keepsamplemean
+#
+#	What to include in the covariance matrix?
+#
+	def {
+		name	"Left Auditory"
+		event	1
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+	def {
+		name	"Right Auditory"
+		event	2
+		ignore	0
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+	def {
+		name	"Left visual"
+		event	3
+		ignore	0
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+	def {
+		name	"Right visual"
+		event	4
+		ignore	0
+		tmin	-0.2
+		tmax	0.0
+		basemin -0.1
+		basemax 0
+	}
+}
diff --git a/mne/fiff/tests/data/test_proj.fif b/mne/fiff/tests/data/test_proj.fif
new file mode 100644
index 0000000..2812dd8
Binary files /dev/null and b/mne/fiff/tests/data/test_proj.fif differ
diff --git a/mne/fiff/tests/data/test_proj.fif.gz b/mne/fiff/tests/data/test_proj.fif.gz
new file mode 100644
index 0000000..62d1bd3
Binary files /dev/null and b/mne/fiff/tests/data/test_proj.fif.gz differ
diff --git a/mne/fiff/tests/data/test_raw-eve.fif b/mne/fiff/tests/data/test_raw-eve.fif
new file mode 100755
index 0000000..4e2f4e6
Binary files /dev/null and b/mne/fiff/tests/data/test_raw-eve.fif differ
diff --git a/mne/fiff/tests/data/test_raw.fif b/mne/fiff/tests/data/test_raw.fif
new file mode 100755
index 0000000..35d068a
Binary files /dev/null and b/mne/fiff/tests/data/test_raw.fif differ
diff --git a/mne/fiff/tests/data/test_raw.fif.gz b/mne/fiff/tests/data/test_raw.fif.gz
new file mode 100644
index 0000000..9cb7a92
Binary files /dev/null and b/mne/fiff/tests/data/test_raw.fif.gz differ
diff --git a/mne/fiff/tests/data/test_raw.lout b/mne/fiff/tests/data/test_raw.lout
new file mode 100644
index 0000000..07268ed
--- /dev/null
+++ b/mne/fiff/tests/data/test_raw.lout
@@ -0,0 +1,61 @@
+  -23.94    26.11   -21.40    21.06
+001    -5.97    16.10     5.00     4.00 EEG 001
+002    -1.89    16.66     5.00     4.00 EEG 002
+003     2.85    16.40     5.00     4.00 EEG 003
+004   -12.68    12.80     5.00     4.00 EEG 004
+005    -8.26    12.16     5.00     4.00 EEG 005
+006     4.87    12.09     5.00     4.00 EEG 006
+007     8.87    14.05     5.00     4.00 EEG 007
+008   -16.75     9.42     5.00     4.00 EEG 008
+009   -12.67     8.25     5.00     4.00 EEG 009
+010    -9.32     8.18     5.00     4.00 EEG 010
+011    -5.29     7.49     5.00     4.00 EEG 011
+012    -1.89     7.95     5.00     4.00 EEG 012
+013     2.05     7.94     5.00     4.00 EEG 013
+014     5.38     7.58     5.00     4.00 EEG 014
+015     9.83     9.27     5.00     4.00 EEG 015
+016    14.50     8.69     5.00     4.00 EEG 016
+017   -23.44     1.72     5.00     4.00 EEG 017
+018   -19.53     3.08     5.00     4.00 EEG 018
+019   -14.85     2.71     5.00     4.00 EEG 019
+020    -5.79     1.97     5.00     4.00 EEG 020
+021     2.61     3.28     5.00     4.00 EEG 021
+022    11.12     3.85     5.00     4.00 EEG 022
+023    16.12     3.18     5.00     4.00 EEG 023
+024    20.61     0.95     5.00     4.00 EEG 024
+025   -21.15    -6.75     5.00     4.00 EEG 025
+026   -19.05    -4.28     5.00     4.00 EEG 026
+027   -14.61    -2.28     5.00     4.00 EEG 027
+028   -10.13    -1.44     5.00     4.00 EEG 028
+029    -6.06    -0.83     5.00     4.00 EEG 029
+030    -1.65    -0.91     5.00     4.00 EEG 030
+031     2.33    -0.55     5.00     4.00 EEG 031
+032     6.48    -1.01     5.00     4.00 EEG 032
+033    11.37    -1.39     5.00     4.00 EEG 033
+034    15.95    -2.98     5.00     4.00 EEG 034
+035    18.81    -5.97     5.00     4.00 EEG 035
+036   -18.87   -13.13     5.00     4.00 EEG 036
+037   -16.48    -9.86     5.00     4.00 EEG 037
+038    -9.84    -4.79     5.00     4.00 EEG 038
+039    -5.89    -5.29     5.00     4.00 EEG 039
+040     2.23    -4.48     5.00     4.00 EEG 040
+041     6.69    -5.03     5.00     4.00 EEG 041
+042    13.42    -7.14     5.00     4.00 EEG 042
+043    16.40   -11.26     5.00     4.00 EEG 043
+044   -12.83   -12.95     5.00     4.00 EEG 044
+045   -10.43   -10.60     5.00     4.00 EEG 045
+046    -7.58    -9.14     5.00     4.00 EEG 046
+047    -4.86    -9.08     5.00     4.00 EEG 047
+048    -1.21    -8.68     5.00     4.00 EEG 048
+049     2.36    -9.29     5.00     4.00 EEG 049
+050     5.20    -8.51     5.00     4.00 EEG 050
+051     7.97    -9.46     5.00     4.00 EEG 051
+052    11.34   -10.97     5.00     4.00 EEG 052
+053   -10.01   -15.21     5.00     4.00 EEG 053
+054    -7.28   -13.76     5.00     4.00 EEG 054
+055     4.64   -12.14     5.00     4.00 EEG 055
+056     7.71   -14.34     5.00     4.00 EEG 056
+057    -5.68   -15.86     5.00     4.00 EEG 057
+058    -0.70   -16.23     5.00     4.00 EEG 058
+059     3.79   -15.23     5.00     4.00 EEG 059
+060    -1.00   -20.90     5.00     4.00 EEG 060
diff --git a/mne/fiff/tests/data/test_withbads_raw.fif b/mne/fiff/tests/data/test_withbads_raw.fif
new file mode 100644
index 0000000..4987721
Binary files /dev/null and b/mne/fiff/tests/data/test_withbads_raw.fif differ
diff --git a/mne/fiff/tests/data/test_wrong_bads.txt b/mne/fiff/tests/data/test_wrong_bads.txt
new file mode 100644
index 0000000..551c1b0
--- /dev/null
+++ b/mne/fiff/tests/data/test_wrong_bads.txt
@@ -0,0 +1,3 @@
+MEG 0422
+MEG 0433
+FOOBAR11
diff --git a/mne/fiff/tests/test_compensator.py b/mne/fiff/tests/test_compensator.py
new file mode 100644
index 0000000..539e419
--- /dev/null
+++ b/mne/fiff/tests/test_compensator.py
@@ -0,0 +1,20 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from nose.tools import assert_true
+
+from mne.fiff.compensator import make_compensator
+from mne.fiff import Raw
+
+base_dir = op.join(op.dirname(__file__), 'data')
+ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
+
+
+def test_compensation():
+    raw = Raw(ctf_comp_fname, compensation=None)
+    comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
+    assert_true(comp1.shape == (340, 340))
+    comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True)
+    assert_true(comp2.shape == (311, 340))
diff --git a/mne/fiff/tests/test_evoked.py b/mne/fiff/tests/test_evoked.py
new file mode 100644
index 0000000..19efcc1
--- /dev/null
+++ b/mne/fiff/tests/test_evoked.py
@@ -0,0 +1,207 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#         Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_equal,\
+                          assert_array_equal, assert_allclose
+from nose.tools import assert_true, assert_raises
+
+from mne.fiff import read_evoked, write_evoked, pick_types
+from mne.utils import _TempDir, requires_pandas, requires_nitime
+
+fname = op.join(op.dirname(__file__), 'data', 'test-ave.fif')
+fname_gz = op.join(op.dirname(__file__), 'data', 'test-ave.fif.gz')
+
+tempdir = _TempDir()
+
+
+def test_io_evoked():
+    """Test IO for evoked data (fif + gz) with integer and str args
+    """
+    ave = read_evoked(fname, 0)
+
+    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
+    ave2 = read_evoked(op.join(tempdir, 'evoked.fif'))
+
+    # This is not assert_array_equal due to Windows rounding
+    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
+    assert_array_almost_equal(ave.times, ave2.times)
+    assert_equal(ave.nave, ave2.nave)
+    assert_equal(ave._aspect_kind, ave2._aspect_kind)
+    assert_equal(ave.kind, ave2.kind)
+    assert_equal(ave.last, ave2.last)
+    assert_equal(ave.first, ave2.first)
+
+    # test compressed i/o
+    ave2 = read_evoked(fname_gz, 0)
+    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
+
+    # test str access
+    setno = 'Left Auditory'
+    assert_raises(ValueError, read_evoked, fname, setno, kind='stderr')
+    assert_raises(ValueError, read_evoked, fname, setno, kind='standard_error')
+    ave3 = read_evoked(fname, setno)
+    assert_array_almost_equal(ave.data, ave3.data, 19)
+
+
+def test_shift_time_evoked():
+    """ Test for shifting of time scale
+    """
+    # Shift backward
+    ave = read_evoked(fname, 0)
+    ave.shift_time(-0.1, relative=True)
+    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
+
+    # Shift forward twice the amount
+    ave_bshift = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
+    ave_bshift.shift_time(0.2, relative=True)
+    write_evoked(op.join(tempdir, 'evoked.fif'), ave_bshift)
+
+    # Shift backward again
+    ave_fshift = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
+    ave_fshift.shift_time(-0.1, relative=True)
+    write_evoked(op.join(tempdir, 'evoked.fif'), ave_fshift)
+
+    ave_normal = read_evoked(fname, 0)
+    ave_relative = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
+
+    assert_true(np.allclose(ave_normal.data, ave_relative.data,
+                            atol=1e-16, rtol=1e-3))
+    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
+
+    assert_equal(ave_normal.last, ave_relative.last)
+    assert_equal(ave_normal.first, ave_relative.first)
+
+    # Absolute time shift
+    ave = read_evoked(fname, 0)
+    ave.shift_time(-0.3, relative=False)
+    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
+
+    ave_absolute = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
+
+    assert_true(np.allclose(ave_normal.data, ave_absolute.data,
+                            atol=1e-16, rtol=1e-3))
+    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
+
+
+def test_evoked_resample():
+    """Test for resampling of evoked data
+    """
+    # upsample, write it out, read it in
+    ave = read_evoked(fname, 0)
+    sfreq_normal = ave.info['sfreq']
+    ave.resample(2 * sfreq_normal)
+    write_evoked(op.join(tempdir, 'evoked.fif'), ave)
+    ave_up = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
+
+    # compare it to the original
+    ave_normal = read_evoked(fname, 0)
+
+    # and compare the original to the upsampled-then-downsampled version
+    ave_new = read_evoked(op.join(tempdir, 'evoked.fif'), 0)
+    ave_new.resample(sfreq_normal)
+
+    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
+    assert_array_almost_equal(ave_normal.times, ave_new.times)
+    assert_equal(ave_normal.nave, ave_new.nave)
+    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
+    assert_equal(ave_normal.kind, ave_new.kind)
+    assert_equal(ave_normal.last, ave_new.last)
+    assert_equal(ave_normal.first, ave_new.first)
+
+    # for the above checks to pass, the upsampling essentially had to work,
+    # but we'll add a couple of extra checks anyway
+    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
+    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
+
+
+def test_evoked_detrend():
+    """Test for detrending evoked data
+    """
+    ave = read_evoked(fname, 0)
+    ave_normal = read_evoked(fname, 0)
+    ave.detrend(0)
+    ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
+    picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
+    assert_true(np.allclose(ave.data[picks], ave_normal.data[picks],
+                            rtol=1e-8, atol=1e-16))
+
+
+def test_io_multi_evoked():
+    """Test IO for multiple evoked datasets
+    """
+    aves = read_evoked(fname, [0, 1, 2, 3])
+    write_evoked(op.join(tempdir, 'evoked.fif'), aves)
+    aves2 = read_evoked(op.join(tempdir, 'evoked.fif'), [0, 1, 2, 3])
+    types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
+    aves3 = read_evoked(op.join(tempdir, 'evoked.fif'), types)
+    for aves_new in [aves2, aves3]:
+        for [ave, ave_new] in zip(aves, aves_new):
+            assert_array_almost_equal(ave.data, ave_new.data)
+            assert_array_almost_equal(ave.times, ave_new.times)
+            assert_equal(ave.nave, ave_new.nave)
+            assert_equal(ave.kind, ave_new.kind)
+            assert_equal(ave._aspect_kind, ave_new._aspect_kind)
+            assert_equal(ave.last, ave_new.last)
+            assert_equal(ave.first, ave_new.first)
+    # this should throw an error since there are multiple datasets
+    assert_raises(ValueError, read_evoked, fname)
+
+
+@requires_nitime
+def test_evoked_to_nitime():
+    """ Test to_nitime """
+    aves = read_evoked(fname, [0, 1, 2, 3])
+    evoked_ts = aves[0].to_nitime()
+    assert_equal(evoked_ts.data, aves[0].data)
+
+    picks2 = [1, 2]
+    aves = read_evoked(fname, [0, 1, 2, 3])
+    evoked_ts = aves[0].to_nitime(picks=picks2)
+    assert_equal(evoked_ts.data, aves[0].data[picks2])
+
+
+@requires_pandas
+def test_as_data_frame():
+    """Test evoked Pandas exporter"""
+    ave = read_evoked(fname, [0])[0]
+    assert_raises(ValueError, ave.as_data_frame, picks=np.arange(400))
+    df = ave.as_data_frame()
+    assert_true((df.columns == ave.ch_names).all())
+    df = ave.as_data_frame(use_time_index=False)
+    assert_true('time' in df.columns)
+    assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
+    assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
+
+
+def test_evoked_proj():
+    """Test SSP proj operations
+    """
+    for proj in [True, False]:
+        ave = read_evoked(fname, setno=0, proj=proj)
+        assert_true(all(p['active'] == proj for p in ave.info['projs']))
+
+        # test adding / deleting proj
+        if proj:
+            assert_raises(ValueError, ave.add_proj, [],
+                          {'remove_existing': True})
+            assert_raises(ValueError, ave.del_proj, 0)
+        else:
+            projs = deepcopy(ave.info['projs'])
+            n_proj = len(ave.info['projs'])
+            ave.del_proj(0)
+            assert_true(len(ave.info['projs']) == n_proj - 1)
+            ave.add_proj(projs, remove_existing=False)
+            assert_true(len(ave.info['projs']) == 2 * n_proj - 1)
+            ave.add_proj(projs, remove_existing=True)
+            assert_true(len(ave.info['projs']) == n_proj)
+
+    ave = read_evoked(fname, setno=0, proj=False)
+    data = ave.data.copy()
+    ave.apply_proj()
+    assert_allclose(np.dot(ave._projector, data), ave.data)
diff --git a/mne/fiff/tests/test_pick.py b/mne/fiff/tests/test_pick.py
new file mode 100644
index 0000000..53fcc33
--- /dev/null
+++ b/mne/fiff/tests/test_pick.py
@@ -0,0 +1,11 @@
+from numpy.testing import assert_array_equal
+from mne.fiff.pick import pick_channels_regexp
+
+
+def test_pick_channels_regexp():
+    """Test pick with regular expression
+    """
+    ch_names = ['MEG 2331', 'MEG 2332', 'MEG 2333']
+    assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...1'), [0])
+    assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...[2-3]'), [1, 2])
+    assert_array_equal(pick_channels_regexp(ch_names, 'MEG *'), [0, 1, 2])
diff --git a/mne/fiff/tests/test_raw.py b/mne/fiff/tests/test_raw.py
new file mode 100644
index 0000000..7042045
--- /dev/null
+++ b/mne/fiff/tests/test_raw.py
@@ -0,0 +1,784 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#         Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+from copy import deepcopy
+import warnings
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal, \
+                          assert_allclose
+from nose.tools import assert_true, assert_raises, assert_equal
+
+from mne.fiff import Raw, pick_types, pick_channels, concatenate_raws, FIFF
+from mne import concatenate_events, find_events
+from mne.utils import _TempDir, requires_nitime, requires_pandas
+
+base_dir = op.join(op.dirname(__file__), 'data')
+fif_fname = op.join(base_dir, 'test_raw.fif')
+fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
+ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
+ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
+fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
+bad_file_works = op.join(base_dir, 'test_bads.txt')
+bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
+
+tempdir = _TempDir()
+
+
+def test_copy_append():
+    """Test raw copying and appending combinations
+    """
+    raw = Raw(fif_fname, preload=True).copy()
+    raw_full = Raw(fif_fname)
+    raw_full.append(raw)
+    data = raw_full[:, :][0]
+    assert_true(data.shape[1] == 2 * raw._data.shape[1])
+
+
+def test_rank_estimation():
+    """Test raw rank estimation
+    """
+    raw = Raw(fif_fname)
+    n_meg = len(pick_types(raw.info, meg=True, eeg=False, exclude='bads'))
+    n_eeg = len(pick_types(raw.info, meg=False, eeg=True, exclude='bads'))
+    raw = Raw(fif_fname, preload=True)
+    assert_array_equal(raw.estimate_rank(), n_meg + n_eeg)
+    raw = Raw(fif_fname, preload=False)
+    raw.apply_proj()
+    n_proj = len(raw.info['projs'])
+    assert_array_equal(raw.estimate_rank(tstart=10, tstop=20),
+                       n_meg + n_eeg - n_proj)
+
+
+def test_output_formats():
+    """Test saving and loading raw data using multiple formats
+    """
+    formats = ['short', 'int', 'single', 'double']
+    tols = [1e-4, 1e-7, 1e-7, 1e-15]
+
+    # let's fake a raw file with different formats
+    raw = Raw(fif_fname, preload=True)
+    raw.crop(0, 1, copy=False)
+
+    temp_file = op.join(tempdir, 'raw.fif')
+    for ii, (format, tol) in enumerate(zip(formats, tols)):
+        # Let's test that the overwrite error is raised while we're at it
+        if ii > 0:
+            assert_raises(IOError, raw.save, temp_file, format=format)
+        raw.save(temp_file, format=format, overwrite=True)
+        raw2 = Raw(temp_file)
+        raw2_data = raw2[:, :][0]
+        assert_allclose(raw2_data, raw._data, rtol=tol, atol=1e-25)
+        assert_true(raw2.orig_format == format)
+
+
+def test_multiple_files():
+    """Test loading multiple files simultaneously
+    """
+    # split file
+    raw = Raw(fif_fname, preload=True)
+    split_size = 10.  # in seconds
+    sfreq = raw.info['sfreq']
+    nsamp = (raw.last_samp - raw.first_samp)
+    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+
+    # going in reverse order so the last fname is the first file (needed later)
+    raws = [None] * len(tmins)
+    for ri in range(len(tmins) - 1, -1, -1):
+        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
+        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
+        raws[ri] = Raw(fname)
+    events = [find_events(r, stim_channel='STI 014') for r in raws]
+    last_samps = [r.last_samp for r in raws]
+    first_samps = [r.first_samp for r in raws]
+
+    # test concatenation of split file
+    all_raw_1 = concatenate_raws(raws, preload=False)
+    assert_true(raw.first_samp == all_raw_1.first_samp)
+    assert_true(raw.last_samp == all_raw_1.last_samp)
+    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
+    raws[0] = Raw(fname)
+    all_raw_2 = concatenate_raws(raws, preload=True)
+    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
+
+    # test proper event treatment for split files
+    events = concatenate_events(events, first_samps, last_samps)
+    events2 = find_events(all_raw_2, stim_channel='STI 014')
+    assert_array_equal(events, events2)
+
+    # test various methods of combining files
+    n_combos = 9
+    raw_combos = [None] * n_combos
+
+    raw = Raw(fif_fname, preload=True)
+    raw_combos[0] = Raw([fif_fname, fif_fname], preload=True)
+    raw_combos[1] = Raw([fif_fname, fif_fname], preload=False)
+    raw_combos[2] = Raw([fif_fname, fif_fname], preload='memmap8.dat')
+    assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
+    assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
+    n_times = len(raw._times)
+    assert_true(raw[:, :][0].shape[1] * 2 == raw_combos[0][:, :][0].shape[1])
+    assert_true(raw_combos[0][:, :][0].shape[1] == len(raw_combos[0]._times))
+
+    # with all data preloaded, result should be preloaded
+    raw_combos[3] = Raw(fif_fname, preload=True)
+    raw_combos[3].append(Raw(fif_fname, preload=True))
+    assert_true(raw_combos[0]._preloaded == True)
+    assert_true(len(raw_combos[3]._times) == raw_combos[3]._data.shape[1])
+
+    # with any data not preloaded, don't set result as preloaded
+    raw_combos[4] = concatenate_raws([Raw(fif_fname, preload=True),
+                                      Raw(fif_fname, preload=False)])
+    assert_true(raw_combos[1]._preloaded == False)
+    assert_array_equal(find_events(raw_combos[4], stim_channel='STI 014'),
+                       find_events(raw_combos[0], stim_channel='STI 014'))
+
+    # user should be able to force data to be preloaded upon concat
+    raw_combos[5] = concatenate_raws([Raw(fif_fname, preload=False),
+                                      Raw(fif_fname, preload=True)],
+                                     preload=True)
+    assert_true(raw_combos[2]._preloaded == True)
+
+    raw_combos[6] = concatenate_raws([Raw(fif_fname, preload=False),
+                                      Raw(fif_fname, preload=True)],
+                                     preload='memmap3.dat')
+
+    raw_combos[7] = concatenate_raws([Raw(fif_fname, preload=True),
+                                      Raw(fif_fname, preload=True)],
+                                     preload='memmap4.dat')
+
+    raw_combos[8] = concatenate_raws([Raw(fif_fname, preload=False),
+                                      Raw(fif_fname, preload=False)],
+                                     preload='memmap5.dat')
+
+    # make sure that all our data match
+    times = range(0, 2 * n_times, 999)
+    # add potentially problematic points
+    times.extend([n_times - 1, n_times, 2 * n_times - 1])
+    for ti in times:  # let's do a subset of points for speed
+        orig = raw[:, ti % n_times][0]
+        for raw_combo in raw_combos:
+            # these are almost_equals because of possible dtype differences
+            assert_allclose(orig, raw_combo[:, ti][0])
+
+    # verify that combining raws with different projectors throws an exception
+    raw.add_proj([], remove_existing=True)
+    assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
+
+    # now test event treatment for concatenated raw files
+    events = [find_events(raw, stim_channel='STI 014'),
+              find_events(raw, stim_channel='STI 014')]
+    last_samps = [raw.last_samp, raw.last_samp]
+    first_samps = [raw.first_samp, raw.first_samp]
+    events = concatenate_events(events, first_samps, last_samps)
+    events2 = find_events(raw_combos[0], stim_channel='STI 014')
+    assert_array_equal(events, events2)
+
+    # check out the len method
+    assert_true(len(raw) == raw.n_times)
+    assert_true(len(raw) == raw.last_samp - raw.first_samp + 1)
+
+
+def test_load_bad_channels():
+    """Test reading/writing of bad channels
+    """
+    # Load correctly marked file (manually done in mne_process_raw)
+    raw_marked = Raw(fif_bad_marked_fname)
+    correct_bads = raw_marked.info['bads']
+    raw = Raw(fif_fname)
+    # Make sure it starts clean
+    assert_array_equal(raw.info['bads'], [])
+
+    # Test normal case
+    raw.load_bad_channels(bad_file_works)
+    # Write it out, read it in, and check
+    raw.save(op.join(tempdir, 'foo_raw.fif'))
+    raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
+    assert_equal(correct_bads, raw_new.info['bads'])
+    # Reset it
+    raw.info['bads'] = []
+
+    # Test bad case
+    assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
+
+    # Test forcing the bad case
+    with warnings.catch_warnings(record=True) as w:
+        raw.load_bad_channels(bad_file_wrong, force=True)
+        assert_equal(len(w), 1)
+        # write it out, read it in, and check
+        raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
+        raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
+        assert_equal(correct_bads, raw_new.info['bads'])
+
+    # Check that bad channels are cleared
+    raw.load_bad_channels(None)
+    raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
+    raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
+    assert_equal([], raw_new.info['bads'])
+
+
+def test_io_raw():
+    """Test IO for raw data (Neuromag + CTF + gz)
+    """
+    # Let's construct a simple test for IO first
+    raw = Raw(fif_fname, preload=True)
+    raw.crop(0, 3.5)
+    # put in some data that we know the values of
+    data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
+    raw._data[:, :] = data
+    # save it somewhere
+    fname = op.join(tempdir, 'test_copy_raw.fif')
+    raw.save(fname, buffer_size_sec=1.0)
+    # read it in, make sure the whole thing matches
+    raw = Raw(fname)
+    assert_true(np.allclose(data, raw[:, :][0], 1e-6, 1e-20))
+    # let's read portions across the 1-sec tag boundary, too
+    inds = raw.time_as_index([1.75, 2.25])
+    sl = slice(inds[0], inds[1])
+    assert_true(np.allclose(data[:, sl], raw[:, sl][0], 1e-6, 1e-20))
+
+    # now let's do some real I/O
+    fnames_in = [fif_fname, fif_gz_fname, ctf_fname]
+    fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
+    for fname_in, fname_out in zip(fnames_in, fnames_out):
+        fname_out = op.join(tempdir, fname_out)
+        raw = Raw(fname_in)
+
+        nchan = raw.info['nchan']
+        ch_names = raw.info['ch_names']
+        meg_channels_idx = [k for k in range(nchan)
+                            if ch_names[k][0] == 'M']
+        n_channels = 100
+        meg_channels_idx = meg_channels_idx[:n_channels]
+        start, stop = raw.time_as_index([0, 5])
+        data, times = raw[meg_channels_idx, start:(stop + 1)]
+        meg_ch_names = [ch_names[k] for k in meg_channels_idx]
+
+        # Set up pick list: MEG + STI 014 - bad channels
+        include = ['STI 014']
+        include += meg_ch_names
+        picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
+                           misc=True, ref_meg=True, include=include,
+                           exclude='bads')
+
+        # Writing with drop_small_buffer True
+        raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
+                 drop_small_buffer=True, overwrite=True)
+        raw2 = Raw(fname_out, preload=True)
+
+        sel = pick_channels(raw2.ch_names, meg_ch_names)
+        data2, times2 = raw2[sel, :]
+        assert_true(times2.max() <= 3)
+
+        # Writing
+        raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
+
+        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
+            assert_true(len(raw.info['dig']) == 146)
+
+        raw2 = Raw(fname_out)
+
+        sel = pick_channels(raw2.ch_names, meg_ch_names)
+        data2, times2 = raw2[sel, :]
+
+        assert_true(np.allclose(data, data2, 1e-6, 1e-20))
+        assert_allclose(times, times2)
+        assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
+
+        # check transformations
+        for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
+            if raw.info[trans] is None:
+                assert_true(raw2.info[trans] is None)
+            else:
+                assert_array_equal(raw.info[trans]['trans'],
+                                   raw2.info[trans]['trans'])
+
+                # check transformation 'from' and 'to'
+                if trans.startswith('dev'):
+                    from_id = FIFF.FIFFV_COORD_DEVICE
+                else:
+                    from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+                if trans[4:8] == 'head':
+                    to_id = FIFF.FIFFV_COORD_HEAD
+                else:
+                    to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+                for raw_ in [raw, raw2]:
+                    assert_true(raw_.info[trans]['from'] == from_id)
+                    assert_true(raw_.info[trans]['to'] == to_id)
+
+        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
+            assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
+
+
+def test_io_complex():
+    """Test IO with complex data types
+    """
+    dtypes = [np.complex64, np.complex128]
+
+    raw = Raw(fif_fname, preload=True)
+    picks = np.arange(5)
+    start, stop = raw.time_as_index([0, 5])
+
+    data_orig, _ = raw[picks, start:stop]
+
+    for di, dtype in enumerate(dtypes):
+        imag_rand = np.array(1j * np.random.randn(data_orig.shape[0],
+                             data_orig.shape[1]), dtype)
+
+        raw_cp = raw.copy()
+        raw_cp._data = np.array(raw_cp._data, dtype)
+        raw_cp._data[picks, start:stop] += imag_rand
+        # this should emit a warning because the data are complex
+        with warnings.catch_warnings(record=True) as w:
+            raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
+                        overwrite=True)
+            # warning only gets thrown on first instance
+            assert_equal(len(w), 1 if di == 0 else 0)
+
+        raw2 = Raw(op.join(tempdir, 'raw.fif'))
+        raw2_data, _ = raw2[picks, :]
+        n_samp = raw2_data.shape[1]
+        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
+        # with preloading
+        raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
+        raw2_data, _ = raw2[picks, :]
+        n_samp = raw2_data.shape[1]
+        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
+
+
+def test_getitem():
+    """Test getitem/indexing of Raw
+    """
+    for preload in [False, True, 'memmap.dat']:
+        raw = Raw(fif_fname, preload=preload)
+        data, times = raw[0, :]
+        data1, times1 = raw[0]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+        data, times = raw[0:2, :]
+        data1, times1 = raw[0:2]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+        data1, times1 = raw[[0, 1]]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+
+
+def test_proj():
+    """Test SSP proj operations
+    """
+    for proj in [True, False]:
+        raw = Raw(fif_fname, preload=False, proj=proj)
+        assert_true(all(p['active'] == proj for p in raw.info['projs']))
+
+        data, times = raw[0:2, :]
+        data1, times1 = raw[0:2]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+
+        # test adding / deleting proj
+        if proj:
+            assert_raises(ValueError, raw.add_proj, [],
+                          {'remove_existing': True})
+            assert_raises(ValueError, raw.del_proj, 0)
+        else:
+            projs = deepcopy(raw.info['projs'])
+            n_proj = len(raw.info['projs'])
+            raw.del_proj(0)
+            assert_true(len(raw.info['projs']) == n_proj - 1)
+            raw.add_proj(projs, remove_existing=False)
+            assert_true(len(raw.info['projs']) == 2 * n_proj - 1)
+            raw.add_proj(projs, remove_existing=True)
+            assert_true(len(raw.info['projs']) == n_proj)
+
+    # test apply_proj() with and without preload
+    for preload in [True, False]:
+        raw = Raw(fif_fname, preload=preload, proj=False)
+        data, times = raw[:, 0:2]
+        raw.apply_proj()
+        data_proj_1 = np.dot(raw._projector, data)
+
+        # load the file again without proj
+        raw = Raw(fif_fname, preload=preload, proj=False)
+
+        # write the file with proj. activated, make sure proj has been applied
+        raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
+        raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
+        data_proj_2, _ = raw2[:, 0:2]
+        assert_allclose(data_proj_1, data_proj_2)
+        assert_true(all(p['active'] for p in raw2.info['projs']))
+
+        # read orig file with proj. active
+        raw2 = Raw(fif_fname, preload=preload, proj=True)
+        data_proj_2, _ = raw2[:, 0:2]
+        assert_allclose(data_proj_1, data_proj_2)
+        assert_true(all(p['active'] for p in raw2.info['projs']))
+
+        # test that apply_proj works
+        raw.apply_proj()
+        data_proj_2, _ = raw[:, 0:2]
+        assert_allclose(data_proj_1, data_proj_2)
+        assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
+
+
+def test_preload_modify():
+    """ Test preloading and modifying data
+    """
+    for preload in [False, True, 'memmap.dat']:
+        raw = Raw(fif_fname, preload=preload)
+
+        nsamp = raw.last_samp - raw.first_samp + 1
+        picks = pick_types(raw.info, meg='grad', exclude='bads')
+
+        data = np.random.randn(len(picks), nsamp / 2)
+
+        try:
+            raw[picks, :nsamp / 2] = data
+        except RuntimeError as err:
+            if not preload:
+                continue
+            else:
+                raise err
+
+        tmp_fname = op.join(tempdir, 'raw.fif')
+        raw.save(tmp_fname, overwrite=True)
+
+        raw_new = Raw(tmp_fname)
+        data_new, _ = raw_new[picks, :nsamp / 2]
+
+        assert_allclose(data, data_new)
+
+
+def test_filter():
+    """ Test filtering (FIR and IIR) and Raw.apply_function interface """
+    raw = Raw(fif_fname, preload=True).crop(0, 10, False)
+    sig_dec = 11
+    sig_dec_notch = 12
+    sig_dec_notch_fit = 12
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+
+    raw_lp = raw.copy()
+    raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
+
+    raw_hp = raw.copy()
+    raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
+
+    raw_bp = raw.copy()
+    raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
+
+    raw_bs = raw.copy()
+    raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
+
+    data, _ = raw[picks, :]
+
+    lp_data, _ = raw_lp[picks, :]
+    hp_data, _ = raw_hp[picks, :]
+    bp_data, _ = raw_bp[picks, :]
+    bs_data, _ = raw_bs[picks, :]
+
+    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
+    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
+
+    raw_lp_iir = raw.copy()
+    raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
+    raw_hp_iir = raw.copy()
+    raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
+    raw_bp_iir = raw.copy()
+    raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
+    lp_data_iir, _ = raw_lp_iir[picks, :]
+    hp_data_iir, _ = raw_hp_iir[picks, :]
+    bp_data_iir, _ = raw_bp_iir[picks, :]
+    summation = lp_data_iir + hp_data_iir + bp_data_iir
+    assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
+                              sig_dec)
+
+    # make sure we didn't touch other channels
+    data, _ = raw[picks_meg[4:], :]
+    bp_data, _ = raw_bp[picks_meg[4:], :]
+    assert_array_equal(data, bp_data)
+    bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
+    assert_array_equal(data, bp_data_iir)
+
+    # do a very simple check on line filtering
+    raw_bs = raw.copy()
+    with warnings.catch_warnings(record=True) as w:
+        raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
+        data_bs, _ = raw_bs[picks, :]
+        raw_notch = raw.copy()
+        raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
+    data_notch, _ = raw_notch[picks, :]
+    assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
+
+    # now use the sinusoidal fitting
+    raw_notch = raw.copy()
+    raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
+    data_notch, _ = raw_notch[picks, :]
+    data, _ = raw[picks, :]
+    assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
+
+
+def test_crop():
+    """Test cropping raw files
+    """
+    # split a concatenated file to test a difficult case
+    raw = Raw([fif_fname, fif_fname], preload=True)
+    split_size = 10.  # in seconds
+    sfreq = raw.info['sfreq']
+    nsamp = (raw.last_samp - raw.first_samp + 1)
+
+    # do an annoying case (off-by-one splitting)
+    tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
+    tmins = np.sort(tmins)
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+    raws = [None] * len(tmins)
+    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
+        raws[ri] = raw.crop(tmin, tmax, True)
+    all_raw_2 = concatenate_raws(raws, preload=True)
+    assert_true(raw.first_samp == all_raw_2.first_samp)
+    assert_true(raw.last_samp == all_raw_2.last_samp)
+    assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
+
+    tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+
+    # going in reverse order so the last fname is the first file (need it later)
+    raws = [None] * len(tmins)
+    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
+        raws[ri] = raw.copy()
+        raws[ri].crop(tmin, tmax, False)
+    # test concatenation of split file
+    all_raw_1 = concatenate_raws(raws, preload=True)
+
+    all_raw_2 = raw.crop(0, None, True)
+    for ar in [all_raw_1, all_raw_2]:
+        assert_true(raw.first_samp == ar.first_samp)
+        assert_true(raw.last_samp == ar.last_samp)
+        assert_array_equal(raw[:, :][0], ar[:, :][0])
+
+
+def test_resample():
+    """ Test resample (with I/O and multiple files) """
+    raw = Raw(fif_fname, preload=True).crop(0, 3, False)
+    raw_resamp = raw.copy()
+    sfreq = raw.info['sfreq']
+    # test parallel on upsample
+    raw_resamp.resample(sfreq * 2, n_jobs=2)
+    raw_resamp.save(op.join(tempdir, 'raw_resamp.fif'))
+    raw_resamp = Raw(op.join(tempdir, 'raw_resamp.fif'), preload=True)
+    assert_true(sfreq == raw_resamp.info['sfreq'] / 2)
+    assert_true(raw.n_times == raw_resamp.n_times / 2)
+    assert_true(raw_resamp._data.shape[1] == raw_resamp.n_times)
+    assert_true(raw._data.shape[0] == raw_resamp._data.shape[0])
+    # test non-parallel on downsample
+    raw_resamp.resample(sfreq, n_jobs=1)
+    assert_true(raw_resamp.info['sfreq'] == sfreq)
+    assert_true(raw._data.shape == raw_resamp._data.shape)
+    assert_true(raw.first_samp == raw_resamp.first_samp)
+    assert_true(raw.last_samp == raw_resamp.last_samp)
+    # upsampling then downsampling doubles resampling error, but this still
+    # works (hooray). Note that the stim channels had to be sub-sampled
+    # without filtering to be accurately preserved
+    # note we have to treat MEG and EEG+STIM channels differently (tols)
+    assert_allclose(raw._data[:306, 200:-200],
+                    raw_resamp._data[:306, 200:-200],
+                    rtol=1e-2, atol=1e-12)
+    assert_allclose(raw._data[306:, 200:-200],
+                    raw_resamp._data[306:, 200:-200],
+                    rtol=1e-2, atol=1e-7)
+
+    # now check multiple file support w/resampling, as order of operations
+    # (concat, resample) should not affect our data
+    raw1 = raw.copy()
+    raw2 = raw.copy()
+    raw3 = raw.copy()
+    raw4 = raw.copy()
+    raw1 = concatenate_raws([raw1, raw2])
+    raw1.resample(10)
+    raw3.resample(10)
+    raw4.resample(10)
+    raw3 = concatenate_raws([raw3, raw4])
+    assert_array_equal(raw1._data, raw3._data)
+    assert_array_equal(raw1._first_samps, raw3._first_samps)
+    assert_array_equal(raw1._last_samps, raw3._last_samps)
+    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
+    assert_equal(raw1.first_samp, raw3.first_samp)
+    assert_equal(raw1.last_samp, raw3.last_samp)
+    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
+
+
+def test_hilbert():
+    """ Test computation of analytic signal using hilbert """
+    raw = Raw(fif_fname, preload=True)
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+
+    raw2 = raw.copy()
+    raw.apply_hilbert(picks)
+    raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
+
+    env = np.abs(raw._data[picks, :])
+    assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
+
+
+def test_raw_copy():
+    """ Test Raw copy"""
+    raw = Raw(fif_fname, preload=True)
+    data, _ = raw[:, :]
+    copied = raw.copy()
+    copied_data, _ = copied[:, :]
+    assert_array_equal(data, copied_data)
+    assert_equal(sorted(raw.__dict__.keys()),
+                 sorted(copied.__dict__.keys()))
+
+    raw = Raw(fif_fname, preload=False)
+    data, _ = raw[:, :]
+    copied = raw.copy()
+    copied_data, _ = copied[:, :]
+    assert_array_equal(data, copied_data)
+    assert_equal(sorted(raw.__dict__.keys()),
+                 sorted(copied.__dict__.keys()))
+
+
+@requires_nitime
+def test_raw_to_nitime():
+    """ Test nitime export """
+    raw = Raw(fif_fname, preload=True)
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+    raw_ts = raw.to_nitime(picks=picks)
+    assert_true(raw_ts.data.shape[0] == len(picks))
+
+    raw = Raw(fif_fname, preload=False)
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+    raw_ts = raw.to_nitime(picks=picks)
+    assert_true(raw_ts.data.shape[0] == len(picks))
+
+    raw = Raw(fif_fname, preload=True)
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+    raw_ts = raw.to_nitime(picks=picks, copy=False)
+    assert_true(raw_ts.data.shape[0] == len(picks))
+
+    raw = Raw(fif_fname, preload=False)
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+    raw_ts = raw.to_nitime(picks=picks, copy=False)
+    assert_true(raw_ts.data.shape[0] == len(picks))
+
+
+@requires_pandas
+def test_as_data_frame():
+    """Test raw Pandas exporter"""
+    raw = Raw(fif_fname, preload=True)
+    df = raw.as_data_frame()
+    assert_true((df.columns == raw.ch_names).all())
+    df = raw.as_data_frame(use_time_index=False)
+    assert_true('time' in df.columns)
+    assert_array_equal(df.values[:, 1], raw._data[0] * 1e13)
+    assert_array_equal(df.values[:, 3], raw._data[2] * 1e15)
+
+
+def test_raw_index_as_time():
+    """ Test index as time conversion"""
+    raw = Raw(fif_fname, preload=True)
+    t0 = raw.index_as_time([0], True)[0]
+    t1 = raw.index_as_time([100], False)[0]
+    t2 = raw.index_as_time([100], True)[0]
+    assert_true((t2 - t1) == t0)
+    # ensure we can go back and forth
+    t3 = raw.index_as_time(raw.time_as_index([0], True), True)
+    assert_array_almost_equal(t3, [0.0], 2)
+    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
+    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
+    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
+    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
+    i0 = raw.time_as_index(raw.index_as_time([0], True), True)
+    assert_true(i0[0] == 0)
+    i1 = raw.time_as_index(raw.index_as_time([100], True), True)
+    assert_true(i1[0] == 100)
+    # Have to add a small amount of time because we truncate via int casting
+    i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
+    assert_true(i1[0] == 100)
+
+
+def test_raw_time_as_index():
+    """ Test time as index conversion"""
+    raw = Raw(fif_fname, preload=True)
+    first_samp = raw.time_as_index([0], True)[0]
+    assert_true(raw.first_samp == -first_samp)
+
+
+def test_save():
+    """ Test saving raw"""
+    raw = Raw(fif_fname, preload=False)
+    # can't write over file being read
+    assert_raises(ValueError, raw.save, fif_fname)
+    raw = Raw(fif_fname, preload=True)
+    # can't overwrite file without overwrite=True
+    assert_raises(IOError, raw.save, fif_fname)
+
+    # test abspath support
+    new_fname = op.join(op.abspath(op.curdir), 'break.fif')
+    raw.save(op.join(tempdir, new_fname), overwrite=True)
+    new_raw = Raw(op.join(tempdir, new_fname), preload=False)
+    assert_raises(ValueError, new_raw.save, new_fname)
+    # make sure we can overwrite the file we loaded when preload=True
+    new_raw = Raw(op.join(tempdir, new_fname), preload=True)
+    new_raw.save(op.join(tempdir, new_fname), overwrite=True)
+    os.remove(new_fname)
+
+
+def test_with_statement():
+    """ Test with statement """
+    for preload in [True, False]:
+        with Raw(fif_fname, preload=preload) as raw_:
+            print raw_
+
+
+def test_compensation_raw():
+    """Test loading raw data with CTF compensation
+    """
+    raw1 = Raw(ctf_comp_fname, compensation=None)
+    assert_true(raw1.comp is None)
+    data1, times1 = raw1[:, :]
+    raw2 = Raw(ctf_comp_fname, compensation=3)
+    data2, times2 = raw2[:, :]
+    assert_true(raw2.comp is None)  # unchanged (data come with grade 3)
+    assert_array_equal(times1, times2)
+    assert_array_equal(data1, data2)
+    raw3 = Raw(ctf_comp_fname, compensation=1)
+    data3, times3 = raw3[:, :]
+    assert_true(raw3.comp is not None)
+    assert_array_equal(times1, times3)
+    # make sure it's different with a different compensation:
+    assert_true(np.mean(np.abs(data1 - data3)) > 1e-12)
+    assert_raises(ValueError, Raw, ctf_comp_fname, compensation=33)
+
+    # Try IO with compensation
+    temp_file = op.join(tempdir, 'raw.fif')
+
+    raw1.save(temp_file, overwrite=True)
+    raw4 = Raw(temp_file)
+    data4, times4 = raw4[:, :]
+    assert_array_equal(times1, times4)
+    assert_array_equal(data1, data4)
+
+    # Now save the file that has modified compensation
+    # and make sure we get the same data as input, i.e. the compensation
+    # is undone
+    raw3.save(temp_file, overwrite=True)
+    raw5 = Raw(temp_file)
+    data5, times5 = raw5[:, :]
+    assert_array_equal(times1, times5)
+    assert_allclose(data1, data5, rtol=1e-12, atol=1e-22)
diff --git a/mne/fiff/tree.py b/mne/fiff/tree.py
new file mode 100644
index 0000000..3132342
--- /dev/null
+++ b/mne/fiff/tree.py
@@ -0,0 +1,154 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import logging
+logger = logging.getLogger('mne')
+
+from .tag import read_tag
+from .. import verbose
+
+
+def dir_tree_find(tree, kind):
+    """[nodes] = dir_tree_find(tree,kind)
+
+       Find nodes of the given kind from a directory tree structure
+
+       Returns a list of matching nodes
+    """
+    nodes = []
+
+    if isinstance(tree, list):
+        for t in tree:
+            nodes += dir_tree_find(t, kind)
+    else:
+        #   Am I desirable myself?
+        if tree['block'] == kind:
+            nodes.append(tree)
+
+        #   Search the subtrees
+        for child in tree['children']:
+            nodes += dir_tree_find(child, kind)
+    return nodes
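+
+# A minimal usage sketch (hedged: 'tree' is assumed to come from
+# make_dir_tree() below, and FIFFB_MEAS is one of the block constants
+# defined in .constants.FIFF):
+#
+#     from mne.fiff.constants import FIFF
+#     meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
+#     if len(meas) == 0:
+#         raise ValueError('Could not find measurement data')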
+
+
+@verbose
+def make_dir_tree(fid, directory, start=0, indent=0, verbose=None):
+    """Create the directory tree structure
+    """
+    FIFF_BLOCK_START = 104
+    FIFF_BLOCK_END = 105
+    FIFF_FILE_ID = 100
+    FIFF_BLOCK_ID = 103
+    FIFF_PARENT_BLOCK_ID = 110
+
+    if directory[start].kind == FIFF_BLOCK_START:
+        tag = read_tag(fid, directory[start].pos)
+        block = tag.data
+    else:
+        block = 0
+
+    logger.debug('    ' * indent + 'start { %d' % block)
+
+    this = start
+
+    tree = dict()
+    tree['block'] = block
+    tree['id'] = None
+    tree['parent_id'] = None
+    tree['nent'] = 0
+    tree['nchild'] = 0
+    tree['directory'] = directory[this]
+    tree['children'] = []
+
+    while this < len(directory):
+        if directory[this].kind == FIFF_BLOCK_START:
+            if this != start:
+                child, this = make_dir_tree(fid, directory, this, indent + 1)
+                tree['nchild'] += 1
+                tree['children'].append(child)
+        elif directory[this].kind == FIFF_BLOCK_END:
+            tag = read_tag(fid, directory[start].pos)
+            if tag.data == block:
+                break
+        else:
+            tree['nent'] += 1
+            if tree['nent'] == 1:
+                tree['directory'] = list()
+            tree['directory'].append(directory[this])
+
+            #  Add the id information if available
+            if block == 0:
+                if directory[this].kind == FIFF_FILE_ID:
+                    tag = read_tag(fid, directory[this].pos)
+                    tree['id'] = tag.data
+            else:
+                if directory[this].kind == FIFF_BLOCK_ID:
+                    tag = read_tag(fid, directory[this].pos)
+                    tree['id'] = tag.data
+                elif directory[this].kind == FIFF_PARENT_BLOCK_ID:
+                    tag = read_tag(fid, directory[this].pos)
+                    tree['parent_id'] = tag.data
+
+        this += 1
+
+    # Eliminate the empty directory
+    if tree['nent'] == 0:
+        tree['directory'] = None
+
+    logger.debug('    ' * (indent + 1) + 'block = %d nent = %d nchild = %d'
+                % (tree['block'], tree['nent'], tree['nchild']))
+    logger.debug('    ' * indent + 'end } %d' % block)
+    last = this
+    return tree, last
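+
+# Each node produced above is a plain dict; a sketch of the structure:
+#
+#     tree, last = make_dir_tree(fid, directory)
+#     tree['block']      # block kind (0 at the root)
+#     tree['id']         # block id tag data, if present
+#     tree['parent_id']  # parent block id, if present
+#     tree['nent']       # number of directory entries in this block
+#     tree['nchild']     # number of child blocks
+#     tree['directory']  # the entries themselves (or None if empty)
+#     tree['children']   # list of nested nodes of the same form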
+
+###############################################################################
+# Writing
+
+import numpy as np
+import struct
+from .constants import FIFF
+from .tag import Tag
+from .write import write_id, start_block, end_block, _write
+
+
+def copy_tree(fidin, in_id, nodes, fidout):
+    """Copies directory subtrees from fidin to fidout"""
+
+    if len(nodes) <= 0:
+        return
+
+    if not isinstance(nodes, list):
+        nodes = [nodes]
+
+    for node in nodes:
+        start_block(fidout, node['block'])
+        if node['id'] is not None:
+            if in_id is not None:
+                write_id(fidout, FIFF.FIFF_PARENT_FILE_ID, in_id)
+
+            write_id(fidout, FIFF.FIFF_BLOCK_ID)
+            write_id(fidout, FIFF.FIFF_PARENT_BLOCK_ID, node['id'])
+
+        if node['directory'] is not None:
+            for d in node['directory']:
+                #   Do not copy these tags
+                if d.kind == FIFF.FIFF_BLOCK_ID or \
+                        d.kind == FIFF.FIFF_PARENT_BLOCK_ID or \
+                        d.kind == FIFF.FIFF_PARENT_FILE_ID:
+                    continue
+
+                #   Read and write tags, pass data through transparently
+                fidin.seek(d.pos, 0)
+
+                s = fidin.read(4 * 4)
+                tag = Tag(*struct.unpack(">iIii", s))
+                tag.data = np.fromstring(fidin.read(tag.size), dtype='>B')
+
+                _write(fidout, tag.data, tag.kind, 1, tag.type, '>B')
+
+        for child in node['children']:
+            copy_tree(fidin, in_id, child, fidout)
+
+        end_block(fidout, node['block'])
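+
+# One way to use copy_tree() (a sketch, assuming fidin/fidout are open
+# fif handles, 'tree' comes from make_dir_tree() and 'kind' is a block
+# constant):
+#
+#     copy_tree(fidin, tree['id'], dir_tree_find(tree, kind), fidout)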
diff --git a/mne/fiff/write.py b/mne/fiff/write.py
new file mode 100644
index 0000000..77d58a2
--- /dev/null
+++ b/mne/fiff/write.py
@@ -0,0 +1,354 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import time
+import numpy as np
+from scipy import linalg
+import os.path as op
+import gzip
+import logging
+logger = logging.getLogger('mne')
+
+from .constants import FIFF
+
+
+def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
+    if isinstance(data, np.ndarray):
+        data_size *= data.size
+    if isinstance(data, str):
+        data_size *= len(data)
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_TYPE, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(data, dtype=dtype).tostring())
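+
+# Every tag written above shares the same big-endian on-disk layout:
+# four int32 header words (kind, type, size, next) followed by the raw
+# data; FIFFV_NEXT_SEQ in the 'next' slot means the following tag is
+# stored sequentially.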
+
+
+def write_int(fid, kind, data):
+    """Writes a 32-bit integer tag to a fif file"""
+    data_size = 4
+    data = np.array(data, dtype='>i4').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_INT, '>i4')
+
+
+def write_double(fid, kind, data):
+    """Writes a double-precision floating point tag to a fif file"""
+    data_size = 8
+    data = np.array(data, dtype='>f8').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, '>f8')
+
+
+def write_float(fid, kind, data):
+    """Writes a single-precision floating point tag to a fif file"""
+    data_size = 4
+    data = np.array(data, dtype='>f4').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, '>f4')
+
+
+def write_dau_pack16(fid, kind, data):
+    """Writes a dau_pack16 tag to a fif file"""
+    data_size = 2
+    data = np.array(data, dtype='>i2').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, '>i2')
+
+
+def write_complex64(fid, kind, data):
+    """Writes a 64 bit complex floating point tag to a fif file"""
+    data_size = 8
+    data = np.array(data, dtype='>c8').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c8')
+
+
+def write_complex128(fid, kind, data):
+    """Writes a 128 bit complex floating point tag to a fif file"""
+    data_size = 16
+    data = np.array(data, dtype='>c16').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_DOUBLE, '>c16')
+
+
+def write_string(fid, kind, data):
+    """Writes a string tag"""
+    data_size = 1
+    _write(fid, str(data), kind, data_size, FIFF.FIFFT_STRING, '>c')
+
+
+def write_name_list(fid, kind, data):
+    """Writes a colon-separated list of names
+
+    Parameters
+    ----------
+    data : list of strings
+    """
+    write_string(fid, kind, ':'.join(data))
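+
+# For example, write_name_list(fid, kind, ['MEG 111', 'EEG 001']) stores
+# the single colon-separated string 'MEG 111:EEG 001'.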
+
+
+def write_float_matrix(fid, kind, mat):
+    """Writes a single-precision floating-point matrix tag"""
+    FIFFT_MATRIX = 1 << 30
+    FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
+
+    data_size = 4 * mat.size + 4 * (mat.ndim + 1)
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(mat, dtype='>f4').tostring())
+
+    dims = np.empty(mat.ndim + 1, dtype=np.int32)
+    dims[:mat.ndim] = mat.shape[::-1]
+    dims[-1] = mat.ndim
+    fid.write(np.array(dims, dtype='>i4').tostring())
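+
+# Note the matrix trailer written above: after the data come the
+# dimensions stored in reversed order (fastest-varying first) plus the
+# number of dimensions, while bit 30 of the type word marks the tag as
+# a matrix.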
+
+
+def write_double_matrix(fid, kind, mat):
+    """Writes a double-precision floating-point matrix tag"""
+    FIFFT_MATRIX = 1 << 30
+    FIFFT_MATRIX_DOUBLE = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX
+
+    data_size = 8 * mat.size + 4 * (mat.ndim + 1)
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_DOUBLE, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(mat, dtype='>f8').tostring())
+
+    dims = np.empty(mat.ndim + 1, dtype=np.int32)
+    dims[:mat.ndim] = mat.shape[::-1]
+    dims[-1] = mat.ndim
+    fid.write(np.array(dims, dtype='>i4').tostring())
+
+
+def write_int_matrix(fid, kind, mat):
+    """Writes integer 32 matrix tag"""
+    FIFFT_MATRIX = 1 << 30
+    FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX
+
+    data_size = 4 * mat.size + 4 * 3
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(mat, dtype='>i4').tostring())
+
+    dims = np.empty(3, dtype=np.int32)
+    dims[0] = mat.shape[1]
+    dims[1] = mat.shape[0]
+    dims[2] = 2
+    fid.write(np.array(dims, dtype='>i4').tostring())
+
+
+def write_id(fid, kind, id_=None):
+    """Writes fiff id"""
+
+    if id_ is None:
+        id_ = dict()
+        id_['version'] = (1 << 16) | 2
+        id_['machid'] = 65536 * np.random.rand(2)  # Machine id (random for now)
+        id_['secs'] = time.time()
+        id_['usecs'] = 0            # Do not know how we could get this XXX
+
+    FIFFT_ID_STRUCT = 31
+    FIFFV_NEXT_SEQ = 0
+
+    data_size = 5 * 4                       # The id comprises five integers
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_ID_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    # Collect the bits together for one write
+    data = np.empty(5, dtype=np.int32)
+    data[0] = id_['version']
+    data[1] = id_['machid'][0]
+    data[2] = id_['machid'][1]
+    data[3] = id_['secs']
+    data[4] = id_['usecs']
+    fid.write(np.array(data, dtype='>i4').tostring())
+
+
+def start_block(fid, kind):
+    """Writes a FIFF_BLOCK_START tag"""
+    write_int(fid, FIFF.FIFF_BLOCK_START, kind)
+
+
+def end_block(fid, kind):
+    """Writes a FIFF_BLOCK_END tag"""
+    write_int(fid, FIFF.FIFF_BLOCK_END, kind)
+
+
+def start_file(fname):
+    """Opens a fif file for writing and writes the compulsory header tags
+
+    Parameters
+    ----------
+    fname : string | fid
+        The name of the file to open. It is recommended
+        that the name ends with .fif or .fif.gz. Can also be an
+        already opened file.
+    """
+    if isinstance(fname, basestring):
+        if op.splitext(fname)[1].lower() == '.gz':
+            logger.debug('Writing using gzip')
+            # defaults to compression level 9, which yields files that are
+            # barely smaller but is much slower. 2 offers a good compromise.
+            fid = gzip.open(fname, "wb", compresslevel=2)
+        else:
+            logger.debug('Writing using normal I/O')
+            fid = open(fname, "wb")
+    else:
+        logger.debug('Writing using %s I/O' % type(fname))
+        fid = fname
+        fid.seek(0)
+    #   Write the compulsory items
+    write_id(fid, FIFF.FIFF_FILE_ID)
+    write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
+    write_int(fid, FIFF.FIFF_FREE_LIST, -1)
+    return fid
+
+
+def end_file(fid):
+    """Writes the closing tags to a fif file and closes the file"""
+    data_size = 0
+    fid.write(np.array(FIFF.FIFF_NOP, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_NONE, dtype='>i4').tostring())
+    fid.close()
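+
+# A minimal write session using the helpers above (a sketch; the block
+# and tag kinds are only illustrative):
+#
+#     fid = start_file('test_raw.fif')
+#     start_block(fid, FIFF.FIFFB_MEAS)
+#     write_int(fid, FIFF.FIFF_NCHAN, 306)
+#     end_block(fid, FIFF.FIFFB_MEAS)
+#     end_file(fid)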
+
+
+def write_coord_trans(fid, trans):
+    """Writes a coordinate transformation structure"""
+
+    #?typedef struct _fiffCoordTransRec {
+    #  fiff_int_t   from;                          /*!< Source coordinate system. */
+    #  fiff_int_t   to;                        /*!< Destination coordinate system. */
+    #  fiff_float_t rot[3][3];             /*!< The forward transform (rotation part) */
+    #  fiff_float_t move[3];                   /*!< The forward transform (translation part) */
+    #  fiff_float_t invrot[3][3];              /*!< The inverse transform (rotation part) */
+    #  fiff_float_t invmove[3];            /*!< The inverse transform (translation part) */
+    #} *fiffCoordTrans, fiffCoordTransRec; /*!< Coordinate transformation descriptor */
+
+    data_size = 4 * 2 * 12 + 4 * 2
+    fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(trans['from'], dtype='>i4').tostring())
+    fid.write(np.array(trans['to'], dtype='>i4').tostring())
+
+    #   The transform...
+    rot = trans['trans'][:3, :3]
+    move = trans['trans'][:3, 3]
+    fid.write(np.array(rot, dtype='>f4').tostring())
+    fid.write(np.array(move, dtype='>f4').tostring())
+
+    #   ...and its inverse
+    trans_inv = linalg.inv(trans['trans'])
+    rot = trans_inv[:3, :3]
+    move = trans_inv[:3, 3]
+    fid.write(np.array(rot, dtype='>f4').tostring())
+    fid.write(np.array(move, dtype='>f4').tostring())
+
+
+def write_ch_info(fid, ch):
+    """Writes a channel information record to a fif file"""
+
+    #typedef struct _fiffChPosRec {
+    #  fiff_int_t   coil_type;      /*!< What kind of coil. */
+    #  fiff_float_t r0[3];          /*!< Coil coordinate system origin */
+    #  fiff_float_t ex[3];          /*!< Coil coordinate system x-axis unit vector */
+    #  fiff_float_t ey[3];          /*!< Coil coordinate system y-axis unit vector */
+    #  fiff_float_t ez[3];                   /*!< Coil coordinate system z-axis unit vector */
+    #} fiffChPosRec,*fiffChPos;                /*!< Measurement channel position and coil type */
+
+    #typedef struct _fiffChInfoRec {
+    #  fiff_int_t    scanNo;    /*!< Scanning order # */
+    #  fiff_int_t    logNo;     /*!< Logical channel # */
+    #  fiff_int_t    kind;      /*!< Kind of channel */
+    #  fiff_float_t  range;     /*!< Voltmeter range (only applies to raw data ) */
+    #  fiff_float_t  cal;       /*!< Calibration from volts to... */
+    #  fiff_ch_pos_t chpos;     /*!< Channel location */
+    #  fiff_int_t    unit;      /*!< Unit of measurement */
+    #  fiff_int_t    unit_mul;  /*!< Unit multiplier exponent */
+    #  fiff_char_t   ch_name[16];   /*!< Descriptive name for the channel */
+    #} fiffChInfoRec,*fiffChInfo;   /*!< Description of one channel */
+
+    data_size = 4 * 13 + 4 * 7 + 16
+
+    fid.write(np.array(FIFF.FIFF_CH_INFO, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    #   Start writing fiffChInfoRec
+    fid.write(np.array(ch['scanno'], dtype='>i4').tostring())
+    fid.write(np.array(ch['logno'], dtype='>i4').tostring())
+    fid.write(np.array(ch['kind'], dtype='>i4').tostring())
+    fid.write(np.array(ch['range'], dtype='>f4').tostring())
+    fid.write(np.array(ch['cal'], dtype='>f4').tostring())
+    fid.write(np.array(ch['coil_type'], dtype='>i4').tostring())
+    fid.write(np.array(ch['loc'], dtype='>f4').tostring())  # writing 12 values
+
+    #   unit and unit multiplier
+    fid.write(np.array(ch['unit'], dtype='>i4').tostring())
+    fid.write(np.array(ch['unit_mul'], dtype='>i4').tostring())
+
+    #   Finally channel name
+    if len(ch['ch_name']):
+        ch_name = ch['ch_name'][:15]
+    else:
+        ch_name = ch['ch_name']
+
+    fid.write(np.array(ch_name, dtype='>c').tostring())
+    if len(ch_name) < 16:
+        fid.write('\0' * (16 - len(ch_name)))
+
+
+def write_dig_point(fid, dig):
+    """Writes a digitizer data point into a fif file"""
+    #?typedef struct _fiffDigPointRec {
+    #  fiff_int_t kind;               /*!< FIFF_POINT_CARDINAL,
+    #                                  *   FIFF_POINT_HPI, or
+    #                                  *   FIFF_POINT_EEG */
+    #  fiff_int_t ident;              /*!< Number identifying this point */
+    #  fiff_float_t r[3];             /*!< Point location */
+    #} *fiffDigPoint,fiffDigPointRec; /*!< Digitization point description */
+
+    data_size = 5 * 4
+
+    fid.write(np.array(FIFF.FIFF_DIG_POINT, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    #   Start writing fiffDigPointRec
+    fid.write(np.array(dig['kind'], dtype='>i4').tostring())
+    fid.write(np.array(dig['ident'], dtype='>i4').tostring())
+    fid.write(np.array(dig['r'][:3], dtype='>f4').tostring())
+
+
+def write_float_sparse_rcs(fid, kind, mat):
+    """Writes a single-precision floating-point matrix tag"""
+    FIFFT_MATRIX = 16416 << 16
+    FIFFT_MATRIX_FLOAT_RCS = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
+
+    nnzm = mat.nnz
+    nrow = mat.shape[0]
+    data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_FLOAT_RCS, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    fid.write(np.array(mat.data, dtype='>f4').tostring())
+    fid.write(np.array(mat.indices, dtype='>i4').tostring())
+    fid.write(np.array(mat.indptr, dtype='>i4').tostring())
+
+    dims = [nnzm, mat.shape[0], mat.shape[1], 2]
+    fid.write(np.array(dims, dtype='>i4').tostring())
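+
+# The RCS payload mirrors scipy.sparse CSR storage written above: the
+# nonzero values (mat.data), their column indices (mat.indices) and the
+# row pointer array (mat.indptr), followed by an [nnz, nrow, ncol, 2]
+# dims trailer.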
diff --git a/mne/filter.py b/mne/filter.py
new file mode 100644
index 0000000..91d8752
--- /dev/null
+++ b/mne/filter.py
@@ -0,0 +1,1338 @@
+"""IIR and FIR filtering functions"""
+
+import warnings
+import numpy as np
+from scipy.fftpack import fft, ifftshift, fftfreq
+from scipy.signal import freqz, iirdesign, iirfilter, filter_dict, get_window
+from scipy import signal, stats
+from copy import deepcopy
+
+import logging
+logger = logging.getLogger('mne')
+
+from .fixes import firwin2, filtfilt  # back port for old scipy
+from .time_frequency.multitaper import dpss_windows, _mt_spectra
+from . import verbose
+from .parallel import parallel_func
+from .cuda import setup_cuda_fft_multiply_repeated, fft_multiply_repeated, \
+                  setup_cuda_fft_resample, fft_resample, _smart_pad
+
+
+def is_power2(num):
+    """Test if number is a power of 2
+
+    Parameters
+    ----------
+    num : int
+        Number.
+
+    Returns
+    -------
+    b : bool
+        True if is power of 2.
+
+    Example
+    -------
+    >>> is_power2(2 ** 3)
+    True
+    >>> is_power2(5)
+    False
+    """
+    num = int(num)
+    return num != 0 and ((num & (num - 1)) == 0)
+
+
+def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
+                        n_jobs=1):
+    """ Filter using overlap-add FFTs.
+
+    Filters the signal x using a filter with the impulse response h.
+    If zero_phase==True, the amplitude response is scaled and the filter is
+    applied in forward and backward direction, resulting in a zero-phase
+    filter.
+
+    WARNING: This operates on the data in-place.
+
+    Parameters
+    ----------
+    x : 2d array
+        Signal to filter.
+    h : 1d array
+        Filter impulse response (FIR filter coefficients).
+    n_fft : int
+        Length of the FFT. If None, the best size is determined automatically.
+    zero_phase : bool
+        If True: the filter is applied in forward and backward direction,
+        resulting in a zero-phase filter.
+    picks : list of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly and CUDA is initialized.
+
+    Returns
+    -------
+    xf : 2d array
+        x filtered.
+    """
+    if picks is None:
+        picks = np.arange(x.shape[0])
+
+    # Extend the signal by mirroring the edges to reduce transient filter
+    # response
+    n_h = len(h)
+    n_edge = min(n_h, x.shape[1])
+
+    n_x = x.shape[1] + 2 * n_edge - 2
+
+    # Determine FFT length to use
+    if n_fft is None:
+        if n_x > n_h:
+            n_tot = 2 * n_x if zero_phase else n_x
+
+            min_fft = 2 * n_h - 1
+            max_fft = n_x
+
+            # cost function based on number of multiplications
+            N = 2 ** np.arange(np.ceil(np.log2(min_fft)),
+                               np.ceil(np.log2(max_fft)) + 1, dtype=int)
+            cost = (np.ceil(n_tot / (N - n_h + 1).astype(np.float))
+                    * N * (np.log2(N) + 1))
+
+            # add a heuristic term to prevent too-long FFTs, which are slow
+            # (not predicted by mult. cost alone, 4e-5 exp. determined)
+            cost += 4e-5 * N * n_tot
+
+            n_fft = N[np.argmin(cost)]
+        else:
+            # Use only a single block
+            n_fft = 2 ** int(np.ceil(np.log2(n_x + n_h - 1)))
+
+    if n_fft < 2 * n_h - 1:
+        raise ValueError('n_fft is too short, has to be at least '
+                         '"2 * len(h) - 1"')
+
+    if not is_power2(n_fft):
+        warnings.warn("FFT length is not a power of 2. Can be slower.")
+
+    # Filter in frequency domain
+    h_fft = fft(np.r_[h, np.zeros(n_fft - n_h, dtype=h.dtype)])
+
+    if zero_phase:
+        # We will apply the filter in forward and backward direction: Scale
+        # frequency response of the filter so that the shape of the amplitude
+        # response stays the same when it is applied twice
+
+        # be careful not to divide by too small numbers
+        idx = np.where(np.abs(h_fft) > 1e-6)
+        h_fft[idx] = h_fft[idx] / np.sqrt(np.abs(h_fft[idx]))
+
+    # Segment length for signal x
+    n_seg = n_fft - n_h + 1
+
+    # Number of segments (including fractional segments)
+    n_segments = int(np.ceil(n_x / float(n_seg)))
+
+    # Figure out if we should use CUDA
+    n_jobs, cuda_dict, h_fft = setup_cuda_fft_multiply_repeated(n_jobs, h_fft)
+
+    # Process each row separately
+    if n_jobs == 1:
+        for p in picks:
+            x[p] = _1d_overlap_filter(x[p], h_fft, n_edge, n_fft, zero_phase,
+                                      n_segments, n_seg, cuda_dict)
+    else:
+        _check_njobs(n_jobs, can_be_cuda=True)
+        parallel, p_fun, _ = parallel_func(_1d_overlap_filter, n_jobs)
+        data_new = parallel(p_fun(x[p], h_fft, n_edge, n_fft, zero_phase,
+                                  n_segments, n_seg, cuda_dict)
+                            for p in picks)
+        for pp, p in enumerate(picks):
+            x[p] = data_new[pp]
+
+    return x
+
+
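+# Editor's sketch (illustrative, not part of the upstream module): the
+# overlap-add identity the routine above exploits -- block-wise circular
+# convolutions via FFT products, summed with overlap, equal one full linear
+# convolution. The helper name `_demo_overlap_add_identity` is hypothetical.
+def _demo_overlap_add_identity():
+    rng = np.random.RandomState(0)
+    x, h = rng.randn(1000), rng.randn(31)
+    n_fft = 256
+    n_seg = n_fft - len(h) + 1  # block length that avoids wrap-around
+    h_fft = fft(np.r_[h, np.zeros(n_fft - len(h))])
+    y = np.zeros(len(x) + len(h) - 1)
+    for start in range(0, len(x), n_seg):
+        seg = x[start:start + n_seg]
+        seg_fft = fft(np.r_[seg, np.zeros(n_fft - len(seg))])
+        prod = np.real(np.fft.ifft(h_fft * seg_fft))
+        stop = min(start + n_fft, len(y))
+        y[start:stop] += prod[:stop - start]
+    assert np.allclose(y, np.convolve(x, h))
+
+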
+def _1d_overlap_filter(x, h_fft, n_edge, n_fft, zero_phase, n_segments, n_seg,
+                       cuda_dict):
+    """Do one-dimensional overlap-add FFT FIR filtering"""
+    # pad to reduce ringing
+    x_ext = _smart_pad(x, n_edge - 1)
+    n_x = len(x_ext)
+    filter_input = x_ext
+    x_filtered = np.zeros_like(filter_input)
+
+    for pass_no in range(2) if zero_phase else range(1):
+
+        if pass_no == 1:
+            # second pass: flip signal
+            filter_input = np.flipud(x_filtered)
+            x_filtered = np.zeros_like(x_ext)
+
+        for seg_idx in range(n_segments):
+            seg = filter_input[seg_idx * n_seg:(seg_idx + 1) * n_seg]
+            seg = np.r_[seg, np.zeros(n_fft - len(seg))]
+            prod = fft_multiply_repeated(h_fft, seg, cuda_dict)
+            if seg_idx * n_seg + n_fft < n_x:
+                x_filtered[seg_idx * n_seg:seg_idx * n_seg + n_fft] += prod
+            else:
+                # Last segment
+                x_filtered[seg_idx * n_seg:] += prod[:n_x - seg_idx * n_seg]
+
+    # Remove mirrored edges that we added
+    x_filtered = x_filtered[n_edge - 1:-n_edge + 1]
+
+    if zero_phase:
+        # flip signal back
+        x_filtered = np.flipud(x_filtered)
+
+    x_filtered = x_filtered.astype(x.dtype)
+    return x_filtered
+
+
+def _filter_attenuation(h, freq, gain):
+    """Compute minimum attenuation at stop frequency"""
+
+    _, filt_resp = freqz(h.ravel(), worN=np.pi * freq)
+    filt_resp = np.abs(filt_resp)  # use amplitude response
+    filt_resp /= np.max(filt_resp)
+    filt_resp[np.where(gain == 1)] = 0
+    idx = np.argmax(filt_resp)
+    att_db = -20 * np.log10(filt_resp[idx])
+    att_freq = freq[idx]
+
+    return att_db, att_freq
+
+
+def _1d_fftmult_ext(x, B, extend_x, cuda_dict):
+    """Helper to parallelize FFT FIR, with extension if necessary"""
+    # extend, if necessary
+    if extend_x is True:
+        x = np.r_[x, x[-1]]
+
+    # do Fourier transforms
+    xf = fft_multiply_repeated(B, x, cuda_dict)
+
+    # put back to original size and type
+    if extend_x is True:
+        xf = xf[:-1]
+
+    xf = xf.astype(x.dtype)
+    return xf
+
+
+def _prep_for_filtering(x, copy, picks=None):
+    """Set up array as 2D for filtering ease"""
+    if copy is True:
+        x = x.copy()
+    orig_shape = x.shape
+    x = np.atleast_2d(x)
+    x.shape = (np.prod(x.shape[:-1]), x.shape[-1])
+    if picks is None:
+        picks = np.arange(x.shape[0])
+    return x, orig_shape, picks
+
+
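+# Editor's sketch (illustrative, not upstream code): a 3-D, epochs-like array
+# is flattened to 2-D so the filters below can loop over rows; orig_shape
+# lets callers restore the input layout afterwards.
+def _demo_prep_for_filtering():
+    x = np.zeros((2, 3, 100))
+    x2d, orig_shape, picks = _prep_for_filtering(x, copy=True)
+    assert x2d.shape == (6, 100)
+    assert orig_shape == (2, 3, 100)
+    assert len(picks) == 6  # all rows selected when picks is None
+
+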
+def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
+            copy=True):
+    """Filter signal using gain control points in the frequency domain.
+
+    The filter impulse response is constructed from a Hamming window (window
+    used in "firwin2" function) to avoid ripples in the frequency response
+    (windowing is a smoothing in frequency domain). The filter is zero-phase.
+
+    If x is multi-dimensional, this operates along the last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    freq : 1d array
+        Frequency sampling points in Hz.
+    gain : 1d array
+        Filter gain at frequency sampling points.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    picks : list of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly and CUDA is initialized.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+    """
+    # set up array for filtering, reshape to 2D, operate on last axis
+    x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+
+    # issue a warning if attenuation is less than this
+    min_att_db = 20
+
+    # normalize frequencies
+    freq = np.array([f / (Fs / 2) for f in freq])
+    gain = np.array(gain)
+    filter_length = _get_filter_length(filter_length, Fs, len_x=x.shape[1])
+
+    if filter_length is None or x.shape[1] <= filter_length:
+        # Use direct FFT filtering for short signals
+
+        Norig = x.shape[1]
+
+        extend_x = False
+        if (gain[-1] == 0.0 and Norig % 2 == 1) \
+                or (gain[-1] == 1.0 and Norig % 2 != 1):
+            # Gain at Nyquist freq: 1: make x EVEN, 0: make x ODD
+            extend_x = True
+
+        N = x.shape[1] + (extend_x is True)
+
+        H = firwin2(N, freq, gain)[np.newaxis, :]
+
+        att_db, att_freq = _filter_attenuation(H, freq, gain)
+        if att_db < min_att_db:
+            att_freq *= Fs / 2
+            warnings.warn('Attenuation at stop frequency %0.1fHz is only '
+                          '%0.1fdB.' % (att_freq, att_db))
+
+        # Make zero-phase filter function
+        B = np.abs(fft(H)).ravel()
+
+        # Figure out if we should use CUDA
+        n_jobs, cuda_dict, B = setup_cuda_fft_multiply_repeated(n_jobs, B)
+
+        if n_jobs == 1:
+            for p in picks:
+                x[p] = _1d_fftmult_ext(x[p], B, extend_x, cuda_dict)
+        else:
+            _check_njobs(n_jobs, can_be_cuda=True)
+            parallel, p_fun, _ = parallel_func(_1d_fftmult_ext, n_jobs)
+            data_new = parallel(p_fun(x[p], B, extend_x, cuda_dict)
+                                for p in picks)
+            for pp, p in enumerate(picks):
+                x[p] = data_new[pp]
+    else:
+        # Use overlap-add filter with a fixed length
+        N = filter_length
+
+        if (gain[-1] == 0.0 and N % 2 == 1) \
+                or (gain[-1] == 1.0 and N % 2 != 1):
+            # Gain at Nyquist freq: 1: make N EVEN, 0: make N ODD
+            N += 1
+
+        H = firwin2(N, freq, gain)
+
+        att_db, att_freq = _filter_attenuation(H, freq, gain)
+        att_db += 6  # the filter is applied twice (zero phase)
+        if att_db < min_att_db:
+            att_freq *= Fs / 2
+            warnings.warn('Attenuation at stop frequency %0.1fHz is only '
+                          '%0.1fdB. Increase filter_length for higher '
+                          'attenuation.' % (att_freq, att_db))
+
+        x = _overlap_add_filter(x, H, zero_phase=True, picks=picks,
+                                n_jobs=n_jobs)
+
+    x.shape = orig_shape
+    return x
+
+
+def _filtfilt(x, b, a, padlen, picks, n_jobs, copy):
+    """Helper to more easily call filtfilt"""
+    # set up array for filtering, reshape to 2D, operate on last axis
+    x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+    if n_jobs == 1:
+        for p in picks:
+            x[p] = filtfilt(b, a, x[p], padlen=padlen)
+    else:
+        _check_njobs(n_jobs)
+        parallel, p_fun, _ = parallel_func(filtfilt, n_jobs)
+        data_new = parallel(p_fun(b, a, x[p], padlen=padlen)
+                            for p in picks)
+        for pp, p in enumerate(picks):
+            x[p] = data_new[pp]
+    x.shape = orig_shape
+    return x
+
+
+def _estimate_ringing_samples(b, a):
+    """Helper function for determining IIR padding"""
+    x = np.zeros(1000)
+    x[0] = 1
+    h = signal.lfilter(b, a, x)
+    return np.where(np.abs(h) > 0.001 * np.max(np.abs(h)))[0][-1]
+
+
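+# Editor's sketch (illustrative, not upstream code): ringing length of a
+# 4th-order 40 Hz Butterworth low-pass at 1000 Hz sampling; the doctest in
+# construct_iir_filter below arrives at the same estimate (82 samples).
+def _demo_estimate_ringing():
+    b, a = signal.butter(4, 40. / 500., btype='low')
+    n_ring = _estimate_ringing_samples(b, a)
+    assert 0 < n_ring < 1000  # decays within the 1000-sample probe
+
+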
+def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
+                         f_pass=None, f_stop=None, sfreq=None, btype=None,
+                         return_copy=True):
+    """Use IIR parameters to get filtering coefficients
+
+    This function works like a wrapper for iirdesign and iirfilter in
+    scipy.signal to make filter coefficients for IIR filtering. It also
+    estimates the number of padding samples based on the filter ringing.
+    It creates a new iir_params dict (or updates the one passed to the
+    function) with the filter coefficients ('b' and 'a') and an estimate
+    of the padding necessary ('padlen') so IIR filtering can be performed.
+
+    Parameters
+    ----------
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        If iir_params['b'] and iir_params['a'] exist, these will be used
+        as coefficients to perform IIR filtering. Otherwise, if
+        iir_params['order'] and iir_params['ftype'] exist, these will be
+        used with scipy.signal.iirfilter to make a filter. Otherwise, if
+        iir_params['gpass'] and iir_params['gstop'] exist, these will be
+        used with scipy.signal.iirdesign to design a filter.
+        iir_params['padlen'] defines the number of samples to pad (and
+        an estimate will be calculated if it is not given). See Notes for
+        more details.
+    f_pass : float or list of float
+        Frequency for the pass-band. Low-pass and high-pass filters should
+        be a float, band-pass should be a 2-element list of float.
+    f_stop : float or list of float
+        Stop-band frequency (same size as f_pass). Not used if 'order' is
+        specified in iir_params.
+    sfreq : float | None
+        The sampling frequency of the signal in Hz.
+    btype : str
+        Type of filter. Should be 'lowpass', 'highpass', or 'bandpass'
+        (or analogous string representations known to scipy.signal).
+    return_copy : bool
+        If False, the 'b', 'a', and 'padlen' entries in iir_params will be
+        set inplace (if they weren't already). Otherwise, a new iir_params
+        instance will be created and returned with these entries.
+
+    Returns
+    -------
+    iir_params : dict
+        Updated iir_params dict, with the entries (set only if they didn't
+        exist before) for 'b', 'a', and 'padlen' for IIR filtering.
+
+    Notes
+    -----
+    This function triages calls to scipy.signal.iirfilter and iirdesign
+    based on the input arguments (see descriptions of these functions
+    and scipy's scipy.signal.filter_design documentation for details).
+
+    Examples
+    --------
+    iir_params can have several forms. Consider constructing a low-pass
+    filter at 40 Hz with 1000 Hz sampling rate.
+
+    In the most basic (2-parameter) form of iir_params, the order of the
+    filter 'order' and the type of filtering 'ftype' are specified. To get
+    coefficients for a 4th-order Butterworth filter, this would be:
+
+    >>> iir_params = dict(order=4, ftype='butter')
+    >>> iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low', return_copy=False)
+    >>> print (len(iir_params['b']), len(iir_params['a']), iir_params['padlen'])
+    (5, 5, 82)
+
+    Filters can also be constructed using filter design methods. To get a
+    40 Hz Chebyshev type 1 lowpass with specific gain characteristics in the
+    pass and stop bands (assuming the desired stop band is at 45 Hz), this
+    would be a filter with much longer ringing:
+
+    >>> iir_params = dict(ftype='cheby1', gpass=3, gstop=20)
+    >>> iir_params = construct_iir_filter(iir_params, 40, 50, 1000, 'low')
+    >>> print (len(iir_params['b']), len(iir_params['a']), iir_params['padlen'])
+    (6, 6, 439)
+
+    Padding and/or filter coefficients can also be manually specified. For
+    a 10-sample moving window with no padding during filtering, for example,
+    one can just do:
+
+    >>> iir_params = dict(b=np.ones((10)), a=[1, 0], padlen=0)
+    >>> iir_params = construct_iir_filter(iir_params, return_copy=False)
+    >>> print (iir_params['b'], iir_params['a'], iir_params['padlen'])
+    (array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.]), [1, 0], 0)
+
+    """
+    a = None
+    b = None
+    # if the filter has been designed, we're good to go
+    if 'a' in iir_params and 'b' in iir_params:
+        [b, a] = [iir_params['b'], iir_params['a']]
+    else:
+        # ensure we have a valid ftype
+        if 'ftype' not in iir_params:
+            raise RuntimeError("ftype must be an entry in iir_params if 'b' "
+                               "and 'a' are not specified")
+        ftype = iir_params['ftype']
+        if ftype not in filter_dict:
+            raise RuntimeError('ftype must be in filter_dict from '
+                               'scipy.signal (e.g., butter, cheby1, etc.) not '
+                               '%s' % ftype)
+
+        # use order-based design
+        Wp = np.asanyarray(f_pass) / (float(sfreq) / 2)
+        if 'order' in iir_params:
+            [b, a] = iirfilter(iir_params['order'], Wp, btype=btype,
+                               ftype=ftype)
+        else:
+            # use gpass / gstop design
+            Ws = np.asanyarray(f_stop) / (float(sfreq) / 2)
+            if 'gpass' not in iir_params or 'gstop' not in iir_params:
+                raise ValueError("iir_params must have at least 'gstop' and "
+                                 "'gpass' (or 'order') entries")
+            [b, a] = iirdesign(Wp, Ws, iir_params['gpass'],
+                               iir_params['gstop'], ftype=ftype)
+
+    if a is None or b is None:
+        raise RuntimeError('coefficients could not be created from iir_params')
+
+    # now deal with padding
+    if 'padlen' not in iir_params:
+        padlen = _estimate_ringing_samples(b, a)
+    else:
+        padlen = iir_params['padlen']
+
+    if return_copy:
+        iir_params = deepcopy(iir_params)
+
+    iir_params.update(dict(b=b, a=a, padlen=padlen))
+    return iir_params
+
+
+@verbose
+def band_pass_filter(x, Fs, Fp1, Fp2, filter_length='10s',
+                     l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
+                     method='fft', iir_params=dict(order=4, ftype='butter'),
+                     picks=None, n_jobs=1, copy=True, verbose=None):
+    """Bandpass filter for the signal x.
+
+    Applies a zero-phase bandpass filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp1 : float
+        Low cut-off frequency in Hz.
+    Fp2 : float
+        High cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    l_trans_bandwidth : float
+        Width of the transition band at the low cut-off frequency in Hz.
+    h_trans_bandwidth : float
+        Width of the transition band at the high cut-off frequency in Hz.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details.
+    picks : list of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    Notes
+    -----
+    The frequency response is (approximately) given by
+                     ----------
+                   /|         | \
+                  / |         |  \
+                 /  |         |   \
+                /   |         |    \
+      ----------    |         |     -----------------
+                    |         |
+              Fs1  Fp1       Fp2   Fs2
+
+    Where
+    Fs1 = Fp1 - l_trans_bandwidth in Hz
+    Fs2 = Fp2 + h_trans_bandwidth in Hz
+    """
+
+    method = method.lower()
+    if method not in ['fft', 'iir']:
+        raise RuntimeError('method should be fft or iir (not %s)' % method)
+
+    Fs = float(Fs)
+    Fp1 = float(Fp1)
+    Fp2 = float(Fp2)
+    Fs1 = Fp1 - l_trans_bandwidth
+    Fs2 = Fp2 + h_trans_bandwidth
+
+    if Fs1 <= 0:
+        raise ValueError('Filter specification invalid: Lower stop frequency '
+                         'too low (%0.1fHz). Increase Fp1 or reduce '
+                         'transition bandwidth (l_trans_bandwidth)' % Fs1)
+
+    if method == 'fft':
+        freq = [0, Fs1, Fp1, Fp2, Fs2, Fs / 2]
+        gain = [0, 0, 1, 1, 0, 0]
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        iir_params = construct_iir_filter(iir_params, [Fp1, Fp2],
+                                          [Fs1, Fs2], Fs, 'bandpass')
+        padlen = min(iir_params['padlen'], len(x))
+        xf = _filtfilt(x, iir_params['b'], iir_params['a'], padlen,
+                       picks, n_jobs, copy)
+
+    return xf
+
+
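+# Editor's sketch (illustrative, not upstream code): band_pass_filter on a
+# synthetic two-tone signal. The in-band 10 Hz component should survive while
+# the out-of-band 40 Hz component is strongly attenuated. The helper name
+# `_demo_band_pass` is hypothetical.
+def _demo_band_pass():
+    sfreq = 500.
+    t = np.arange(0, 10, 1. / sfreq)
+    x = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 40 * t)
+    xf = band_pass_filter(x, sfreq, 5., 15., filter_length='2s')
+    f = fftfreq(x.size, 1. / sfreq)
+    amp = np.abs(fft(xf))
+    in_band = amp[np.argmin(np.abs(f - 10))]
+    out_band = amp[np.argmin(np.abs(f - 40))]
+    assert in_band > 10 * out_band
+
+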
+@verbose
+def band_stop_filter(x, Fs, Fp1, Fp2, filter_length='10s',
+                     l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
+                     method='fft', iir_params=dict(order=4, ftype='butter'),
+                     picks=None, n_jobs=1, copy=True, verbose=None):
+    """Bandstop filter for the signal x.
+
+    Applies a zero-phase bandstop filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp1 : float | array of float
+        Low cut-off frequency in Hz.
+    Fp2 : float | array of float
+        High cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    l_trans_bandwidth : float
+        Width of the transition band at the low cut-off frequency in Hz.
+    h_trans_bandwidth : float
+        Width of the transition band at the high cut-off frequency in Hz.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details.
+    picks : list of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    Notes
+    -----
+    The frequency response is (approximately) given by
+      ----------                   ----------
+               |\                 /|
+               | \               / |
+               |  \             /  |
+               |   \           /   |
+               |    -----------    |
+               |    |         |    |
+              Fp1  Fs1       Fs2  Fp2
+
+    Where
+    Fs1 = Fp1 + l_trans_bandwidth in Hz
+    Fs2 = Fp2 - h_trans_bandwidth in Hz
+
+    Note that multiple stop bands can be specified using arrays.
+    """
+
+    method = method.lower()
+    if method not in ['fft', 'iir']:
+        raise RuntimeError('method should be fft or iir (not %s)' % method)
+    Fp1 = np.atleast_1d(Fp1)
+    Fp2 = np.atleast_1d(Fp2)
+    if not len(Fp1) == len(Fp2):
+        raise ValueError('Fp1 and Fp2 must be the same length')
+
+    Fs = float(Fs)
+    Fp1 = Fp1.astype(float)
+    Fp2 = Fp2.astype(float)
+    Fs1 = Fp1 + l_trans_bandwidth
+    Fs2 = Fp2 - h_trans_bandwidth
+
+    if np.any(Fs1 <= 0):
+        raise ValueError('Filter specification invalid: Lower stop frequency '
+                         'too low (%0.1fHz). Increase Fp1 or reduce '
+                         'transition bandwidth (l_trans_bandwidth)'
+                         % np.min(Fs1))
+
+    if method == 'fft':
+        freq = np.r_[0, Fp1, Fs1, Fs2, Fp2, Fs / 2]
+        gain = np.r_[1, np.ones_like(Fp1), np.zeros_like(Fs1),
+                     np.zeros_like(Fs2), np.ones_like(Fp2), 1]
+        order = np.argsort(freq)
+        freq = freq[order]
+        gain = gain[order]
+        if np.any(np.abs(np.diff(gain, 2)) > 1):
+            raise ValueError('Stop bands are not sufficiently separated.')
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        for fp_1, fp_2, fs_1, fs_2 in zip(Fp1, Fp2, Fs1, Fs2):
+            iir_params_new = construct_iir_filter(iir_params, [fp_1, fp_2],
+                                                  [fs_1, fs_2], Fs, 'bandstop')
+            padlen = min(iir_params_new['padlen'], len(x))
+            x = _filtfilt(x, iir_params_new['b'], iir_params_new['a'],
+                          padlen, picks, n_jobs, copy)
+            copy = False  # later bands filter the already-copied result
+        xf = x
+
+    return xf
+
+
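+# Editor's sketch (illustrative, not upstream code): several stop bands passed
+# as arrays, as the Notes above describe; both the 60 Hz and 120 Hz components
+# are suppressed while a 5 Hz component passes through.
+def _demo_band_stop_multi():
+    sfreq = 1000.
+    t = np.arange(0, 10, 1. / sfreq)
+    x = (np.sin(2 * np.pi * 5 * t) + np.sin(2 * np.pi * 60 * t)
+         + np.sin(2 * np.pi * 120 * t))
+    xf = band_stop_filter(x, sfreq, [55., 115.], [65., 125.],
+                          filter_length='2s')
+    f = fftfreq(x.size, 1. / sfreq)
+    amp = np.abs(fft(xf))
+    keep = amp[np.argmin(np.abs(f - 5))]
+    assert keep > 10 * amp[np.argmin(np.abs(f - 60))]
+    assert keep > 10 * amp[np.argmin(np.abs(f - 120))]
+
+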
+@verbose
+def low_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
+                    method='fft', iir_params=dict(order=4, ftype='butter'),
+                    picks=None, n_jobs=1, copy=True, verbose=None):
+    """Lowpass filter for the signal x.
+
+    Applies a zero-phase lowpass filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp : float
+        Cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    trans_bandwidth : float
+        Width of the transition band in Hz.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details.
+    picks : list of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    Notes
+    -----
+    The frequency response is (approximately) given by
+      -------------------------
+                              | \
+                              |  \
+                              |   \
+                              |    \
+                              |     -----------------
+                              |
+                              Fp  Fp+trans_bandwidth
+
+    """
+
+    method = method.lower()
+    if method not in ['fft', 'iir']:
+        raise RuntimeError('method should be fft or iir (not %s)' % method)
+
+    Fs = float(Fs)
+    Fp = float(Fp)
+    Fstop = Fp + trans_bandwidth
+    if method == 'fft':
+        freq = [0, Fp, Fstop, Fs / 2]
+        gain = [1, 1, 0, 0]
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        iir_params = construct_iir_filter(iir_params, Fp, Fstop, Fs, 'low')
+        padlen = min(iir_params['padlen'], len(x))
+        xf = _filtfilt(x, iir_params['b'], iir_params['a'], padlen,
+                       picks, n_jobs, copy)
+
+    return xf
+
+
+@verbose
+def high_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
+                     method='fft', iir_params=dict(order=4, ftype='butter'),
+                     picks=None, n_jobs=1, copy=True, verbose=None):
+    """Highpass filter for the signal x.
+
+    Applies a zero-phase highpass filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp : float
+        Cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    trans_bandwidth : float
+        Width of the transition band in Hz.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details.
+    picks : list of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    Notes
+    -----
+    The frequency response is (approximately) given by
+                   -----------------------
+                 /|
+                / |
+               /  |
+              /   |
+    ----------    |
+                  |
+           Fstop  Fp
+
+    where Fstop = Fp - trans_bandwidth
+    """
+
+    method = method.lower()
+    if method not in ['fft', 'iir']:
+        raise RuntimeError('method should be fft or iir (not %s)' % method)
+
+    Fs = float(Fs)
+    Fp = float(Fp)
+
+    Fstop = Fp - trans_bandwidth
+    if Fstop <= 0:
+        raise ValueError('Filter specification invalid: Stop frequency too low'
+                         '(%0.1fHz). Increase Fp or reduce transition '
+                         'bandwidth (trans_bandwidth)' % Fstop)
+
+    if method == 'fft':
+        freq = [0, Fstop, Fp, Fs / 2]
+        gain = [0, 0, 1, 1]
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        iir_params = construct_iir_filter(iir_params, Fp, Fstop, Fs, 'high')
+        padlen = min(iir_params['padlen'], len(x))
+        xf = _filtfilt(x, iir_params['b'], iir_params['a'], padlen,
+                       picks, n_jobs, copy)
+
+    return xf
+
+
+@verbose
+def notch_filter(x, Fs, freqs, filter_length='10s', notch_widths=None,
+                 trans_bandwidth=1, method='fft',
+                 iir_params=dict(order=4, ftype='butter'), mt_bandwidth=None,
+                 p_value=0.05, picks=None, n_jobs=1, copy=True, verbose=None):
+    """Notch filter for the signal x.
+
+    Applies a zero-phase notch filter to the signal x, operating on the last
+    dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    freqs : float | array of float | None
+        Frequencies to notch filter in Hz, e.g. np.arange(60, 241, 60).
+        None can only be used with the method 'spectrum_fit', where an F
+        test is used to find sinusoidal components.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    notch_widths : float | array of float | None
+        Width of the stop band (centred at each freq in freqs) in Hz.
+        If None, freqs / 200 is used.
+    trans_bandwidth : float
+        Width of the transition band in Hz.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt). 'spectrum_fit' will
+        use multi-taper estimation of sinusoidal components. If freqs=None
+        and method='spectrum_fit', significant sinusoidal components
+        are detected using an F test, and noted by logging.
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details.
+    mt_bandwidth : float | None
+        The bandwidth of the multitaper windowing function in Hz.
+        Only used in 'spectrum_fit' mode.
+    p_value : float
+        p-value to use in F-test thresholding to determine significant
+        sinusoidal components to remove when method='spectrum_fit' and
+        freqs=None. Note that this will be Bonferroni corrected for the
+        number of frequencies, so large p-values may be justified.
+    picks : list of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    Notes
+    -----
+    The frequency response is (approximately) given by
+      ----------         -----------
+               |\       /|
+               | \     / |
+               |  \   /  |
+               |   \ /   |
+               |    -    |
+               |    |    |
+              Fp1 freq  Fp2
+
+    For each freq in freqs, where:
+    Fp1 = freq - trans_bandwidth / 2 in Hz
+    Fp2 = freq + trans_bandwidth / 2 in Hz
+
+    References
+    ----------
+    Multi-taper removal is inspired by code from the Chronux toolbox, see
+    www.chronux.org and the book "Observed Brain Dynamics" by Partha Mitra
+    & Hemant Bokil, Oxford University Press, New York, 2008. Please
+    cite this in publications if method 'spectrum_fit' is used.
+    """
+
+    method = method.lower()
+    if method not in ['fft', 'iir', 'spectrum_fit']:
+        raise RuntimeError('method should be fft, iir, or spectrum_fit '
+                           '(not %s)' % method)
+
+    if freqs is not None:
+        freqs = np.atleast_1d(freqs)
+    elif method != 'spectrum_fit':
+        raise ValueError('freqs=None can only be used with method '
+                         'spectrum_fit')
+
+    # Only have to deal with notch_widths for non-autodetect
+    if freqs is not None:
+        if notch_widths is None:
+            notch_widths = freqs / 200.0
+        elif np.any(notch_widths < 0):
+            raise ValueError('notch_widths must be >= 0')
+        else:
+            notch_widths = np.atleast_1d(notch_widths)
+            if len(notch_widths) == 1:
+                notch_widths = notch_widths[0] * np.ones_like(freqs)
+            elif len(notch_widths) != len(freqs):
+                raise ValueError('notch_widths must be None, scalar, or the '
+                                 'same length as freqs')
+
+    if method in ['fft', 'iir']:
+        # Speed this up by computing the Fourier coefficients once
+        tb_2 = trans_bandwidth / 2.0
+        lows = [freq - nw / 2.0 - tb_2
+                for freq, nw in zip(freqs, notch_widths)]
+        highs = [freq + nw / 2.0 + tb_2
+                 for freq, nw in zip(freqs, notch_widths)]
+        xf = band_stop_filter(x, Fs, lows, highs, filter_length, tb_2, tb_2,
+                              method, iir_params, picks, n_jobs, copy)
+    elif method == 'spectrum_fit':
+        xf = _mt_spectrum_proc(x, Fs, freqs, notch_widths, mt_bandwidth,
+                               p_value, picks, n_jobs, copy)
+
+    return xf
+
+
+def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth,
+                      p_value, picks, n_jobs, copy):
+    """Helper to more easily call _mt_spectrum_remove"""
+    # set up array for filtering, reshape to 2D, operate on last axis
+    x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+    if n_jobs == 1:
+        freq_list = list()
+        for ii, x_ in enumerate(x):
+            if ii in picks:
+                x[ii], f = _mt_spectrum_remove(x_, sfreq, line_freqs,
+                                               notch_widths, mt_bandwidth,
+                                               p_value)
+                freq_list.append(f)
+    else:
+        _check_njobs(n_jobs)
+        parallel, p_fun, _ = parallel_func(_mt_spectrum_remove, n_jobs)
+        data_new = parallel(p_fun(x_, sfreq, line_freqs, notch_widths,
+                                  mt_bandwidth, p_value)
+                            for xi, x_ in enumerate(x)
+                            if xi in picks)
+        freq_list = [d[1] for d in data_new]
+        data_new = np.array([d[0] for d in data_new])
+        x[picks, :] = data_new
+
+    # report found frequencies
+    for rm_freqs in freq_list:
+        if line_freqs is None:
+            if len(rm_freqs) > 0:
+                logger.info('Detected notch frequencies:\n%s'
+                            % ', '.join([str(f) for f in rm_freqs]))
+            else:
+                logger.info('Detected notch frequencies:\nNone')
+
+    x.shape = orig_shape
+    return x
+
+
+def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths,
+                        mt_bandwidth, p_value):
+    """Use MT-spectrum to remove line frequencies
+
+    Based on Chronux. If line_freqs is specified, all freqs within notch_width
+    of each line_freq is set to zero.
+    """
+    # XXX need to implement the moving window version for raw files
+    n_times = x.size
+
+    # max taper size chosen because it has a max error < 1e-3:
+    # >>> np.max(np.diff(dpss_windows(953, 4, 100)[0]))
+    # 0.00099972447657578449
+    # so we use 1000 because it's the first "nice" number bigger than 953:
+    dpss_n_times_max = 1000
+
+    # figure out what tapers to use
+    if mt_bandwidth is not None:
+        half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
+    else:
+        half_nbw = 4
+
+    # compute dpss windows
+    n_tapers_max = int(2 * half_nbw)
+    window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+        low_bias=False, interp_from=min(n_times, dpss_n_times_max))
+
+    # drop the even tapers
+    n_tapers = len(window_fun)
+    tapers_odd = np.arange(0, n_tapers, 2)
+    tapers_even = np.arange(1, n_tapers, 2)
+    tapers_use = window_fun[tapers_odd]
+
+    # sum tapers for (used) odd prolates across time (n_tapers, 1)
+    H0 = np.sum(tapers_use, axis=1)
+
+    # sum of squares across tapers (1, )
+    H0_sq = np.sum(H0 ** 2)
+
+    # make "time" vector
+    rads = 2 * np.pi * (np.arange(n_times) / float(sfreq))
+
+    # compute mt_spectrum (returning n_ch, n_tapers, n_freq)
+    x_p, freqs = _mt_spectra(x[np.newaxis, :], window_fun, sfreq)
+
+    # sum of the product of x_p and H0 across tapers (1, n_freqs)
+    x_p_H0 = np.sum(x_p[:, tapers_odd, :] *
+                    H0[np.newaxis, :, np.newaxis], axis=1)
+
+    # resulting calculated amplitudes for all freqs
+    A = x_p_H0 / H0_sq
+
+    if line_freqs is None:
+        # figure out which freqs to remove using F stat
+
+        # estimated coefficient
+        x_hat = A * H0[:, np.newaxis]
+
+        # numerator for F-statistic
+        num = (n_tapers - 1) * (np.abs(A) ** 2) * H0_sq
+        # denominator for F-statistic
+        den = (np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) +
+               np.sum(np.abs(x_p[:, tapers_even, :]) ** 2, 1))
+        den[den == 0] = np.inf
+        f_stat = num / den
+        # F-stat of 1-p point
+        threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * n_tapers - 2)
+
+        # find frequencies to remove
+        indices = np.where(f_stat > threshold)[1]
+        rm_freqs = freqs[indices]
+    else:
+        # specify frequencies
+        indices_1 = np.unique([np.argmin(np.abs(freqs - lf))
+                               for lf in line_freqs])
+        notch_widths /= 2.0
+        indices_2 = [np.logical_and(freqs > lf - nw, freqs < lf + nw)
+                     for lf, nw in zip(line_freqs, notch_widths)]
+        indices_2 = np.where(np.any(np.array(indices_2), axis=0))[0]
+        indices = np.unique(np.r_[indices_1, indices_2])
+        rm_freqs = freqs[indices]
+
+    fits = list()
+    for ind in indices:
+        c = 2 * A[0, ind]
+        fit = np.abs(c) * np.cos(freqs[ind] * rads + np.angle(c))
+        fits.append(fit)
+
+    if len(fits) == 0:
+        datafit = 0.0
+    else:
+        # fitted sinusoids are summed, and subtracted from data
+        datafit = np.sum(np.atleast_2d(fits), axis=0)
+
+    return x - datafit, rm_freqs
+
+
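+# Editor's sketch (illustrative, not upstream code): the core operation behind
+# _mt_spectrum_remove, stripped of the multitaper machinery -- estimate the
+# amplitude and phase of a known line frequency by least squares and subtract
+# the reconstructed sinusoid from the data.
+def _demo_sinusoid_subtraction():
+    sfreq, f_line = 250., 60.
+    t = np.arange(500) / sfreq
+    x = 0.3 * np.sin(2 * np.pi * 7 * t) + np.cos(2 * np.pi * f_line * t + 0.4)
+    # least-squares fit of cosine/sine regressors at the line frequency
+    regs = np.array([np.cos(2 * np.pi * f_line * t),
+                     np.sin(2 * np.pi * f_line * t)])
+    coef = np.linalg.lstsq(regs.T, x)[0]
+    x_clean = x - coef.dot(regs)
+    assert np.abs(x_clean).max() < 0.35  # only the 7 Hz component remains
+
+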
+@verbose
+def resample(x, up, down, npad=100, window='boxcar', n_jobs=1, verbose=None):
+    """Resample the array x
+
+    Operates along the last dimension of the array.
+
+    Parameters
+    ----------
+    x : n-d array
+        Signal to resample.
+    up : float
+        Factor to upsample by.
+    down : float
+        Factor to downsample by.
+    npad : integer
+        Number of samples to use at the beginning and end for padding.
+    window : string or tuple
+        See scipy.signal.resample for description.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly and CUDA is initialized.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x resampled.
+
+    Notes
+    -----
+    This uses (hopefully) intelligent edge padding and frequency-domain
+    windowing to improve scipy.signal.resample's resampling method, which
+    we have adapted for our use here. Choices of npad and window have
+    important consequences, and the default choices should work well
+    for most natural signals.
+
+    Resampling arguments are broken into "up" and "down" components for future
+    compatibility in case we decide to use an upfirdn implementation. The
+    current implementation is functionally equivalent to passing
+    up=up/down and down=1.
+    """
+    # make sure our arithmetic will work
+    ratio = float(up) / down
+    x, orig_shape = _prep_for_filtering(x, False)[:2]
+
+    x_len = x.shape[1]
+    if x_len > 0:
+        # prep for resampling now
+        orig_len = x_len + 2 * npad  # length after padding
+        new_len = int(round(ratio * orig_len))  # length after resampling
+        to_remove = np.round(ratio * npad).astype(int)
+
+        # figure out windowing function
+        if window is not None:
+            if callable(window):
+                W = window(fftfreq(orig_len))
+            elif isinstance(window, np.ndarray) and \
+                    window.shape == (orig_len,):
+                W = window
+            else:
+                W = ifftshift(get_window(window, orig_len))
+        else:
+            W = np.ones(orig_len)
+        W *= (float(new_len) / float(orig_len))
+        W = W.astype(np.complex128)
+
+        # figure out if we should use CUDA
+        n_jobs, cuda_dict, W = setup_cuda_fft_resample(n_jobs, W, new_len)
+
+        # do the resampling using an adaptation of scipy's FFT-based resample()
+        # use of the 'flat' window is recommended for minimal ringing
+        if n_jobs == 1:
+            y = np.zeros((len(x), new_len - 2 * to_remove), dtype=x.dtype)
+            for xi, x_ in enumerate(x):
+                y[xi] = fft_resample(x_, W, new_len, npad, to_remove,
+                                     cuda_dict)
+        else:
+            _check_njobs(n_jobs, can_be_cuda=True)
+            parallel, p_fun, _ = parallel_func(fft_resample, n_jobs)
+            y = parallel(p_fun(x_, W, new_len, npad, to_remove, cuda_dict)
+                         for x_ in x)
+            y = np.array(y)
+
+        # Restore the original array shape (modified for resampling)
+        orig_shape = list(orig_shape)
+        orig_shape[-1] = y.shape[1]
+        y.shape = tuple(orig_shape)
+    else:
+        warnings.warn('x has zero length along last axis, returning a copy of '
+                      'x')
+        y = x.copy()
+    return y
+
+
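+# Editor's sketch (illustrative, not upstream code): downsampling by a factor
+# of two with the function above; the padding samples are added and removed
+# internally, so the output is exactly half the input length.
+def _demo_resample():
+    sfreq = 1000.
+    t = np.arange(0, 1, 1. / sfreq)
+    x = np.sin(2 * np.pi * 8 * t)
+    y = resample(x, 1, 2)
+    assert y.shape == (500,)
+
+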
+def detrend(x, order=1, axis=-1):
+    """Detrend the array x.
+
+    Parameters
+    ----------
+    x : n-d array
+        Signal to detrend.
+    order : int
+        Fit order. Currently must be '0' or '1'.
+    axis : integer
+        Axis of the array to operate on.
+
+    Returns
+    -------
+    xf : array
+        x detrended.
+
+    Examples
+    --------
+    As in scipy.signal.detrend:
+        >>> randgen = np.random.RandomState(9)
+        >>> npoints = int(1e3)
+        >>> noise = randgen.randn(npoints)
+        >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
+        >>> (detrend(x) - noise).max() < 0.01
+        True
+    """
+    if axis > len(x.shape) - 1:
+        raise ValueError('x does not have %d axes' % axis)
+    if order == 0:
+        fit = 'constant'
+    elif order == 1:
+        fit = 'linear'
+    else:
+        raise ValueError('order must be 0 or 1')
+
+    y = signal.detrend(x, axis=axis, type=fit)
+
+    return y
+
+
+def _get_filter_length(filter_length, sfreq, min_length=128, len_x=np.inf):
+    """Helper to determine a reasonable filter length"""
+    if not isinstance(min_length, int):
+        raise ValueError('min_length must be an int')
+    if isinstance(filter_length, basestring):
+        # parse time values
+        if filter_length[-2:].lower() == 'ms':
+            mult_fact = 1e-3
+            filter_length = filter_length[:-2]
+        elif filter_length[-1].lower() == 's':
+            mult_fact = 1
+            filter_length = filter_length[:-1]
+        else:
+            raise ValueError('filter_length, if a string, must be a '
+                             'human-readable time (e.g., "10s"), not '
+                             '"%s"' % filter_length)
+        # now get the number
+        try:
+            filter_length = float(filter_length)
+        except ValueError:
+            raise ValueError('filter_length, if a string, must be a '
+                             'human-readable time (e.g., "10s"), not '
+                             '"%s"' % filter_length)
+        filter_length = 2 ** int(np.ceil(np.log2(filter_length
+                                                 * mult_fact * sfreq)))
+        # shouldn't make filter longer than length of x
+        if filter_length >= len_x:
+            filter_length = len_x
+        # only need to check min_length if the filter is shorter than len_x
+        elif filter_length < min_length:
+            filter_length = min_length
+            warnings.warn('filter_length was too short, using filter of '
+                          'length %d samples ("%0.1fs")'
+                          % (filter_length, filter_length / float(sfreq)))
+
+    if filter_length is not None:
+        if not isinstance(filter_length, int):
+            raise ValueError('filter_length must be str, int, or None')
+    return filter_length
+
+
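+# Editor's sketch (illustrative, not upstream code): how the string form of
+# filter_length is resolved by the helper above.
+def _demo_get_filter_length():
+    # "10s" at 1000 Hz -> next power of two >= 10000 samples
+    assert _get_filter_length('10s', 1000.) == 16384
+    # "100ms" at 1000 Hz -> next power of two >= 100 samples
+    assert _get_filter_length('100ms', 1000.) == 128
+
+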
+def _check_njobs(n_jobs, can_be_cuda=False):
+    if not isinstance(n_jobs, int):
+        if can_be_cuda is True:
+            raise ValueError('n_jobs must be an integer, or "cuda"')
+        else:
+            raise ValueError('n_jobs must be an integer')
+    if n_jobs < 1:
+        raise ValueError('n_jobs must be >= 1')
diff --git a/mne/fixes.py b/mne/fixes.py
new file mode 100644
index 0000000..6f9068e
--- /dev/null
+++ b/mne/fixes.py
@@ -0,0 +1,521 @@
+"""Compatibility fixes for older version of python, numpy and scipy
+
+If you add content to this file, please give the version of the package
+at which the fix is no longer needed.
+
+# XXX : copied from scikit-learn
+
+"""
+# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
+#          Gael Varoquaux <gael.varoquaux@normalesup.org>
+#          Fabian Pedregosa <fpedregosa@acm.org>
+#          Lars Buitinck <L.J.Buitinck@uva.nl>
+# License: BSD
+
+import collections
+from operator import itemgetter
+import inspect
+
+import numpy as np
+import scipy
+from math import ceil, log
+from numpy.fft import irfft
+from scipy.signal import filtfilt as sp_filtfilt
+from distutils.version import LooseVersion
+
+try:
+    Counter = collections.Counter
+except AttributeError:
+    class Counter(collections.defaultdict):
+        """Partial replacement for Python 2.7 collections.Counter."""
+        def __init__(self, iterable=(), **kwargs):
+            super(Counter, self).__init__(int, **kwargs)
+            self.update(iterable)
+
+        def most_common(self):
+            return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
+
+        def update(self, other):
+            """Adds counts for elements in other"""
+            if isinstance(other, self.__class__):
+                for x, n in other.iteritems():
+                    self[x] += n
+            else:
+                for x in other:
+                    self[x] += 1
+
+
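+# Editor's sketch (illustrative, not upstream code): the shim mirrors the
+# parts of collections.Counter that are used here.
+def _demo_counter():
+    c = Counter('abracadabra')
+    assert c.most_common()[0] == ('a', 5)
+
+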
+def lsqr(X, y, tol=1e-3):
+    import scipy.sparse.linalg as sp_linalg
+    from ..utils.extmath import safe_sparse_dot
+
+    if hasattr(sp_linalg, 'lsqr'):
+        # scipy 0.8 or greater
+        return sp_linalg.lsqr(X, y)
+    else:
+        n_samples, n_features = X.shape
+        if n_samples > n_features:
+            coef, _ = sp_linalg.cg(safe_sparse_dot(X.T, X),
+                                   safe_sparse_dot(X.T, y),
+                                   tol=tol)
+        else:
+            coef, _ = sp_linalg.cg(safe_sparse_dot(X, X.T), y, tol=tol)
+            coef = safe_sparse_dot(X.T, coef)
+
+        residues = y - safe_sparse_dot(X, coef)
+        return coef, None, None, residues
+
+
+def _unique(ar, return_index=False, return_inverse=False):
+    """A replacement for the np.unique that appeared in numpy 1.4.
+
+    While np.unique existed long before, keyword return_inverse was
+    only added in 1.4.
+    """
+    try:
+        ar = ar.flatten()
+    except AttributeError:
+        if not return_inverse and not return_index:
+            items = sorted(set(ar))
+            return np.asarray(items)
+        else:
+            ar = np.asarray(ar).flatten()
+
+    if ar.size == 0:
+        if return_inverse and return_index:
+            return ar, np.empty(0, np.bool), np.empty(0, np.bool)
+        elif return_inverse or return_index:
+            return ar, np.empty(0, np.bool)
+        else:
+            return ar
+
+    if return_inverse or return_index:
+        perm = ar.argsort()
+        aux = ar[perm]
+        flag = np.concatenate(([True], aux[1:] != aux[:-1]))
+        if return_inverse:
+            iflag = np.cumsum(flag) - 1
+            iperm = perm.argsort()
+            if return_index:
+                return aux[flag], perm[flag], iflag[iperm]
+            else:
+                return aux[flag], iflag[iperm]
+        else:
+            return aux[flag], perm[flag]
+
+    else:
+        ar.sort()
+        flag = np.concatenate(([True], ar[1:] != ar[:-1]))
+        return ar[flag]
+
+np_version = []
+for x in np.__version__.split('.'):
+    try:
+        np_version.append(int(x))
+    except ValueError:
+        # x may be of the form dev-1ea1592
+        np_version.append(x)
+# compare as a tuple: in Python 2 a list always sorts before a tuple,
+# so "np_version[:2] < (1, 5)" would be True for every numpy version
+np_version = tuple(np_version)
+
+if np_version[:2] < (1, 5):
+    unique = _unique
+else:
+    unique = np.unique
+
+
+def _bincount(X, weights=None, minlength=None):
+    """Replacing np.bincount in numpy < 1.6 to provide minlength."""
+    result = np.bincount(X, weights)
+    if len(result) >= minlength:
+        return result
+    out = np.zeros(minlength, np.int)
+    out[:len(result)] = result
+    return out
+
+if np_version[:2] < (1, 6):
+    bincount = _bincount
+else:
+    bincount = np.bincount
+
+
+def _copysign(x1, x2):
+    """Slow replacement for np.copysign, which was introduced in numpy 1.4"""
+    return np.abs(x1) * np.sign(x2)
+
+if not hasattr(np, 'copysign'):
+    copysign = _copysign
+else:
+    copysign = np.copysign
+
+
+def _in1d(ar1, ar2, assume_unique=False):
+    """Replacement for in1d that is provided for numpy >= 1.4"""
+    if not assume_unique:
+        ar1, rev_idx = unique(ar1, return_inverse=True)
+        ar2 = np.unique(ar2)
+    ar = np.concatenate((ar1, ar2))
+    # We need this to be a stable sort, so always use 'mergesort'
+    # here. The values from the first array should always come before
+    # the values from the second array.
+    order = ar.argsort(kind='mergesort')
+    sar = ar[order]
+    equal_adj = (sar[1:] == sar[:-1])
+    flag = np.concatenate((equal_adj, [False]))
+    indx = order.argsort(kind='mergesort')[:len(ar1)]
+
+    if assume_unique:
+        return flag[indx]
+    else:
+        return flag[indx][rev_idx]
+
+if not hasattr(np, 'in1d'):
+    in1d = _in1d
+else:
+    in1d = np.in1d
+
+
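+# Editor's sketch (illustrative, not upstream code): the backported _in1d
+# agrees with the membership test it replaces.
+def _demo_in1d():
+    a = np.array([1, 2, 3, 2])
+    b = np.array([2, 4])
+    assert np.array_equal(_in1d(a, b), [False, True, False, True])
+
+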
+def _tril_indices(n, k=0):
+    """Replacement for tril_indices that is provided for numpy >= 1.4"""
+    mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
+    indices = np.where(mask)
+
+    return indices
+
+if not hasattr(np, 'tril_indices'):
+    tril_indices = _tril_indices
+else:
+    tril_indices = np.tril_indices
+
+
+def _unravel_index(indices, dims):
+    """Add support for multiple indices in unravel_index that is provided
+    for numpy >= 1.4"""
+    indices_arr = np.asarray(indices)
+    if indices_arr.size == 1:
+        return np.unravel_index(indices, dims)
+    else:
+        if indices_arr.ndim != 1:
+            raise ValueError('indices should be one dimensional')
+
+        ndims = len(dims)
+        unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
+        for coord, idx in zip(unraveled_coords, indices_arr):
+            coord[:] = np.unravel_index(idx, dims)
+        return tuple(unraveled_coords.T)
+
+
+if np_version[:2] < (1, 4):
+    unravel_index = _unravel_index
+else:
+    unravel_index = np.unravel_index
+
+
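+# Editor's sketch (illustrative, not upstream code): the shim accepts several
+# flat indices at once, matching numpy >= 1.4 behavior.
+def _demo_unravel_index():
+    rows, cols = _unravel_index([3, 7], (3, 4))
+    assert list(rows) == [0, 1]
+    assert list(cols) == [3, 3]
+
+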
+def qr_economic(A, **kwargs):
+    """Compat function for the QR-decomposition in economic mode
+
+    Scipy 0.9 changed the keyword econ=True to mode='economic'
+    """
+    import scipy.linalg
+    # trick: triangular solve was introduced in scipy 0.9
+    if hasattr(scipy.linalg, 'solve_triangular'):
+        return scipy.linalg.qr(A, mode='economic', **kwargs)
+    else:
+        import warnings
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", DeprecationWarning)
+            return scipy.linalg.qr(A, econ=True, **kwargs)
+
+
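+# Editor's sketch (illustrative, not upstream code): economic-mode QR returns
+# the reduced factors regardless of the scipy version dispatched to above.
+def _demo_qr_economic():
+    A = np.arange(12.).reshape(4, 3)
+    Q, R = qr_economic(A)
+    assert Q.shape == (4, 3) and R.shape == (3, 3)
+    assert np.allclose(np.dot(Q, R), A)
+
+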
+def savemat(file_name, mdict, oned_as="column", **kwargs):
+    """MATLAB-format output routine that is compatible with SciPy 0.7's.
+
+    0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
+    value. It issues a warning if this is not provided, stating that "This will
+    change to 'row' in future versions."
+    """
+    import scipy.io
+    try:
+        return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
+    except TypeError:
+        return scipy.io.savemat(file_name, mdict, **kwargs)
+
+if hasattr(np, 'count_nonzero'):
+    from numpy import count_nonzero
+else:
+    def count_nonzero(X):
+        return len(np.flatnonzero(X))
+
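+# Example sketch: count_nonzero(np.array([0, 1, 2, 0])) returns 2 with
+# either implementation.
+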
+# little dance to see if np.copy has an 'order' keyword argument
+if 'order' in inspect.getargspec(np.copy)[0]:
+    def safe_copy(X):
+        # Copy, but keep the order
+        return np.copy(X, order='K')
+else:
+    # Before an 'order' argument was introduced, numpy wouldn't muck with
+    # the ordering
+    safe_copy = np.copy
+
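+# Example sketch (assumed behavior of order='K'): for
+# X = np.asfortranarray(np.ones((2, 2))), safe_copy(X) keeps the Fortran
+# memory layout where the running numpy allows it.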
+
+# wrap filtfilt, excluding padding arguments
+def _filtfilt(*args, **kwargs):
+    # cut out filter args
+    if len(args) > 4:
+        args = args[:4]
+    if 'padlen' in kwargs:
+        del kwargs['padlen']
+    return sp_filtfilt(*args, **kwargs)
+
+if 'padlen' not in inspect.getargspec(sp_filtfilt)[0]:
+    filtfilt = _filtfilt
+else:
+    filtfilt = sp_filtfilt
+
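+# Example sketch (names from scipy.signal): with b, a = butter(4, 0.2),
+# callers can invoke filtfilt(b, a, x, padlen=None) uniformly; on older
+# scipy the padding arguments are stripped before delegating.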
+
+###############################################################################
+# Back porting firwin2 for older scipy
+
+# Original version of firwin2 from scipy ticket #457, submitted by "tash".
+#
+# Rewritten by Warren Weckesser, 2010.
+
+
+def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
+    """FIR filter design using the window method.
+
+    From the given frequencies `freq` and corresponding gains `gain`,
+    this function constructs an FIR filter with linear phase and
+    (approximately) the given frequency response.
+
+    Parameters
+    ----------
+    numtaps : int
+        The number of taps in the FIR filter.  `numtaps` must be less than
+        `nfreqs`.  If the gain at the Nyquist rate, `gain[-1]`, is not 0,
+        then `numtaps` must be odd.
+
+    freq : array-like, 1D
+        The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
+        Nyquist.  The Nyquist frequency can be redefined with the argument
+        `nyq`.
+
+        The values in `freq` must be nondecreasing.  A value can be repeated
+        once to implement a discontinuity.  The first value in `freq` must
+        be 0, and the last value must be `nyq`.
+
+    gain : array-like
+        The filter gains at the frequency sampling points.
+
+    nfreqs : int, optional
+        The size of the interpolation mesh used to construct the filter.
+        For most efficient behavior, this should be a power of 2 plus 1
+        (e.g., 129, 257, etc.).  The default is one more than the smallest
+        power of 2 that is not less than `numtaps`.  `nfreqs` must be greater
+        than `numtaps`.
+
+    window : string or (string, float) or float, or None, optional
+        Window function to use. Default is "hamming".  See
+        `scipy.signal.get_window` for the complete list of possible values.
+        If None, no window function is applied.
+
+    nyq : float
+        Nyquist frequency.  Each frequency in `freq` must be between 0 and
+        `nyq` (inclusive).
+
+    Returns
+    -------
+    taps : numpy 1D array of length `numtaps`
+        The filter coefficients of the FIR filter.
+
+    Examples
+    --------
+    A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
+    that decreases linearly on [0.5, 1.0] from 1 to 0:
+
+    >>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+    >>> print(taps[72:78])
+    [-0.02286961 -0.06362756  0.57310236  0.57310236 -0.06362756 -0.02286961]
+
+    See also
+    --------
+    scipy.signal.firwin
+
+    Notes
+    -----
+
+    From the given set of frequencies and gains, the desired response is
+    constructed in the frequency domain.  The inverse FFT is applied to the
+    desired response to create the associated convolution kernel, and the
+    first `numtaps` coefficients of this kernel, scaled by `window`, are
+    returned.
+
+    The FIR filter will have linear phase.  The filter is Type I if `numtaps`
+    is odd and Type II if `numtaps` is even.  Because Type II filters always
+    have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
+    is not zero.
+
+    .. versionadded:: 0.9.0
+
+    References
+    ----------
+    .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
+       Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
+       (See, for example, Section 7.4.)
+
+    .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
+       Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
+
+    """
+
+    if len(freq) != len(gain):
+        raise ValueError('freq and gain must be of same length.')
+
+    if nfreqs is not None and numtaps >= nfreqs:
+        raise ValueError('numtaps must be less than nfreqs, but firwin2 was '
+                         'called with numtaps=%d and nfreqs=%s'
+                         % (numtaps, nfreqs))
+
+    if freq[0] != 0 or freq[-1] != nyq:
+        raise ValueError('freq must start with 0 and end with `nyq`.')
+    d = np.diff(freq)
+    if (d < 0).any():
+        raise ValueError('The values in freq must be nondecreasing.')
+    d2 = d[:-1] + d[1:]
+    if (d2 == 0).any():
+        raise ValueError('A value in freq must not occur more than twice.')
+
+    if numtaps % 2 == 0 and gain[-1] != 0.0:
+        raise ValueError("A filter with an even number of coefficients must "
+                            "have zero gain at the Nyquist rate.")
+
+    if nfreqs is None:
+        nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
+
+    # Tweak any repeated values in freq so that interp works.
+    eps = np.finfo(float).eps
+    for k in range(len(freq)):
+        if k < len(freq) - 1 and freq[k] == freq[k + 1]:
+            freq[k] = freq[k] - eps
+            freq[k + 1] = freq[k + 1] + eps
+
+    # Linearly interpolate the desired response on a uniform mesh `x`.
+    x = np.linspace(0.0, nyq, nfreqs)
+    fx = np.interp(x, freq, gain)
+
+    # Adjust the phases of the coefficients so that the first `numtaps` of
+    # the inverse FFT are the desired filter coefficients.
+    shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
+    fx2 = fx * shift
+
+    # Use irfft to compute the inverse FFT.
+    out_full = irfft(fx2)
+
+    if window is not None:
+        # Create the window to apply to the filter coefficients.
+        from scipy.signal.signaltools import get_window
+        wind = get_window(window, numtaps, fftbins=False)
+    else:
+        wind = 1
+
+    # Keep only the first `numtaps` coefficients in `out`, and multiply by
+    # the window.
+    out = out_full[:numtaps] * wind
+
+    return out
+
+if hasattr(scipy.signal, 'firwin2'):
+    from scipy.signal import firwin2
+else:
+    firwin2 = _firwin2
+
+
+###############################################################################
+# Back porting matrix_rank for numpy <= 1.7.1
+
+
+def _matrix_rank(M, tol=None):
+    """ Return matrix rank of array using SVD method
+
+    Rank of the array is the number of SVD singular values of the array that
+    are greater than `tol`.
+
+    Parameters
+    ----------
+    M : {(M,), (M, N)} array_like
+        array of <=2 dimensions
+    tol : {None, float}, optional
+       threshold below which SVD values are considered zero. If `tol` is
+       None, and ``S`` is an array with singular values for `M`, and
+       ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
+       set to ``S.max() * max(M.shape) * eps``.
+
+    Notes
+    -----
+    The default threshold to detect rank deficiency is a test on the magnitude
+    of the singular values of `M`. By default, we identify singular values less
+    than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
+    the symbols defined above). This is the algorithm MATLAB uses [1]. It also
+    appears in *Numerical recipes* in the discussion of SVD solutions for
+    linear least squares [2].
+
+    This default threshold is designed to detect rank deficiency accounting
+    for the numerical errors of the SVD computation. Imagine that there is a
+    column in `M` that is an exact (in floating point) linear combination of
+    other columns in `M`. Computing the SVD on `M` will not produce a
+    singular value exactly equal to 0 in general: any difference of the
+    smallest SVD value from 0 will be caused by numerical imprecision in the
+    calculation of the SVD. Our threshold for small SVD values takes this
+    numerical imprecision into account, and the default threshold will detect
+    such numerical rank deficiency. The threshold may declare a matrix `M`
+    rank deficient even if the linear combination of some columns of `M` is
+    not exactly equal to another column of `M` but only numerically very
+    close to another column of `M`.
+
+    We chose our default threshold because it is in wide use. Other
+    thresholds are possible. For example, elsewhere in the 2007 edition of
+    *Numerical recipes* there is an alternative threshold of ``S.max() *
+    np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
+    this threshold as being based on "expected roundoff error" (p 71).
+
+    The thresholds above deal with floating point roundoff error in the
+    calculation of the SVD. However, you may have more information about the
+    sources of error in `M` that would make you consider other tolerance
+    values to detect *effective* rank deficiency. The most useful measure of
+    the tolerance depends on the operations you intend to use on your matrix.
+    For example, if your data come from uncertain measurements with
+    uncertainties greater than floating point epsilon, choosing a tolerance
+    near that uncertainty may be preferable. The tolerance may be absolute if
+    the uncertainties are absolute rather than relative.
+
+    References
+    ----------
+    .. [1] MATLAB reference documentation, "Rank"
+           http://www.mathworks.com/help/techdoc/ref/rank.html
+    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
+           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
+           page 795.
+
+    Examples
+    --------
+    >>> from numpy.linalg import matrix_rank
+    >>> matrix_rank(np.eye(4)) # Full rank matrix
+    4
+    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
+    >>> matrix_rank(I)
+    3
+    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
+    1
+    >>> matrix_rank(np.zeros((4,)))
+    0
+    """
+    M = np.asarray(M)
+    if M.ndim > 2:
+        raise TypeError('array should have 2 or fewer dimensions')
+    if M.ndim < 2:
+        return np.int(not all(M == 0))
+    S = np.linalg.svd(M, compute_uv=False)
+    if tol is None:
+        tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
+    return np.sum(S > tol)
+
+if LooseVersion(np.__version__) > '1.7.1':
+    from numpy.linalg import matrix_rank
+else:
+    matrix_rank = _matrix_rank
diff --git a/mne/forward.py b/mne/forward.py
new file mode 100644
index 0000000..e5b12eb
--- /dev/null
+++ b/mne/forward.py
@@ -0,0 +1,1468 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from time import time
+import warnings
+from copy import deepcopy
+
+import numpy as np
+from scipy import linalg, sparse
+
+import shutil
+import os
+from os import path as op
+import tempfile
+
+import logging
+logger = logging.getLogger('mne')
+
+from .fiff.constants import FIFF
+from .fiff.open import fiff_open
+from .fiff.tree import dir_tree_find
+from .fiff.channels import read_bad_channels
+from .fiff.tag import find_tag, read_tag
+from .fiff.matrix import _read_named_matrix, _transpose_named_matrix, \
+                         write_named_matrix
+from .fiff.pick import pick_channels_forward, pick_info, pick_channels, \
+                       pick_types
+from .fiff.write import write_int, start_block, end_block, \
+                        write_coord_trans, write_ch_info, write_name_list, \
+                        write_string, start_file, end_file, write_id
+from .fiff.raw import Raw
+from .fiff.evoked import Evoked, write_evoked
+from .event import make_fixed_length_events
+from .epochs import Epochs
+from .source_space import read_source_spaces_from_tree, \
+                          find_source_space_hemi, write_source_spaces_to_fid
+from .transforms import transform_source_space_to, invert_transform, \
+                        write_trans
+from .utils import _check_fname, get_subjects_dir, has_command_line_tools, \
+                   run_subprocess
+from . import verbose
+
+
+def _block_diag(A, n):
+    """Constructs a block diagonal from a packed structure
+
+    You have to try it on a matrix to see what it's doing.
+
+    If A is not sparse, then returns a sparse block diagonal "bd",
+    diagonalized from the elements in "A". "A" is ma x na, comprising
+    bdn=(na/"n") blocks of submatrices. Each submatrix is ma x "n", and
+    these submatrices are placed down the diagonal of the matrix.
+
+    If A is already sparse, then the operation is reversed, yielding a
+    block row matrix, where each set of n columns corresponds to a block
+    element from the block diagonal.
+
+    Parameters
+    ----------
+    A : array
+        The matrix
+    n : int
+        The block size
+    Returns
+    -------
+    bd : sparse matrix
+        The block diagonal matrix
+    """
+    if sparse.issparse(A):  # then make block sparse
+        raise NotImplementedError('sparse reversal not implemented yet')
+    ma, na = A.shape
+    bdn = na / int(n)  # number of submatrices
+
+    if na % n > 0:
+        raise ValueError('Width of matrix must be a multiple of n')
+
+    tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
+    tmp = np.tile(tmp, (1, n))
+    ii = tmp.ravel()
+
+    jj = np.arange(na, dtype=np.int)[None, :]
+    jj = jj * np.ones(ma, dtype=np.int)[:, None]
+    jj = jj.T.ravel()  # column indices for each sparse bd entry
+
+    bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
+
+    return bd
+
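+# Example sketch: with A = np.array([[1., 2., 3., 4.],
+#                                    [5., 6., 7., 8.]]) and n=2,
+# _block_diag(A, 2).todense() is the 4 x 4 block diagonal
+#     [[1., 2., 0., 0.],
+#      [5., 6., 0., 0.],
+#      [0., 0., 3., 4.],
+#      [0., 0., 7., 8.]]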
+
+def _inv_block_diag(A, n):
+    """Constructs an inverse block diagonal from a packed structure
+
+    You have to try it on a matrix to see what it's doing.
+
+    "A" is ma x na, comprising bdn=(na/"n") blocks of submatrices.
+    Each submatrix is ma x "n", and the inverses of these submatrices
+    are placed down the diagonal of the matrix.
+
+    Parameters
+    ----------
+    A : array
+        The matrix.
+    n : int
+        The block size.
+    Returns
+    -------
+    bd : sparse matrix
+        The block diagonal matrix.
+    """
+    ma, na = A.shape
+    bdn = na / int(n)  # number of submatrices
+
+    if na % n > 0:
+        raise ValueError('Width of matrix must be a multiple of n')
+
+    # invert each ma x n sub-block on a copy of A
+    A = A.copy()
+    for start in xrange(0, na, n):
+        # this is a view
+        A[:, start:start + n] = linalg.inv(A[:, start:start + n])
+
+    tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
+    tmp = np.tile(tmp, (1, n))
+    ii = tmp.ravel()
+
+    jj = np.arange(na, dtype=np.int)[None, :]
+    jj = jj * np.ones(ma, dtype=np.int)[:, None]
+    jj = jj.T.ravel()  # column indices for each sparse bd entry
+
+    bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
+
+    return bd
+
+
+def _read_one(fid, node):
+    """Read all interesting stuff for one forward solution
+    """
+    if node is None:
+        return None
+
+    one = dict()
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
+    if tag is None:
+        fid.close()
+        raise ValueError('Source orientation tag not found')
+    one['source_ori'] = int(tag.data)
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        fid.close()
+        raise ValueError('Coordinate frame tag not found')
+    one['coord_frame'] = int(tag.data)
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+    if tag is None:
+        fid.close()
+        raise ValueError('Number of sources not found')
+    one['nsource'] = int(tag.data)
+
+    tag = find_tag(fid, node, FIFF.FIFF_NCHAN)
+    if tag is None:
+        fid.close()
+        raise ValueError('Number of channels not found')
+    one['nchan'] = int(tag.data)
+
+    try:
+        one['sol'] = _read_named_matrix(fid, node,
+                                        FIFF.FIFF_MNE_FORWARD_SOLUTION)
+        one['sol'] = _transpose_named_matrix(one['sol'], copy=False)
+    except:
+        fid.close()
+        logger.error('Forward solution data not found')
+        raise
+
+    try:
+        one['sol_grad'] = _read_named_matrix(fid, node,
+                                        FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD)
+        one['sol_grad'] = _transpose_named_matrix(one['sol_grad'], copy=False)
+    except:
+        one['sol_grad'] = None
+
+    if one['sol']['data'].shape[0] != one['nchan'] or \
+                (one['sol']['data'].shape[1] != one['nsource'] and
+                 one['sol']['data'].shape[1] != 3 * one['nsource']):
+        fid.close()
+        raise ValueError('Forward solution matrix has wrong dimensions')
+
+    if one['sol_grad'] is not None:
+        if one['sol_grad']['data'].shape[0] != one['nchan'] or \
+                (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and
+                 one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):
+            fid.close()
+            raise ValueError('Forward solution gradient matrix has '
+                             'wrong dimensions')
+
+    return one
+
+
+def read_forward_meas_info(tree, fid):
+    """Read light measurement info from forward operator
+
+    Parameters
+    ----------
+    tree : tree
+        FIF tree structure.
+    fid : file id
+        The file id.
+
+    Returns
+    -------
+    info : dict
+        The measurement info.
+    """
+    info = dict()
+
+    # Information from the MRI file
+    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    if len(parent_mri) == 0:
+        fid.close()
+        raise ValueError('No parent MRI information found in operator')
+    parent_mri = parent_mri[0]
+
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)
+    info['mri_file'] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)
+    info['mri_id'] = tag.data if tag is not None else None
+
+    # Information from the MEG file
+    parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+    if len(parent_meg) == 0:
+        fid.close()
+        raise ValueError('No parent MEG information found in operator')
+    parent_meg = parent_meg[0]
+
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)
+    info['meas_file'] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)
+    info['meas_id'] = tag.data if tag is not None else None
+
+    # Add channel information
+    chs = list()
+    for k in range(parent_meg['nent']):
+        kind = parent_meg['directory'][k].kind
+        pos = parent_meg['directory'][k].pos
+        if kind == FIFF.FIFF_CH_INFO:
+            tag = read_tag(fid, pos)
+            chs.append(tag.data)
+    info['chs'] = chs
+
+    info['ch_names'] = [c['ch_name'] for c in chs]
+    info['nchan'] = len(chs)
+
+    #   Get the MRI <-> head coordinate transformation
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+    if tag is None:
+        fid.close()
+        raise ValueError('MRI/head coordinate transformation not found')
+    else:
+        cand = tag.data
+        if cand['from'] == FIFF.FIFFV_COORD_MRI and \
+                            cand['to'] == FIFF.FIFFV_COORD_HEAD:
+            info['mri_head_t'] = cand
+        else:
+            raise ValueError('MRI/head coordinate transformation not '
+                             'found')
+
+    #   Get the MEG device <-> head coordinate transformation
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
+    if tag is None:
+        fid.close()
+        raise ValueError('MEG/head coordinate transformation not found')
+    else:
+        cand = tag.data
+        if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
+                            cand['to'] == FIFF.FIFFV_COORD_HEAD:
+            info['dev_head_t'] = cand
+        elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
+                            cand['to'] == FIFF.FIFFV_COORD_HEAD:
+            info['ctf_head_t'] = cand
+        else:
+            raise ValueError('MEG device/head coordinate transformation not '
+                                 'found')
+
+    info['bads'] = read_bad_channels(fid, parent_meg)
+    return info
+
+
+def _subject_from_forward(forward):
+    """Get subject id from inverse operator"""
+    return forward['src'][0].get('subject_his_id', None)
+
+
+@verbose
+def read_forward_solution(fname, force_fixed=False, surf_ori=False,
+                              include=[], exclude=[], verbose=None):
+    """Read a forward solution a.k.a. lead field
+
+    Parameters
+    ----------
+    fname : string
+        The file name.
+    force_fixed : bool, optional (default False)
+        Force fixed source orientation mode?
+    surf_ori : bool, optional (default False)
+        Use surface based source coordinate system?
+    include : list, optional
+        List of names of channels to include. If empty all channels
+        are included.
+    exclude : list, optional
+        List of names of channels to exclude. If empty, all channels
+        are included.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fwd : dict
+        The forward solution.
+    """
+
+    #   Open the file, create directory
+    logger.info('Reading forward solution from %s...' % fname)
+    fid, tree, _ = fiff_open(fname)
+
+    #   Find all forward solutions
+    fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+    if len(fwds) == 0:
+        fid.close()
+        raise ValueError('No forward solutions in %s' % fname)
+
+    #   Parent MRI data
+    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    if len(parent_mri) == 0:
+        fid.close()
+        raise ValueError('No parent MRI information in %s' % fname)
+    parent_mri = parent_mri[0]
+
+    try:
+        src = read_source_spaces_from_tree(fid, tree, add_geom=False)
+    except Exception as inst:
+        fid.close()
+        raise ValueError('Could not read the source spaces (%s)' % inst)
+
+    for s in src:
+        s['id'] = find_source_space_hemi(s)
+
+    fwd = None
+
+    #   Locate and read the forward solutions
+    megnode = None
+    eegnode = None
+    for k in range(len(fwds)):
+        tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)
+        if tag is None:
+            fid.close()
+            raise ValueError('Methods not listed for one of the forward '
+                             'solutions')
+
+        if tag.data == FIFF.FIFFV_MNE_MEG:
+            megnode = fwds[k]
+        elif tag.data == FIFF.FIFFV_MNE_EEG:
+            eegnode = fwds[k]
+
+    megfwd = _read_one(fid, megnode)
+    if megfwd is not None:
+        if is_fixed_orient(megfwd):
+            ori = 'fixed'
+        else:
+            ori = 'free'
+        logger.info('    Read MEG forward solution (%d sources, %d channels, '
+                    '%s orientations)' % (megfwd['nsource'], megfwd['nchan'],
+                                          ori))
+
+    eegfwd = _read_one(fid, eegnode)
+    if eegfwd is not None:
+        if is_fixed_orient(eegfwd):
+            ori = 'fixed'
+        else:
+            ori = 'free'
+        logger.info('    Read EEG forward solution (%d sources, %d channels, '
+                    '%s orientations)' % (eegfwd['nsource'], eegfwd['nchan'],
+                                          ori))
+
+    #   Merge the MEG and EEG solutions together
+    if megfwd is not None and eegfwd is not None:
+        if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or
+                megfwd['source_ori'] != eegfwd['source_ori'] or
+                megfwd['nsource'] != eegfwd['nsource'] or
+                megfwd['coord_frame'] != eegfwd['coord_frame']):
+            fid.close()
+            raise ValueError('The MEG and EEG forward solutions do not match')
+
+        fwd = megfwd
+        fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]
+        fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']
+
+        fwd['sol']['row_names'] = fwd['sol']['row_names'] + \
+                                  eegfwd['sol']['row_names']
+        if fwd['sol_grad'] is not None:
+            fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],
+                                            eegfwd['sol_grad']['data']]
+            fwd['sol_grad']['nrow'] = fwd['sol_grad']['nrow'] + \
+                                      eegfwd['sol_grad']['nrow']
+            fwd['sol_grad']['row_names'] = fwd['sol_grad']['row_names'] + \
+                                           eegfwd['sol_grad']['row_names']
+
+        fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']
+        logger.info('    MEG and EEG forward solutions combined')
+    elif megfwd is not None:
+        fwd = megfwd
+    else:
+        fwd = eegfwd
+
+    del megfwd
+    del eegfwd
+
+    #   Get the MRI <-> head coordinate transformation
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+    if tag is None:
+        fid.close()
+        raise ValueError('MRI/head coordinate transformation not found')
+    else:
+        mri_head_t = tag.data
+        if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
+                mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
+            mri_head_t = invert_transform(mri_head_t)
+            if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI
+                    or mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
+                fid.close()
+                raise ValueError('MRI/head coordinate transformation not '
+                                 'found')
+    fwd['mri_head_t'] = mri_head_t
+
+    #
+    # get parent MEG info
+    #
+    fwd['info'] = read_forward_meas_info(tree, fid)
+
+    # MNE environment
+    parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
+    if len(parent_env) > 0:
+        parent_env = parent_env[0]
+        tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)
+        if tag is not None:
+            fwd['info']['working_dir'] = tag.data
+        tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)
+        if tag is not None:
+            fwd['info']['command_line'] = tag.data
+
+    fid.close()
+
+    #   Transform the source spaces to the correct coordinate frame
+    #   if necessary
+
+    if (fwd['coord_frame'] != FIFF.FIFFV_COORD_MRI and
+            fwd['coord_frame'] != FIFF.FIFFV_COORD_HEAD):
+        raise ValueError('Only forward solutions computed in MRI or head '
+                         'coordinates are acceptable')
+
+    nuse = 0
+    for s in src:
+        try:
+            s = transform_source_space_to(s, fwd['coord_frame'], mri_head_t)
+        except Exception as inst:
+            raise ValueError('Could not transform source space (%s)' % inst)
+
+        nuse += s['nuse']
+
+    if nuse != fwd['nsource']:
+        raise ValueError('Source spaces do not match the forward solution.')
+
+    logger.info('    Source spaces transformed to the forward solution '
+                'coordinate frame')
+    fwd['src'] = src
+
+    #   Handle the source locations and orientations
+    fwd['source_rr'] = np.concatenate([s['rr'][s['vertno'], :] for s in src],
+                                      axis=0)
+    if is_fixed_orient(fwd) or force_fixed:
+        nuse = 0
+        fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]
+                                           for s in src], axis=0)
+
+        #   Modify the forward solution for fixed source orientations
+        if not is_fixed_orient(fwd):
+            logger.info('    Changing to fixed-orientation forward '
+                        'solution...')
+            fix_rot = _block_diag(fwd['source_nn'].T, 1)
+            # newer versions of numpy require explicit casting here, so *= no
+            # longer works
+            fwd['sol']['data'] = (fwd['sol']['data']
+                                  * fix_rot).astype('float32')
+            fwd['sol']['ncol'] = fwd['nsource']
+            fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
+
+            if fwd['sol_grad'] is not None:
+                fwd['sol_grad']['data'] = np.dot(fwd['sol_grad']['data'],
+                                                 np.kron(fix_rot, np.eye(3)))
+                fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
+            logger.info('    [done]')
+    elif surf_ori:
+        #   Rotate the local source coordinate systems
+        logger.info('    Converting to surface-based source orientations...')
+        nuse_total = sum([s['nuse'] for s in src])
+        fwd['source_nn'] = np.empty((3 * nuse_total, 3), dtype=np.float)
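+        # note: 's' is still bound to the last source space from the loop
+        # above; patch information is assumed consistent across source spaces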
+        if s['patch_inds'] is not None:
+            use_ave_nn = True
+            logger.info('    Average patch normals will be employed in the '
+                        'rotation to the local surface coordinates...')
+        else:
+            use_ave_nn = False
+        nuse = 0
+        pp = 0
+        for s in src:
+            for p in range(s['nuse']):
+                #  Project out the surface normal and compute SVD
+                if use_ave_nn is True:
+                    nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]
+                    nn = np.sum(nn, axis=0)[:, np.newaxis]
+                    nn /= linalg.norm(nn)
+                else:
+                    nn = s['nn'][s['vertno'][p], :][:, np.newaxis]
+                U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)
+                #  Make sure that ez is in the direction of nn
+                if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:
+                    U *= -1.0
+                fwd['source_nn'][pp:pp + 3, :] = U.T
+                pp += 3
+            nuse += s['nuse']
+
+        surf_rot = _block_diag(fwd['source_nn'].T, 3)
+        fwd['sol']['data'] = fwd['sol']['data'] * surf_rot
+        if fwd['sol_grad'] is not None:
+            fwd['sol_grad']['data'] = np.dot(fwd['sol_grad']['data'],
+                                             np.kron(surf_rot, np.eye(3)))
+        logger.info('[done]')
+    else:
+        logger.info('    Cartesian source orientations...')
+        fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
+        logger.info('[done]')
+
+    fwd['surf_ori'] = surf_ori
+
+    fwd = pick_channels_forward(fwd, include=include, exclude=exclude)
+
+    return fwd
+
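+# Usage sketch (hypothetical file name):
+#     fwd = read_forward_solution('sample-fwd.fif', surf_ori=True)
+#     gain = fwd['sol']['data']  # n_channels x (3 * n_sources) when free-ori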
+
+@verbose
+def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
+    """Write forward solution to a file
+
+    Parameters
+    ----------
+    fname : str
+        File name to save the forward solution to.
+    fwd : dict
+        Forward solution.
+    overwrite : bool
+        If True, overwrite destination file (if it exists).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    # check for file existence
+    _check_fname(fname, overwrite)
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MNE)
+
+    #
+    # MNE env
+    #
+    start_block(fid, FIFF.FIFFB_MNE_ENV)
+    write_id(fid, FIFF.FIFF_BLOCK_ID)
+    data = fwd['info'].get('working_dir', None)
+    if data is not None:
+        write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
+    data = fwd['info'].get('command_line', None)
+    if data is not None:
+        write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
+    end_block(fid, FIFF.FIFFB_MNE_ENV)
+
+    #
+    # Information from the MRI file
+    #
+    start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])
+    if fwd['info']['mri_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])
+    write_coord_trans(fid, fwd['info']['mri_head_t'])
+    end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+
+    # write measurement info
+    write_forward_meas_info(fid, fwd['info'])
+
+    # invert our original source space transform
+    src = list()
+    for s in fwd['src']:
+        s = deepcopy(s)
+        try:
+            s = transform_source_space_to(s, fwd['mri_head_t']['from'],
+                                          fwd['mri_head_t'])
+        except Exception as inst:
+            raise ValueError('Could not transform source space (%s)' % inst)
+        src.append(s)
+
+    #
+    # Write the source spaces (again)
+    #
+    write_source_spaces_to_fid(fid, src)
+    n_vert = sum([s['nuse'] for s in src])
+    n_col = fwd['sol']['data'].shape[1]
+    if fwd['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
+        assert n_col == n_vert
+    else:
+        assert n_col == 3 * n_vert
+
+    # Undo surf_ori rotation
+    sol = fwd['sol']['data']
+    if fwd['sol_grad'] is not None:
+        sol_grad = fwd['sol_grad']['data']
+    else:
+        sol_grad = None
+
+    if fwd['surf_ori'] is True:
+        inv_rot = _inv_block_diag(fwd['source_nn'].T, 3)
+        sol = sol * inv_rot
+        if sol_grad is not None:
+            sol_grad = np.dot(sol_grad, np.kron(inv_rot, np.eye(3)))
+
+    #
+    # MEG forward solution
+    #
+    picks_meg = pick_types(fwd['info'], meg=True, eeg=False, exclude=[])
+    picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, exclude=[])
+    n_meg = len(picks_meg)
+    n_eeg = len(picks_eeg)
+    row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]
+    row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]
+
+    if n_meg > 0:
+        meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,
+                            row_names=row_names_meg, col_names=[])
+        meg_solution = _transpose_named_matrix(meg_solution, copy=False)
+        start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+        write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)
+        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd['source_ori'])
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
+        write_int(fid, FIFF.FIFF_NCHAN, n_meg)
+        write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)
+        if sol_grad is not None:
+            meg_solution_grad = dict(data=sol_grad[picks_meg],
+                                     nrow=n_meg, ncol=n_col,
+                                     row_names=row_names_meg, col_names=[])
+            meg_solution_grad = _transpose_named_matrix(meg_solution_grad,
+                                                        copy=False)
+            write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
+                               meg_solution_grad)
+        end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+
+    #
+    #  EEG forward solution
+    #
+    if n_eeg > 0:
+        eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,
+                            row_names=row_names_eeg, col_names=[])
+        eeg_solution = _transpose_named_matrix(eeg_solution, copy=False)
+        start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+        write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)
+        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd['source_ori'])
+        write_int(fid, FIFF.FIFF_NCHAN, n_eeg)
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
+        write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)
+        if sol_grad is not None:
+            eeg_solution_grad = dict(data=sol_grad[picks_eeg],
+                                     nrow=n_eeg, ncol=n_col,
+                                     row_names=row_names_eeg, col_names=[])
+            eeg_solution_grad = _transpose_named_matrix(eeg_solution_grad,
+                                                        copy=False)
+            write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
+                               eeg_solution_grad)
+        end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+
+    end_block(fid, FIFF.FIFFB_MNE)
+    end_file(fid)
+
+
+def _to_fixed_ori(forward):
+    """Helper to convert the forward solution to fixed ori from free"""
+    if not forward['surf_ori'] or is_fixed_orient(forward):
+        raise ValueError('Only surface-oriented, free-orientation forward '
+                         'solutions can be converted to fixed orientation')
+    forward['sol']['data'] = forward['sol']['data'][:, 2::3]
+    forward['sol']['ncol'] = forward['sol']['ncol'] / 3
+    forward['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
+    logger.info('    Converted the forward solution into the '
+                'fixed-orientation mode.')
+    return forward
+
+
+def is_fixed_orient(forward):
+    """Has forward operator fixed orientation?
+    """
+    is_fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
+    return is_fixed_ori
+
+
+def write_forward_meas_info(fid, info):
+    """Write measurement info stored in forward solution
+
+    Parameters
+    ----------
+    fid : file id
+        The file id
+    info : dict
+        The measurement info.
+    """
+    #
+    # Information from the MEG file
+    #
+    start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+    write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])
+    if info['meas_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
+    meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))
+    if meg_head_t is None:
+        fid.close()
+        raise ValueError('Head<-->sensor transform not found')
+    write_coord_trans(fid, meg_head_t)
+
+    if 'chs' in info:
+        #  Channel information
+        write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))
+        for k, c in enumerate(info['chs']):
+            #   Scan numbers may have been messed up
+            c = deepcopy(c)
+            c['scanno'] = k + 1
+            write_ch_info(fid, c)
+    if 'bads' in info and len(info['bads']) > 0:
+        #   Bad channels
+        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
+        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+
+
+@verbose
+def compute_orient_prior(forward, loose=0.2, verbose=None):
+    """Compute orientation prior
+
+    Parameters
+    ----------
+    forward : dict
+        Forward operator.
+    loose : float in [0, 1] or None
+        The loose orientation parameter.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    orient_prior : array
+        Orientation priors.
+    """
+    is_fixed_ori = is_fixed_orient(forward)
+    n_sources = forward['sol']['data'].shape[1]
+
+    if loose is not None:
+        if not (0 <= loose <= 1):
+            raise ValueError('loose value should be between 0 and 1, '
+                             'or None to not use loose orientations.')
+
+        if loose < 1 and not forward['surf_ori']:
+            raise ValueError('Forward operator is not oriented in surface '
+                             'coordinates. loose parameter should be None '
+                             'not %s.' % loose)
+
+        if is_fixed_ori:
+            warnings.warn('Ignoring loose parameter with forward operator '
+                          'with fixed orientation.')
+
+    orient_prior = np.ones(n_sources, dtype=np.float)
+    if (not is_fixed_ori) and (loose is not None) and (loose < 1):
+        logger.info('Applying loose dipole orientations. Loose value '
+                    'of %s.' % loose)
+        orient_prior[np.mod(np.arange(n_sources), 3) != 2] *= loose
+
+    return orient_prior
+
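+# Example sketch: for a free-orientation, surface-oriented forward operator
+# with loose=0.2, the prior is [0.2, 0.2, 1.0, 0.2, 0.2, 1.0, ...]: the two
+# tangential components of each source are down-weighted and the surface
+# normal component is left untouched.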
+
+def _restrict_gain_matrix(G, info):
+    """Restrict gain matrix entries for optimal depth weighting"""
+    # Figure out which ones have been used
+    if not (len(info['chs']) == G.shape[0]):
+        raise ValueError("G.shape[0] and length of info['chs'] do not match: "
+                         "%d != %d" % (G.shape[0], len(info['chs'])))
+    sel = pick_types(info, meg='grad', exclude=[])
+    if len(sel) > 0:
+        G = G[sel]
+        logger.info('    %d planar channels' % len(sel))
+    else:
+        sel = pick_types(info, meg='mag', exclude=[])
+        if len(sel) > 0:
+            G = G[sel]
+            logger.info('    %d magnetometer or axial gradiometer '
+                        'channels' % len(sel))
+        else:
+            sel = pick_types(info, meg=False, eeg=True, exclude=[])
+            if len(sel) > 0:
+                G = G[sel]
+                logger.info('    %d EEG channels' % len(sel))
+            else:
+                logger.warn('Could not find MEG or EEG channels')
+    return G
+
+
+def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
+                        patch_areas=None, limit_depth_chs=False):
+    """Compute weighting for depth prior
+    """
+    logger.info('Creating the depth weighting matrix...')
+
+    # If possible, pick best depth-weighting channels
+    if limit_depth_chs is True:
+        G = _restrict_gain_matrix(G, gain_info)
+
+    # Compute the gain matrix
+    if is_fixed_ori:
+        d = np.sum(G ** 2, axis=0)
+    else:
+        n_pos = G.shape[1] // 3
+        d = np.zeros(n_pos)
+        for k in xrange(n_pos):
+            Gk = G[:, 3 * k:3 * (k + 1)]
+            d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]
+
+    # XXX Currently the fwd solns never have "patch_areas" defined
+    if patch_areas is not None:
+        d /= patch_areas ** 2
+        logger.info('    Patch areas taken into account in the depth '
+                    'weighting')
+
+    w = 1.0 / d
+    ws = np.sort(w)
+    weight_limit = limit ** 2
+    if limit_depth_chs is False:
+        # match old mne-python behavior; wpp is computed once below
+        ind = np.argmin(ws)
+        n_limit = ind
+        limit = ws[ind] * weight_limit
+    else:
+        # match C code behavior
+        limit = ws[-1]
+        n_limit = len(d)
+        if ws[-1] > weight_limit * ws[0]:
+            ind = np.where(ws > weight_limit * ws[0])[0][0]
+            limit = ws[ind]
+            n_limit = ind
+
+    logger.info('    limit = %d/%d = %f'
+                % (n_limit + 1, len(d), np.sqrt(limit / ws[0])))
+    scale = 1.0 / limit
+    logger.info('    scale = %g exp = %g' % (scale, exp))
+    wpp = np.minimum(w / limit, 1) ** exp
+
+    depth_prior = wpp if is_fixed_ori else np.repeat(wpp, 3)
+
+    return depth_prior
+
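+# Example sketch: in the limit_depth_chs=False branch, with the default
+# limit=10.0 the weights w = 1 / d are clipped at limit ** 2 = 100 times the
+# smallest weight before the `exp` power is applied.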
+
+def _stc_src_sel(src, stc):
+    """ Select the vertex indices of a source space using a source estimate
+    """
+    src_sel_lh = np.intersect1d(src[0]['vertno'], stc.vertno[0])
+    src_sel_lh = np.searchsorted(src[0]['vertno'], src_sel_lh)
+
+    src_sel_rh = np.intersect1d(src[1]['vertno'], stc.vertno[1])
+    src_sel_rh = np.searchsorted(src[1]['vertno'], src_sel_rh)\
+                 + len(src[0]['vertno'])
+
+    src_sel = np.r_[src_sel_lh, src_sel_rh]
+
+    return src_sel
+
+
+def _fill_measurement_info(info, fwd, sfreq):
+    """ Fill the measurement info of a Raw or Evoked object
+    """
+    sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])
+    info = pick_info(info, sel)
+    info['bads'] = []
+
+    info['filename'] = None
+    # this is probably correct based on what's done in meas_info.py...
+    info['meas_id'] = fwd['info']['meas_id']
+    info['file_id'] = info['meas_id']
+
+    now = time()
+    sec = np.floor(now)
+    usec = 1e6 * (now - sec)
+
+    info['meas_date'] = np.array([sec, usec], dtype=np.int32)
+    info['highpass'] = 0.0
+    info['lowpass'] = sfreq / 2.0
+    info['sfreq'] = sfreq
+    info['projs'] = []
+
+    return info
+
+
+@verbose
+def _apply_forward(fwd, stc, start=None, stop=None, verbose=None):
+    """ Apply forward model and return data, times, ch_names
+    """
+    if not is_fixed_orient(fwd):
+        raise ValueError('Only fixed-orientation forward operators are '
+                         'supported.')
+
+    if np.all(stc.data > 0):
+        warnings.warn('Source estimate only contains currents with positive '
+                      'values. Use pick_normal=True when computing the '
+                      'inverse to obtain signed currents, not magnitudes.')
+
+    max_cur = np.max(np.abs(stc.data))
+    if max_cur > 1e-7:  # 100 nAm threshold for warning
+        warnings.warn('The maximum current magnitude is %0.1f nAm, which is '
+                      'very large. Are you trying to apply the forward model '
+                      'to dSPM values? The result will only be correct if '
+                      'currents are used.' % (1e9 * max_cur))
+
+    src_sel = _stc_src_sel(fwd['src'], stc)
+    n_src = sum([len(v) for v in stc.vertno])
+    if len(src_sel) != n_src:
+        raise RuntimeError('Only %i of %i SourceEstimate vertices found in '
+                           'fwd' % (len(src_sel), n_src))
+
+    gain = fwd['sol']['data'][:, src_sel]
+
+    logger.info('Projecting source estimate to sensor space...')
+    data = np.dot(gain, stc.data[:, start:stop])
+    logger.info('[done]')
+
+    times = deepcopy(stc.times[start:stop])
+
+    return data, times
+
+
+@verbose
+def apply_forward(fwd, stc, evoked_template, start=None, stop=None,
+                  verbose=None):
+    """
+    Project source space currents to sensor space using a forward operator.
+
+    The sensor space data is computed for all channels present in fwd. Use
+    pick_channels_forward or pick_types_forward to restrict the solution to a
+    subset of channels.
+
+    The function returns an Evoked object, which is constructed from
+    evoked_template. The evoked_template should be from the same MEG system on
+    which the original data was acquired. An exception will be raised if the
+    forward operator contains channels that are not present in the template.
+
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator to use. Has to be fixed-orientation.
+    stc : SourceEstimate
+        The source estimate from which the sensor space data is computed.
+    evoked_template : Evoked object
+        Evoked object used as template to generate the output argument.
+    start : int, optional
+        Index of the first time sample to include (an index, not a time in
+        seconds).
+    stop : int, optional
+        Index of the first time sample not to include (an index, not a time
+        in seconds).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    evoked : Evoked
+        Evoked object with computed sensor space data.
+
+    See Also
+    --------
+    apply_forward_raw: Compute sensor space data and return a Raw object.
+    """
+
+    # make sure evoked_template contains all channels in fwd
+    for ch_name in fwd['sol']['row_names']:
+        if ch_name not in evoked_template.ch_names:
+            raise ValueError('Channel %s of forward operator not present in '
+                             'evoked_template.' % ch_name)
+
+    # project the source estimate to the sensor space
+    data, times = _apply_forward(fwd, stc, start, stop)
+
+    # store sensor data in an Evoked object using the template
+    evoked = deepcopy(evoked_template)
+
+    evoked.nave = 1
+    evoked.data = data
+    evoked.times = times
+
+    sfreq = float(1.0 / stc.tstep)
+    evoked.first = int(np.round(evoked.times[0] * sfreq))
+    evoked.last = evoked.first + evoked.data.shape[1] - 1
+
+    # fill the measurement info
+    evoked.info = _fill_measurement_info(evoked.info, fwd, sfreq)
+
+    return evoked
+
+
+@verbose
+def apply_forward_raw(fwd, stc, raw_template, start=None, stop=None,
+                      verbose=None):
+    """Project source space currents to sensor space using a forward operator
+
+    The sensor space data is computed for all channels present in fwd. Use
+    pick_channels_forward or pick_types_forward to restrict the solution to a
+    subset of channels.
+
+    The function returns a Raw object, which is constructed from raw_template.
+    The raw_template should be from the same MEG system on which the original
+    data was acquired. An exception will be raised if the forward operator
+    contains channels that are not present in the template.
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator to use. Has to be fixed-orientation.
+    stc : SourceEstimate
+        The source estimate from which the sensor space data is computed.
+    raw_template : Raw object
+        Raw object used as template to generate the output argument.
+    start : int, optional
+        Index of the first time sample to include (an index, not a time in
+        seconds).
+    stop : int, optional
+        Index of the first time sample not to include (an index, not a time
+        in seconds).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : Raw object
+        Raw object with computed sensor space data.
+
+    See Also
+    --------
+    apply_forward: Compute sensor space data and return an Evoked object.
+    """
+
+    # make sure raw_template contains all channels in fwd
+    for ch_name in fwd['sol']['row_names']:
+        if ch_name not in raw_template.ch_names:
+            raise ValueError('Channel %s of forward operator not present in '
+                             'raw_template.' % ch_name)
+
+    # project the source estimate to the sensor space
+    data, times = _apply_forward(fwd, stc, start, stop)
+
+    # store sensor data in Raw object using the template
+    raw = deepcopy(raw_template)
+    raw._preloaded = True
+    raw._data = data
+    raw._times = times
+
+    sfreq = float(1.0 / stc.tstep)
+    raw.first_samp = int(np.round(raw._times[0] * sfreq))
+    raw.last_samp = raw.first_samp + raw._data.shape[1] - 1
+
+    # fill the measurement info
+    raw.info = _fill_measurement_info(raw.info, fwd, sfreq)
+
+    raw.info['projs'] = []
+    raw._projector = None
+
+    return raw
+
+
+def restrict_forward_to_stc(fwd, stc):
+    """Restricts forward operator to active sources in a source estimate
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator.
+    stc : SourceEstimate
+        Source estimate.
+
+    Returns
+    -------
+    fwd_out : dict
+        Restricted forward operator.
+    """
+
+    fwd_out = deepcopy(fwd)
+    src_sel = _stc_src_sel(fwd['src'], stc)
+
+    fwd_out['source_rr'] = fwd['source_rr'][src_sel]
+    fwd_out['nsource'] = len(src_sel)
+
+    if is_fixed_orient(fwd):
+        idx = src_sel
+    else:
+        idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+
+    fwd_out['source_nn'] = fwd['source_nn'][idx]
+    fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]
+    fwd_out['sol']['ncol'] = len(idx)
+
+    for i in range(2):
+        fwd_out['src'][i]['vertno'] = stc.vertno[i]
+        fwd_out['src'][i]['nuse'] = len(stc.vertno[i])
+        fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
+        fwd_out['src'][i]['inuse'].fill(0)
+        fwd_out['src'][i]['inuse'][stc.vertno[i]] = 1
+        fwd_out['src'][i]['use_tris'] = np.array([])
+        fwd_out['src'][i]['nuse_tri'] = np.array([0])
+
+    return fwd_out
+
+
+def restrict_forward_to_label(fwd, labels):
+    """Restricts forward operator to labels
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator.
+    labels : label object | list
+        Label object or list of label objects.
+
+    Returns
+    -------
+    fwd_out : dict
+        Restricted forward operator.
+    """
+
+    if not isinstance(labels, list):
+        labels = [labels]
+
+    fwd_out = deepcopy(fwd)
+    fwd_out['source_rr'] = np.zeros((0, 3))
+    fwd_out['nsource'] = 0
+    fwd_out['source_nn'] = np.zeros((0, 3))
+    fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))
+    fwd_out['sol']['ncol'] = 0
+
+    for i in range(2):
+        fwd_out['src'][i]['vertno'] = np.array([])
+        fwd_out['src'][i]['nuse'] = 0
+        fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
+        fwd_out['src'][i]['inuse'].fill(0)
+        fwd_out['src'][i]['use_tris'] = np.array([])
+        fwd_out['src'][i]['nuse_tri'] = np.array([0])
+
+    for label in labels:
+        if label.hemi == 'lh':
+            i = 0
+            src_sel = np.intersect1d(fwd['src'][0]['vertno'], label.vertices)
+            src_sel = np.searchsorted(fwd['src'][0]['vertno'], src_sel)
+        else:
+            i = 1
+            src_sel = np.intersect1d(fwd['src'][1]['vertno'], label.vertices)
+            src_sel = np.searchsorted(fwd['src'][1]['vertno'], src_sel)\
+                        + len(fwd['src'][0]['vertno'])
+
+        fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],
+                                          fwd['source_rr'][src_sel]])
+        fwd_out['nsource'] += len(src_sel)
+
+        fwd_out['src'][i]['vertno'] = np.r_[fwd_out['src'][i]['vertno'],
+                                            src_sel]
+        fwd_out['src'][i]['nuse'] += len(src_sel)
+        fwd_out['src'][i]['inuse'][src_sel] = 1
+
+        if is_fixed_orient(fwd):
+            idx = src_sel
+        else:
+            idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+
+        fwd_out['source_nn'] = np.vstack([fwd_out['source_nn'],
+                                          fwd['source_nn'][idx]])
+        fwd_out['sol']['data'] = np.hstack([fwd_out['sol']['data'],
+                                            fwd['sol']['data'][:, idx]])
+        fwd_out['sol']['ncol'] += len(idx)
+
+    return fwd_out
+
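+# Usage sketch (hypothetical label file): restrict the gain matrix to an
+# anatomical label, e.g.
+#     label = mne.read_label('lh.BA1.label')
+#     fwd_label = restrict_forward_to_label(fwd, label)
+# keeping only the gain columns for sources inside the label.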
+
+@verbose
+def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
+                        mindist=None, bem=None, mri=None, trans=None,
+                        eeg=True, meg=True, fixed=False, grad=False,
+                        mricoord=False, overwrite=False, subjects_dir=None,
+                        verbose=None):
+    """Calculate a forward solution for a subject
+
+    This function wraps to mne_do_forward_solution, so the mne
+    command-line tools must be installed.
+
+    Parameters
+    ----------
+    subject : str
+        Name of the subject.
+    meas : Raw | Epochs | Evoked | str
+        If Raw or Epochs, a temporary evoked file will be created and
+        saved to a temporary directory. If str, it should be the filename
+        of a file with measurement information that the mne command-line
+        tools can understand (i.e., raw or evoked).
+    fname : str | None
+        Destination forward solution filename. If None, the solution
+        will be created in a temporary directory, loaded, and deleted.
+    src : str | None
+        Source space name. If None, the MNE default is used.
+    spacing : str | None
+        Source space spacing to use. If None, the MNE default is used.
+    mindist : float | str | None
+        Minimum distance of sources from inner skull surface (in mm).
+        If None, the MNE default value is used. If string, 'all'
+        indicates to include all points.
+    bem : str | None
+        Name of the BEM to use (e.g., "sample-5120-5120-5120"). If None
+        (Default), the MNE default will be used.
+    trans : str | None
+        File name of the trans file. If None, mri must not be None.
+    mri : dict | str | None
+        Either a transformation (usually made using mne_analyze) or an
+        info dict (usually opened using read_trans()), or a filename.
+        If dict, the trans will be saved in a temporary directory. If
+        None, trans must not be None.
+    eeg : bool
+        If True (Default), include EEG computations.
+    meg : bool
+        If True (Default), include MEG computations.
+    fixed : bool
+        If True, make a fixed-orientation forward solution (Default:
+        False). Note that fixed-orientation inverses can still be
+        created from free-orientation forward solutions.
+    grad : bool
+        If True, compute the gradient of the field with respect to the
+        dipole coordinates as well (Default: False).
+    mricoord : bool
+        If True, calculate in MRI coordinates (Default: False).
+    overwrite : bool
+        If True, the destination file (if it exists) will be overwritten.
+        If False (default), an error will be raised if the file exists.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fwd : dict
+        The generated forward solution.
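+
+    Notes
+    -----
+    Illustrative usage (a sketch; the subject name and file names below are
+    hypothetical)::
+
+        fwd = do_forward_solution('sample', 'audvis-ave.fif',
+                                  mri='sample-trans.fif',
+                                  subjects_dir='/path/to/subjects')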
+    """
+    if not has_command_line_tools():
+        raise RuntimeError('mne command line tools could not be found')
+
+    # check for file existence
+    temp_dir = tempfile.mkdtemp()
+    if fname is None:
+        fname = op.join(temp_dir, 'temp-fwd.fif')
+    _check_fname(fname, overwrite)
+
+    if not isinstance(subject, basestring):
+        raise ValueError('subject must be a string')
+
+    # check for meas to exist as string, or try to make evoked
+    if not isinstance(meas, basestring):
+        # See if we need to make a meas file
+        if isinstance(meas, Raw):
+            events = make_fixed_length_events(meas, 1)[0][np.newaxis, :]
+            meas = Epochs(meas, events, 1, 0, 1, proj=False)
+        if isinstance(meas, Epochs):
+            meas = meas.average()
+        if isinstance(meas, Evoked):
+            meas_data = meas
+            meas = op.join(temp_dir, 'evoked.fif')
+            write_evoked(meas, meas_data)
+        if not isinstance(meas, basestring):
+            raise ValueError('meas must be string, Raw, Epochs, or Evoked')
+    if not op.isfile(meas):
+        raise IOError('measurement file "%s" could not be found' % meas)
+
+    # deal with trans/mri
+    if mri is not None and trans is not None:
+        raise ValueError('trans and mri cannot both be specified')
+    if mri is None and trans is None:
+        # MNE allows this to default to a trans/mri in the subject's dir,
+        # but let's be safe here and force the user to pass us a trans/mri
+        raise ValueError('Either trans or mri must be specified')
+
+    if trans is not None:
+        if not isinstance(trans, basestring):
+            raise ValueError('trans must be a string')
+        if not op.isfile(trans):
+            raise IOError('trans file "%s" not found' % trans)
+    if mri is not None:
+        # deal with trans
+        if not isinstance(mri, basestring):
+            if isinstance(mri, dict):
+                mri_data = deepcopy(mri)
+                mri = op.join(temp_dir, 'mri-trans.fif')
+                try:
+                    write_trans(mri, mri_data)
+                except Exception:
+                    raise IOError('mri was a dict, but could not be '
+                                  'written to disk as a transform file')
+            else:
+                raise ValueError('mri must be a string or dict (trans)')
+        if not op.isfile(mri):
+            raise IOError('trans file "%s" could not be found' % mri)
+
+    # deal with meg/eeg
+    if not meg and not eeg:
+        raise ValueError('meg or eeg (or both) must be True')
+
+    path, fname = op.split(fname)
+    if not op.splitext(fname)[1] == '.fif':
+        raise ValueError('Forward name does not end with .fif')
+    path = op.abspath(path)
+
+    # deal with mindist
+    if mindist is not None:
+        if isinstance(mindist, basestring):
+            if not mindist.lower() == 'all':
+                raise ValueError('mindist, if string, must be "all"')
+            mindist = ['--all']
+        else:
+            mindist = ['--mindist', '%g' % mindist]
+
+    # src, spacing, bem
+    if src is not None:
+        if not isinstance(src, basestring):
+            raise ValueError('src must be a string or None')
+    if spacing is not None:
+        if not isinstance(spacing, basestring):
+            raise ValueError('spacing must be a string or None')
+    if bem is not None:
+        if not isinstance(bem, basestring):
+            raise ValueError('bem must be a string or None')
+
+    # put together the actual call
+    cmd = ['mne_do_forward_solution',
+           '--subject', subject,
+           '--meas', meas,
+           '--fwd', fname,
+           '--destdir', path]
+    if src is not None:
+        cmd += ['--src', src]
+    if spacing is not None:
+        cmd += ['--spacing', spacing]
+    if mindist is not None:
+        cmd += mindist
+    if bem is not None:
+        cmd += ['--bem', bem]
+    if mri is not None:
+        cmd += ['--mri', '%s' % mri]
+    if trans is not None:
+        cmd += ['--trans', '%s' % trans]
+    if not meg:
+        cmd.append('--eegonly')
+    if not eeg:
+        cmd.append('--megonly')
+    if fixed:
+        cmd.append('--fixed')
+    if grad:
+        cmd.append('--grad')
+    if mricoord:
+        cmd.append('--mricoord')
+    if overwrite:
+        cmd.append('--overwrite')
+
+    env = os.environ.copy()
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    env['SUBJECTS_DIR'] = subjects_dir
+
+    try:
+        logger.info('Running forward solution generation command with '
+                    'subjects_dir %s' % subjects_dir)
+        run_subprocess(cmd, env=env)
+    except Exception:
+        raise  # re-raise with the original traceback
+    else:
+        fwd = read_forward_solution(op.join(path, fname))
+    finally:
+        shutil.rmtree(temp_dir, ignore_errors=True)
+    return fwd
+
+
+@verbose
+def average_forward_solutions(fwds, weights=None):
+    """Average forward solutions
+
+    Parameters
+    ----------
+    fwds : list of dict
+        Forward solutions to average. Each entry (dict) should be a
+        forward solution.
+    weights : array | None
+        Weights to apply to each forward solution in averaging. If None,
+        forward solutions will be equally weighted. Weights must be
+        non-negative, and will be adjusted to sum to one.
+
+    Returns
+    -------
+    fwd : dict
+        The averaged forward solution.
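+
+    Notes
+    -----
+    Illustrative usage (a sketch): average two compatible solutions,
+    weighting the first twice as much as the second::
+
+        fwd_ave = average_forward_solutions([fwd1, fwd2],
+                                            weights=np.array([2., 1.]))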
+    """
+    # check for fwds being a list
+    if not isinstance(fwds, list):
+        raise TypeError('fwds must be a list')
+    if not len(fwds) > 0:
+        raise ValueError('fwds must not be empty')
+
+    # check weights
+    if weights is None:
+        weights = np.ones(len(fwds))
+    weights = np.array(weights, dtype=np.float)  # copy (also accepts lists)
+    if not np.all(weights >= 0):
+        raise ValueError('weights must be non-negative')
+    if not len(weights) == len(fwds):
+        raise ValueError('weights must be None or the same length as fwds')
+    w_sum = np.sum(weights)
+    if not w_sum > 0:
+        raise ValueError('weights cannot all be zero')
+    weights /= w_sum
+
+    # check our forward solutions
+    for fwd in fwds:
+        # check to make sure it's a forward solution
+        if not isinstance(fwd, dict):
+            raise TypeError('Each entry in fwds must be a dict')
+        # check to make sure the dict is actually a fwd
+        if not all([key in fwd for key in ['info', 'sol_grad', 'nchan',
+                                           'src', 'source_nn', 'sol',
+                                           'source_rr', 'source_ori',
+                                           'surf_ori', 'coord_frame',
+                                           'mri_head_t', 'nsource']]):
+            raise KeyError('forward solution dict does not have all standard '
+                           'entries, cannot compute average.')
+
+    # check forward solution compatibility
+    if any([fwd['sol'][k] != fwds[0]['sol'][k]
+            for fwd in fwds[1:] for k in ['nrow', 'ncol']]):
+        raise ValueError('Forward solutions have incompatible dimensions')
+    if any([fwd[k] != fwds[0][k] for fwd in fwds[1:]
+            for k in ['source_ori', 'surf_ori', 'coord_frame']]):
+        raise ValueError('Forward solutions have incompatible orientations')
+
+    # actually average them
+    fwd_ave = deepcopy(fwds[0])
+    fwd_ave['sol']['data'] *= weights[0]
+    for fwd, w in zip(fwds[1:], weights[1:]):
+        fwd_ave['sol']['data'] += w * fwd['sol']['data']
+    return fwd_ave
diff --git a/mne/inverse_sparse/__init__.py b/mne/inverse_sparse/__init__.py
new file mode 100644
index 0000000..981622d
--- /dev/null
+++ b/mne/inverse_sparse/__init__.py
@@ -0,0 +1,8 @@
+"""Non-Linear sparse inverse solvers"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+from .mxne_inverse import mixed_norm, tf_mixed_norm
+from ._gamma_map import gamma_map
diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py
new file mode 100644
index 0000000..eeaf3c1
--- /dev/null
+++ b/mne/inverse_sparse/_gamma_map.py
@@ -0,0 +1,304 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+# License: Simplified BSD
+from copy import deepcopy
+
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..forward import is_fixed_orient, _to_fixed_ori
+from ..fiff.pick import pick_channels_evoked
+from ..minimum_norm.inverse import _prepare_forward
+from .. import verbose
+from .mxne_inverse import _make_sparse_stc, _prepare_gain
+
+
+@verbose
+def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
+                   group_size=1, gammas=None, verbose=None):
+    """Hierarchical Bayes (Gamma-MAP)
+
+    Parameters
+    ----------
+    M : array, shape=(n_sensors, n_times)
+        Observation.
+    G : array, shape=(n_sensors, n_sources)
+        Forward operator.
+    alpha : float
+        Regularization parameter (noise variance).
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter for convergence.
+    group_size : int
+        Number of consecutive sources which use the same gamma.
+    update_mode : int
+        Update mode, 1: MacKay update (default), 2: Modified MacKay update.
+    gammas : array, shape=(n_sources,)
+        Initial values for posterior variances (gammas). If None, a
+        variance of 1.0 is used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : array, shape=(n_active, n_times)
+        Estimated source time courses.
+    active_set : array, shape=(n_active,)
+        Indices of active sources.
+
+    References
+    ----------
+    [1] Wipf et al. Analysis of Empirical Bayesian Methods for
+    Neuroelectromagnetic Source Localization, Advances in Neural Information
+    Processing Systems (2007).
+    """
+    G = G.copy()
+    M = M.copy()
+
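+    # Outline (see [1] for details): iteratively 1) prune sources whose
+    # gamma has reached (numerical) zero, 2) form the model covariance
+    # CM = alpha * I + G * diag(gammas) * G.T, 3) update the gammas with a
+    # (modified) MacKay fixed-point rule, until the relative change in the
+    # gammas falls below tol.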
+    if gammas is None:
+        gammas = np.ones(G.shape[1], dtype=np.float)
+
+    eps = np.finfo(float).eps
+
+    n_sources = G.shape[1]
+    n_sensors, n_times = M.shape
+
+    # apply normalization so the numerical values are sane
+    M_normalize_constant = linalg.norm(np.dot(M, M.T), ord='fro')
+    M /= np.sqrt(M_normalize_constant)
+    alpha /= M_normalize_constant
+    G_normalize_constant = linalg.norm(G, ord=np.inf)
+    G /= G_normalize_constant
+
+    if n_sources % group_size != 0:
+        raise ValueError('Number of sources has to be evenly divisible by '
+                         'the group size')
+
+    n_active = n_sources
+    active_set = np.arange(n_sources)
+
+    gammas_full_old = gammas.copy()
+
+    if update_mode == 2:
+        denom_fun = np.sqrt
+    else:
+        # do nothing
+        denom_fun = lambda x: x
+
+    for itno in np.arange(maxit):
+        gammas[np.isnan(gammas)] = 0.0
+
+        gidx = (np.abs(gammas) > eps)
+        active_set = active_set[gidx]
+        gammas = gammas[gidx]
+
+        # update only active gammas (once set to zero it stays at zero)
+        if n_active > len(active_set):
+            n_active = active_set.size
+            G = G[:, gidx]
+
+        CM = alpha * np.eye(n_sensors) + np.dot(G * gammas[np.newaxis, :], G.T)
+        # Invert CM keeping symmetry
+        U, S, V = linalg.svd(CM, full_matrices=False)
+        S = S[np.newaxis, :]
+        CM = np.dot(U * S, U.T)
+        CMinv = np.dot(U / (S + eps), U.T)
+
+        CMinvG = np.dot(CMinv, G)
+        A = np.dot(CMinvG.T, M)  # mult. w. Diag(gamma) in gamma update
+
+        if update_mode == 1:
+            # MacKay fixed point update (10) in [1]
+            numer = gammas ** 2 * np.mean(np.abs(A) ** 2, axis=1)
+            denom = gammas * np.sum(G * CMinvG, axis=0)
+        elif update_mode == 2:
+            # modified MacKay fixed point update (11) in [1]
+            numer = gammas * np.sqrt(np.mean(np.abs(A) ** 2, axis=1))
+            denom = np.sum(G * CMinvG, axis=0)  # sqrt is applied below
+        else:
+            raise ValueError('Invalid value for update_mode')
+
+        if group_size == 1:
+            if denom is None:
+                gammas = numer
+            else:
+                gammas = numer / denom_fun(denom)
+        else:
+            numer_comb = np.sum(numer.reshape(-1, group_size), axis=1)
+            if denom is None:
+                gammas_comb = numer_comb
+            else:
+                denom_comb = np.sum(denom.reshape(-1, group_size), axis=1)
+                gammas_comb = numer_comb / denom_fun(denom_comb)
+
+            gammas = np.repeat(gammas_comb / group_size, group_size)
+
+        # compute convergence criterion
+        gammas_full = np.zeros(n_sources, dtype=np.float)
+        gammas_full[active_set] = gammas
+
+        err = (np.sum(np.abs(gammas_full - gammas_full_old))
+               / np.sum(np.abs(gammas_full_old)))
+
+        gammas_full_old = gammas_full
+
+        logger.info('Iteration: %d\t active set size: %d\t convergence: %0.3e'
+                    % (itno, len(gammas), err))
+
+        if err < tol:
+            break
+
+        if n_active == 0:
+            break
+
+    if itno < maxit - 1:
+        logger.info('Convergence reached!')
+    else:
+        logger.info('Convergence NOT reached!')
+
+    # undo normalization and compute final posterior mean
+    n_const = np.sqrt(M_normalize_constant) / G_normalize_constant
+    x_active = n_const * gammas[:, None] * A
+
+    return x_active, active_set
+
+
+@verbose
+def gamma_map(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
+              xyz_same_gamma=True, maxit=10000, tol=1e-6, update_mode=1,
+              gammas=None, pca=True, return_residual=False, verbose=None):
+    """Hierarchical Bayes (Gamma-MAP) sparse source localization method
+
+    Models each source time course using a zero-mean Gaussian prior with an
+    unknown variance (gamma) parameter. During estimation, most gammas are
+    driven to zero, resulting in a sparse source estimate.
+
+    For fixed-orientation forward operators, a separate gamma is used for each
+    source time course, while for free-orientation forward operators, the same
+    gamma is used for the three source time courses at each source space point
+    (separate gammas can be used in this case by using xyz_same_gamma=False).
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        Noise covariance to compute whitener.
+    alpha : float
+        Regularization parameter (noise variance).
+    loose : float in [0, 1]
+        Value that weights the source variances of the dipole components
+        that are parallel (tangential) to the cortical surface. If loose
+        is 0 or None then the solution is computed with fixed orientation.
+        If loose is 1, it corresponds to free orientations.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    xyz_same_gamma : bool
+        Use same gamma for xyz current components at each source space point.
+        Recommended for free-orientation forward solutions.
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter for convergence.
+    update_mode : int
+        Update mode, 1: MacKay update (default), 2: Modified MacKay update.
+    gammas : array, shape=(n_sources,)
+        Initial values for posterior variances (gammas). If None, a
+        variance of 1.0 is used.
+    pca : bool
+        If True the rank of the data is reduced to the true dimension.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : instance of SourceEstimate
+        Source time courses.
+    residual : instance of Evoked
+        The residual a.k.a. data not explained by the sources.
+        Only returned if return_residual is True.
+
+    References
+    ----------
+    Wipf et al. Analysis of Empirical Bayesian Methods for Neuroelectromagnetic
+    Source Localization, Advances in Neural Information Process. Systems (2007)
+
+    Wipf et al. A unified Bayesian framework for MEG/EEG source imaging,
+    NeuroImage, vol. 44, no. 3, pp. 947-66, Mar. 2009.
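+
+    Notes
+    -----
+    Illustrative usage (a sketch; variable names are hypothetical)::
+
+        stc = gamma_map(evoked, forward, noise_cov, alpha=0.5, loose=0.2)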
+    """
+    # make forward solution in fixed orientation if necessary
+    if loose is None and not is_fixed_orient(forward):
+        forward = deepcopy(forward)
+        _to_fixed_ori(forward)
+
+    if is_fixed_orient(forward) or not xyz_same_gamma:
+        group_size = 1
+    else:
+        group_size = 3
+
+    gain_info, gain, _, whitener, _ = _prepare_forward(forward, evoked.info,
+                                                       noise_cov, pca)
+
+    # get the data
+    sel = [evoked.ch_names.index(name) for name in gain_info['ch_names']]
+    M = evoked.data[sel]
+
+    # whiten and prepare gain matrix
+    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
+                                                 depth, loose, None,
+                                                 None)
+    # whiten the data
+    M = np.dot(whitener, M)
+
+    # run the optimization
+    X, active_set = _gamma_map_opt(M, gain, alpha, maxit=maxit, tol=tol,
+                                   update_mode=update_mode, gammas=gammas,
+                                   group_size=group_size, verbose=verbose)
+
+    if len(active_set) == 0:
+        raise Exception("No active dipoles found. alpha is too big.")
+
+    # reapply weights to have correct unit
+    X /= source_weighting[active_set][:, None]
+
+    if return_residual:
+        sel = [forward['sol']['row_names'].index(c)
+               for c in gain_info['ch_names']]
+        residual = evoked.copy()
+        residual = pick_channels_evoked(residual,
+                                        include=gain_info['ch_names'])
+        residual.data -= np.dot(forward['sol']['data'][sel, :][:, active_set],
+                                X)
+
+    if group_size == 1 and not is_fixed_orient(forward):
+        # make sure each source has 3 components
+        active_src = np.unique(active_set // 3)
+        in_pos = 0
+        if len(X) < 3 * len(active_src):
+            X_xyz = np.zeros((3 * len(active_src), X.shape[1]), dtype=X.dtype)
+            for ii in xrange(len(active_src)):
+                for jj in xrange(3):
+                    if in_pos >= len(active_set):
+                        break
+                    if (active_set[in_pos] + jj) % 3 == 0:
+                        X_xyz[3 * ii + jj] = X[in_pos]
+                        in_pos += 1
+            X = X_xyz
+
+    tmin = evoked.times[0]
+    tstep = 1.0 / evoked.info['sfreq']
+    stc = _make_sparse_stc(X, active_set, forward, tmin, tstep,
+                           active_is_idx=True, verbose=verbose)
+
+    if return_residual:
+        return stc, residual
+    else:
+        return stc
diff --git a/mne/inverse_sparse/mxne_debiasing.py b/mne/inverse_sparse/mxne_debiasing.py
new file mode 100755
index 0000000..c794ea4
--- /dev/null
+++ b/mne/inverse_sparse/mxne_debiasing.py
@@ -0,0 +1,133 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from math import sqrt
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..utils import check_random_state
+from .. import verbose
+
+
+def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0):
+    """Find the largest singular value for the matrix kron(C.T, A)
+
+    It uses power iterations.
+
+    Parameters
+    ----------
+    A : array
+        The A matrix in kron(C.T, A).
+    C : array
+        The C matrix in kron(C.T, A).
+    max_iter : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance on the change of the estimate between iterations.
+    random_state : int | RandomState | None
+        Random state for random number generation.
+
+    Returns
+    -------
+    L : float
+        Largest singular value.
+
+    Notes
+    -----
+    http://en.wikipedia.org/wiki/Power_iteration
+    """
+    AS_size = C.shape[0]
+    rng = check_random_state(random_state)
+    B = rng.randn(AS_size, AS_size)
+    B /= linalg.norm(B, 'fro')
+    ATA = np.dot(A.T, A)
+    CCT = np.dot(C, C.T)
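+    # Power iteration expressed on matrices: applying the operator
+    # kron(C.T, A).T * kron(C.T, A) to vec(B) is equivalent to computing
+    # A.T * A * B * C * C.T, so the Frobenius norm of the iterate converges
+    # to the largest eigenvalue of that operator.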
+    L0 = np.inf
+    for _ in range(max_iter):
+        Y = np.dot(np.dot(ATA, B), CCT)
+        L = linalg.norm(Y, 'fro')
+
+        if abs(L - L0) < tol:
+            break
+
+        B = Y / L
+        L0 = L
+    return L
+
+
+@verbose
+def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None):
+    """Compute scaling to correct amplitude bias
+
+    It solves the following optimization problem using FISTA:
+
+    min_D 1/2 * ||M - G D X||_Fro^2
+    s.t. D >= 1 and D is a diagonal matrix
+
+    Reference for the FISTA algorithm:
+    Amir Beck and Marc Teboulle
+    A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse
+    Problems, SIAM J. Imaging Sci., 2(1), 183-202. (20 pages)
+    http://epubs.siam.org/doi/abs/10.1137/080716542
+
+    Parameters
+    ----------
+    M : array
+        The measurement data.
+    G : array
+        The leadfield matrix.
+    X : array
+        The reconstructed time courses with amplitude bias.
+    max_iter : int
+        Maximum number of iterations.
+    tol : float
+        The tolerance on convergence.
+    n_orient : int
+        The number of orientations (1 for fixed and 3 otherwise).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    D : array
+        Debiasing weights.
+    """
+    n_sources = X.shape[0]
+
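+    # FISTA outline: take a gradient step on the quadratic data-fit term,
+    # project onto the constraint set {D >= 1} (the prox step), then apply
+    # the momentum update to the auxiliary variable Y.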
+    lipschitz_constant = 1.1 * power_iteration_kron(G, X)
+
+    # initializations
+    D = np.ones(n_sources)
+    Y = np.ones(n_sources)
+    t = 1.0
+
+    for i in xrange(max_iter):
+        D0 = D
+
+        # gradient step
+        R = M - np.dot(G * Y, X)
+        D = Y + np.sum(np.dot(G.T, R) * X, axis=1) / lipschitz_constant
+        # Equivalent but faster than:
+        # D = Y + np.diag(np.dot(np.dot(G.T, R), X.T)) / lipschitz_constant
+
+        # prox ie projection on constraint
+        if n_orient != 1:  # take care of orientations
+            # The scaling has to be the same for all orientations
+            D = np.mean(D.reshape(-1, n_orient), axis=1)
+            D = np.tile(D, [n_orient, 1]).T.ravel()
+        D = np.maximum(D, 1.0)
+
+        t0 = t
+        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
+        dt = (t0 - 1.0) / t
+        Y = D + dt * (D - D0)
+        if linalg.norm(D - D0, np.inf) < tol:
+            logger.info("Debiasing converged after %d iterations" % i)
+            break
+    else:
+        logger.info("Debiasing did not converge")
+    return D
diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py
new file mode 100644
index 0000000..4932653
--- /dev/null
+++ b/mne/inverse_sparse/mxne_inverse.py
@@ -0,0 +1,435 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+from copy import deepcopy
+import numpy as np
+from scipy import linalg, signal
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..source_estimate import SourceEstimate
+from ..minimum_norm.inverse import combine_xyz, _prepare_forward
+from ..forward import compute_orient_prior, is_fixed_orient, _to_fixed_ori
+from ..fiff.pick import pick_channels_evoked
+from .mxne_optim import mixed_norm_solver, norm_l2inf, tf_mixed_norm_solver
+from .. import verbose
+
+
+@verbose
+def _prepare_gain(gain, forward, whitener, depth, loose, weights, weights_min,
+                  verbose=None):
+    logger.info('Whitening lead field matrix.')
+    gain = np.dot(whitener, gain)
+
+    # Handle depth prior scaling
+    source_weighting = np.sum(gain ** 2, axis=0) ** depth
+
+    # apply loose orientations
+    orient_prior = compute_orient_prior(forward, loose)
+
+    source_weighting /= orient_prior
+    source_weighting = np.sqrt(source_weighting)
+    gain /= source_weighting[None, :]
+
+    # Handle weights
+    mask = None
+    if weights is not None:
+        if isinstance(weights, SourceEstimate):
+            # weights = np.sqrt(np.sum(weights.data ** 2, axis=1))
+            weights = np.max(np.abs(weights.data), axis=1)
+        weights_max = np.max(weights)
+        if weights_min is not None:  # guard against None before dividing
+            if weights_min > weights_max:
+                raise ValueError('weights_min > weights_max (%s > %s)' %
+                                 (weights_min, weights_max))
+            weights_min = weights_min / weights_max
+        weights = weights / weights_max
+        n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+        weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
+        if len(weights) != gain.shape[1]:
+            raise ValueError('weights do not have the correct dimension '
+                             ' (%d != %d)' % (len(weights), gain.shape[1]))
+        nz_idx = np.where(weights != 0.0)[0]
+        source_weighting[nz_idx] /= weights[nz_idx]
+        gain *= weights[None, :]
+
+        if weights_min is not None:
+            mask = (weights > weights_min)
+            gain = gain[:, mask]
+            n_sources = np.sum(mask) / n_dip_per_pos
+            logger.info("Reducing source space to %d sources" % n_sources)
+
+    return gain, source_weighting, mask
+
+
+@verbose
+def _make_sparse_stc(X, active_set, forward, tmin, tstep,
+                     active_is_idx=False, verbose=None):
+    if not is_fixed_orient(forward):
+        logger.info('combining the current components...')
+        X = combine_xyz(X)
+
+    if not active_is_idx:
+        active_idx = np.where(active_set)[0]
+    else:
+        active_idx = active_set
+
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    if n_dip_per_pos > 1:
+        active_idx = np.unique(active_idx // n_dip_per_pos)
+
+    src = forward['src']
+
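+    # active_idx indexes the concatenated [lh; rh] source space: entries
+    # below n_lh_points are left-hemisphere vertices; the rest, shifted by
+    # n_lh_points, are right-hemisphere vertices.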
+    n_lh_points = len(src[0]['vertno'])
+    lh_vertno = src[0]['vertno'][active_idx[active_idx < n_lh_points]]
+    rh_vertno = src[1]['vertno'][active_idx[active_idx >= n_lh_points]
+                                 - n_lh_points]
+    vertices = [lh_vertno, rh_vertno]
+    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
+    return stc
+
+
+@verbose
+def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
+               maxit=3000, tol=1e-4, active_set_size=10, pca=True,
+               debias=True, time_pca=True, weights=None, weights_min=None,
+               solver='auto', return_residual=False, verbose=None):
+    """Mixed-norm estimate (MxNE)
+
+    Compute L1/L2 mixed-norm solution on evoked data.
+
+    References:
+    Gramfort A., Kowalski M. and Hamalainen, M,
+    Mixed-norm estimates for the M/EEG inverse problem using accelerated
+    gradient methods, Physics in Medicine and Biology, 2012
+    http://dx.doi.org/10.1088/0031-9155/57/7/1937
+
+    Parameters
+    ----------
+    evoked : instance of Evoked or list of instances of Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        Noise covariance to compute whitener.
+    alpha : float
+        Regularization parameter.
+    loose : float in [0, 1]
+        Value that weights the source variances of the dipole components
+        that are parallel (tangential) to the cortical surface. If loose
+        is 0 or None then the solution is computed with fixed orientation.
+        If loose is 1, it corresponds to free orientations.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter.
+    active_set_size : int | None
+        Size of active set increment. If None, no active set strategy is used.
+    pca : bool
+        If True the rank of the data is reduced to its true dimension.
+    debias : bool
+        Remove coefficient amplitude bias due to L1 penalty.
+    time_pca : bool or int
+        If True the rank of the concatenated epochs is reduced to
+        its true dimension. If an int, the rank is limited to this value.
+    weights : None | array | SourceEstimate
+        Weight for penalty in mixed_norm. Can be None, a
+        1d array of length n_sources, or a SourceEstimate, e.g., obtained
+        with wMNE, dSPM, or fMRI.
+    weights_min : float
+        Sources for which the weight is less than weights_min are not
+        considered in the estimation.
+    solver : 'prox' | 'cd' | 'auto'
+        The algorithm to use for the optimization. prox stands for
+        proximal iterations using the FISTA algorithm while cd uses
+        coordinate descent. cd is only available for fixed orientation.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+
+    Returns
+    -------
+    stc : SourceEstimate | list of SourceEstimate
+        Source time courses for each evoked data passed as input.
+    residual : instance of Evoked
+        The residual a.k.a. data not explained by the sources.
+        Only returned if return_residual is True.
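+
+    Notes
+    -----
+    Illustrative usage (a sketch; variable names are hypothetical)::
+
+        stc = mixed_norm(evoked, forward, noise_cov, alpha=40., loose=0.2,
+                         depth=0.8)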
+    """
+    if not isinstance(evoked, list):
+        evoked = [evoked]
+
+    all_ch_names = evoked[0].ch_names
+    if not all(all_ch_names == evoked[i].ch_names
+               for i in range(1, len(evoked))):
+        raise Exception('All the datasets must have the same good channels.')
+
+    # put the forward solution in fixed orientation if it's not already
+    if loose is None and not is_fixed_orient(forward):
+        forward = deepcopy(forward)
+        _to_fixed_ori(forward)
+
+    info = evoked[0].info
+    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
+                                                       noise_cov, pca)
+
+    # Whiten lead field.
+    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
+                                                 depth, loose, weights,
+                                                 weights_min)
+
+    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
+    M = np.concatenate([e.data[sel] for e in evoked], axis=1)
+
+    # Whiten data
+    logger.info('Whitening data matrix.')
+    M = np.dot(whitener, M)
+
+    if time_pca:
+        U, s, Vh = linalg.svd(M, full_matrices=False)
+        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
+            U = U[:, :time_pca]
+            s = s[:time_pca]
+            Vh = Vh[:time_pca]
+        M = U * s
+
+    # Scaling to make setting of alpha easy
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
+    alpha_max *= 0.01
+    gain /= alpha_max
+    source_weighting *= alpha_max
+
+    X, active_set, E = mixed_norm_solver(M, gain, alpha,
+                                         maxit=maxit, tol=tol,
+                                         active_set_size=active_set_size,
+                                         debias=debias,
+                                         n_orient=n_dip_per_pos,
+                                         solver=solver)
+
+    if mask is not None:
+        active_set_tmp = np.zeros(len(mask), dtype=np.bool)
+        active_set_tmp[mask] = active_set
+        active_set = active_set_tmp
+        del active_set_tmp
+
+    if time_pca:
+        X = np.dot(X, Vh)
+
+    if active_set.sum() == 0:
+        raise Exception("No active dipoles found. alpha is too big.")
+
+    # Reapply weights to have correct unit
+    X /= source_weighting[active_set][:, None]
+
+    stcs = list()
+    residual = list()
+    cnt = 0
+    for e in evoked:
+        tmin = e.times[0]
+        tstep = 1.0 / e.info['sfreq']
+        Xe = X[:, cnt:(cnt + len(e.times))]
+        stc = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
+        stcs.append(stc)
+        cnt += len(e.times)
+
+        if return_residual:
+            sel = [forward['sol']['row_names'].index(c)
+                   for c in gain_info['ch_names']]
+            r = deepcopy(e)
+            r = pick_channels_evoked(r, include=gain_info['ch_names'])
+            r.data -= np.dot(forward['sol']['data'][sel, :][:, active_set], Xe)
+            residual.append(r)
+
+    logger.info('[done]')
+
+    if len(stcs) == 1:
+        out = stcs[0]
+        if return_residual:
+            residual = residual[0]
+    else:
+        out = stcs
+
+    if return_residual:
+        out = out, residual
+
+    return out
+
+
+def _window_evoked(evoked, size):
+    """Window evoked (size in seconds)"""
+    if isinstance(size, (float, int)):
+        lsize = rsize = float(size)
+    else:
+        lsize, rsize = size
+    evoked = deepcopy(evoked)
+    sfreq = float(evoked.info['sfreq'])
+    lsize = int(lsize * sfreq)
+    rsize = int(rsize * sfreq)
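+    # The window is flat (ones) in the middle and tapers to zero at the
+    # edges with Hann ramps of lsize and rsize samples, to attenuate edge
+    # artifacts.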
+    lhann = signal.hann(lsize * 2)
+    rhann = signal.hann(rsize * 2)
+    window = np.r_[lhann[:lsize],
+                   np.ones(len(evoked.times) - lsize - rsize),
+                   rhann[-rsize:]]
+    evoked.data *= window[None, :]
+    return evoked
+
+
+@verbose
+def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
+                  loose=0.2, depth=0.8, maxit=3000, tol=1e-4,
+                  weights=None, weights_min=None, pca=True, debias=True,
+                  wsize=64, tstep=4, window=0.02,
+                  return_residual=False, verbose=None):
+    """Time-Frequency Mixed-norm estimate (TF-MxNE)
+
+    Compute L1/L2 + L1 mixed-norm solution on time frequency
+    dictionary. Works with evoked data.
+
+    References:
+
+    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+    Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
+    non-stationary source activations
+    Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
+    DOI: 10.1016/j.neuroimage.2012.12.051.
+
+    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+    Functional Brain Imaging with M/EEG Using Structured Sparsity in
+    Time-Frequency Dictionaries
+    Proceedings Information Processing in Medical Imaging
+    Lecture Notes in Computer Science, 2011, Volume 6801/2011,
+    600-611, DOI: 10.1007/978-3-642-22092-0_49
+    http://dx.doi.org/10.1007/978-3-642-22092-0_49
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        Noise covariance to compute whitener.
+    alpha_space : float
+        Regularization parameter for spatial sparsity. If larger than 100,
+        then no source will be active.
+    alpha_time : float
+        Regularization parameter for temporal sparsity. If set to 0,
+        no temporal regularization is applied. In this case, TF-MxNE is
+        equivalent to MxNE with an L21 norm.
+    loose : float in [0, 1]
+        Value that weights the source variances of the dipole components
+        that are parallel (tangential) to the cortical surface. If loose
+        is 0 or None then the solution is computed with fixed orientation.
+        If loose is 1, it corresponds to free orientations.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter.
+    weights : None | array | SourceEstimate
+        Weight for penalty in mixed_norm. Can be None, a
+        1d array of length n_sources, or a SourceEstimate, e.g., obtained
+        with wMNE, dSPM, or fMRI.
+    weights_min : float
+        Sources for which the weight is less than weights_min are not
+        considered in the estimation.
+    pca : bool
+        If True the rank of the data is reduced to its true dimension.
+    wsize : int
+        Length of the STFT window in samples (must be a multiple of 4).
+    tstep : int
+        Step between successive windows in samples (must be a multiple of 2,
+        a divider of wsize and smaller than wsize/2) (default: wsize/2).
+    window : float or (float, float)
+        Length in seconds of the time window used to attenuate edge
+        artifacts. Can be a single float, or a pair of floats if the left
+        and right window lengths differ.
+    debias : bool
+        Remove coefficient amplitude bias due to L1 penalty.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : instance of SourceEstimate
+        Source time courses.
+    residual : instance of Evoked
+        The residual a.k.a. data not explained by the sources.
+        Only returned if return_residual is True.
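+
+    Notes
+    -----
+    Illustrative usage (a sketch; variable names are hypothetical)::
+
+        stc = tf_mixed_norm(evoked, forward, noise_cov, alpha_space=30.,
+                            alpha_time=1., wsize=16, tstep=4)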
+    """
+    all_ch_names = evoked.ch_names
+    info = evoked.info
+
+    # put the forward solution in fixed orientation if it's not already
+    if loose is None and not is_fixed_orient(forward):
+        forward = deepcopy(forward)
+        _to_fixed_ori(forward)
+
+    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
+                                                       noise_cov, pca)
+
+    # Whiten lead field.
+    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
+                                                 depth, loose, weights,
+                                                 weights_min)
+
+    if window is not None:
+        evoked = _window_evoked(evoked, window)
+
+    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
+    M = evoked.data[sel]
+
+    # Whiten data
+    logger.info('Whitening data matrix.')
+    M = np.dot(whitener, M)
+
+    # Scaling to make setting of alpha easy
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
+    alpha_max *= 0.01
+    gain /= alpha_max
+    source_weighting *= alpha_max
+
+    X, active_set, E = tf_mixed_norm_solver(M, gain,
+                                            alpha_space, alpha_time,
+                                            wsize=wsize, tstep=tstep,
+                                            maxit=maxit, tol=tol,
+                                            verbose=verbose,
+                                            n_orient=n_dip_per_pos,
+                                            debias=debias)
+
+    if active_set.sum() == 0:
+        raise Exception("No active dipoles found. alpha is too big.")
+
+    if mask is not None:
+        active_set_tmp = np.zeros(len(mask), dtype=np.bool)
+        active_set_tmp[mask] = active_set
+        active_set = active_set_tmp
+        del active_set_tmp
+
+    # Reapply weights to have correct unit
+    X /= source_weighting[active_set][:, None]
+
+    if return_residual:
+        sel = [forward['sol']['row_names'].index(c)
+               for c in gain_info['ch_names']]
+        residual = deepcopy(evoked)
+        residual = pick_channels_evoked(residual,
+                                        include=gain_info['ch_names'])
+        residual.data -= np.dot(forward['sol']['data'][sel, :][:, active_set],
+                                X)
+
+    tmin = evoked.times[0]
+    tstep = 1.0 / info['sfreq']
+    out = _make_sparse_stc(X, active_set, forward, tmin, tstep)
+    logger.info('[done]')
+
+    if return_residual:
+        out = out, residual
+
+    return out
diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py
new file mode 100644
index 0000000..64acea5
--- /dev/null
+++ b/mne/inverse_sparse/mxne_optim.py
@@ -0,0 +1,631 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+import warnings
+from math import sqrt, ceil
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from .mxne_debiasing import compute_bias
+from .. import verbose
+from ..time_frequency.stft import stft_norm2, stft, istft
+
+
+def groups_norm2(A, n_orient):
+    """compute squared L2 norms of groups inplace"""
+    n_positions = A.shape[0] // n_orient
+    return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1)
+
+
+def norm_l2inf(A, n_orient, copy=True):
+    """L2-inf norm"""
+    if A.size == 0:
+        return 0.0
+    if copy:
+        A = A.copy()
+    return sqrt(np.max(groups_norm2(A, n_orient)))
+
+
+def norm_l21(A, n_orient, copy=True):
+    """L21 norm"""
+    if A.size == 0:
+        return 0.0
+    if copy:
+        A = A.copy()
+    return np.sum(np.sqrt(groups_norm2(A, n_orient)))
+
+
+def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
+    """proximity operator for l21 norm
+
+    L2 over columns and L1 over rows => groups contain n_orient rows.
+
+    It can eventually take into account the negative frequencies
+    when a complex value is passed and is_stft=True.
+
+    Examples
+    --------
+    >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1))
+    >>> Y = np.r_[Y, np.zeros_like(Y)]
+    >>> print Y
+    [[ 0.  4.  3.  0.  0.]
+     [ 0.  4.  3.  0.  0.]
+     [ 0.  0.  0.  0.  0.]
+     [ 0.  0.  0.  0.  0.]]
+    >>> Yp, active_set = prox_l21(Y, 2, 2)
+    >>> print Yp
+    [[ 0.          2.86862915  2.15147186  0.          0.        ]
+     [ 0.          2.86862915  2.15147186  0.          0.        ]]
+    >>> print active_set
+    [ True  True False False]
+    """
+    if len(Y) == 0:
+        return np.zeros_like(Y), np.zeros((0,), dtype=np.bool)
+    if shape is not None:
+        shape_init = Y.shape
+        Y = Y.reshape(*shape)
+    n_positions = Y.shape[0] // n_orient
+
+    if is_stft:
+        rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
+    else:
+        rows_norm = np.sqrt(np.sum((np.abs(Y) ** 2).reshape(n_positions, -1),
+                                    axis=1))
+    # Ensure shrink is >= 0 while avoiding any division by zero
+    shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
+    active_set = shrink > 0.0
+    if n_orient > 1:
+        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
+        shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
+    Y = Y[active_set]
+    if shape is None:
+        Y *= shrink[active_set][:, np.newaxis]
+    else:
+        Y *= shrink[active_set][:, np.newaxis, np.newaxis]
+        Y = Y.reshape(-1, *shape_init[1:])
+    return Y, active_set
+
+
+def prox_l1(Y, alpha, n_orient):
+    """proximity operator for l1 norm with multiple orientation support
+
+    L2 over orientation and L1 over position (space + time)
+
+    Examples
+    --------
+    >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1))
+    >>> Y = np.r_[Y, np.zeros_like(Y)]
+    >>> print Y
+    [[ 1.  2.  3.  2.  0.]
+     [ 1.  2.  3.  2.  0.]
+     [ 0.  0.  0.  0.  0.]
+     [ 0.  0.  0.  0.  0.]]
+    >>> Yp, active_set = prox_l1(Y, 2, 2)
+    >>> print Yp
+    [[ 0.          0.58578644  1.58578644  0.58578644  0.        ]
+     [ 0.          0.58578644  1.58578644  0.58578644  0.        ]]
+    >>> print active_set
+    [ True  True False False]
+    """
+    n_positions = Y.shape[0] // n_orient
+    norms = np.sqrt(np.sum((np.abs(Y) ** 2).T.reshape(-1, n_orient), axis=1))
+    # Ensure shrink is >= 0 while avoiding any division by zero
+    shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
+    shrink = shrink.reshape(-1, n_positions).T
+    active_set = np.any(shrink > 0.0, axis=1)
+    shrink = shrink[active_set]
+    if n_orient > 1:
+        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
+    Y = Y[active_set]
+    if len(Y) > 0:
+        for o in range(n_orient):
+            Y[o::n_orient] *= shrink
+    return Y, active_set
+
+
+def dgap_l21(M, G, X, active_set, alpha, n_orient):
+    """Duality gaps for the mixed norm inverse problem
+
+    For details see:
+    Gramfort A., Kowalski M. and Hamalainen, M,
+    Mixed-norm estimates for the M/EEG inverse problem using accelerated
+    gradient methods, Physics in Medicine and Biology, 2012
+    http://dx.doi.org/10.1088/0031-9155/57/7/1937
+
+    Parameters
+    ----------
+    M : array of shape [n_sensors, n_times]
+        data
+    G : array of shape [n_sensors, n_active]
+        Gain matrix a.k.a. lead field
+    X : array of shape [n_active, n_times]
+        Sources
+    active_set : array of bool
+        Mask of active sources
+    alpha : float
+        Regularization parameter
+    n_orient : int
+        Number of dipoles per locations (typically 1 or 3)
+
+    Returns
+    -------
+    gap : float
+        Dual gap
+    pobj : float
+        Primal cost
+    dobj : float
+        Dual cost. gap = pobj - dobj
+    R : array of shape [n_sensors, n_times]
+        Current residual of M - G * X
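+
+    Notes
+    -----
+    The primal cost is 0.5 * ||M - GX||_Fro^2 + alpha * ||X||_21. A
+    dual-feasible point is obtained by scaling the residual R so that the
+    dual norm (L2-inf) of G.T * R does not exceed alpha; evaluating the
+    dual objective there gives dobj, and gap = pobj - dobj.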
+    """
+    GX = np.dot(G[:, active_set], X)
+    R = M - GX
+    penalty = norm_l21(X, n_orient, copy=True)
+    nR2 = np.sum(R ** 2)
+    pobj = 0.5 * nR2 + alpha * penalty
+    dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
+    scaling = alpha / dual_norm
+    scaling = min(scaling, 1.0)
+    dobj = 0.5 * (scaling ** 2) * nR2 + scaling * np.sum(R * GX)
+    gap = pobj - dobj
+    return gap, pobj, dobj, R
+
+
+@verbose
+def _mixed_norm_solver_prox(M, G, alpha, maxit=200, tol=1e-8, verbose=None,
+                            init=None, n_orient=1):
+    """Solves L21 inverse problem with proximal iterations and FISTA"""
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+
+    lipschitz_constant = 1.1 * linalg.norm(G, ord=2) ** 2
+
+    if n_sources < n_sensors:
+        gram = np.dot(G.T, G)
+        GTM = np.dot(G.T, M)
+    else:
+        gram = None
+
+    if init is None:
+        X = 0.0
+        R = M.copy()
+        if gram is not None:
+            R = np.dot(G.T, R)
+    else:
+        X = init
+        if gram is None:
+            R = M - np.dot(G, X)
+        else:
+            R = GTM - np.dot(gram, X)
+
+    t = 1.0
+    Y = np.zeros((n_sources, n_times))  # FISTA aux variable
+    E = []  # track cost function
+
+    active_set = np.ones(n_sources, dtype=np.bool)  # start with full AS
+
+    for i in xrange(maxit):
+        X0, active_set_0 = X, active_set  # store previous values
+        if gram is None:
+            Y += np.dot(G.T, R) / lipschitz_constant  # ISTA step
+        else:
+            Y += R / lipschitz_constant  # ISTA step
+        X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
+
+        t0 = t
+        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
+        Y.fill(0.0)
+        dt = ((t0 - 1.0) / t)
+        Y[active_set] = (1.0 + dt) * X
+        Y[active_set_0] -= dt * X0
+        Y_as = active_set_0 | active_set
+
+        if gram is None:
+            R = M - np.dot(G[:, Y_as], Y[Y_as])
+        else:
+            R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
+
+        gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
+        E.append(pobj)
+        logger.debug("pobj : %s -- gap : %s" % (pobj, gap))
+        if gap < tol:
+            logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
+            break
+    return X, active_set, E
+
+
+@verbose
+def _mixed_norm_solver_cd(M, G, alpha, maxit=10000, tol=1e-8,
+                          verbose=None, init=None, n_orient=1):
+    """Solves L21 inverse problem with coordinate descent"""
+    from sklearn.linear_model.coordinate_descent import MultiTaskLasso
+
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+
+    if init is not None:
+        init = init.T
+
+    clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol, normalize=False,
+                         fit_intercept=False, max_iter=maxit).fit(G, M,
+                         coef_init=init)
+    X = clf.coef_.T
+    active_set = np.any(X, axis=1)
+    X = X[active_set]
+    gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
+    return X, active_set, pobj
+
+
+@verbose
+def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
+                      active_set_size=50, debias=True, n_orient=1,
+                      solver='auto'):
+    """Solves the L21 inverse problem with an active set strategy
+
+    Algorithm is detailed in:
+    Gramfort A., Kowalski M. and Hamalainen, M,
+    Mixed-norm estimates for the M/EEG inverse problem using accelerated
+    gradient methods, Physics in Medicine and Biology, 2012
+    http://dx.doi.org/10.1088/0031-9155/57/7/1937
+
+    Parameters
+    ----------
+    M : array
+        The data
+    G : array
+        The forward operator
+    alpha : float
+        The regularization parameter. It should be between 0 and 100.
+        A value of 100 will lead to an empty active set (no active source).
+    maxit : int
+        The number of iterations
+    tol : float
+        Tolerance on dual gap for convergence checking
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    active_set_size : int
+        Size of active set increase at each iteration.
+    debias : bool
+        Debias source estimates.
+    n_orient : int
+        The number of orientations (1: fixed, or 3: free or loose).
+    solver : 'prox' | 'cd' | 'auto'
+        The algorithm to use for the optimization.
+
+    Returns
+    -------
+    X : array
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function over the iterations.
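+
+    Notes
+    -----
+    Illustrative usage on random data (a sketch)::
+
+        M = np.random.randn(10, 20)
+        G = np.random.randn(10, 30)
+        X, active_set, E = mixed_norm_solver(M, G, alpha=50.)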
+    """
+    n_dipoles = G.shape[1]
+    n_positions = n_dipoles // n_orient
+    alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
+    logger.info("-- ALPHA MAX : %s" % alpha_max)
+    alpha = float(alpha)
+
+    has_sklearn = True
+    try:
+        from sklearn.linear_model.coordinate_descent import MultiTaskLasso
+    except ImportError:
+        has_sklearn = False
+
+    if solver == 'auto':
+        if has_sklearn and (n_orient == 1):
+            solver = 'cd'
+        else:
+            solver = 'prox'
+
+    if solver == 'cd':
+        if n_orient == 1 and not has_sklearn:
+            warnings.warn("Scikit-learn >= 0.12 cannot be found. "
+                          "Using proximal iterations instead of coordinate "
+                          "descent.")
+            solver = 'prox'
+        if n_orient > 1:
+            warnings.warn("Coordinate descent is only available for fixed "
+                          "orientation. Using proximal iterations instead of "
+                          "coordinate descent")
+            solver = 'prox'
+
+    if solver == 'cd':
+        logger.info("Using coordinate descent")
+        l21_solver = _mixed_norm_solver_cd
+    else:
+        logger.info("Using proximal iterations")
+        l21_solver = _mixed_norm_solver_prox
+
+    if active_set_size is not None:
+        X_init = None
+        n_sensors, n_times = M.shape
+        idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
+        active_set = np.zeros(n_positions, dtype=np.bool)
+        active_set[idx_large_corr[-active_set_size:]] = True
+        if n_orient > 1:
+            active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
+        for k in xrange(maxit):
+            X, as_, E = l21_solver(M, G[:, active_set], alpha,
+                                   maxit=maxit, tol=tol, init=X_init,
+                                   n_orient=n_orient)
+            as_ = np.where(active_set)[0][as_]
+            gap, pobj, dobj, R = dgap_l21(M, G, X, as_, alpha, n_orient)
+            logger.info('gap = %s, pobj = %s' % (gap, pobj))
+            if gap < tol:
+                logger.info('Convergence reached ! (gap: %s < %s)'
+                            % (gap, tol))
+                break
+            else:  # add sources
+                idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
+                                                         n_orient))
+                new_active_idx = idx_large_corr[-active_set_size:]
+                if n_orient > 1:
+                    new_active_idx = n_orient * new_active_idx[:, None] + \
+                                                np.arange(n_orient)[None, :]
+                    new_active_idx = new_active_idx.ravel()
+                idx_old_active_set = as_
+                active_set_old = active_set.copy()
+                active_set[new_active_idx] = True
+                as_size = np.sum(active_set)
+                logger.info('active set size %s' % as_size)
+                X_init = np.zeros((as_size, n_times), dtype=X.dtype)
+                idx_active_set = np.where(active_set)[0]
+                idx = np.searchsorted(idx_active_set, idx_old_active_set)
+                X_init[idx] = X
+                if np.all(active_set_old == active_set):
+                    logger.info('Convergence stopped (AS did not change) !')
+                    break
+        else:
+            logger.warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
+
+        active_set = np.zeros_like(active_set)
+        active_set[as_] = True
+    else:
+        X, active_set, E = l21_solver(M, G, alpha, maxit=maxit,
+                                      tol=tol, n_orient=n_orient)
+
+    if (active_set.sum() > 0) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    return X, active_set, E
+
+
+###############################################################################
+# TF-MxNE
+
+@verbose
+def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
+    """Compute lipschitz constant for FISTA
+
+    It uses a power iteration method.
+    """
+    n_times = M.shape[1]
+    n_points = G.shape[1]
+    iv = np.ones((n_points, n_times), dtype=np.float)
+    v = phi(iv)
+    L = 1e100
+    for it in range(100):
+        L_old = L
+        logger.info('Lipschitz estimation: iteration = %d' % it)
+        iv = np.real(phiT(v))
+        Gv = np.dot(G, iv)
+        GtGv = np.dot(G.T, Gv)
+        w = phi(GtGv)
+        L = np.max(np.abs(w))  # l_inf norm
+        v = w / L
+        if abs((L - L_old) / L_old) < tol:
+            break
+    return L
+
+
+def safe_max_abs(A, ia):
+    """Compute np.max(np.abs(A[ia])), returning 0 when A[ia] is empty"""
+    if np.sum(ia):  # ia is not empty
+        return np.max(np.abs(A[ia]))
+    else:
+        return 0.
+
+
+def safe_max_abs_diff(A, ia, B, ib):
+    """Compute np.max(np.abs(A[ia] - B[ib])), handling empty selections"""
+    A = A[ia] if np.sum(ia) else 0.0
+    B = B[ib] if np.sum(ib) else 0.0
+    return np.max(np.abs(A - B))
+
+
+class _Phi(object):
+    """Utility class making the STFT operator phi callable without using
+    a lambda, which does not pickle"""
+    def __init__(self, wsize, tstep, n_coefs):
+        self.wsize = wsize
+        self.tstep = tstep
+        self.n_coefs = n_coefs
+
+    def __call__(self, x):
+        return stft(x, self.wsize, self.tstep,
+                    verbose=False).reshape(-1, self.n_coefs)
+
+
+class _PhiT(object):
+    """Utility class making the inverse STFT operator phi.T callable without
+    using a lambda, which does not pickle"""
+    def __init__(self, tstep, n_freq, n_step, n_times):
+        self.tstep = tstep
+        self.n_freq = n_freq
+        self.n_step = n_step
+        self.n_times = n_times
+
+    def __call__(self, z):
+        return istft(z.reshape(-1, self.n_freq, self.n_step), self.tstep,
+                     self.n_times)
+
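+# Note: phi and phiT implement the STFT analysis and inverse (synthesis)
+# operators; for the tight-frame windows used here, applying phiT after phi
+# reconstructs the original signal.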
+
+@verbose
+def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
+                         n_orient=1, maxit=200, tol=1e-8, log_objective=True,
+                         lipschitz_constant=None, debias=True, verbose=None):
+    """Solves the TF L21+L1 inverse problem
+
+    Algorithm is detailed in:
+
+    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+    Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
+    non-stationary source activations
+    Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
+    DOI: 10.1016/j.neuroimage.2012.12.051.
+
+    Functional Brain Imaging with M/EEG Using Structured Sparsity in
+    Time-Frequency Dictionaries
+    Gramfort A., Strohmeier D., Haueisen J., Hamalainen M. and Kowalski M.
+    INFORMATION PROCESSING IN MEDICAL IMAGING
+    Lecture Notes in Computer Science, 2011, Volume 6801/2011,
+    600-611, DOI: 10.1007/978-3-642-22092-0_49
+    http://dx.doi.org/10.1007/978-3-642-22092-0_49
+
+    Parameters
+    ----------
+    M : array
+        The data.
+    G : array
+        The forward operator.
+    alpha_space : float
+        The spatial regularization parameter. It should be between 0 and 100.
+    alpha_time : float
+        The temporal regularization parameter. The higher it is, the
+        smoother the estimated time series will be.
+    wsize : int
+        Length of the STFT window in samples (must be a multiple of 4).
+    tstep : int
+        Step between successive windows in samples (must be a multiple of 2,
+        a divisor of wsize, and smaller than wsize / 2) (default: wsize / 2).
+    n_orient : int
+        The number of orientations (1 for fixed or 3 for free or loose
+        orientations).
+    maxit : int
+        The number of iterations.
+    tol : float
+        Convergence is declared when the absolute difference between
+        estimates at two successive iterations falls below tol.
+    log_objective : bool
+        If True, the value of the minimized objective function is computed
+        and stored at every iteration.
+    lipschitz_constant : float | None
+        The Lipschitz constant of the spatio-temporal linear operator.
+        If None it is estimated.
+    debias : bool
+        Debias source estimates.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : array
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function at each iteration. If log_objective
+        is False, it will be empty.
+    """
+    n_sensors, n_times = M.shape
+    n_dipoles = G.shape[1]
+
+    n_step = int(ceil(n_times / float(tstep)))
+    n_freq = wsize / 2 + 1
+    n_coefs = n_step * n_freq
+    phi = _Phi(wsize, tstep, n_coefs)
+    phiT = _PhiT(tstep, n_freq, n_step, n_times)
+
+    Z = np.zeros((0, n_coefs), dtype=np.complex)
+    active_set = np.zeros(n_dipoles, dtype=np.bool)
+    R = M.copy()  # residual
+
+    if lipschitz_constant is None:
+        lipschitz_constant = 1.1 * tf_lipschitz_constant(M, G, phi, phiT)
+
+    logger.info("lipschitz_constant : %s" % lipschitz_constant)
+
+    t = 1.0
+    Y = np.zeros((n_dipoles, n_coefs), dtype=np.complex)  # FISTA aux variable
+    Y[active_set] = Z
+    E = []  # track cost function
+    Y_time_as = None
+    Y_as = None
+
+    alpha_time_lc = alpha_time / lipschitz_constant
+    alpha_space_lc = alpha_space / lipschitz_constant
+    for i in xrange(maxit):
+        Z0, active_set_0 = Z, active_set  # store previous values
+
+        if active_set.sum() < len(R) and Y_time_as is not None:
+            # trick when using tight frame to do a first screen based on
+            # L21 prox (L21 norms are not changed by phi)
+            GTR = np.dot(G.T, R) / lipschitz_constant
+            A = GTR.copy()
+            A[Y_as] += Y_time_as
+            _, active_set_l21 = prox_l21(A, alpha_space_lc, n_orient)
+            # just compute prox_l1 on rows that won't be zeroed by prox_l21
+            B = Y[active_set_l21] + phi(GTR[active_set_l21])
+            Z, active_set_l1 = prox_l1(B, alpha_time_lc, n_orient)
+            active_set_l21[active_set_l21] = active_set_l1
+            active_set_l1 = active_set_l21
+        else:
+            Y += np.dot(G.T, phi(R)) / lipschitz_constant  # ISTA step
+            Z, active_set_l1 = prox_l1(Y, alpha_time_lc, n_orient)
+
+        Z, active_set_l21 = prox_l21(Z, alpha_space_lc, n_orient,
+                                     shape=(-1, n_freq, n_step), is_stft=True)
+        active_set = active_set_l1
+        active_set[active_set_l1] = active_set_l21
+
+        # Check convergence : max(abs(Z - Z0)) < tol
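+        # note: (True - mask) is a logical NOT on boolean arrays, so the
+        # three terms bound |Z| on newly active coefficients, |Z0| on newly
+        # inactive ones, and |Z - Z0| on persistently active ones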
+        stop = (safe_max_abs(Z, True - active_set_0[active_set]) < tol and
+                safe_max_abs(Z0, True - active_set[active_set_0]) < tol and
+                safe_max_abs_diff(Z, active_set_0[active_set],
+                                  Z0, active_set[active_set_0]) < tol)
+        if stop:
+            logger.info('Convergence reached !')
+            break
+
+        # FISTA 2 steps
+        # compute efficiently : Y = Z + ((t0 - 1.0) / t) * (Z - Z0)
+        t0 = t
+        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
+        Y.fill(0.0)
+        dt = ((t0 - 1.0) / t)
+        Y[active_set] = (1.0 + dt) * Z
+        if len(Z0):
+            Y[active_set_0] -= dt * Z0
+        Y_as = active_set_0 | active_set
+
+        Y_time_as = phiT(Y[Y_as])
+        R = M - np.dot(G[:, Y_as], Y_time_as)
+
+        if log_objective:  # log cost function value
+            Z2 = np.abs(Z)
+            Z2 **= 2
+            X = phiT(Z)
+            RZ = M - np.dot(G[:, active_set], X)
+            pobj = 0.5 * linalg.norm(RZ, ord='fro') ** 2 \
+               + alpha_space * norm_l21(X, n_orient) \
+               + alpha_time * np.sqrt(np.sum(Z2.T.reshape(-1, n_orient),
+                                             axis=1)).sum()
+            E.append(pobj)
+            logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
+                        pobj, np.sum(active_set)))
+        else:
+            logger.info("Iteration %d" % i + 1)
+
+    X = phiT(Z)
+
+    if (active_set.sum() > 0) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    return X, active_set, E
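+
+# Editorial usage sketch (mirrors mne/inverse_sparse/tests/test_mxne_optim.py):
+#
+#     import numpy as np
+#     rng = np.random.RandomState(0)
+#     M = rng.randn(30, 64)   # sensors x time points
+#     G = rng.randn(30, 40)   # gain matrix: sensors x dipoles
+#     X, active_set, E = tf_mixed_norm_solver(M, G, alpha_space=10.,
+#                                             alpha_time=5., wsize=32,
+#                                             tstep=4, maxit=200, tol=1e-8)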
diff --git a/mne/inverse_sparse/tests/__init__.py b/mne/inverse_sparse/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/inverse_sparse/tests/test_gamma_map.py b/mne/inverse_sparse/tests/test_gamma_map.py
new file mode 100644
index 0000000..c6bc337
--- /dev/null
+++ b/mne/inverse_sparse/tests/test_gamma_map.py
@@ -0,0 +1,51 @@
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+import os.path as op
+import numpy as np
+from nose.tools import assert_true
+from numpy.testing import assert_array_almost_equal
+
+import mne
+from mne.datasets import sample
+from mne import fiff, read_cov, read_forward_solution
+from mne.inverse_sparse import gamma_map
+
+data_path = sample.data_path()
+fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-eeg-oct-6-fwd.fif')
+
+
+forward = read_forward_solution(fname_fwd, force_fixed=False, surf_ori=True)
+evoked = fiff.Evoked(fname_evoked, setno=0, baseline=(None, 0))
+evoked.crop(tmin=0, tmax=0.3)
+
+cov = read_cov(fname_cov)
+cov = mne.cov.regularize(cov, evoked.info)
+
+
+def test_gamma_map():
+    """Test Gamma MAP inverse"""
+
+    alpha = 0.2
+    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
+                    xyz_same_gamma=True, update_mode=1)
+    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
+    assert_true(np.concatenate(stc.vertno)[idx] == 96397)
+
+    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
+                    xyz_same_gamma=False, update_mode=1)
+    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
+    assert_true(np.concatenate(stc.vertno)[idx] == 82010)
+
+    # force fixed orientation
+    stc, res = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
+                         xyz_same_gamma=False, update_mode=2,
+                         loose=None, return_residual=True)
+    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
+    assert_true(np.concatenate(stc.vertno)[idx] == 83398)
+
+    assert_array_almost_equal(evoked.times, res.times)
diff --git a/mne/inverse_sparse/tests/test_mxne_debiasing.py b/mne/inverse_sparse/tests/test_mxne_debiasing.py
new file mode 100755
index 0000000..e48795a
--- /dev/null
+++ b/mne/inverse_sparse/tests/test_mxne_debiasing.py
@@ -0,0 +1,22 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+
+from mne.inverse_sparse.mxne_debiasing import compute_bias
+
+
+def test_compute_debiasing():
+    """Test source amplitude debiasing"""
+    rng = np.random.RandomState(42)
+    G = rng.randn(10, 4)
+    X = rng.randn(4, 20)
+    debias_true = np.arange(1, 5, dtype=np.float)
+    M = np.dot(G, X * debias_true[:, np.newaxis])
+    debias = compute_bias(M, G, X, max_iter=10000, n_orient=1, tol=1e-7)
+    assert_almost_equal(debias, debias_true, decimal=5)
+    debias = compute_bias(M, G, X, max_iter=10000, n_orient=2, tol=1e-5)
+    assert_almost_equal(debias, [1.8, 1.8, 3.72, 3.72], decimal=2)
diff --git a/mne/inverse_sparse/tests/test_mxne_inverse.py b/mne/inverse_sparse/tests/test_mxne_inverse.py
new file mode 100644
index 0000000..42853de
--- /dev/null
+++ b/mne/inverse_sparse/tests/test_mxne_inverse.py
@@ -0,0 +1,95 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#         Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: Simplified BSD
+
+import os.path as op
+import copy
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne.datasets import sample
+from mne.label import read_label
+from mne import fiff, read_cov, read_forward_solution
+from mne.inverse_sparse import mixed_norm, tf_mixed_norm
+from mne.minimum_norm import apply_inverse, make_inverse_operator
+
+
+data_path = sample.data_path()
+fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-meg-oct-6-fwd.fif')
+label = 'Aud-rh'
+fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
+
+evoked = fiff.Evoked(fname_data, setno=1, baseline=(None, 0))
+
+# Read noise covariance matrix
+cov = read_cov(fname_cov)
+
+# Handling average file
+setno = 0
+loose = None
+depth = 0.9
+
+evoked = fiff.read_evoked(fname_data, setno=setno, baseline=(None, 0))
+evoked.crop(tmin=-0.1, tmax=0.4)
+
+evoked_l21 = copy.deepcopy(evoked)
+evoked_l21.crop(tmin=0.08, tmax=0.1)
+
+# Handling forward solution
+forward = read_forward_solution(fname_fwd, force_fixed=False, surf_ori=True)
+label = read_label(fname_label)
+
+# Reduce source space to make test computation faster
+inverse_operator = make_inverse_operator(evoked.info, forward, cov,
+                                         loose=loose, depth=depth,
+                                         fixed=True)
+stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
+                         method='dSPM')
+del inverse_operator
+stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
+stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
+weights_min = 0.5
+
+
+def test_mxne_inverse():
+    """Test MxNE inverse computation"""
+    alpha = 60  # spatial regularization parameter
+
+    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
+                          depth=0.9, maxit=1000, tol=1e-8, active_set_size=10,
+                          solver='prox')
+    stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
+                        depth=0.9, maxit=1000, tol=1e-8, active_set_size=10,
+                        solver='cd')
+    assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
+    assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
+    assert_array_almost_equal(stc_prox.data, stc_cd.data, 5)
+    assert_true(stc_prox.vertno[1][0] in label.vertices)
+    assert_true(stc_cd.vertno[1][0] in label.vertices)
+
+    stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
+                        depth=depth, maxit=500, tol=1e-4, active_set_size=10,
+                        weights=stc_dspm, weights_min=weights_min,
+                        return_residual=True)
+
+    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
+    assert_true(stc.vertno[1][0] in label.vertices)
+
+
+def test_tf_mxne_inverse():
+    """Test TF-MxNE inverse computation"""
+    alpha_space = 60.  # spatial regularization parameter
+    alpha_time = 1.  # temporal regularization parameter
+
+    stc, _ = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
+                           loose=loose, depth=depth, maxit=100, tol=1e-4,
+                           tstep=4, wsize=16, window=0.1, weights=stc_dspm,
+                           weights_min=weights_min, return_residual=True)
+
+    assert_array_almost_equal(stc.times, evoked.times, 5)
+    assert_true(stc.vertno[1][0] in label.vertices)
diff --git a/mne/inverse_sparse/tests/test_mxne_optim.py b/mne/inverse_sparse/tests/test_mxne_optim.py
new file mode 100644
index 0000000..714296e
--- /dev/null
+++ b/mne/inverse_sparse/tests/test_mxne_optim.py
@@ -0,0 +1,121 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+import numpy as np
+import warnings
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+
+from mne.inverse_sparse.mxne_optim import mixed_norm_solver, tf_mixed_norm_solver
+
+
+def _generate_tf_data():
+    n, p, t = 30, 40, 64
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    active_set = [0, 4]
+    times = np.linspace(0, 2 * np.pi, t)
+    X[0] = np.sin(times)
+    X[4] = -2 * np.sin(4 * times)
+    X[4, times <= np.pi / 2] = 0
+    X[4, times >= np.pi] = 0
+    M = np.dot(G, X)
+    M += 1 * rng.randn(*M.shape)
+    return M, G, active_set
+
+
+def test_l21_mxne():
+    """Test convergence of MxNE solver"""
+    n, p, t, alpha = 30, 40, 20, 1
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    X[0] = 3
+    X[4] = -2
+    M = np.dot(G, X)
+
+    X_hat_prox, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=None, debias=True,
+                            solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_cd, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=None, debias=True,
+                            solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
+
+    X_hat_prox, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=2, debias=True,
+                            solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_cd, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=2, debias=True,
+                            solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
+
+    X_hat_prox, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=2, debias=True,
+                            n_orient=2, solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    # suppress a coordinate-descent warning here
+    with warnings.catch_warnings(record=True) as w:
+        X_hat_cd, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=2, debias=True,
+                            n_orient=2, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    assert_array_equal(X_hat_prox, X_hat_cd)
+
+    X_hat_prox, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=2, debias=True,
+                            n_orient=5)
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    X_hat_cd, active_set, _ = mixed_norm_solver(M,
+                            G, alpha, maxit=1000, tol=1e-8,
+                            active_set_size=2, debias=True,
+                            n_orient=5, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+
+
+def test_tf_mxne():
+    """Test convergence of TF-MxNE solver"""
+    alpha_space = 10
+    alpha_time = 5
+
+    M, G, active_set = _generate_tf_data()
+
+    X_hat, active_set_hat, E = tf_mixed_norm_solver(M, G,
+                                alpha_space, alpha_time, maxit=200,
+                                tol=1e-8, verbose=True,
+                                n_orient=1, tstep=4, wsize=32)
+
+    assert_array_equal(np.where(active_set_hat)[0], active_set)
+
+
+def test_tf_mxne_vs_mxne():
+    """Test equivalence of TF-MxNE (with alpha_time=0) and MxNE"""
+    alpha_space = 60
+    alpha_time = 0
+
+    M, G, active_set = _generate_tf_data()
+
+    X_hat, active_set_hat, E = tf_mixed_norm_solver(M, G,
+                                alpha_space, alpha_time, maxit=200,
+                                tol=1e-8, verbose=True, debias=False,
+                                n_orient=1, tstep=4, wsize=32)
+
+    # Also run L21 and check that we get the same
+    X_hat_l21, _, _ = mixed_norm_solver(M, G, alpha_space, maxit=200,
+                            tol=1e-8, verbose=False, n_orient=1,
+                            active_set_size=None, debias=False)
+    assert_array_almost_equal(X_hat, X_hat_l21, decimal=2)
diff --git a/mne/label.py b/mne/label.py
new file mode 100644
index 0000000..17ab7ab
--- /dev/null
+++ b/mne/label.py
@@ -0,0 +1,976 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from os import path as op
+import os
+import copy as cp
+import numpy as np
+import re
+from scipy import linalg, sparse
+
+import logging
+logger = logging.getLogger('mne')
+
+from .utils import get_subjects_dir, _check_subject
+from .source_estimate import _read_stc, mesh_edges, mesh_dist, morph_data, \
+                             SourceEstimate
+from .surface import read_surface
+from . import verbose
+
+
+class Label(object):
+    """A FreeSurfer/MNE label with vertices restricted to one hemisphere
+
+    Labels can be combined with the ``+`` operator:
+     - Duplicate vertices are removed.
+     - If duplicate vertices have conflicting position values, an error is
+       raised.
+     - Values of duplicate vertices are summed.
+
+    Parameters
+    ----------
+    vertices : array (length N)
+        vertex indices (0 based).
+    pos : array (N by 3) | None
+        locations in meters. If None, then zeros are used.
+    values : array (length N) | None
+        values at the vertices. If None, then ones are used.
+    hemi : 'lh' | 'rh'
+        Hemisphere to which the label applies.
+    comment, name, fpath : str
+        Kept as information but not used by the object itself.
+    subject : str | None
+        Name of the subject the label is from.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    comment : str
+        Comment from the first line of the label file.
+    hemi : 'lh' | 'rh'
+        Hemisphere.
+    name : None | str
+        A name for the label. It is OK to change that attribute manually.
+    pos : array, shape = (n_pos, 3)
+        Locations in meters.
+    subject : str | None
+        Subject name. It is best practice to set this to the proper
+        value on initialization, but it can also be set manually.
+    values : array, len = n_pos
+        Values at the vertices.
+    verbose : bool, str, int, or None
+        See above.
+    vertices : array, len = n_pos
+        Vertex indices (0 based)
+    """
+    @verbose
+    def __init__(self, vertices, pos=None, values=None, hemi=None, comment="",
+                 name=None, filename=None, subject=None, verbose=None):
+        if not isinstance(hemi, basestring):
+            raise ValueError('hemi must be a string, not %s' % type(hemi))
+        vertices = np.asarray(vertices)
+        if values is None:
+            values = np.ones(len(vertices))
+        if pos is None:
+            pos = np.zeros((len(vertices), 3))
+        values = np.asarray(values)
+        pos = np.asarray(pos)
+        if not (len(vertices) == len(values) == len(pos)):
+            err = ("vertices, values and pos need to have same length (number "
+                   "of vertices)")
+            raise ValueError(err)
+
+        # name
+        if name is None and filename is not None:
+            name = op.basename(filename[:-6])
+
+        self.vertices = vertices
+        self.pos = pos
+        self.values = values
+        self.hemi = hemi
+        self.comment = comment
+        self.verbose = verbose
+        self.subject = _check_subject(None, subject, False)
+        self.name = name
+        self.filename = filename
+
+    def __setstate__(self, state):
+        self.vertices = state['vertices']
+        self.pos = state['pos']
+        self.values = state['values']
+        self.hemi = state['hemi']
+        self.comment = state['comment']
+        self.verbose = state['verbose']
+        self.subject = state.get('subject', None)
+        self.name = state['name']
+        self.filename = state['filename']
+
+    def __getstate__(self):
+        out = dict(vertices=self.vertices,
+                   pos=self.pos,
+                   values=self.values,
+                   hemi=self.hemi,
+                   comment=self.comment,
+                   verbose=self.verbose,
+                   subject=self.subject,
+                   name=self.name,
+                   filename=self.filename)
+        return out
+
+    def __repr__(self):
+        name = 'unknown, ' if self.subject is None else self.subject + ', '
+        name += repr(self.name) if self.name is not None else "unnamed"
+        n_vert = len(self)
+        return "<Label  |  %s, %s : %i vertices>" % (name, self.hemi, n_vert)
+
+    def __len__(self):
+        return len(self.vertices)
+
+    def __add__(self, other):
+        if isinstance(other, BiHemiLabel):
+            return other + self
+        elif isinstance(other, Label):
+            if self.subject != other.subject:
+                raise ValueError('Label subject parameters must match, got '
+                                 '"%s" and "%s". Consider setting the '
+                                 'subject parameter on initialization, or '
+                                 'setting label.subject manually before '
+                                 'combining labels.' % (self.subject,
+                                                        other.subject))
+            if self.hemi != other.hemi:
+                name = '%s + %s' % (self.name, other.name)
+                if self.hemi == 'lh':
+                    lh, rh = self.copy(), other.copy()
+                else:
+                    lh, rh = other.copy(), self.copy()
+                return BiHemiLabel(lh, rh, name=name)
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        # check for overlap
+        duplicates = np.intersect1d(self.vertices, other.vertices)
+        n_dup = len(duplicates)
+        if n_dup:
+            self_dup = [np.where(self.vertices == d)[0][0]
+                        for d in duplicates]
+            other_dup = [np.where(other.vertices == d)[0][0]
+                         for d in duplicates]
+            if not np.all(self.pos[self_dup] == other.pos[other_dup]):
+                err = ("Labels %r and %r: vertices overlap but differ in "
+                       "position values" % (self.name, other.name))
+                raise ValueError(err)
+
+            isnew = np.array([v not in duplicates for v in other.vertices])
+
+            vertices = np.hstack((self.vertices, other.vertices[isnew]))
+            pos = np.vstack((self.pos, other.pos[isnew]))
+
+            # find position of other's vertices in new array
+            tgt_idx = [np.where(vertices == v)[0][0] for v in other.vertices]
+            n_self = len(self.values)
+            n_other = len(other.values)
+            new_len = n_self + n_other - n_dup
+            values = np.zeros(new_len, dtype=self.values.dtype)
+            values[:n_self] += self.values
+            values[tgt_idx] += other.values
+        else:
+            vertices = np.hstack((self.vertices, other.vertices))
+            pos = np.vstack((self.pos, other.pos))
+            values = np.hstack((self.values, other.values))
+
+        name0 = self.name if self.name else 'unnamed'
+        name1 = other.name if other.name else 'unnamed'
+
+        label = Label(vertices, pos=pos, values=values, hemi=self.hemi,
+                      comment="%s + %s" % (self.comment, other.comment),
+                      name="%s + %s" % (name0, name1))
+        return label
+
+    def save(self, filename):
+        "calls write_label to write the label to disk"
+        write_label(filename, self)
+
+    def copy(self):
+        """Copy the label instance.
+
+        Returns
+        -------
+        label : instance of Label
+            The copied label.
+        """
+        return cp.deepcopy(self)
+
+    @verbose
+    def smooth(self, subject=None, smooth=2, grade=None,
+               subjects_dir=None, n_jobs=1, copy=True, verbose=None):
+        """Smooth the label
+
+        Useful for filling in labels made in a
+        decimated source space for display.
+
+        Parameters
+        ----------
+        subject : str | None
+            The name of the subject used. If None, the value will be
+            taken from self.subject.
+        smooth : int
+            Number of iterations for the smoothing of the surface data.
+            Cannot be None here since not all vertices are used. For a
+            grade of 5 (e.g., fsaverage), a smoothing of 2 will fill a
+            label.
+        grade : int, list (of two arrays), array, or None
+            Resolution of the icosahedral mesh (typically 5). If None, all
+            vertices will be used (potentially filling the surface). If a list,
+            values will be morphed to the set of vertices specified in grade[0]
+            and grade[1], assuming that these are vertices for the left and
+            right hemispheres. Note that specifying the vertices (e.g.,
+            grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+            standard grade 5 source space) can be substantially faster than
+            computing vertex locations. If one array is used, it is assumed
+            that all vertices belong to the hemisphere of the label. To create
+            a label filling the surface, use None.
+        subjects_dir : string, or None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        n_jobs : int
+            Number of jobs to run in parallel
+        copy : bool
+            If False, smoothing is done in-place.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        label : instance of Label
+            The smoothed label.
+
+        Notes
+        -----
+        This function will set label.pos to be all zeros. If the positions
+        on the new surface are required, consider using mne.read_surface
+        with label.vertices.
+        """
+        subject = _check_subject(self.subject, subject)
+        return self.morph(subject, subject, smooth, grade, subjects_dir,
+                          n_jobs, copy)
+
+    @verbose
+    def morph(self, subject_from=None, subject_to=None, smooth=5, grade=None,
+              subjects_dir=None, n_jobs=1, copy=True, verbose=None):
+        """Morph the label
+
+        Useful for transforming a label from one subject to another.
+
+        Parameters
+        ----------
+        subject_from : str | None
+            The name of the subject of the current label. If None, the
+            initial subject will be taken from self.subject.
+        subject_to : str
+            The name of the subject to morph the label to. This will
+            be put in label.subject of the output label file.
+        smooth : int
+            Number of iterations for the smoothing of the surface data.
+            Cannot be None here since not all vertices are used.
+        grade : int, list (of two arrays), array, or None
+            Resolution of the icosahedral mesh (typically 5). If None, all
+            vertices will be used (potentially filling the surface). If a list,
+            values will be morphed to the set of vertices specified in grade[0]
+            and grade[1], assuming that these are vertices for the left and
+            right hemispheres. Note that specifying the vertices (e.g.,
+            grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+            standard grade 5 source space) can be substantially faster than
+            computing vertex locations. If one array is used, it is assumed
+            that all vertices belong to the hemisphere of the label. To create
+            a label filling the surface, use None.
+        subjects_dir : string, or None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        copy : bool
+            If False, the morphing is done in-place.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        label : instance of Label
+            The morphed label.
+
+        Notes
+        -----
+        This function will set label.pos to be all zeros. If the positions
+        on the new surface are required, consider using mne.read_surface
+        with label.vertices.
+        """
+        subject_from = _check_subject(self.subject, subject_from)
+        if not isinstance(subject_to, basestring):
+            raise TypeError('"subject_to" must be entered as a string')
+        if not isinstance(smooth, int):
+            raise ValueError('smooth must be an integer')
+        if np.all(self.values == 0):
+            raise ValueError('Morphing label with all zero values will result '
+                             'in the label having no vertices. Consider using '
+                             'something like label.values.fill(1.0).')
+        if isinstance(grade, np.ndarray):
+            if self.hemi == 'lh':
+                grade = [grade, np.array([])]
+            else:
+                grade = [np.array([]), grade]
+        if self.hemi == 'lh':
+            vertices = [self.vertices, np.array([])]
+        else:
+            vertices = [np.array([]), self.vertices]
+        data = self.values[:, np.newaxis]
+        stc = SourceEstimate(data, vertices, tmin=1, tstep=1,
+                             subject=subject_from)
+        stc = morph_data(subject_from, subject_to, stc, grade=grade,
+                         smooth=smooth, subjects_dir=subjects_dir,
+                         n_jobs=n_jobs)
+        inds = np.nonzero(stc.data)[0]
+        if copy is True:
+            label = self.copy()
+        else:
+            label = self
+        label.values = stc.data[inds, :].ravel()
+        label.pos = np.zeros((len(inds), 3))
+        if label.hemi == 'lh':
+            label.vertices = stc.vertno[0][inds]
+        else:
+            label.vertices = stc.vertno[1][inds]
+        label.subject = subject_to
+        return label
+
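+# Editorial example of the ``+`` operator documented above (labels are
+# hypothetical):
+#
+#     grown = label_a + label_b      # same hemi -> Label, duplicates merged
+#     bihemi = label_lh + label_rh   # different hemis -> BiHemiLabel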
+
+class BiHemiLabel(object):
+    """A freesurfer/MNE label with vertices in both hemispheres
+
+    Parameters
+    ----------
+    lh, rh : Label
+        Label objects representing the left and the right hemisphere,
+        respectively
+    name : None | str
+        name for the label
+
+    Attributes
+    ----------
+    lh, rh : Label
+        Labels for the left and right hemisphere, respectively.
+    name : None | str
+        A name for the label. It is OK to change that attribute manually.
+    subject : str | None
+        Subject the label is from.
+    """
+    hemi = 'both'
+
+    def __init__(self, lh, rh, name=None):
+        if lh.subject != rh.subject:
+            raise ValueError('lh.subject (%s) and rh.subject (%s) must '
+                             'agree' % (lh.subject, rh.subject))
+        self.lh = lh
+        self.rh = rh
+        self.name = name
+        self.subject = lh.subject
+
+    def __repr__(self):
+        temp = "<BiHemiLabel  |  %s, lh : %i vertices,  rh : %i vertices>"
+        name = 'unknown, ' if self.subject is None else self.subject + ', '
+        name += repr(self.name) if self.name is not None else "unnamed"
+        return temp % (name, len(self.lh), len(self.rh))
+
+    def __len__(self):
+        return len(self.lh) + len(self.rh)
+
+    def __add__(self, other):
+        if isinstance(other, Label):
+            if other.hemi == 'lh':
+                lh = self.lh + other
+                rh = self.rh
+            else:
+                lh = self.lh
+                rh = self.rh + other
+        elif isinstance(other, BiHemiLabel):
+            lh = self.lh + other.lh
+            rh = self.rh + other.rh
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        name = '%s + %s' % (self.name, other.name)
+        return BiHemiLabel(lh, rh, name=name)
+
+
+def read_label(filename, subject=None):
+    """Read FreeSurfer Label file
+
+    Parameters
+    ----------
+    filename : string
+        Path to label file.
+    subject : str | None
+        Name of the subject the data are defined for.
+        It is good practice to set this attribute to avoid combining
+        incompatible labels and SourceEstimates (e.g., ones from other
+        subjects). Note that due to file specification limitations, the
+        subject name isn't saved to or loaded from files written to disk.
+
+    Returns
+    -------
+    label : Label
+        Instance of Label object with attributes:
+            comment        comment from the first line of the label file
+            vertices       vertex indices (0 based, column 1)
+            pos            locations in meters (columns 2 - 4 divided by 1000)
+            values         values at the vertices (column 5)
+    """
+    fid = open(filename, 'r')
+    comment = fid.readline().replace('\n', '')[1:]
+    if subject is not None and not isinstance(subject, basestring):
+        raise TypeError('subject must be a string')
+
+    nv = int(fid.readline())
+    data = np.empty((5, nv))
+    for i, line in enumerate(fid):
+        data[:, i] = line.split()
+
+    basename = op.basename(filename)
+    if basename.endswith('lh.label') or basename.startswith('lh.'):
+        hemi = 'lh'
+    elif basename.endswith('rh.label') or basename.startswith('rh.'):
+        hemi = 'rh'
+    else:
+        raise ValueError('Cannot find which hemisphere it is. File should end'
+                         ' with lh.label or rh.label')
+    fid.close()
+
+    label = Label(vertices=np.array(data[0], dtype=np.int32),
+                  pos=1e-3 * data[1:4].T, values=data[4], hemi=hemi,
+                  comment=comment, filename=filename, subject=subject)
+
+    return label
+
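+# Editorial note on the label file layout assumed by read_label/write_label:
+# a '#' comment line, the vertex count, then one row per vertex with columns
+# vertex index, x, y, z (in mm), value. For example:
+#
+#     #!ascii label, from subject sample
+#     2
+#     1234  12.3  -45.6  78.9  1.000000
+#     1235  12.4  -45.1  78.2  1.000000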
+
+@verbose
+def write_label(filename, label, verbose=None):
+    """Write a FreeSurfer label
+
+    Parameters
+    ----------
+    filename : string
+        Path to label file to produce.
+    label : Label
+        The label object to save.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    hemi = label.hemi
+    path_head, name = op.split(filename)
+    if name.endswith('.label'):
+        name = name[:-6]
+    if not (name.startswith(hemi) or name.endswith(hemi)):
+        name += '-' + hemi
+    filename = op.join(path_head, name) + '.label'
+
+    logger.info('Saving label to : %s' % filename)
+
+    fid = open(filename, 'wb')
+    n_vertices = len(label.vertices)
+    data = np.zeros((n_vertices, 5), dtype=np.float)
+    data[:, 0] = label.vertices
+    data[:, 1:4] = 1e3 * label.pos
+    data[:, 4] = label.values
+    fid.write("#%s\n" % label.comment)
+    fid.write("%d\n" % n_vertices)
+    for d in data:
+        fid.write("%d %f %f %f %f\n" % tuple(d))
+    fid.close()
+
+    return label
+
+
+def label_time_courses(labelfile, stcfile):
+    """Extract the time courses corresponding to a label file from an stc file
+
+    Parameters
+    ----------
+    labelfile : string
+        Path to the label file.
+    stcfile : string
+        Path to the stc file. It must correspond to the same subject and
+        hemisphere as the label file.
+
+    Returns
+    -------
+    values : 2d array
+        The time courses.
+    times : 1d array
+        The time points.
+    vertices : array
+        The indices of the vertices corresponding to the time points.
+    """
+    stc = _read_stc(stcfile)
+    lab = read_label(labelfile)
+
+    vertices = np.intersect1d(stc['vertices'], lab.vertices)
+    idx = [k for k in range(len(stc['vertices']))
+           if stc['vertices'][k] in vertices]
+
+    if len(vertices) == 0:
+        raise ValueError('No vertices match the label in the stc file')
+
+    values = stc['data'][idx]
+    times = stc['tmin'] + stc['tstep'] * np.arange(stc['data'].shape[1])
+
+    return values, times, vertices
+
+
+def label_sign_flip(label, src):
+    """Compute sign for label averaging
+
+    Parameters
+    ----------
+    label : Label
+        A label.
+    src : list of dict
+        The source space over which the label is defined.
+
+    Returns
+    -------
+    flip : array
+        Sign flip vector (contains 1 or -1)
+    """
+    if len(src) != 2:
+        raise ValueError('Only source spaces with 2 hemispheres are accepted')
+
+    lh_vertno = src[0]['vertno']
+    rh_vertno = src[1]['vertno']
+
+    # get source orientations
+    if label.hemi == 'lh':
+        vertno_sel = np.intersect1d(lh_vertno, label.vertices)
+        if len(vertno_sel) == 0:
+            return np.array([])
+        ori = src[0]['nn'][vertno_sel]
+    elif label.hemi == 'rh':
+        vertno_sel = np.intersect1d(rh_vertno, label.vertices)
+        if len(vertno_sel) == 0:
+            return np.array([])
+        ori = src[1]['nn'][vertno_sel]
+    else:
+        raise Exception("Unknown hemisphere type")
+
+    _, _, Vh = linalg.svd(ori, full_matrices=False)
+
+    # Comparing to the direction of the first right singular vector
+    flip = np.sign(np.dot(ori, Vh[:, 0] if len(vertno_sel) > 3 else Vh[0]))
+    return flip
+
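+# Editorial note: the dominant orientation of the label's sources is the
+# first right singular vector of the (n_sources x 3) normal matrix; each
+# source is assigned sign +1/-1 by its agreement with that direction, so
+# that sign-flipped averaging does not cancel opposing sources.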
+
+def stc_to_label(stc, src=None, smooth=5, subjects_dir=None):
+    """Compute a label from the non-zero sources in an stc object.
+
+    Parameters
+    ----------
+    stc : SourceEstimate
+        The source estimates.
+    src : list of dict | string | None
+        The source space over which the source estimates are defined.
+        If it is a string it should be the subject name (e.g. fsaverage).
+        Can be None if stc.subject is not None.
+    smooth : int
+        Number of smoothing steps to use.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+
+    Returns
+    -------
+    labels : list of Labels
+        The generated labels. One per hemisphere containing sources.
+    """
+    src = stc.subject if src is None else src
+    if src is None:
+        raise ValueError('src cannot be None if stc.subject is None')
+    if isinstance(src, basestring):
+        subject = src
+    else:
+        subject = stc.subject
+
+    if not stc.is_surface():
+        raise ValueError('SourceEstimate should be surface source estimates')
+
+    if isinstance(src, basestring):
+        subjects_dir = get_subjects_dir(subjects_dir)
+        surf_path_from = op.join(subjects_dir, src, 'surf')
+        rr_lh, tris_lh = read_surface(op.join(surf_path_from,
+                                      'lh.white'))
+        rr_rh, tris_rh = read_surface(op.join(surf_path_from,
+                                      'rh.white'))
+        rr = [rr_lh, rr_rh]
+        tris = [tris_lh, tris_rh]
+    else:
+        if len(src) != 2:
+            raise ValueError('source space should contain the 2 hemispheres')
+        tris = [src[0]['tris'], src[1]['tris']]
+        rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
+
+    labels = []
+    cnt = 0
+    for hemi, this_vertno, this_tris, this_rr in \
+                                    zip(['lh', 'rh'], stc.vertno, tris, rr):
+        if len(this_vertno) == 0:
+            continue
+        e = mesh_edges(this_tris)
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        this_data = stc.data[cnt:cnt + len(this_vertno)]
+        cnt += len(this_vertno)
+        e = e + sparse.eye(n_vertices, n_vertices)
+        idx_use = this_vertno[np.any(this_data, axis=1)]
+        if len(idx_use) == 0:
+            continue
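+        # dilate the set of active vertices 'smooth' times along mesh edges,
+        # filling in holes left by a decimated source space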
+        for k in range(smooth):
+            e_use = e[:, idx_use]
+            data1 = e_use * np.ones(len(idx_use))
+            idx_use = np.where(data1)[0]
+
+        label = Label(vertices=idx_use,
+                      pos=this_rr[idx_use],
+                      values=np.ones(len(idx_use)),
+                      hemi=hemi,
+                      comment='Label from stc',
+                      subject=subject)
+
+        labels.append(label)
+
+    return labels
+
+
+def _verts_within_dist(graph, source, max_dist):
+    """Find all vertices wihin a maximum geodesic distance from source
+
+    Parameters
+    ----------
+    graph : scipy.sparse.csr_matrix
+        Sparse matrix with distances between adjacent vertices
+    source : int
+        Source vertex
+    max_dist : float
+        Maximum geodesic distance
+
+    Returns
+    -------
+    verts : array
+        Vertices within max_dist
+    dist : array
+        Distances from source vertex
+    """
+    dist_map = {}
+    dist_map[source] = 0
+    verts_added_last = [source]
+    # add neighbors until no more neighbors within max_dist can be found
+    while len(verts_added_last) > 0:
+        verts_added = []
+        for i in verts_added_last:
+            v_dist = dist_map[i]
+            row = graph[i, :]
+            neighbor_vert = row.indices
+            neighbor_dist = row.data
+            for j, d in zip(neighbor_vert, neighbor_dist):
+                n_dist = v_dist + d
+                if j in dist_map:
+                    if n_dist < dist_map[j]:
+                        dist_map[j] = n_dist
+                else:
+                    if n_dist <= max_dist:
+                        dist_map[j] = n_dist
+                        # we found a new vertex within max_dist
+                        verts_added.append(j)
+        verts_added_last = verts_added
+
+    verts = np.sort(np.array(dist_map.keys(), dtype=np.int))
+    dist = np.array([dist_map[v] for v in verts])
+
+    return verts, dist
+
+
+def grow_labels(subject, seeds, extents, hemis, subjects_dir=None):
+    """Generate circular labels in source space with region growing
+
+    This function generates a number of labels in source space by growing
+    regions starting from the vertices defined in "seeds". For each seed, a
+    label is generated containing all vertices within a maximum geodesic
+    distance on the white matter surface from the seed.
+
+    Note: "extents" and "hemis" can either be arrays with the same length as
+          seeds, which allows using a different extent and hemisphere for each
+          label, or integers, in which case the same extent and hemisphere is
+          used for each label.
+
+    Parameters
+    ----------
+    subject : string
+        Name of the subject as in SUBJECTS_DIR
+    seeds : array or int
+        Seed vertex numbers
+    extents : array or float
+        Extents (radius in mm) of the labels
+    hemis : array or int
+        Hemispheres to use for the labels (0: left, 1: right)
+    subjects_dir : string
+        Path to SUBJECTS_DIR if not set in the environment
+
+    Returns
+    -------
+    labels : list of Label
+        The labels. Each label's ``comment`` attribute contains information
+        on the seed vertex and extent; its ``values`` attribute contains the
+        distances from the seed in millimeters.
+
+    """
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    # make sure the inputs are arrays
+    seeds = np.atleast_1d(seeds)
+    extents = np.atleast_1d(extents)
+    hemis = np.atleast_1d(hemis)
+
+    n_seeds = len(seeds)
+
+    if len(extents) != 1 and len(extents) != n_seeds:
+        raise ValueError('The extents parameter has to be of length 1 or '
+                         'len(seeds)')
+
+    if len(hemis) != 1 and len(hemis) != n_seeds:
+        raise ValueError('The hemis parameter has to be of length 1 or '
+                         'len(seeds)')
+
+    # make the arrays the same length as seeds
+    if len(extents) == 1:
+        extents = np.tile(extents, n_seeds)
+
+    if len(hemis) == 1:
+        hemis = np.tile(hemis, n_seeds)
+
+    hemis = ['lh' if h == 0 else 'rh' for h in hemis]
+
+    # load the surfaces and create the distance graphs
+    tris, vert, dist = {}, {}, {}
+    for hemi in set(hemis):
+        surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.white')
+        vert[hemi], tris[hemi] = read_surface(surf_fname)
+        dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
+
+    # create the patches
+    labels = []
+    for seed, extent, hemi in zip(seeds, extents, hemis):
+        label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent)
+
+        # create a label
+        comment = 'Circular label: seed=%d, extent=%0.1fmm' % (seed, extent)
+        label = Label(vertices=label_verts,
+                      pos=vert[hemi][label_verts],
+                      values=label_dist,
+                      hemi=hemi,
+                      comment=comment)
+        labels.append(label)
+
+    return labels
+
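+# Editorial usage sketch (hypothetical seed/extent values):
+#
+#     labels = grow_labels('sample', seeds=[1000], extents=[10.], hemis=[0])
+#     # -> one 'lh' label of 10 mm geodesic radius grown around vertex 1000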
+
+def _read_annot(fname):
+    """Read a Freesurfer annotation from a .annot file.
+
+    Note : Copied from PySurfer
+
+    Parameters
+    ----------
+    fname : str
+        Path to annotation file
+
+    Returns
+    -------
+    annot : numpy array, shape=(n_verts)
+        Annotation id at each vertex
+    ctab : numpy array, shape=(n_verts, 5)
+        RGBA + label id colortable array
+    names : list of str
+        List of region names as stored in the annot file
+
+    """
+    if not op.isfile(fname):
+        dir_name = op.split(fname)[0]
+        if not op.isdir(dir_name):
+            raise IOError('Directory for annotation does not exist: %s',
+                          fname)
+        cands = os.listdir(dir_name)
+        cands = [c for c in cands if '.annot' in c]
+        if len(cands) == 0:
+            raise IOError('No such file %s, no candidate parcellations '
+                          'found in directory' % fname)
+        else:
+            raise IOError('No such file %s, candidate parcellations in '
+                          'that directory: %s' % (fname, ', '.join(cands)))
+    with open(fname, "rb") as fid:
+        n_verts = np.fromfile(fid, '>i4', 1)[0]
+        data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
+        annot = data[:, 1]
+        ctab_exists = np.fromfile(fid, '>i4', 1)[0]
+        if not ctab_exists:
+            raise Exception('Color table not found in annotation file')
+        n_entries = np.fromfile(fid, '>i4', 1)[0]
+        if n_entries > 0:
+            length = np.fromfile(fid, '>i4', 1)[0]
+            orig_tab = np.fromfile(fid, '>c', length)
+            orig_tab = orig_tab[:-1]
+
+            names = list()
+            ctab = np.zeros((n_entries, 5), np.int)
+            for i in xrange(n_entries):
+                name_length = np.fromfile(fid, '>i4', 1)[0]
+                name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
+                names.append(name)
+                ctab[i, :4] = np.fromfile(fid, '>i4', 4)
+                ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
+                              ctab[i, 2] * (2 ** 16) +
+                              ctab[i, 3] * (2 ** 24))
+        else:
+            ctab_version = -n_entries
+            if ctab_version != 2:
+                raise Exception('Color table version not supported')
+            n_entries = np.fromfile(fid, '>i4', 1)[0]
+            ctab = np.zeros((n_entries, 5), np.int)
+            length = np.fromfile(fid, '>i4', 1)[0]
+            _ = np.fromfile(fid, "|S%d" % length, 1)[0]  # Orig table path
+            entries_to_read = np.fromfile(fid, '>i4', 1)[0]
+            names = list()
+            for i in xrange(entries_to_read):
+                _ = np.fromfile(fid, '>i4', 1)[0]  # Structure
+                name_length = np.fromfile(fid, '>i4', 1)[0]
+                name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
+                names.append(name)
+                ctab[i, :4] = np.fromfile(fid, '>i4', 4)
+                ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
+                              ctab[i, 2] * (2 ** 16))
+        ctab[:, 3] = 255
+
+    return annot, ctab, names
+
+
+def labels_from_parc(subject, parc='aparc', hemi='both', surf_name='white',
+                     annot_fname=None, regexp=None, subjects_dir=None,
+                     verbose=None):
+    """ Read labels from FreeSurfer parcellation
+
+    Note: Only cortical labels will be returned.
+
+    Parameters
+    ----------
+    subject : str
+        The subject for which to read the parcellation for.
+    parc : str
+        The parcellation to use, e.g., 'aparc' or 'aparc.a2009s'.
+    hemi : str
+        The hemisphere to read the parcellation for, can be 'lh', 'rh',
+        or 'both'.
+    surf_name : str
+        Surface used to obtain vertex locations, e.g., 'white', 'pial'
+    annot_fname : str or None
+        Filename of the .annot file. If not None, only this file is read
+        and 'parc' and 'hemi' are ignored.
+    regexp : str
+        Regular expression or substring to select particular labels from the
+        parcellation. E.g. 'superior' will return all labels in which this
+        substring is contained.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    labels : list of Label
+        The labels, sorted by label name (ascending).
+    colors : list of tuples
+        RGBA color for obtained from the parc color table for each label.
+    """
+    logger.info('Reading labels from parcellation..')
+
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    # get the .annot filenames and hemispheres
+    if annot_fname is not None:
+        # we use the .annot file specified by the user
+        hemis = [op.basename(annot_fname)[:2]]
+        if hemis[0] not in ['lh', 'rh']:
+            raise ValueError('Could not determine hemisphere from filename, '
+                             'filename has to start with "lh" or "rh".')
+        annot_fname = [annot_fname]
+    else:
+        # construct .annot file names for requested subject, parc, hemi
+        if hemi not in ['lh', 'rh', 'both']:
+            raise ValueError('hemi has to be "lh", "rh", or "both"')
+        if hemi == 'both':
+            hemis = ['lh', 'rh']
+        else:
+            hemis = [hemi]
+        annot_fname = list()
+        for hemi in hemis:
+            fname = op.join(subjects_dir, subject, 'label',
+                            '%s.%s.annot' % (hemi, parc))
+            annot_fname.append(fname)
+
+    # now we are ready to create the labels
+    n_read = 0
+    labels = list()
+    label_colors = list()
+    for fname, hemi in zip(annot_fname, hemis):
+        # read annotation
+        annot, ctab, label_names = _read_annot(fname)
+        label_rgbas = ctab[:, :4]
+        label_ids = ctab[:, -1]
+
+        # load the vertex positions from surface
+        fname_surf = op.join(subjects_dir, subject, 'surf',
+                             '%s.%s' % (hemi, surf_name))
+        vert_pos, _ = read_surface(fname_surf)
+        vert_pos /= 1e3  # the positions in labels are in meters
+        for label_id, label_name, label_rgba in\
+                zip(label_ids, label_names, label_rgbas):
+            vertices = np.where(annot == label_id)[0]
+            if len(vertices) == 0:
+                # label is not part of cortical surface
+                continue
+            pos = vert_pos[vertices, :]
+            values = np.zeros(len(vertices))
+            name = label_name + '-' + hemi
+            label = Label(vertices, pos, values, hemi, name=name)
+            labels.append(label)
+
+            # store the color
+            label_rgba = tuple(label_rgba / 255.)
+            label_colors.append(label_rgba)
+
+        n_read = len(labels) - n_read
+        logger.info('   read %d labels from %s' % (n_read, fname))
+
+    if regexp is not None:
+        # allow for convenient substring match
+        r_ = (re.compile('.*%s.*' % regexp if regexp.replace('_', '').isalnum()
+              else regexp))
+
+    # sort the labels and colors by label name
+    names = [label.name for label in labels]
+    labels_ = zip(*((label, color) for (name, label, color) in sorted(
+                    zip(names, labels, label_colors))
+                        if (r_.match(name) if regexp else True)))
+    if labels_:
+        labels, label_colors = labels_
+    else:
+        raise RuntimeError('The regular expression supplied did not match.')
+    # convert tuples to lists
+    labels = list(labels)
+    label_colors = list(label_colors)
+    logger.info('[done]')
+
+    return labels, label_colors
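+
+# Editorial usage sketch ('superior' substring example from the docstring):
+#
+#     labels, colors = labels_from_parc('sample', parc='aparc', hemi='both')
+#     sup_labels, _ = labels_from_parc('sample', parc='aparc',
+#                                      regexp='superior')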
diff --git a/mne/layouts/CTF-275.lout b/mne/layouts/CTF-275.lout
new file mode 100755
index 0000000..53d924c
--- /dev/null
+++ b/mne/layouts/CTF-275.lout
@@ -0,0 +1,276 @@
+  -42.27    42.33   -39.99    31.80
+001    -4.09    10.91     4.00     3.00 MLC11-2622
+002    -7.25     8.87     4.00     3.00 MLC12-2622
+003   -10.79     7.43     4.00     3.00 MLC13-2622
+004   -14.40     5.31     4.00     3.00 MLC14-2622
+005   -17.45     2.88     4.00     3.00 MLC15-2622
+006   -19.94    -0.21     4.00     3.00 MLC16-2622
+007   -22.30    -3.88     4.00     3.00 MLC17-2622
+008    -7.70     5.16     4.00     3.00 MLC21-2622
+009   -11.18     3.69     4.00     3.00 MLC22-2622
+010   -14.17     1.40     4.00     3.00 MLC23-2622
+011   -16.42    -1.52     4.00     3.00 MLC24-2622
+012   -18.64    -4.88     4.00     3.00 MLC25-2622
+013   -12.55    -2.00     4.00     3.00 MLC31-2622
+014   -15.13    -5.41     4.00     3.00 MLC32-2622
+015    -9.57     0.28     4.00     3.00 MLC41-2622
+016   -11.51    -5.56     4.00     3.00 MLC42-2622
+017    -4.04     4.58     4.00     3.00 MLC51-2622
+018    -6.04     1.35     4.00     3.00 MLC52-2622
+019    -8.79    -3.34     4.00     3.00 MLC53-2622
+020    -8.32    -7.10     4.00     3.00 MLC54-2622
+021    -6.60   -10.22     4.00     3.00 MLC55-2622
+022    -4.01    -1.76     4.00     3.00 MLC61-2622
+023    -5.55    -4.97     4.00     3.00 MLC62-2622
+024    -3.74    -8.12     4.00     3.00 MLC63-2622
+025    -7.63    28.14     4.00     3.00 MLF11-2622
+026   -12.92    27.01     4.00     3.00 MLF12-2622
+027   -18.14    25.41     4.00     3.00 MLF13-2622
+028   -23.34    23.65     4.00     3.00 MLF14-2622
+029    -4.64    25.47     4.00     3.00 MLF21-2622
+030    -9.22    24.68     4.00     3.00 MLF22-2622
+031   -13.60    23.41     4.00     3.00 MLF23-2622
+032   -18.31    21.53     4.00     3.00 MLF24-2622
+033   -22.68    19.69     4.00     3.00 MLF25-2622
+034    -6.57    22.14     4.00     3.00 MLF31-2622
+035   -10.75    21.22     4.00     3.00 MLF32-2622
+036   -15.16    19.49     4.00     3.00 MLF33-2622
+037   -19.01    17.57     4.00     3.00 MLF34-2622
+038   -22.93    15.25     4.00     3.00 MLF35-2622
+039    -4.25    19.38     4.00     3.00 MLF41-2622
+040    -8.17    18.80     4.00     3.00 MLF42-2622
+041   -12.29    17.37     4.00     3.00 MLF43-2622
+042   -15.93    15.49     4.00     3.00 MLF44-2622
+043   -19.89    13.39     4.00     3.00 MLF45-2622
+044   -24.12    10.50     4.00     3.00 MLF46-2622
+045    -5.48    16.15     4.00     3.00 MLF51-2622
+046    -9.58    15.10     4.00     3.00 MLF52-2622
+047   -13.17    13.43     4.00     3.00 MLF53-2622
+048   -16.66    11.39     4.00     3.00 MLF54-2622
+049   -20.76     9.06     4.00     3.00 MLF55-2622
+050   -24.71     5.73     4.00     3.00 MLF56-2622
+051    -7.17    12.78     4.00     3.00 MLF61-2622
+052   -10.58    11.08     4.00     3.00 MLF62-2622
+053   -13.93     9.16     4.00     3.00 MLF63-2622
+054   -17.37     7.29     4.00     3.00 MLF64-2622
+055   -20.83     4.87     4.00     3.00 MLF65-2622
+056   -23.40     1.59     4.00     3.00 MLF66-2622
+057   -25.90    -2.51     4.00     3.00 MLF67-2622
+058    -6.96   -27.32     4.00     3.00 MLO11-2622
+059   -11.88   -25.97     4.00     3.00 MLO12-2622
+060   -16.48   -23.69     4.00     3.00 MLO13-2622
+061   -20.64   -20.44     4.00     3.00 MLO14-2622
+062    -4.82   -30.75     4.00     3.00 MLO21-2622
+063   -10.11   -29.77     4.00     3.00 MLO22-2622
+064   -15.52   -27.87     4.00     3.00 MLO23-2622
+065   -20.40   -24.85     4.00     3.00 MLO24-2622
+066    -7.92   -33.45     4.00     3.00 MLO31-2622
+067   -13.84   -31.94     4.00     3.00 MLO32-2622
+068   -19.61   -29.16     4.00     3.00 MLO33-2622
+069   -24.70   -25.44     4.00     3.00 MLO34-2622
+070    -5.16   -36.86     4.00     3.00 MLO41-2622
+071   -11.67   -35.84     4.00     3.00 MLO42-2622
+072   -17.98   -33.55     4.00     3.00 MLO43-2622
+073   -23.91   -30.00     4.00     3.00 MLO44-2622
+074    -8.79   -39.34     4.00     3.00 MLO51-2622
+075   -15.83   -37.54     4.00     3.00 MLO52-2622
+076   -22.47   -34.34     4.00     3.00 MLO53-2622
+077    -4.98   -13.36     4.00     3.00 MLP11-2622
+078   -10.20   -10.01     4.00     3.00 MLP12-2622
+079    -3.80   -16.69     4.00     3.00 MLP21-2622
+080    -8.73   -13.30     4.00     3.00 MLP22-2622
+081   -13.58    -8.80     4.00     3.00 MLP23-2622
+082    -5.66   -19.72     4.00     3.00 MLP31-2622
+083    -8.41   -16.83     4.00     3.00 MLP32-2622
+084   -12.08   -14.80     4.00     3.00 MLP33-2622
+085   -15.13   -11.95     4.00     3.00 MLP34-2622
+086   -17.18    -8.63     4.00     3.00 MLP35-2622
+087    -9.92   -20.16     4.00     3.00 MLP41-2622
+088   -13.37   -18.09     4.00     3.00 MLP42-2622
+089   -16.59   -15.58     4.00     3.00 MLP43-2622
+090   -19.06   -11.87     4.00     3.00 MLP44-2622
+091   -20.87    -8.06     4.00     3.00 MLP45-2622
+092    -4.02   -24.07     4.00     3.00 MLP51-2622
+093    -8.77   -23.79     4.00     3.00 MLP52-2622
+094   -12.92   -22.08     4.00     3.00 MLP53-2622
+095   -16.83   -19.50     4.00     3.00 MLP54-2622
+096   -20.23   -16.32     4.00     3.00 MLP55-2622
+097   -22.76   -11.97     4.00     3.00 MLP56-2622
+098   -24.58    -7.58     4.00     3.00 MLP57-2622
+099   -27.14    12.98     4.00     3.00 MLT11-2622
+100   -28.19     7.51     4.00     3.00 MLT12-2622
+101   -28.08     2.09     4.00     3.00 MLT13-2622
+102   -28.56    -5.98     4.00     3.00 MLT14-2622
+103   -26.96   -11.17     4.00     3.00 MLT15-2622
+104   -24.11   -16.46     4.00     3.00 MLT16-2622
+105   -27.30    17.85     4.00     3.00 MLT21-2622
+106   -31.47    10.04     4.00     3.00 MLT22-2622
+107   -31.85     3.70     4.00     3.00 MLT23-2622
+108   -32.08    -2.62     4.00     3.00 MLT24-2622
+109   -31.09    -9.80     4.00     3.00 MLT25-2622
+110   -28.71   -15.38     4.00     3.00 MLT26-2622
+111   -24.78   -20.78     4.00     3.00 MLT27-2622
+112   -28.61    21.64     4.00     3.00 MLT31-2622
+113   -32.09    15.32     4.00     3.00 MLT32-2622
+114   -35.40     5.79     4.00     3.00 MLT33-2622
+115   -35.85    -1.29     4.00     3.00 MLT34-2622
+116   -34.97    -7.76     4.00     3.00 MLT35-2622
+117   -32.89   -13.91     4.00     3.00 MLT36-2622
+118   -29.32   -20.20     4.00     3.00 MLT37-2622
+119   -33.87    18.93     4.00     3.00 MLT41-2622
+120   -36.68    11.37     4.00     3.00 MLT42-2622
+121   -38.92     2.11     4.00     3.00 MLT43-2622
+122   -38.70    -5.16     4.00     3.00 MLT44-2622
+123   -36.95   -12.13     4.00     3.00 MLT45-2622
+124   -33.72   -18.79     4.00     3.00 MLT46-2622
+125   -29.28   -25.28     4.00     3.00 MLT47-2622
+126   -38.78    14.74     4.00     3.00 MLT51-2622
+127   -41.29     6.62     4.00     3.00 MLT52-2622
+128   -41.87    -1.80     4.00     3.00 MLT53-2622
+129   -40.62    -9.63     4.00     3.00 MLT54-2622
+130   -37.78   -16.89     4.00     3.00 MLT55-2622
+131   -33.73   -24.02     4.00     3.00 MLT56-2622
+132   -28.51   -29.92     4.00     3.00 MLT57-2622
+133    -0.24    10.97     4.00     3.00 MRC11-2622
+134     2.99     8.95     4.00     3.00 MRC12-2622
+135     6.57     7.62     4.00     3.00 MRC13-2622
+136    10.22     5.56     4.00     3.00 MRC14-2622
+137    13.27     3.22     4.00     3.00 MRC15-2622
+138    15.86     0.21     4.00     3.00 MRC16-2622
+139    18.32    -3.45     4.00     3.00 MRC17-2622
+140     3.53     5.28     4.00     3.00 MRC21-2622
+141     7.00     3.85     4.00     3.00 MRC22-2622
+142    10.06     1.68     4.00     3.00 MRC23-2622
+143    12.33    -1.20     4.00     3.00 MRC24-2622
+144    14.73    -4.52     4.00     3.00 MRC25-2622
+145     8.51    -1.76     4.00     3.00 MRC31-2622
+146    11.17    -5.14     4.00     3.00 MRC32-2622
+147     5.51     0.46     4.00     3.00 MRC41-2622
+148     7.56    -5.33     4.00     3.00 MRC42-2622
+149    -0.17     4.62     4.00     3.00 MRC51-2622
+150     1.93     1.46     4.00     3.00 MRC52-2622
+151     4.78    -3.16     4.00     3.00 MRC53-2622
+152     4.39    -6.98     4.00     3.00 MRC54-2622
+153     2.73   -10.10     4.00     3.00 MRC55-2622
+154    -0.07    -1.75     4.00     3.00 MRC61-2622
+155     1.58    -4.86     4.00     3.00 MRC62-2622
+156    -0.15    -8.08     4.00     3.00 MRC63-2622
+157     2.97    28.24     4.00     3.00 MRF11-2622
+158     8.25    27.25     4.00     3.00 MRF12-2622
+159    13.54    25.74     4.00     3.00 MRF13-2622
+160    18.74    24.12     4.00     3.00 MRF14-2622
+161     0.03    25.52     4.00     3.00 MRF21-2622
+162     4.63    24.85     4.00     3.00 MRF22-2622
+163     9.03    23.67     4.00     3.00 MRF23-2622
+164    13.78    21.87     4.00     3.00 MRF24-2622
+165    18.19    20.13     4.00     3.00 MRF25-2622
+166     2.05    22.22     4.00     3.00 MRF31-2622
+167     6.27    21.38     4.00     3.00 MRF32-2622
+168    10.63    19.79     4.00     3.00 MRF33-2622
+169    14.57    17.90     4.00     3.00 MRF34-2622
+170    18.54    15.70     4.00     3.00 MRF35-2622
+171    -0.22    19.42     4.00     3.00 MRF41-2622
+172     3.75    18.84     4.00     3.00 MRF42-2622
+173     7.86    17.57     4.00     3.00 MRF43-2622
+174    11.53    15.78     4.00     3.00 MRF44-2622
+175    15.55    13.76     4.00     3.00 MRF45-2622
+176    19.83    10.96     4.00     3.00 MRF46-2622
+177     1.08    16.23     4.00     3.00 MRF51-2622
+178     5.20    15.33     4.00     3.00 MRF52-2622
+179     8.81    13.68     4.00     3.00 MRF53-2622
+180    12.37    11.71     4.00     3.00 MRF54-2622
+181    16.53     9.44     4.00     3.00 MRF55-2622
+182    20.54     6.21     4.00     3.00 MRF56-2622
+183     2.82    12.87     4.00     3.00 MRF61-2622
+184     6.27    11.29     4.00     3.00 MRF62-2622
+185     9.66     9.43     4.00     3.00 MRF63-2622
+186    13.14     7.59     4.00     3.00 MRF64-2622
+187    16.52     5.22     4.00     3.00 MRF65-2622
+188    19.31     2.05     4.00     3.00 MRF66-2622
+189    21.91    -1.92     4.00     3.00 MRF67-2622
+190     3.46   -27.20     4.00     3.00 MRO11-2622
+191     8.35   -25.76     4.00     3.00 MRO12-2622
+192    12.92   -23.40     4.00     3.00 MRO13-2622
+193    17.02   -20.06     4.00     3.00 MRO14-2622
+194     1.43   -30.69     4.00     3.00 MRO21-2622
+195     6.66   -29.60     4.00     3.00 MRO22-2622
+196    12.02   -27.57     4.00     3.00 MRO23-2622
+197    16.88   -24.46     4.00     3.00 MRO24-2622
+198     4.55   -33.35     4.00     3.00 MRO31-2622
+199    10.46   -31.70     4.00     3.00 MRO32-2622
+200    16.07   -28.88     4.00     3.00 MRO33-2622
+201    21.16   -24.93     4.00     3.00 MRO34-2622
+202     1.88   -36.78     4.00     3.00 MRO41-2622
+203     8.37   -35.64     4.00     3.00 MRO42-2622
+204    14.63   -33.19     4.00     3.00 MRO43-2622
+205    20.45   -29.57     4.00     3.00 MRO44-2622
+206     5.57   -39.20     4.00     3.00 MRO51-2622
+207    12.57   -37.26     4.00     3.00 MRO52-2622
+208    19.11   -33.96     4.00     3.00 MRO53-2622
+209     1.20   -13.27     4.00     3.00 MRP11-2622
+210     6.34    -9.81     4.00     3.00 MRP12-2622
+211     0.06   -16.65     4.00     3.00 MRP21-2622
+212     4.94   -13.15     4.00     3.00 MRP22-2622
+213     9.72    -8.56     4.00     3.00 MRP23-2622
+214     2.03   -19.64     4.00     3.00 MRP31-2622
+215     4.72   -16.72     4.00     3.00 MRP32-2622
+216     8.28   -14.64     4.00     3.00 MRP33-2622
+217    11.32   -11.68     4.00     3.00 MRP34-2622
+218    13.30    -8.29     4.00     3.00 MRP35-2622
+219     6.32   -19.99     4.00     3.00 MRP41-2622
+220     9.66   -17.86     4.00     3.00 MRP42-2622
+221    12.83   -15.29     4.00     3.00 MRP43-2622
+222    15.21   -11.53     4.00     3.00 MRP44-2622
+223    16.99    -7.64     4.00     3.00 MRP45-2622
+224     0.42   -24.03     4.00     3.00 MRP51-2622
+225     5.29   -23.71     4.00     3.00 MRP52-2622
+226     9.32   -21.86     4.00     3.00 MRP53-2622
+227    13.19   -19.21     4.00     3.00 MRP54-2622
+228    16.49   -15.99     4.00     3.00 MRP55-2622
+229    18.98   -11.54     4.00     3.00 MRP56-2622
+230    20.69    -7.11     4.00     3.00 MRP57-2622
+231    22.81    13.51     4.00     3.00 MRT11-2622
+232    23.97     8.09     4.00     3.00 MRT12-2622
+233    23.97     2.65     4.00     3.00 MRT13-2622
+234    24.63    -5.42     4.00     3.00 MRT14-2622
+235    23.16   -10.65     4.00     3.00 MRT15-2622
+236    20.37   -16.02     4.00     3.00 MRT16-2622
+237    22.88    18.38     4.00     3.00 MRT21-2622
+238    27.23    10.62     4.00     3.00 MRT22-2622
+239    27.73     4.35     4.00     3.00 MRT23-2622
+240    28.08    -1.95     4.00     3.00 MRT24-2622
+241    27.24    -9.21     4.00     3.00 MRT25-2622
+242    24.97   -14.84     4.00     3.00 MRT26-2622
+243    21.15   -20.30     4.00     3.00 MRT27-2622
+244    24.07    22.26     4.00     3.00 MRT31-2622
+245    27.72    15.94     4.00     3.00 MRT32-2622
+246    31.24     6.55     4.00     3.00 MRT33-2622
+247    31.84    -0.55     4.00     3.00 MRT34-2622
+248    31.09    -7.10     4.00     3.00 MRT35-2622
+249    29.13   -13.33     4.00     3.00 MRT36-2622
+250    25.63   -19.73     4.00     3.00 MRT37-2622
+251    29.40    19.66     4.00     3.00 MRT41-2622
+252    32.38    12.17     4.00     3.00 MRT42-2622
+253    34.86     2.97     4.00     3.00 MRT43-2622
+254    34.80    -4.39     4.00     3.00 MRT44-2622
+255    33.11   -11.36     4.00     3.00 MRT45-2622
+256    30.03   -18.16     4.00     3.00 MRT46-2622
+257    25.54   -24.88     4.00     3.00 MRT47-2622
+258    34.47    15.52     4.00     3.00 MRT51-2622
+259    37.12     7.54     4.00     3.00 MRT52-2622
+260    37.93    -0.94     4.00     3.00 MRT53-2622
+261    36.82    -8.89     4.00     3.00 MRT54-2622
+262    34.10   -16.25     4.00     3.00 MRT55-2622
+263    30.13   -23.45     4.00     3.00 MRT56-2622
+264    25.07   -29.43     4.00     3.00 MRT57-2622
+265    -2.13     7.84     4.00     3.00 MZC01-2622
+266    -2.05     1.38     4.00     3.00 MZC02-2622
+267    -1.99    -5.04     4.00     3.00 MZC03-2622
+268    -1.93   -11.44     4.00     3.00 MZC04-2622
+269    -2.33    28.50     4.00     3.00 MZF01-2622
+270    -2.28    22.54     4.00     3.00 MZF02-2622
+271    -2.20    14.52     4.00     3.00 MZF03-2622
+272    -1.77   -27.22     4.00     3.00 MZO01-2622
+273    -1.71   -34.04     4.00     3.00 MZO02-2622
+274    -1.66   -39.69     4.00     3.00 MZO03-2622
+275    -1.81   -21.05     4.00     3.00 MZP01-2622
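
The .lout files introduced in this commit all follow the same plain-text
layout: the first line gives the plotting bounding box as xmin xmax ymin
ymax, and every following line reads "id x y width height name". A hedged
reading sketch (read_lout is a hypothetical helper, not the loader shipped
in mne.layouts):

    import numpy as np

    def read_lout(fname):
        # hypothetical reader for the .lout format shown above
        with open(fname) as fid:
            # first line: xmin, xmax, ymin, ymax of the plotting box
            box = tuple(float(v) for v in fid.readline().split())
            ids, pos, names = [], [], []
            for line in fid:
                parts = line.split()
                if len(parts) < 6:
                    continue  # tolerate blank lines
                ids.append(int(parts[0]))
                pos.append([float(v) for v in parts[1:5]])  # x, y, width, height
                names.append(' '.join(parts[5:]))  # e.g. 'MEG 0113' contains a space
        return box, ids, np.array(pos), names
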
diff --git a/mne/layouts/CTF151.lay b/mne/layouts/CTF151.lay
new file mode 100644
index 0000000..c9d68f3
--- /dev/null
+++ b/mne/layouts/CTF151.lay
@@ -0,0 +1,153 @@
+1 -0.440000 -4.000000 0.551100 0.351100 MLC11
+2 -1.200000 -4.130000 0.551100 0.351100 MLC12
+3 -2.220000 -4.270000 0.551100 0.351100 MLC13
+4 -2.820000 -4.710000 0.551100 0.351100 MLC14
+5 -3.340000 -5.230000 0.551100 0.351100 MLC15
+6 -0.820000 -4.550000 0.551100 0.351100 MLC21
+7 -1.620000 -4.570000 0.551100 0.351100 MLC22
+8 -2.160000 -4.970000 0.551100 0.351100 MLC23
+9 -2.640000 -5.370000 0.551100 0.351100 MLC24
+10 -1.270000 -5.050000 0.551100 0.351100 MLC31
+11 -1.780000 -5.450000 0.551100 0.351100 MLC32
+12 -1.300000 -5.930000 0.551100 0.351100 MLC33
+13 -0.440000 -5.050000 0.551100 0.351100 MLC41
+14 -0.820000 -5.530000 0.551100 0.351100 MLC42
+15 -0.400000 -6.010000 0.551100 0.351100 MLC43
+16 -1.170000 -2.010000 0.551100 0.351100 MLF11
+17 -2.260000 -2.230000 0.551100 0.351100 MLF12
+18 -0.490000 -2.300000 0.551100 0.351100 MLF21
+19 -1.540000 -2.470000 0.551100 0.351100 MLF22
+20 -2.540000 -2.750000 0.551100 0.351100 MLF23
+21 -1.000000 -2.750000 0.551100 0.351100 MLF31
+22 -1.950000 -2.980000 0.551100 0.351100 MLF32
+23 -2.780000 -3.300000 0.551100 0.351100 MLF33
+24 -3.440000 -3.770000 0.551100 0.351100 MLF34
+25 -0.450000 -3.100000 0.551100 0.351100 MLF41
+26 -1.380000 -3.260000 0.551100 0.351100 MLF42
+27 -2.280000 -3.570000 0.551100 0.351100 MLF43
+28 -2.870000 -4.060000 0.551100 0.351100 MLF44
+29 -3.500000 -4.510000 0.551100 0.351100 MLF45
+30 -0.850000 -3.580000 0.551100 0.351100 MLF51
+31 -1.700000 -3.790000 0.551100 0.351100 MLF52
+32 -0.470000 -7.690000 0.551100 0.351100 MLO11
+33 -1.650000 -7.420000 0.551100 0.351100 MLO12
+34 -1.210000 -7.930000 0.551100 0.351100 MLO21
+35 -2.350000 -7.580000 0.551100 0.351100 MLO22
+36 -0.600000 -8.400000 0.551100 0.351100 MLO31
+37 -1.920000 -8.120000 0.551100 0.351100 MLO32
+38 -3.110000 -7.670000 0.551100 0.351100 MLO33
+39 -1.400000 -8.560000 0.551100 0.351100 MLO41
+40 -2.750000 -8.210000 0.551100 0.351100 MLO42
+41 -3.910000 -7.620000 0.551100 0.351100 MLO43
+42 -0.840000 -6.390000 0.551100 0.351100 MLP11
+43 -1.710000 -6.320000 0.551100 0.351100 MLP12
+44 -2.240000 -5.870000 0.551100 0.351100 MLP13
+45 -0.440000 -6.900000 0.551100 0.351100 MLP21
+46 -1.220000 -6.760000 0.551100 0.351100 MLP22
+47 -0.970000 -7.220000 0.551100 0.351100 MLP31
+48 -1.900000 -6.880000 0.551100 0.351100 MLP32
+49 -2.470000 -6.390000 0.551100 0.351100 MLP33
+50 -2.990000 -5.850000 0.551100 0.351100 MLP34
+51 -3.420000 -3.120000 0.551100 0.351100 MLT11
+52 -4.100000 -4.200000 0.551100 0.351100 MLT12
+53 -4.040000 -5.030000 0.551100 0.351100 MLT13
+54 -3.780000 -5.770000 0.551100 0.351100 MLT14
+55 -3.210000 -6.440000 0.551100 0.351100 MLT15
+56 -2.570000 -7.010000 0.551100 0.351100 MLT16
+57 -3.320000 -2.550000 0.551100 0.351100 MLT21
+58 -4.260000 -3.520000 0.551100 0.351100 MLT22
+59 -4.720000 -4.710000 0.551100 0.351100 MLT23
+60 -4.520000 -5.590000 0.551100 0.351100 MLT24
+61 -4.040000 -6.350000 0.551100 0.351100 MLT25
+62 -3.280000 -7.060000 0.551100 0.351100 MLT26
+63 -4.340000 -2.900000 0.551100 0.351100 MLT31
+64 -5.040000 -4.050000 0.551100 0.351100 MLT32
+65 -5.200000 -5.210000 0.551100 0.351100 MLT33
+66 -4.820000 -6.140000 0.551100 0.351100 MLT34
+67 -4.090000 -7.000000 0.551100 0.351100 MLT35
+68 -5.210000 -3.450000 0.551100 0.351100 MLT41
+69 -5.640000 -4.620000 0.551100 0.351100 MLT42
+70 -5.500000 -5.730000 0.551100 0.351100 MLT43
+71 -4.910000 -6.720000 0.551100 0.351100 MLT44
+72 0.410000 -4.000000 0.551100 0.351100 MRC11
+73 1.170000 -4.130000 0.551100 0.351100 MRC12
+74 2.200000 -4.270000 0.551100 0.351100 MRC13
+75 2.800000 -4.710000 0.551100 0.351100 MRC14
+76 3.320000 -5.230000 0.551100 0.351100 MRC15
+77 0.800000 -4.560000 0.551100 0.351100 MRC21
+78 1.600000 -4.570000 0.551100 0.351100 MRC22
+79 2.140000 -4.970000 0.551100 0.351100 MRC23
+80 2.620000 -5.370000 0.551100 0.351100 MRC24
+81 1.260000 -5.050000 0.551100 0.351100 MRC31
+82 1.760000 -5.450000 0.551100 0.351100 MRC32
+83 1.280000 -5.930000 0.551100 0.351100 MRC33
+84 0.420000 -5.050000 0.551100 0.351100 MRC41
+85 0.810000 -5.540000 0.551100 0.351100 MRC42
+86 0.380000 -6.010000 0.551100 0.351100 MRC43
+87 1.130000 -2.010000 0.551100 0.351100 MRF11
+88 2.240000 -2.230000 0.551100 0.351100 MRF12
+89 0.460000 -2.290000 0.551100 0.351100 MRF21
+90 1.510000 -2.470000 0.551100 0.351100 MRF22
+91 2.520000 -2.740000 0.551100 0.351100 MRF23
+92 0.970000 -2.740000 0.551100 0.351100 MRF31
+93 1.920000 -2.980000 0.551100 0.351100 MRF32
+94 2.760000 -3.300000 0.551100 0.351100 MRF33
+95 3.420000 -3.770000 0.551100 0.351100 MRF34
+96 0.420000 -3.100000 0.551100 0.351100 MRF41
+97 1.360000 -3.260000 0.551100 0.351100 MRF42
+98 2.260000 -3.570000 0.551100 0.351100 MRF43
+99 2.840000 -4.050000 0.551100 0.351100 MRF44
+100 3.480000 -4.510000 0.551100 0.351100 MRF45
+101 0.820000 -3.580000 0.551100 0.351100 MRF51
+102 1.670000 -3.790000 0.551100 0.351100 MRF52
+103 0.470000 -7.690000 0.551100 0.351100 MRO11
+104 1.640000 -7.420000 0.551100 0.351100 MRO12
+105 1.200000 -7.930000 0.551100 0.351100 MRO21
+106 2.350000 -7.580000 0.551100 0.351100 MRO22
+107 0.580000 -8.390000 0.551100 0.351100 MRO31
+108 1.910000 -8.110000 0.551100 0.351100 MRO32
+109 3.110000 -7.670000 0.551100 0.351100 MRO33
+110 1.380000 -8.570000 0.551100 0.351100 MRO41
+111 2.750000 -8.220000 0.551100 0.351100 MRO42
+112 3.900000 -7.610000 0.551100 0.351100 MRO43
+113 0.820000 -6.380000 0.551100 0.351100 MRP11
+114 1.700000 -6.320000 0.551100 0.351100 MRP12
+115 2.220000 -5.870000 0.551100 0.351100 MRP13
+116 0.420000 -6.900000 0.551100 0.351100 MRP21
+117 1.200000 -6.750000 0.551100 0.351100 MRP22
+118 0.960000 -7.220000 0.551100 0.351100 MRP31
+119 1.880000 -6.870000 0.551100 0.351100 MRP32
+120 2.470000 -6.390000 0.551100 0.351100 MRP33
+121 2.990000 -5.850000 0.551100 0.351100 MRP34
+122 3.390000 -3.120000 0.551100 0.351100 MRT11
+123 4.070000 -4.190000 0.551100 0.351100 MRT12
+124 4.020000 -5.030000 0.551100 0.351100 MRT13
+125 3.760000 -5.770000 0.551100 0.351100 MRT14
+126 3.200000 -6.430000 0.551100 0.351100 MRT15
+127 2.570000 -7.010000 0.551100 0.351100 MRT16
+128 3.300000 -2.540000 0.551100 0.351100 MRT21
+129 4.230000 -3.510000 0.551100 0.351100 MRT22
+130 4.700000 -4.710000 0.551100 0.351100 MRT23
+131 4.500000 -5.590000 0.551100 0.351100 MRT24
+132 4.020000 -6.360000 0.551100 0.351100 MRT25
+133 3.260000 -7.060000 0.551100 0.351100 MRT26
+134 4.310000 -2.900000 0.551100 0.351100 MRT31
+135 5.020000 -4.050000 0.551100 0.351100 MRT32
+136 5.180000 -5.210000 0.551100 0.351100 MRT33
+137 4.800000 -6.140000 0.551100 0.351100 MRT34
+138 4.080000 -7.000000 0.551100 0.351100 MRT35
+139 5.200000 -3.450000 0.551100 0.351100 MRT41
+140 5.620000 -4.610000 0.551100 0.351100 MRT42
+141 5.480000 -5.730000 0.551100 0.351100 MRT43
+142 4.900000 -6.710000 0.551100 0.351100 MRT44
+143 0.000000 -4.510000 0.551100 0.351100 MZC01
+144 0.000000 -5.550000 0.551100 0.351100 MZC02
+145 0.000000 -1.930000 0.551100 0.351100 MZF01
+146 0.000000 -2.660000 0.551100 0.351100 MZF02
+147 0.000000 -3.510000 0.551100 0.351100 MZF03
+148 0.000000 -8.050000 0.551100 0.351100 MZO01
+149 0.000000 -8.660000 0.551100 0.351100 MZO02
+150 0.000000 -6.470000 0.551100 0.351100 MZP01
+151 0.000000 -7.290000 0.551100 0.351100 MZP02
+152 5.000000 -2.000000 0.551100 0.351100 SCALE
+153 -5.50000 -1.500000 0.551100 0.351100 COMNT
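
CTF151.lay illustrates the FieldTrip-style .lay variant: unlike .lout there
is no bounding-box header line, and the list may end with the non-sensor
SCALE and COMNT placeholder rows that plotting code uses for the scale bar
and the comment box. A sketch that parses the format and drops those
placeholders (read_lay is a hypothetical helper written under exactly those
assumptions):

    def read_lay(fname):
        # hypothetical reader: every line is "id x y width height name"
        entries = []
        with open(fname) as fid:
            for line in fid:
                parts = line.split()
                if len(parts) < 6:
                    continue
                name = ' '.join(parts[5:])
                if name in ('SCALE', 'COMNT'):
                    continue  # plotting placeholders, not real channels
                entries.append((int(parts[0]),
                                tuple(float(v) for v in parts[1:5]), name))
        return entries
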
diff --git a/mne/layouts/CTF275.lay b/mne/layouts/CTF275.lay
new file mode 100644
index 0000000..2af28d3
--- /dev/null
+++ b/mne/layouts/CTF275.lay
@@ -0,0 +1,275 @@
+1 -0.029414 0.428191 0.100000 0.040000 MLC11
+2 -0.105398 0.378716 0.100000 0.040000 MLC12
+3 -0.187924 0.341472 0.100000 0.040000 MLC13
+4 -0.268071 0.285079 0.100000 0.040000 MLC14
+5 -0.330692 0.221374 0.100000 0.040000 MLC15
+6 -0.378697 0.144627 0.100000 0.040000 MLC16
+7 -0.411309 0.049716 0.100000 0.040000 MLC17
+8 -0.112105 0.295427 0.100000 0.040000 MLC21
+9 -0.189457 0.259287 0.100000 0.040000 MLC22
+10 -0.254180 0.203140 0.100000 0.040000 MLC23
+11 -0.298355 0.137997 0.100000 0.040000 MLC24
+12 -0.337649 0.050767 0.100000 0.040000 MLC25
+13 -0.213750 0.138862 0.100000 0.040000 MLC31
+14 -0.266243 0.056433 0.100000 0.040000 MLC32
+15 -0.150010 0.191395 0.100000 0.040000 MLC41
+16 -0.188739 0.067511 0.100000 0.040000 MLC42
+17 -0.027405 0.285532 0.100000 0.040000 MLC51
+18 -0.072194 0.217381 0.100000 0.040000 MLC52
+19 -0.130467 0.119358 0.100000 0.040000 MLC53
+20 -0.119656 0.041473 0.100000 0.040000 MLC54
+21 -0.083927 -0.021961 0.100000 0.040000 MLC55
+22 -0.027810 0.155198 0.100000 0.040000 MLC61
+23 -0.062042 0.088583 0.100000 0.040000 MLC62
+24 -0.025587 0.023975 0.100000 0.040000 MLC63
+25 -0.154623 0.879985 0.100000 0.040000 MLF11
+26 -0.322264 0.823233 0.100000 0.040000 MLF12
+27 -0.478342 0.740223 0.100000 0.040000 MLF13
+28 -0.622338 0.633371 0.100000 0.040000 MLF14
+29 -0.052995 0.810917 0.100000 0.040000 MLF21
+30 -0.193258 0.778479 0.100000 0.040000 MLF22
+31 -0.319702 0.726613 0.100000 0.040000 MLF23
+32 -0.447065 0.639878 0.100000 0.040000 MLF24
+33 -0.551024 0.545805 0.100000 0.040000 MLF25
+34 -0.106993 0.717661 0.100000 0.040000 MLF31
+35 -0.227303 0.683510 0.100000 0.040000 MLF32
+36 -0.344973 0.613898 0.100000 0.040000 MLF33
+37 -0.437794 0.535071 0.100000 0.040000 MLF34
+38 -0.516944 0.440135 0.100000 0.040000 MLF35
+39 -0.037498 0.646457 0.100000 0.040000 MLF41
+40 -0.145663 0.629747 0.100000 0.040000 MLF42
+41 -0.257022 0.575998 0.100000 0.040000 MLF43
+42 -0.344741 0.511350 0.100000 0.040000 MLF44
+43 -0.434608 0.430669 0.100000 0.040000 MLF45
+44 -0.512928 0.325699 0.100000 0.040000 MLF46
+45 -0.065241 0.564676 0.100000 0.040000 MLF51
+46 -0.176866 0.530203 0.100000 0.040000 MLF52
+47 -0.264799 0.476609 0.100000 0.040000 MLF53
+48 -0.344149 0.409817 0.100000 0.040000 MLF54
+49 -0.432009 0.328939 0.100000 0.040000 MLF55
+50 -0.502082 0.225317 0.100000 0.040000 MLF56
+51 -0.108196 0.473300 0.100000 0.040000 MLF61
+52 -0.191454 0.428184 0.100000 0.040000 MLF62
+53 -0.268505 0.371569 0.100000 0.040000 MLF63
+54 -0.343162 0.314227 0.100000 0.040000 MLF64
+55 -0.415355 0.241209 0.100000 0.040000 MLF65
+56 -0.459435 0.157639 0.100000 0.040000 MLF66
+57 -0.484998 0.050963 0.100000 0.040000 MLF67
+58 -0.086701 -0.382545 0.100000 0.040000 MLO11
+59 -0.173621 -0.361571 0.100000 0.040000 MLO12
+60 -0.257557 -0.329066 0.100000 0.040000 MLO13
+61 -0.337129 -0.278810 0.100000 0.040000 MLO14
+62 -0.050176 -0.456757 0.100000 0.040000 MLO21
+63 -0.138937 -0.440153 0.100000 0.040000 MLO22
+64 -0.234625 -0.414329 0.100000 0.040000 MLO23
+65 -0.323700 -0.370345 0.100000 0.040000 MLO24
+66 -0.099528 -0.519048 0.100000 0.040000 MLO31
+67 -0.201576 -0.499713 0.100000 0.040000 MLO32
+68 -0.300736 -0.464088 0.100000 0.040000 MLO33
+69 -0.395767 -0.412426 0.100000 0.040000 MLO34
+70 -0.054171 -0.598130 0.100000 0.040000 MLO41
+71 -0.162924 -0.587463 0.100000 0.040000 MLO42
+72 -0.270457 -0.559057 0.100000 0.040000 MLO43
+73 -0.375045 -0.514503 0.100000 0.040000 MLO44
+74 -0.114841 -0.674066 0.100000 0.040000 MLO51
+75 -0.232779 -0.654920 0.100000 0.040000 MLO52
+76 -0.347032 -0.617457 0.100000 0.040000 MLO53
+77 -0.050706 -0.086860 0.100000 0.040000 MLP11
+78 -0.157880 -0.022819 0.100000 0.040000 MLP12
+79 -0.027384 -0.156541 0.100000 0.040000 MLP21
+80 -0.125969 -0.090281 0.100000 0.040000 MLP22
+81 -0.229468 -0.007021 0.100000 0.040000 MLP23
+82 -0.063851 -0.221282 0.100000 0.040000 MLP31
+83 -0.117483 -0.164444 0.100000 0.040000 MLP32
+84 -0.191075 -0.130343 0.100000 0.040000 MLP33
+85 -0.256310 -0.076997 0.100000 0.040000 MLP34
+86 -0.301408 -0.017428 0.100000 0.040000 MLP35
+87 -0.145628 -0.236552 0.100000 0.040000 MLP41
+88 -0.211609 -0.201084 0.100000 0.040000 MLP42
+89 -0.277557 -0.161143 0.100000 0.040000 MLP43
+90 -0.330491 -0.093163 0.100000 0.040000 MLP44
+91 -0.372987 -0.024823 0.100000 0.040000 MLP45
+92 -0.032003 -0.311166 0.100000 0.040000 MLP51
+93 -0.120201 -0.309697 0.100000 0.040000 MLP52
+94 -0.197411 -0.282930 0.100000 0.040000 MLP53
+95 -0.273221 -0.242434 0.100000 0.040000 MLP54
+96 -0.341326 -0.192353 0.100000 0.040000 MLP55
+97 -0.397869 -0.117824 0.100000 0.040000 MLP56
+98 -0.439023 -0.040798 0.100000 0.040000 MLP57
+99 -0.600517 0.341742 0.100000 0.040000 MLT11
+100 -0.583854 0.221014 0.100000 0.040000 MLT12
+101 -0.546672 0.118228 0.100000 0.040000 MLT13
+102 -0.525679 -0.043954 0.100000 0.040000 MLT14
+103 -0.482366 -0.132402 0.100000 0.040000 MLT15
+104 -0.408785 -0.217740 0.100000 0.040000 MLT16
+105 -0.657080 0.441193 0.100000 0.040000 MLT21
+106 -0.681569 0.225254 0.100000 0.040000 MLT22
+107 -0.647357 0.101107 0.100000 0.040000 MLT23
+108 -0.618158 -0.017119 0.100000 0.040000 MLT24
+109 -0.570925 -0.147553 0.100000 0.040000 MLT25
+110 -0.505869 -0.237678 0.100000 0.040000 MLT26
+111 -0.406336 -0.310886 0.100000 0.040000 MLT27
+112 -0.758025 0.508412 0.100000 0.040000 MLT31
+113 -0.761740 0.316423 0.100000 0.040000 MLT32
+114 -0.751268 0.088675 0.100000 0.040000 MLT33
+115 -0.712573 -0.047448 0.100000 0.040000 MLT34
+116 -0.658112 -0.159355 0.100000 0.040000 MLT35
+117 -0.592395 -0.256839 0.100000 0.040000 MLT36
+118 -0.495312 -0.345113 0.100000 0.040000 MLT37
+119 -0.885393 0.353401 0.100000 0.040000 MLT41
+120 -0.847844 0.160648 0.100000 0.040000 MLT42
+121 -0.823787 -0.043736 0.100000 0.040000 MLT43
+122 -0.758805 -0.175411 0.100000 0.040000 MLT44
+123 -0.684634 -0.280647 0.100000 0.040000 MLT45
+124 -0.591783 -0.373867 0.100000 0.040000 MLT46
+125 -0.476572 -0.454666 0.100000 0.040000 MLT47
+126 -0.983285 0.161080 0.100000 0.040000 MLT51
+127 -0.944753 -0.028756 0.100000 0.040000 MLT52
+128 -0.872989 -0.188195 0.100000 0.040000 MLT53
+129 -0.785517 -0.310620 0.100000 0.040000 MLT54
+130 -0.688014 -0.407791 0.100000 0.040000 MLT55
+131 -0.571347 -0.497554 0.100000 0.040000 MLT56
+132 -0.457303 -0.565438 0.100000 0.040000 MLT57
+133 0.063389 0.426606 0.100000 0.040000 MRC11
+134 0.137902 0.375428 0.100000 0.040000 MRC12
+135 0.219516 0.336386 0.100000 0.040000 MRC13
+136 0.297688 0.277771 0.100000 0.040000 MRC14
+137 0.355955 0.213304 0.100000 0.040000 MRC15
+138 0.404150 0.135598 0.100000 0.040000 MRC16
+139 0.434870 0.040656 0.100000 0.040000 MRC17
+140 0.142678 0.292126 0.100000 0.040000 MRC21
+141 0.219470 0.254066 0.100000 0.040000 MRC22
+142 0.281922 0.196472 0.100000 0.040000 MRC23
+143 0.325059 0.128269 0.100000 0.040000 MRC24
+144 0.361805 0.044213 0.100000 0.040000 MRC25
+145 0.240157 0.132538 0.100000 0.040000 MRC31
+146 0.290750 0.048681 0.100000 0.040000 MRC32
+147 0.178346 0.187415 0.100000 0.040000 MRC41
+148 0.213493 0.062545 0.100000 0.040000 MRC42
+149 0.058440 0.284194 0.100000 0.040000 MRC51
+150 0.101359 0.215083 0.100000 0.040000 MRC52
+151 0.156968 0.115486 0.100000 0.040000 MRC53
+152 0.144211 0.038238 0.100000 0.040000 MRC54
+153 0.106635 -0.024115 0.100000 0.040000 MRC55
+154 0.055338 0.153928 0.100000 0.040000 MRC61
+155 0.088138 0.086634 0.100000 0.040000 MRC62
+156 0.049557 0.022680 0.100000 0.040000 MRC63
+157 0.197726 0.874477 0.100000 0.040000 MRF11
+158 0.364689 0.811426 0.100000 0.040000 MRF12
+159 0.518245 0.722181 0.100000 0.040000 MRF13
+160 0.658136 0.611411 0.100000 0.040000 MRF14
+161 0.095713 0.807816 0.100000 0.040000 MRF21
+162 0.233999 0.772267 0.100000 0.040000 MRF22
+163 0.358821 0.715911 0.100000 0.040000 MRF23
+164 0.484765 0.623142 0.100000 0.040000 MRF24
+165 0.585405 0.526324 0.100000 0.040000 MRF25
+166 0.147633 0.713396 0.100000 0.040000 MRF31
+167 0.265823 0.676341 0.100000 0.040000 MRF32
+168 0.382256 0.601823 0.100000 0.040000 MRF33
+169 0.473850 0.521768 0.100000 0.040000 MRF34
+170 0.548726 0.424836 0.100000 0.040000 MRF35
+171 0.075451 0.644959 0.100000 0.040000 MRF41
+172 0.182924 0.624842 0.100000 0.040000 MRF42
+173 0.292900 0.568899 0.100000 0.040000 MRF43
+174 0.379529 0.501620 0.100000 0.040000 MRF44
+175 0.465778 0.418231 0.100000 0.040000 MRF45
+176 0.541913 0.311405 0.100000 0.040000 MRF46
+177 0.102375 0.561860 0.100000 0.040000 MRF51
+178 0.212879 0.524802 0.100000 0.040000 MRF52
+179 0.299077 0.468924 0.100000 0.040000 MRF53
+180 0.376186 0.400507 0.100000 0.040000 MRF54
+181 0.461150 0.316311 0.100000 0.040000 MRF55
+182 0.527532 0.213125 0.100000 0.040000 MRF56
+183 0.143360 0.469857 0.100000 0.040000 MRF61
+184 0.224730 0.422291 0.100000 0.040000 MRF62
+185 0.301012 0.364856 0.100000 0.040000 MRF63
+186 0.373056 0.305526 0.100000 0.040000 MRF64
+187 0.443172 0.230008 0.100000 0.040000 MRF65
+188 0.482916 0.144546 0.100000 0.040000 MRF66
+189 0.509363 0.039864 0.100000 0.040000 MRF67
+190 0.101312 -0.384464 0.100000 0.040000 MRO11
+191 0.188777 -0.365285 0.100000 0.040000 MRO12
+192 0.274286 -0.333994 0.100000 0.040000 MRO13
+193 0.354824 -0.285987 0.100000 0.040000 MRO14
+194 0.062633 -0.457476 0.100000 0.040000 MRO21
+195 0.152570 -0.440791 0.100000 0.040000 MRO22
+196 0.248565 -0.418432 0.100000 0.040000 MRO23
+197 0.338845 -0.376241 0.100000 0.040000 MRO24
+198 0.111160 -0.521375 0.100000 0.040000 MRO31
+199 0.212466 -0.502957 0.100000 0.040000 MRO32
+200 0.313063 -0.468465 0.100000 0.040000 MRO33
+201 0.409385 -0.418933 0.100000 0.040000 MRO34
+202 0.063270 -0.599845 0.100000 0.040000 MRO41
+203 0.172480 -0.589865 0.100000 0.040000 MRO42
+204 0.279919 -0.563495 0.100000 0.040000 MRO43
+205 0.386742 -0.520993 0.100000 0.040000 MRO44
+206 0.121969 -0.676100 0.100000 0.040000 MRO51
+207 0.240331 -0.658743 0.100000 0.040000 MRO52
+208 0.356156 -0.623026 0.100000 0.040000 MRO53
+209 0.071855 -0.088269 0.100000 0.040000 MRP11
+210 0.180874 -0.026656 0.100000 0.040000 MRP12
+211 0.047839 -0.157479 0.100000 0.040000 MRP21
+212 0.147221 -0.093053 0.100000 0.040000 MRP22
+213 0.252807 -0.012686 0.100000 0.040000 MRP23
+214 0.082012 -0.222790 0.100000 0.040000 MRP31
+215 0.136825 -0.166819 0.100000 0.040000 MRP32
+216 0.210796 -0.134697 0.100000 0.040000 MRP33
+217 0.277587 -0.083946 0.100000 0.040000 MRP34
+218 0.322867 -0.024718 0.100000 0.040000 MRP35
+219 0.162954 -0.240118 0.100000 0.040000 MRP41
+220 0.230510 -0.205793 0.100000 0.040000 MRP42
+221 0.296283 -0.169213 0.100000 0.040000 MRP43
+222 0.351532 -0.101316 0.100000 0.040000 MRP44
+223 0.395383 -0.032706 0.100000 0.040000 MRP45
+224 0.048690 -0.312307 0.100000 0.040000 MRP51
+225 0.137008 -0.312230 0.100000 0.040000 MRP52
+226 0.214275 -0.287336 0.100000 0.040000 MRP53
+227 0.290637 -0.248388 0.100000 0.040000 MRP54
+228 0.360555 -0.199475 0.100000 0.040000 MRP55
+229 0.419086 -0.126737 0.100000 0.040000 MRP56
+230 0.463976 -0.050387 0.100000 0.040000 MRP57
+231 0.628409 0.323946 0.100000 0.040000 MRT11
+232 0.609835 0.205866 0.100000 0.040000 MRT12
+233 0.571838 0.105198 0.100000 0.040000 MRT13
+234 0.544252 -0.054539 0.100000 0.040000 MRT14
+235 0.500732 -0.143104 0.100000 0.040000 MRT15
+236 0.427582 -0.225716 0.100000 0.040000 MRT16
+237 0.685440 0.421411 0.100000 0.040000 MRT21
+238 0.705800 0.208084 0.100000 0.040000 MRT22
+239 0.667392 0.088109 0.100000 0.040000 MRT23
+240 0.637062 -0.030086 0.100000 0.040000 MRT24
+241 0.588417 -0.159092 0.100000 0.040000 MRT25
+242 0.522350 -0.247039 0.100000 0.040000 MRT26
+243 0.422093 -0.318167 0.100000 0.040000 MRT27
+244 0.789789 0.482334 0.100000 0.040000 MRT31
+245 0.786599 0.293212 0.100000 0.040000 MRT32
+246 0.770320 0.070984 0.100000 0.040000 MRT33
+247 0.731214 -0.061690 0.100000 0.040000 MRT34
+248 0.674802 -0.172109 0.100000 0.040000 MRT35
+249 0.607500 -0.268226 0.100000 0.040000 MRT36
+250 0.510484 -0.353209 0.100000 0.040000 MRT37
+251 0.910695 0.324672 0.100000 0.040000 MRT41
+252 0.867982 0.137317 0.100000 0.040000 MRT42
+253 0.839920 -0.060661 0.100000 0.040000 MRT43
+254 0.773256 -0.189639 0.100000 0.040000 MRT44
+255 0.698444 -0.293384 0.100000 0.040000 MRT45
+256 0.604482 -0.385347 0.100000 0.040000 MRT46
+257 0.489291 -0.462983 0.100000 0.040000 MRT47
+258 1.000000 0.135648 0.100000 0.040000 MRT51
+259 0.959092 -0.049055 0.100000 0.040000 MRT52
+260 0.886964 -0.204289 0.100000 0.040000 MRT53
+261 0.796842 -0.324881 0.100000 0.040000 MRT54
+262 0.698769 -0.420596 0.100000 0.040000 MRT55
+263 0.582500 -0.506810 0.100000 0.040000 MRT56
+264 0.467934 -0.572706 0.100000 0.040000 MRT57
+265 0.016063 0.355556 0.100000 0.040000 MZC01
+266 0.014747 0.217488 0.100000 0.040000 MZC02
+267 0.013199 0.087763 0.100000 0.040000 MZC03
+268 0.011197 -0.046263 0.100000 0.040000 MZC04
+269 0.022267 0.897778 0.100000 0.040000 MZF01
+270 0.019840 0.730557 0.100000 0.040000 MZF02
+271 0.017559 0.517279 0.100000 0.040000 MZF03
+272 0.007392 -0.378522 0.100000 0.040000 MZO01
+273 0.005634 -0.528155 0.100000 0.040000 MZO02
+274 0.003722 -0.675585 0.100000 0.040000 MZO03
+275 0.008864 -0.248776 0.100000 0.040000 MZP01
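
Note the units: CTF275.lay stores positions in roughly normalized
coordinates (|x| and |y| on the order of 1), whereas CTF-275.lout earlier in
this commit uses centimetre-scale values for the same 275 sensors. Plotting
code therefore typically rescales whatever it reads into the unit square
before drawing; a common-convention sketch, not the upstream routine:

    import numpy as np

    def scale_to_unit_square(pos):
        # shift and scale the x, y columns of an (n, 4) position array
        # (x, y, width, height per row) into [0, 1] x [0, 1]
        pos = np.asarray(pos, float).copy()
        pos[:, :2] -= pos[:, :2].min(axis=0)
        pos[:, :2] /= pos[:, :2].max(axis=0)
        return pos
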
diff --git a/mne/layouts/Vectorview-all.lout b/mne/layouts/Vectorview-all.lout
new file mode 100755
index 0000000..b6395fb
--- /dev/null
+++ b/mne/layouts/Vectorview-all.lout
@@ -0,0 +1,307 @@
+-85.000000 90.000000 -83.000000 75.000000
+113	-73.416206	33.416687	6.000000	5.000000	MEG 0113
+112	-73.416206	38.416687	6.000000	5.000000	MEG 0112
+111	-67.416206	35.916687	6.000000	5.000000	MEG 0111
+122	-59.602242	38.489067	6.000000	5.000000	MEG 0122
+123	-59.602242	43.489067	6.000000	5.000000	MEG 0123
+121	-53.602242	40.989067	6.000000	5.000000	MEG 0121
+132	-68.018288	18.676970	6.000000	5.000000	MEG 0132
+133	-68.018288	23.676970	6.000000	5.000000	MEG 0133
+131	-62.018288	21.176970	6.000000	5.000000	MEG 0131
+143	-80.582848	8.095787	6.000000	5.000000	MEG 0143
+142	-80.582848	13.095787	6.000000	5.000000	MEG 0142
+141	-74.582848	10.595787	6.000000	5.000000	MEG 0141
+213	-56.595154	17.019251	6.000000	5.000000	MEG 0213
+212	-56.595154	22.019251	6.000000	5.000000	MEG 0212
+211	-50.595154	19.519251	6.000000	5.000000	MEG 0211
+222	-44.599728	17.543873	6.000000	5.000000	MEG 0222
+223	-44.599728	22.543873	6.000000	5.000000	MEG 0223
+221	-38.599728	20.043873	6.000000	5.000000	MEG 0221
+232	-47.416420	-0.216784	6.000000	5.000000	MEG 0232
+233	-47.416420	4.783216	6.000000	5.000000	MEG 0233
+231	-41.416420	2.283216	6.000000	5.000000	MEG 0231
+243	-59.280643	-2.761772	6.000000	5.000000	MEG 0243
+242	-59.280643	2.238228	6.000000	5.000000	MEG 0242
+241	-53.280643	-0.261772	6.000000	5.000000	MEG 0241
+313	-39.790501	47.430138	6.000000	5.000000	MEG 0313
+312	-39.790501	52.430138	6.000000	5.000000	MEG 0312
+311	-33.790501	49.930138	6.000000	5.000000	MEG 0311
+322	-38.014336	32.768585	6.000000	5.000000	MEG 0322
+323	-38.014336	37.768585	6.000000	5.000000	MEG 0323
+321	-32.014336	35.268585	6.000000	5.000000	MEG 0321
+333	-27.679966	28.868065	6.000000	5.000000	MEG 0333
+332	-27.679966	33.868065	6.000000	5.000000	MEG 0332
+331	-21.679966	31.368065	6.000000	5.000000	MEG 0331
+343	-49.684467	34.078434	6.000000	5.000000	MEG 0343
+342	-49.684467	39.078434	6.000000	5.000000	MEG 0342
+341	-43.684467	36.578434	6.000000	5.000000	MEG 0341
+413	-32.997990	15.607347	6.000000	5.000000	MEG 0413
+412	-32.997990	20.607347	6.000000	5.000000	MEG 0412
+411	-26.997990	18.107347	6.000000	5.000000	MEG 0411
+422	-21.084751	13.953575	6.000000	5.000000	MEG 0422
+423	-21.084751	18.953575	6.000000	5.000000	MEG 0423
+421	-15.084751	16.453575	6.000000	5.000000	MEG 0421
+432	-21.930935	-0.085500	6.000000	5.000000	MEG 0432
+433	-21.930935	4.914500	6.000000	5.000000	MEG 0433
+431	-15.930935	2.414500	6.000000	5.000000	MEG 0431
+443	-34.824663	0.362587	6.000000	5.000000	MEG 0443
+442	-34.824663	5.362587	6.000000	5.000000	MEG 0442
+441	-28.824663	2.862587	6.000000	5.000000	MEG 0441
+513	-27.861498	55.439636	6.000000	5.000000	MEG 0513
+512	-27.861498	60.439636	6.000000	5.000000	MEG 0512
+511	-21.861498	57.939636	6.000000	5.000000	MEG 0511
+523	-15.506709	59.619865	6.000000	5.000000	MEG 0523
+522	-15.506709	64.619865	6.000000	5.000000	MEG 0522
+521	-9.506709	62.119865	6.000000	5.000000	MEG 0521
+532	-14.616095	49.308380	6.000000	5.000000	MEG 0532
+533	-14.616095	54.308380	6.000000	5.000000	MEG 0533
+531	-8.616095	51.808380	6.000000	5.000000	MEG 0531
+542	-27.240477	43.863430	6.000000	5.000000	MEG 0542
+543	-27.240477	48.863430	6.000000	5.000000	MEG 0543
+541	-21.240477	46.363430	6.000000	5.000000	MEG 0541
+613	-14.782405	38.147827	6.000000	5.000000	MEG 0613
+612	-14.782405	43.147827	6.000000	5.000000	MEG 0612
+611	-8.782405	40.647827	6.000000	5.000000	MEG 0611
+622	-2.967276	27.260933	6.000000	5.000000	MEG 0622
+623	-2.967276	32.260933	6.000000	5.000000	MEG 0623
+621	3.032724	29.760933	6.000000	5.000000	MEG 0621
+633	-9.094766	14.700909	6.000000	5.000000	MEG 0633
+632	-9.094766	19.700909	6.000000	5.000000	MEG 0632
+631	-3.094766	17.200909	6.000000	5.000000	MEG 0631
+642	-15.199021	26.631405	6.000000	5.000000	MEG 0642
+643	-15.199021	31.631405	6.000000	5.000000	MEG 0643
+641	-9.199021	29.131405	6.000000	5.000000	MEG 0641
+713	-9.246834	1.693846	6.000000	5.000000	MEG 0713
+712	-9.246834	6.693846	6.000000	5.000000	MEG 0712
+711	-3.246834	4.193846	6.000000	5.000000	MEG 0711
+723	3.314525	1.573887	6.000000	5.000000	MEG 0723
+722	3.314525	6.573887	6.000000	5.000000	MEG 0722
+721	9.314525	4.073887	6.000000	5.000000	MEG 0721
+733	3.387173	-10.588106	6.000000	5.000000	MEG 0733
+732	3.387173	-5.588106	6.000000	5.000000	MEG 0732
+731	9.387173	-8.088106	6.000000	5.000000	MEG 0731
+743	-9.422897	-10.519942	6.000000	5.000000	MEG 0743
+742	-9.422897	-5.519942	6.000000	5.000000	MEG 0742
+741	-3.422897	-8.019942	6.000000	5.000000	MEG 0741
+813	-2.962408	61.007698	6.000000	5.000000	MEG 0813
+812	-2.962408	66.007698	6.000000	5.000000	MEG 0812
+811	3.037592	63.507698	6.000000	5.000000	MEG 0811
+822	-2.965545	50.641838	6.000000	5.000000	MEG 0822
+823	-2.965545	55.641838	6.000000	5.000000	MEG 0823
+821	3.034455	53.141838	6.000000	5.000000	MEG 0821
+913	9.504830	59.655254	6.000000	5.000000	MEG 0913
+912	9.504830	64.655254	6.000000	5.000000	MEG 0912
+911	15.504830	62.155254	6.000000	5.000000	MEG 0911
+923	21.967310	55.408710	6.000000	5.000000	MEG 0923
+922	21.967310	60.408710	6.000000	5.000000	MEG 0922
+921	27.967310	57.908710	6.000000	5.000000	MEG 0921
+932	21.254196	43.889683	6.000000	5.000000	MEG 0932
+933	21.254196	48.889683	6.000000	5.000000	MEG 0933
+931	27.254196	46.389683	6.000000	5.000000	MEG 0931
+942	8.661931	49.358044	6.000000	5.000000	MEG 0942
+943	8.661931	54.358044	6.000000	5.000000	MEG 0943
+941	14.661931	51.858044	6.000000	5.000000	MEG 0941
+1013	-2.967087	39.669956	6.000000	5.000000	MEG 1013
+1012	-2.967087	44.669956	6.000000	5.000000	MEG 1012
+1011	3.032913	42.169956	6.000000	5.000000	MEG 1011
+1023	8.751018	38.154079	6.000000	5.000000	MEG 1023
+1022	8.751018	43.154079	6.000000	5.000000	MEG 1022
+1021	14.751018	40.654079	6.000000	5.000000	MEG 1021
+1032	9.123913	26.648697	6.000000	5.000000	MEG 1032
+1033	9.123913	31.648697	6.000000	5.000000	MEG 1033
+1031	15.123913	29.148697	6.000000	5.000000	MEG 1031
+1043	3.200539	14.795620	6.000000	5.000000	MEG 1043
+1042	3.200539	19.795620	6.000000	5.000000	MEG 1042
+1041	9.200539	17.295620	6.000000	5.000000	MEG 1041
+1112	15.014965	13.912239	6.000000	5.000000	MEG 1112
+1113	15.014965	18.912239	6.000000	5.000000	MEG 1113
+1111	21.014965	16.412239	6.000000	5.000000	MEG 1111
+1123	26.958527	15.562130	6.000000	5.000000	MEG 1123
+1122	26.958527	20.562130	6.000000	5.000000	MEG 1122
+1121	32.958527	18.062130	6.000000	5.000000	MEG 1121
+1133	28.757563	0.227141	6.000000	5.000000	MEG 1133
+1132	28.757563	5.227141	6.000000	5.000000	MEG 1132
+1131	34.757563	2.727141	6.000000	5.000000	MEG 1131
+1142	15.882982	0.037700	6.000000	5.000000	MEG 1142
+1143	15.882982	5.037700	6.000000	5.000000	MEG 1143
+1141	21.882982	2.537700	6.000000	5.000000	MEG 1141
+1213	33.958897	47.388790	6.000000	5.000000	MEG 1213
+1212	33.958897	52.388790	6.000000	5.000000	MEG 1212
+1211	39.958897	49.888790	6.000000	5.000000	MEG 1211
+1223	43.923473	33.914738	6.000000	5.000000	MEG 1223
+1222	43.923473	38.914738	6.000000	5.000000	MEG 1222
+1221	49.923473	36.414738	6.000000	5.000000	MEG 1221
+1232	32.014336	32.768585	6.000000	5.000000	MEG 1232
+1233	32.014336	37.768585	6.000000	5.000000	MEG 1233
+1231	38.014336	35.268585	6.000000	5.000000	MEG 1231
+1243	21.600079	28.898149	6.000000	5.000000	MEG 1243
+1242	21.600079	33.898149	6.000000	5.000000	MEG 1242
+1241	27.600079	31.398149	6.000000	5.000000	MEG 1241
+1312	38.599728	17.543867	6.000000	5.000000	MEG 1312
+1313	38.599728	22.543867	6.000000	5.000000	MEG 1313
+1311	44.599728	20.043867	6.000000	5.000000	MEG 1311
+1323	50.558392	16.887651	6.000000	5.000000	MEG 1323
+1322	50.558392	21.887651	6.000000	5.000000	MEG 1322
+1321	56.558392	19.387651	6.000000	5.000000	MEG 1321
+1333	53.420483	-2.919475	6.000000	5.000000	MEG 1333
+1332	53.420483	2.080525	6.000000	5.000000	MEG 1332
+1331	59.420483	-0.419475	6.000000	5.000000	MEG 1331
+1342	41.371586	-0.216817	6.000000	5.000000	MEG 1342
+1343	41.371586	4.783183	6.000000	5.000000	MEG 1343
+1341	47.371586	2.283183	6.000000	5.000000	MEG 1341
+1412	53.704369	38.563030	6.000000	5.000000	MEG 1412
+1413	53.704369	43.563030	6.000000	5.000000	MEG 1413
+1411	59.704369	41.063030	6.000000	5.000000	MEG 1411
+1423	67.119286	33.843739	6.000000	5.000000	MEG 1423
+1422	67.119286	38.843739	6.000000	5.000000	MEG 1422
+1421	73.119286	36.343739	6.000000	5.000000	MEG 1421
+1433	74.438919	8.335863	6.000000	5.000000	MEG 1433
+1432	74.438919	13.335863	6.000000	5.000000	MEG 1432
+1431	80.438919	10.835863	6.000000	5.000000	MEG 1431
+1442	61.883209	18.562304	6.000000	5.000000	MEG 1442
+1443	61.883209	23.562304	6.000000	5.000000	MEG 1443
+1441	67.883209	21.062304	6.000000	5.000000	MEG 1441
+1512	-71.298943	-4.707253	6.000000	5.000000	MEG 1512
+1513	-71.298943	0.292747	6.000000	5.000000	MEG 1513
+1511	-65.298943	-2.207253	6.000000	5.000000	MEG 1511
+1522	-67.281609	-25.407852	6.000000	5.000000	MEG 1522
+1523	-67.281609	-20.407852	6.000000	5.000000	MEG 1523
+1521	-61.281609	-22.907852	6.000000	5.000000	MEG 1521
+1533	-71.702820	-40.152336	6.000000	5.000000	MEG 1533
+1532	-71.702820	-35.152336	6.000000	5.000000	MEG 1532
+1531	-65.702820	-37.652336	6.000000	5.000000	MEG 1531
+1543	-79.907913	-17.418098	6.000000	5.000000	MEG 1543
+1542	-79.907913	-12.418098	6.000000	5.000000	MEG 1542
+1541	-73.907913	-14.918098	6.000000	5.000000	MEG 1541
+1613	-56.916454	-20.312164	6.000000	5.000000	MEG 1613
+1612	-56.916454	-15.312164	6.000000	5.000000	MEG 1612
+1611	-50.916454	-17.812164	6.000000	5.000000	MEG 1611
+1622	-45.631779	-16.320436	6.000000	5.000000	MEG 1622
+1623	-45.631779	-11.320436	6.000000	5.000000	MEG 1623
+1621	-39.631779	-13.820436	6.000000	5.000000	MEG 1621
+1632	-37.896103	-30.578358	6.000000	5.000000	MEG 1632
+1633	-37.896103	-25.578358	6.000000	5.000000	MEG 1633
+1631	-31.896103	-28.078358	6.000000	5.000000	MEG 1631
+1643	-48.859089	-36.176094	6.000000	5.000000	MEG 1643
+1642	-48.859089	-31.176094	6.000000	5.000000	MEG 1642
+1641	-42.859089	-33.676094	6.000000	5.000000	MEG 1641
+1713	-56.796040	-59.082275	6.000000	5.000000	MEG 1713
+1712	-56.796040	-54.082275	6.000000	5.000000	MEG 1712
+1711	-50.796040	-56.582275	6.000000	5.000000	MEG 1711
+1722	-57.188797	-44.057373	6.000000	5.000000	MEG 1722
+1723	-57.188797	-39.057373	6.000000	5.000000	MEG 1723
+1721	-51.188797	-41.557373	6.000000	5.000000	MEG 1721
+1732	-41.902962	-58.279526	6.000000	5.000000	MEG 1732
+1733	-41.902962	-53.279526	6.000000	5.000000	MEG 1733
+1731	-35.902962	-55.779526	6.000000	5.000000	MEG 1731
+1743	-37.408134	-72.449036	6.000000	5.000000	MEG 1743
+1742	-37.408134	-67.449036	6.000000	5.000000	MEG 1742
+1741	-31.408134	-69.949036	6.000000	5.000000	MEG 1741
+1813	-33.801163	-13.768716	6.000000	5.000000	MEG 1813
+1812	-33.801163	-8.768716	6.000000	5.000000	MEG 1812
+1811	-27.801163	-11.268716	6.000000	5.000000	MEG 1811
+1822	-21.685101	-12.619589	6.000000	5.000000	MEG 1822
+1823	-21.685101	-7.619589	6.000000	5.000000	MEG 1823
+1821	-15.685101	-10.119589	6.000000	5.000000	MEG 1821
+1832	-9.600111	-22.190945	6.000000	5.000000	MEG 1832
+1833	-9.600111	-17.190945	6.000000	5.000000	MEG 1833
+1831	-3.600111	-19.690945	6.000000	5.000000	MEG 1831
+1843	-24.483526	-26.850609	6.000000	5.000000	MEG 1843
+1842	-24.483526	-21.850609	6.000000	5.000000	MEG 1842
+1841	-18.483526	-24.350609	6.000000	5.000000	MEG 1841
+1912	-25.866816	-40.850040	6.000000	5.000000	MEG 1912
+1913	-25.866816	-35.850040	6.000000	5.000000	MEG 1913
+1911	-19.866816	-38.350040	6.000000	5.000000	MEG 1911
+1923	-20.513481	-56.355225	6.000000	5.000000	MEG 1923
+1922	-20.513481	-51.355225	6.000000	5.000000	MEG 1922
+1921	-14.513481	-53.855225	6.000000	5.000000	MEG 1921
+1932	-23.428471	-67.375893	6.000000	5.000000	MEG 1932
+1933	-23.428471	-62.375893	6.000000	5.000000	MEG 1933
+1931	-17.428471	-64.875893	6.000000	5.000000	MEG 1931
+1943	-36.237587	-48.444530	6.000000	5.000000	MEG 1943
+1942	-36.237587	-43.444530	6.000000	5.000000	MEG 1942
+1941	-30.237587	-45.944530	6.000000	5.000000	MEG 1941
+2013	-10.441930	-34.308243	6.000000	5.000000	MEG 2013
+2012	-10.441930	-29.308243	6.000000	5.000000	MEG 2012
+2011	-4.441930	-31.808243	6.000000	5.000000	MEG 2011
+2023	4.357624	-34.289736	6.000000	5.000000	MEG 2023
+2022	4.357624	-29.289736	6.000000	5.000000	MEG 2022
+2021	10.357624	-31.789736	6.000000	5.000000	MEG 2021
+2032	4.645295	-46.290749	6.000000	5.000000	MEG 2032
+2033	4.645295	-41.290749	6.000000	5.000000	MEG 2033
+2031	10.645295	-43.790749	6.000000	5.000000	MEG 2031
+2042	-10.645079	-46.244335	6.000000	5.000000	MEG 2042
+2043	-10.645079	-41.244335	6.000000	5.000000	MEG 2043
+2041	-4.645079	-43.744335	6.000000	5.000000	MEG 2041
+2113	-3.052351	-58.889515	6.000000	5.000000	MEG 2113
+2112	-3.052351	-53.889515	6.000000	5.000000	MEG 2112
+2111	2.947649	-56.389515	6.000000	5.000000	MEG 2111
+2122	-2.999999	-70.362061	6.000000	5.000000	MEG 2122
+2123	-2.999999	-65.362061	6.000000	5.000000	MEG 2123
+2121	3.000001	-67.862061	6.000000	5.000000	MEG 2121
+2133	8.918572	-79.441826	6.000000	5.000000	MEG 2133
+2132	8.918572	-74.441826	6.000000	5.000000	MEG 2132
+2131	14.918572	-76.941826	6.000000	5.000000	MEG 2131
+2143	-14.987089	-79.428932	6.000000	5.000000	MEG 2143
+2142	-14.987089	-74.428932	6.000000	5.000000	MEG 2142
+2141	-8.987089	-76.928932	6.000000	5.000000	MEG 2141
+2212	15.641460	-12.579389	6.000000	5.000000	MEG 2212
+2213	15.641460	-7.579389	6.000000	5.000000	MEG 2213
+2211	21.641460	-10.079389	6.000000	5.000000	MEG 2211
+2223	27.786499	-13.669980	6.000000	5.000000	MEG 2223
+2222	27.786499	-8.669980	6.000000	5.000000	MEG 2222
+2221	33.786499	-11.169980	6.000000	5.000000	MEG 2221
+2233	18.501518	-26.949615	6.000000	5.000000	MEG 2233
+2232	18.501518	-21.949615	6.000000	5.000000	MEG 2232
+2231	24.501518	-24.449615	6.000000	5.000000	MEG 2231
+2242	3.641699	-22.206125	6.000000	5.000000	MEG 2242
+2243	3.641699	-17.206125	6.000000	5.000000	MEG 2243
+2241	9.641699	-19.706125	6.000000	5.000000	MEG 2241
+2312	19.852789	-40.871220	6.000000	5.000000	MEG 2312
+2313	19.852789	-35.871220	6.000000	5.000000	MEG 2313
+2311	25.852789	-38.371220	6.000000	5.000000	MEG 2311
+2323	30.078903	-48.474960	6.000000	5.000000	MEG 2323
+2322	30.078903	-43.474960	6.000000	5.000000	MEG 2322
+2321	36.078903	-45.974960	6.000000	5.000000	MEG 2321
+2332	17.363274	-67.365387	6.000000	5.000000	MEG 2332
+2333	17.363274	-62.365387	6.000000	5.000000	MEG 2333
+2331	23.363274	-64.865387	6.000000	5.000000	MEG 2331
+2343	14.329920	-56.380260	6.000000	5.000000	MEG 2343
+2342	14.329920	-51.380260	6.000000	5.000000	MEG 2342
+2341	20.329920	-53.880260	6.000000	5.000000	MEG 2341
+2412	39.644810	-16.175139	6.000000	5.000000	MEG 2412
+2413	39.644810	-11.175139	6.000000	5.000000	MEG 2413
+2411	45.644810	-13.675139	6.000000	5.000000	MEG 2411
+2423	50.812263	-20.401899	6.000000	5.000000	MEG 2423
+2422	50.812263	-15.401899	6.000000	5.000000	MEG 2422
+2421	56.812263	-17.901899	6.000000	5.000000	MEG 2421
+2433	42.694180	-36.278580	6.000000	5.000000	MEG 2433
+2432	42.694180	-31.278580	6.000000	5.000000	MEG 2432
+2431	48.694180	-33.778580	6.000000	5.000000	MEG 2431
+2442	31.896111	-30.578348	6.000000	5.000000	MEG 2442
+2443	31.896111	-25.578348	6.000000	5.000000	MEG 2443
+2441	37.896111	-28.078348	6.000000	5.000000	MEG 2441
+2512	35.812634	-58.300888	6.000000	5.000000	MEG 2512
+2513	35.812634	-53.300888	6.000000	5.000000	MEG 2513
+2511	41.812634	-55.800888	6.000000	5.000000	MEG 2511
+2522	51.171906	-43.981274	6.000000	5.000000	MEG 2522
+2523	51.171906	-38.981274	6.000000	5.000000	MEG 2523
+2521	57.171906	-41.481274	6.000000	5.000000	MEG 2521
+2533	50.704624	-59.132656	6.000000	5.000000	MEG 2533
+2532	50.704624	-54.132656	6.000000	5.000000	MEG 2532
+2531	56.704624	-56.632656	6.000000	5.000000	MEG 2531
+2543	31.320171	-72.484848	6.000000	5.000000	MEG 2543
+2542	31.320171	-67.484848	6.000000	5.000000	MEG 2542
+2541	37.320171	-69.984848	6.000000	5.000000	MEG 2541
+2612	65.137360	-4.702045	6.000000	5.000000	MEG 2612
+2613	65.137360	0.297955	6.000000	5.000000	MEG 2613
+2611	71.137360	-2.202045	6.000000	5.000000	MEG 2611
+2623	73.822243	-17.329140	6.000000	5.000000	MEG 2623
+2622	73.822243	-12.329140	6.000000	5.000000	MEG 2622
+2621	79.822243	-14.829140	6.000000	5.000000	MEG 2621
+2633	65.490112	-40.332645	6.000000	5.000000	MEG 2633
+2632	65.490112	-35.332645	6.000000	5.000000	MEG 2632
+2631	71.490112	-37.832645	6.000000	5.000000	MEG 2631
+2642	61.220192	-25.385981	6.000000	5.000000	MEG 2642
+2643	61.220192	-20.385981	6.000000	5.000000	MEG 2643
+2641	67.220192	-22.885981	6.000000	5.000000	MEG 2641
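
Vectorview-all.lout lists the standard Elekta/Neuromag triplets: at each
sensor site the two channels whose names end in 2 and 3 are the planar
gradiometers and the one ending in 1 is the magnetometer, drawn at a small
offset so all three boxes remain visible. Picking one sensor type out of the
parsed names is then a one-liner; a sketch relying only on that naming
convention:

    def pick_vectorview(names, kind='grad'):
        # 'MEG xxx2'/'MEG xxx3' are planar gradiometers, 'MEG xxx1' magnetometers
        last = '23' if kind == 'grad' else '1'
        return [n for n in names if n.startswith('MEG') and n[-1] in last]
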
diff --git a/mne/layouts/Vectorview-grad.lout b/mne/layouts/Vectorview-grad.lout
new file mode 100755
index 0000000..1f133a1
--- /dev/null
+++ b/mne/layouts/Vectorview-grad.lout
@@ -0,0 +1,205 @@
+-55.000000 55.000000 -65.000000 60.000000
+113	-48.186871	26.886379	6.000000	5.000000	MEG 0113
+112	-48.186871	31.886379	6.000000	5.000000	MEG 0112
+122	-39.322296	31.036510	6.000000	5.000000	MEG 0122
+123	-39.322296	36.036510	6.000000	5.000000	MEG 0123
+132	-44.722965	14.826612	6.000000	5.000000	MEG 0132
+133	-44.722965	19.826612	6.000000	5.000000	MEG 0133
+143	-52.785782	6.169280	6.000000	5.000000	MEG 0143
+142	-52.785782	11.169280	6.000000	5.000000	MEG 0142
+213	-37.392612	13.470296	6.000000	5.000000	MEG 0213
+212	-37.392612	18.470296	6.000000	5.000000	MEG 0212
+222	-29.695013	13.899532	6.000000	5.000000	MEG 0222
+223	-29.695013	18.899532	6.000000	5.000000	MEG 0223
+232	-31.502516	-0.631914	6.000000	5.000000	MEG 0232
+233	-31.502516	4.368086	6.000000	5.000000	MEG 0233
+243	-39.115921	-2.709978	6.000000	5.000000	MEG 0243
+242	-39.115921	2.290022	6.000000	5.000000	MEG 0242
+313	-26.608879	38.351933	6.000000	5.000000	MEG 0313
+312	-26.608879	43.351933	6.000000	5.000000	MEG 0312
+322	-25.469093	26.356115	6.000000	5.000000	MEG 0322
+323	-25.469093	31.356115	6.000000	5.000000	MEG 0323
+333	-18.837411	23.164780	6.000000	5.000000	MEG 0333
+332	-18.837411	28.164780	6.000000	5.000000	MEG 0332
+343	-32.957949	27.427811	6.000000	5.000000	MEG 0343
+342	-32.957949	32.427811	6.000000	5.000000	MEG 0342
+413	-22.250046	12.315103	6.000000	5.000000	MEG 0413
+412	-22.250046	17.315103	6.000000	5.000000	MEG 0412
+422	-14.605187	10.962016	6.000000	5.000000	MEG 0422
+423	-14.605187	15.962016	6.000000	5.000000	MEG 0423
+432	-15.148193	-0.524500	6.000000	5.000000	MEG 0432
+433	-15.148193	4.475500	6.000000	5.000000	MEG 0433
+443	-23.422245	-0.157884	6.000000	5.000000	MEG 0443
+442	-23.422245	4.842116	6.000000	5.000000	MEG 0442
+513	-18.953902	44.905155	6.000000	5.000000	MEG 0513
+512	-18.953902	49.905155	6.000000	5.000000	MEG 0512
+523	-11.025696	48.325344	6.000000	5.000000	MEG 0523
+522	-11.025696	53.325344	6.000000	5.000000	MEG 0522
+532	-10.454178	39.888676	6.000000	5.000000	MEG 0532
+533	-10.454178	44.888676	6.000000	5.000000	MEG 0533
+542	-18.555386	35.433716	6.000000	5.000000	MEG 0542
+543	-18.555386	40.433716	6.000000	5.000000	MEG 0543
+613	-10.560901	30.757313	6.000000	5.000000	MEG 0613
+612	-10.560901	35.757313	6.000000	5.000000	MEG 0612
+622	-2.979000	21.849854	6.000000	5.000000	MEG 0622
+623	-2.979000	26.849854	6.000000	5.000000	MEG 0623
+633	-6.911079	11.573471	6.000000	5.000000	MEG 0633
+632	-6.911079	16.573471	6.000000	5.000000	MEG 0632
+642	-10.828249	21.334785	6.000000	5.000000	MEG 0642
+643	-10.828249	26.334785	6.000000	5.000000	MEG 0643
+713	-7.008664	0.931329	6.000000	5.000000	MEG 0713
+712	-7.008664	5.931329	6.000000	5.000000	MEG 0712
+723	1.052102	0.833180	6.000000	5.000000	MEG 0723
+722	1.052102	5.833180	6.000000	5.000000	MEG 0722
+733	1.098721	-8.987786	6.000000	5.000000	MEG 0733
+732	1.098721	-3.987786	6.000000	5.000000	MEG 0732
+743	-7.121645	-8.933109	6.000000	5.000000	MEG 0743
+742	-7.121645	-3.933109	6.000000	5.000000	MEG 0742
+813	-2.975877	49.460842	6.000000	5.000000	MEG 0813
+812	-2.975877	54.460842	6.000000	5.000000	MEG 0812
+822	-2.977890	40.979687	6.000000	5.000000	MEG 0822
+823	-2.977890	45.979687	6.000000	5.000000	MEG 0823
+913	5.024490	48.354298	6.000000	5.000000	MEG 0913
+912	5.024490	53.354298	6.000000	5.000000	MEG 0912
+923	13.021803	44.879852	6.000000	5.000000	MEG 0923
+922	13.021803	49.879852	6.000000	5.000000	MEG 0922
+932	12.564190	35.455193	6.000000	5.000000	MEG 0932
+933	12.564190	40.455193	6.000000	5.000000	MEG 0933
+942	4.483593	39.929310	6.000000	5.000000	MEG 0942
+943	4.483593	44.929310	6.000000	5.000000	MEG 0943
+1013	-2.978879	32.002693	6.000000	5.000000	MEG 1013
+1012	-2.978879	37.002693	6.000000	5.000000	MEG 1012
+1023	4.540760	30.762428	6.000000	5.000000	MEG 1023
+1022	4.540760	35.762428	6.000000	5.000000	MEG 1022
+1032	4.780051	21.348934	6.000000	5.000000	MEG 1032
+1033	4.780051	26.348934	6.000000	5.000000	MEG 1033
+1043	0.978956	11.650963	6.000000	5.000000	MEG 1043
+1042	0.978956	16.650963	6.000000	5.000000	MEG 1042
+1112	8.560405	10.928195	6.000000	5.000000	MEG 1112
+1113	8.560405	15.928195	6.000000	5.000000	MEG 1113
+1123	16.224724	12.278107	6.000000	5.000000	MEG 1123
+1122	16.224724	17.278107	6.000000	5.000000	MEG 1122
+1133	17.379185	-0.268703	6.000000	5.000000	MEG 1133
+1132	17.379185	4.731297	6.000000	5.000000	MEG 1132
+1142	9.117422	-0.423700	6.000000	5.000000	MEG 1142
+1143	9.117422	4.576300	6.000000	5.000000	MEG 1143
+1213	20.716938	38.318100	6.000000	5.000000	MEG 1213
+1212	20.716938	43.318100	6.000000	5.000000	MEG 1212
+1223	27.111319	27.293877	6.000000	5.000000	MEG 1223
+1222	27.111319	32.293877	6.000000	5.000000	MEG 1222
+1232	19.469093	26.356115	6.000000	5.000000	MEG 1232
+1233	19.469093	31.356115	6.000000	5.000000	MEG 1233
+1243	12.786146	23.189396	6.000000	5.000000	MEG 1243
+1242	12.786146	28.189396	6.000000	5.000000	MEG 1242
+1312	23.695013	13.899529	6.000000	5.000000	MEG 1312
+1313	23.695013	18.899529	6.000000	5.000000	MEG 1313
+1323	31.369019	13.362624	6.000000	5.000000	MEG 1323
+1322	31.369019	18.362624	6.000000	5.000000	MEG 1322
+1333	33.205658	-2.836478	6.000000	5.000000	MEG 1333
+1332	33.205658	2.163522	6.000000	5.000000	MEG 1332
+1342	25.473745	-0.631941	6.000000	5.000000	MEG 1342
+1343	25.473745	4.368059	6.000000	5.000000	MEG 1343
+1412	33.387833	31.097027	6.000000	5.000000	MEG 1412
+1413	33.387833	36.097027	6.000000	5.000000	MEG 1413
+1423	41.996334	27.235786	6.000000	5.000000	MEG 1423
+1422	41.996334	32.235786	6.000000	5.000000	MEG 1422
+1433	46.693424	6.365705	6.000000	5.000000	MEG 1433
+1432	46.693424	11.365705	6.000000	5.000000	MEG 1432
+1442	38.636284	14.732794	6.000000	5.000000	MEG 1442
+1443	38.636284	19.732794	6.000000	5.000000	MEG 1443
+1512	-46.828197	-4.270524	6.000000	5.000000	MEG 1512
+1513	-46.828197	0.729476	6.000000	5.000000	MEG 1513
+1522	-44.250233	-20.875282	6.000000	5.000000	MEG 1522
+1523	-44.250233	-15.875282	6.000000	5.000000	MEG 1523
+1533	-47.087372	-32.702410	6.000000	5.000000	MEG 1533
+1532	-47.087372	-27.702410	6.000000	5.000000	MEG 1532
+1543	-52.352669	-14.466389	6.000000	5.000000	MEG 1543
+1542	-52.352669	-9.466389	6.000000	5.000000	MEG 1542
+1613	-37.598797	-16.787832	6.000000	5.000000	MEG 1613
+1612	-37.598797	-11.787832	6.000000	5.000000	MEG 1612
+1622	-30.357292	-13.585911	6.000000	5.000000	MEG 1622
+1623	-30.357292	-8.585911	6.000000	5.000000	MEG 1623
+1632	-25.393221	-25.022747	6.000000	5.000000	MEG 1632
+1633	-25.393221	-20.022747	6.000000	5.000000	MEG 1633
+1643	-32.428291	-29.512911	6.000000	5.000000	MEG 1643
+1642	-32.428291	-24.512911	6.000000	5.000000	MEG 1642
+1713	-37.521523	-47.886852	6.000000	5.000000	MEG 1713
+1712	-37.521523	-42.886852	6.000000	5.000000	MEG 1712
+1722	-37.773560	-35.834789	6.000000	5.000000	MEG 1722
+1723	-37.773560	-30.834789	6.000000	5.000000	MEG 1723
+1732	-27.964468	-47.242935	6.000000	5.000000	MEG 1732
+1733	-27.964468	-42.242935	6.000000	5.000000	MEG 1733
+1743	-25.080088	-58.608849	6.000000	5.000000	MEG 1743
+1742	-25.080088	-53.608849	6.000000	5.000000	MEG 1742
+1813	-22.765453	-11.539077	6.000000	5.000000	MEG 1813
+1812	-22.765453	-6.539077	6.000000	5.000000	MEG 1812
+1822	-14.990439	-10.617317	6.000000	5.000000	MEG 1822
+1823	-14.990439	-5.617317	6.000000	5.000000	MEG 1823
+1832	-7.235366	-18.294876	6.000000	5.000000	MEG 1832
+1833	-7.235366	-13.294876	6.000000	5.000000	MEG 1833
+1843	-16.786220	-22.032574	6.000000	5.000000	MEG 1843
+1842	-16.786220	-17.032574	6.000000	5.000000	MEG 1842
+1912	-17.673892	-33.262066	6.000000	5.000000	MEG 1912
+1913	-17.673892	-28.262066	6.000000	5.000000	MEG 1913
+1923	-14.238597	-45.699379	6.000000	5.000000	MEG 1923
+1922	-14.238597	-40.699379	6.000000	5.000000	MEG 1922
+1932	-16.109179	-54.539486	6.000000	5.000000	MEG 1932
+1933	-16.109179	-49.539486	6.000000	5.000000	MEG 1933
+1943	-24.328934	-39.353901	6.000000	5.000000	MEG 1943
+1942	-24.328934	-34.353901	6.000000	5.000000	MEG 1942
+2013	-7.775570	-28.014633	6.000000	5.000000	MEG 2013
+2012	-7.775570	-23.014633	6.000000	5.000000	MEG 2012
+2023	1.721470	-27.999788	6.000000	5.000000	MEG 2023
+2022	1.721470	-22.999788	6.000000	5.000000	MEG 2022
+2032	1.906072	-37.626270	6.000000	5.000000	MEG 2032
+2033	1.906072	-32.626270	6.000000	5.000000	MEG 2033
+2042	-7.905933	-37.589039	6.000000	5.000000	MEG 2042
+2043	-7.905933	-32.589039	6.000000	5.000000	MEG 2043
+2113	-3.033595	-47.732231	6.000000	5.000000	MEG 2113
+2112	-3.033595	-42.732231	6.000000	5.000000	MEG 2112
+2122	-2.999999	-56.934807	6.000000	5.000000	MEG 2122
+2123	-2.999999	-51.934807	6.000000	5.000000	MEG 2123
+2133	4.648282	-64.218044	6.000000	5.000000	MEG 2133
+2132	4.648282	-59.218044	6.000000	5.000000	MEG 2132
+2143	-10.692250	-64.207703	6.000000	5.000000	MEG 2143
+2142	-10.692250	-59.207703	6.000000	5.000000	MEG 2142
+2212	8.962435	-10.585071	6.000000	5.000000	MEG 2212
+2213	8.962435	-5.585071	6.000000	5.000000	MEG 2213
+2223	16.756042	-11.459877	6.000000	5.000000	MEG 2223
+2222	16.756042	-6.459877	6.000000	5.000000	MEG 2222
+2233	10.797766	-22.111992	6.000000	5.000000	MEG 2233
+2232	10.797766	-17.111992	6.000000	5.000000	MEG 2232
+2242	1.262053	-18.307052	6.000000	5.000000	MEG 2242
+2243	1.262053	-13.307052	6.000000	5.000000	MEG 2243
+2312	11.664891	-33.279053	6.000000	5.000000	MEG 2312
+2313	11.664891	-28.279053	6.000000	5.000000	MEG 2313
+2323	18.227104	-39.378311	6.000000	5.000000	MEG 2323
+2322	18.227104	-34.378311	6.000000	5.000000	MEG 2322
+2332	10.067341	-54.531059	6.000000	5.000000	MEG 2332
+2333	10.067341	-49.531059	6.000000	5.000000	MEG 2333
+2343	8.120804	-45.719460	6.000000	5.000000	MEG 2343
+2342	8.120804	-40.719460	6.000000	5.000000	MEG 2342
+2412	24.365654	-13.469363	6.000000	5.000000	MEG 2412
+2413	24.365654	-8.469363	6.000000	5.000000	MEG 2413
+2423	31.531933	-16.859812	6.000000	5.000000	MEG 2423
+2422	31.531933	-11.859812	6.000000	5.000000	MEG 2422
+2433	26.322470	-29.595119	6.000000	5.000000	MEG 2433
+2432	26.322470	-24.595119	6.000000	5.000000	MEG 2432
+2442	19.393225	-25.022739	6.000000	5.000000	MEG 2442
+2443	19.393225	-20.022739	6.000000	5.000000	MEG 2443
+2512	21.906504	-47.260071	6.000000	5.000000	MEG 2512
+2513	21.906504	-42.260071	6.000000	5.000000	MEG 2513
+2522	31.762718	-35.773750	6.000000	5.000000	MEG 2522
+2523	31.762718	-30.773750	6.000000	5.000000	MEG 2523
+2533	31.462860	-47.927265	6.000000	5.000000	MEG 2533
+2532	31.462860	-42.927265	6.000000	5.000000	MEG 2532
+2543	19.023640	-58.637577	6.000000	5.000000	MEG 2543
+2542	19.023640	-53.637577	6.000000	5.000000	MEG 2542
+2612	40.724506	-4.266347	6.000000	5.000000	MEG 2612
+2613	40.724506	0.733653	6.000000	5.000000	MEG 2613
+2623	46.297695	-14.395032	6.000000	5.000000	MEG 2623
+2622	46.297695	-9.395032	6.000000	5.000000	MEG 2622
+2633	40.950874	-32.847042	6.000000	5.000000	MEG 2633
+2632	40.950874	-27.847042	6.000000	5.000000	MEG 2632
+2642	38.210819	-20.857738	6.000000	5.000000	MEG 2642
+2643	38.210819	-15.857738	6.000000	5.000000	MEG 2643
diff --git a/mne/layouts/Vectorview-mag.lout b/mne/layouts/Vectorview-mag.lout
new file mode 100755
index 0000000..c5f4c60
--- /dev/null
+++ b/mne/layouts/Vectorview-mag.lout
@@ -0,0 +1,103 @@
+-50.000000 50.000000 -50.000000 38.000000
+111	-41.408840	17.090919	6.000000	5.000000	MEG 0111
+121	-33.873951	19.857674	6.000000	5.000000	MEG 0121
+131	-38.464523	9.051075	6.000000	5.000000	MEG 0131
+141	-45.317917	3.279520	6.000000	5.000000	MEG 0141
+211	-32.233719	8.146864	6.000000	5.000000	MEG 0211
+221	-25.690760	8.433022	6.000000	5.000000	MEG 0221
+231	-27.227139	-1.254610	6.000000	5.000000	MEG 0231
+241	-33.698534	-2.642785	6.000000	5.000000	MEG 0241
+311	-23.067547	24.734621	6.000000	5.000000	MEG 0311
+321	-22.098728	16.737410	6.000000	5.000000	MEG 0321
+331	-16.461800	14.609854	6.000000	5.000000	MEG 0331
+341	-28.464256	17.451874	6.000000	5.000000	MEG 0341
+411	-19.362539	7.376735	6.000000	5.000000	MEG 0411
+421	-12.864409	6.474677	6.000000	5.000000	MEG 0421
+431	-13.325964	-1.183000	6.000000	5.000000	MEG 0431
+441	-20.358908	-0.938589	6.000000	5.000000	MEG 0441
+511	-16.560817	29.103437	6.000000	5.000000	MEG 0511
+521	-9.821842	31.383564	6.000000	5.000000	MEG 0521
+531	-9.336051	25.759117	6.000000	5.000000	MEG 0531
+541	-16.222077	22.789145	6.000000	5.000000	MEG 0541
+611	-9.426766	19.671541	6.000000	5.000000	MEG 0611
+621	-2.982150	13.733236	6.000000	5.000000	MEG 0621
+631	-6.324418	6.882314	6.000000	5.000000	MEG 0631
+641	-9.654012	13.389857	6.000000	5.000000	MEG 0641
+711	-6.407364	-0.212448	6.000000	5.000000	MEG 0711
+721	0.444286	-0.277880	6.000000	5.000000	MEG 0721
+731	0.483912	-6.911695	6.000000	5.000000	MEG 0731
+741	-6.503398	-6.874514	6.000000	5.000000	MEG 0741
+811	-2.979496	32.140564	6.000000	5.000000	MEG 0811
+821	-2.981206	26.486458	6.000000	5.000000	MEG 0821
+911	3.820817	31.402866	6.000000	5.000000	MEG 0911
+921	10.618533	29.086569	6.000000	5.000000	MEG 0921
+931	10.229562	22.803463	6.000000	5.000000	MEG 0931
+941	3.361053	25.786205	6.000000	5.000000	MEG 0941
+1011	-2.982047	20.501795	6.000000	5.000000	MEG 1011
+1021	3.409646	19.674952	6.000000	5.000000	MEG 1021
+1031	3.613043	13.399289	6.000000	5.000000	MEG 1031
+1041	0.382112	6.933975	6.000000	5.000000	MEG 1041
+1111	6.826344	6.452130	6.000000	5.000000	MEG 1111
+1121	13.341015	7.352071	6.000000	5.000000	MEG 1121
+1131	14.322306	-1.012468	6.000000	5.000000	MEG 1131
+1141	7.299809	-1.115800	6.000000	5.000000	MEG 1141
+1211	17.159397	24.712067	6.000000	5.000000	MEG 1211
+1221	22.594622	17.362583	6.000000	5.000000	MEG 1221
+1231	16.098728	16.737411	6.000000	5.000000	MEG 1231
+1241	10.418224	14.626265	6.000000	5.000000	MEG 1241
+1311	19.690762	8.433019	6.000000	5.000000	MEG 1311
+1321	26.213667	8.075083	6.000000	5.000000	MEG 1321
+1331	27.774809	-2.728805	6.000000	5.000000	MEG 1331
+1341	21.202684	-1.254627	6.000000	5.000000	MEG 1341
+1411	27.929657	19.898018	6.000000	5.000000	MEG 1411
+1421	35.246883	17.323858	6.000000	5.000000	MEG 1421
+1431	39.239410	3.410470	6.000000	5.000000	MEG 1431
+1441	32.390839	8.988529	6.000000	5.000000	MEG 1441
+1511	-40.253967	-3.703956	6.000000	5.000000	MEG 1511
+1521	-38.062698	-14.995193	6.000000	5.000000	MEG 1521
+1531	-40.474266	-23.037640	6.000000	5.000000	MEG 1531
+1541	-44.949768	-10.637144	6.000000	5.000000	MEG 1541
+1611	-32.408976	-12.215726	6.000000	5.000000	MEG 1611
+1621	-26.253698	-10.038419	6.000000	5.000000	MEG 1621
+1631	-22.034237	-17.815468	6.000000	5.000000	MEG 1631
+1641	-28.014048	-20.868780	6.000000	5.000000	MEG 1641
+1711	-32.343294	-33.363060	6.000000	5.000000	MEG 1711
+1721	-32.557526	-25.167658	6.000000	5.000000	MEG 1721
+1731	-24.219797	-32.925196	6.000000	5.000000	MEG 1731
+1741	-21.768074	-40.654018	6.000000	5.000000	MEG 1741
+1811	-19.800634	-8.646573	6.000000	5.000000	MEG 1811
+1821	-13.191874	-8.019776	6.000000	5.000000	MEG 1821
+1831	-6.600061	-13.240516	6.000000	5.000000	MEG 1831
+1841	-14.718287	-15.782150	6.000000	5.000000	MEG 1841
+1911	-15.472808	-23.418205	6.000000	5.000000	MEG 1911
+1921	-12.552808	-31.875578	6.000000	5.000000	MEG 1921
+1931	-14.142802	-37.886852	6.000000	5.000000	MEG 1931
+1941	-21.129593	-27.560652	6.000000	5.000000	MEG 1941
+2011	-7.059234	-19.849951	6.000000	5.000000	MEG 2011
+2021	1.013249	-19.839857	6.000000	5.000000	MEG 2021
+2031	1.170161	-26.385864	6.000000	5.000000	MEG 2031
+2041	-7.170043	-26.360546	6.000000	5.000000	MEG 2041
+2111	-3.028555	-33.257917	6.000000	5.000000	MEG 2111
+2121	-3.000000	-39.515667	6.000000	5.000000	MEG 2121
+2131	3.501040	-44.468269	6.000000	5.000000	MEG 2131
+2141	-9.538412	-44.461239	6.000000	5.000000	MEG 2141
+2211	7.168070	-7.997848	6.000000	5.000000	MEG 2211
+2221	13.792637	-8.592716	6.000000	5.000000	MEG 2221
+2231	8.728101	-15.836154	6.000000	5.000000	MEG 2231
+2241	0.622745	-13.248796	6.000000	5.000000	MEG 2241
+2311	9.465158	-23.429756	6.000000	5.000000	MEG 2311
+2321	15.043037	-27.577251	6.000000	5.000000	MEG 2321
+2331	8.107240	-37.881119	6.000000	5.000000	MEG 2331
+2341	6.452683	-31.889233	6.000000	5.000000	MEG 2341
+2411	20.260805	-9.959167	6.000000	5.000000	MEG 2411
+2421	26.352144	-12.264672	6.000000	5.000000	MEG 2421
+2431	21.924099	-20.924681	6.000000	5.000000	MEG 2431
+2441	16.034241	-17.815463	6.000000	5.000000	MEG 2441
+2511	18.170528	-32.936850	6.000000	5.000000	MEG 2511
+2521	26.548311	-25.126150	6.000000	5.000000	MEG 2521
+2531	26.293430	-33.390539	6.000000	5.000000	MEG 2531
+2541	15.720093	-40.673553	6.000000	5.000000	MEG 2541
+2611	34.165833	-3.701116	6.000000	5.000000	MEG 2611
+2621	38.903042	-10.588621	6.000000	5.000000	MEG 2621
+2631	34.358242	-23.135988	6.000000	5.000000	MEG 2631
+2641	32.029198	-14.983262	6.000000	5.000000	MEG 2641
diff --git a/mne/layouts/__init__.py b/mne/layouts/__init__.py
new file mode 100644
index 0000000..e612652
--- /dev/null
+++ b/mne/layouts/__init__.py
@@ -0,0 +1 @@
+from .layout import Layout, make_eeg_layout, make_grid_layout, read_layout
diff --git a/mne/layouts/layout.py b/mne/layouts/layout.py
new file mode 100644
index 0000000..f2d1c0f
--- /dev/null
+++ b/mne/layouts/layout.py
@@ -0,0 +1,471 @@
+from collections import defaultdict
+import os.path as op
+import numpy as np
+from scipy.optimize import leastsq
+from ..preprocessing.maxfilter import fit_sphere_to_headshape
+from ..fiff import FIFF, pick_types
+
+
+class Layout(object):
+    """Sensor layouts
+
+    Parameters
+    ----------
+    kind : 'Vectorview-all' | 'CTF-275' | 'Vectorview-grad' | 'Vectorview-mag'
+        Type of layout (can also be custom for EEG)
+    path : string
+        Path to folder where to find the layout file.
+
+    Attributes
+    ----------
+    box : tuple of length 4
+        The box dimension (x_min, x_max, y_min, y_max)
+    pos : array, shape=(n_channels, 4)
+        The positions of the channels in 2d (x, y, width, height)
+    names : list
+        The channel names
+    ids : list
+        The channel ids
+    kind : str
+        The type of Layout (e.g. 'Vectorview-all')
+    """
+    def __init__(self, box, pos, names, ids, kind):
+        self.box = box
+        self.pos = pos
+        self.names = names
+        self.ids = ids
+        self.kind = kind
+
+    def save(self, fname):
+        """Save Layout to disk
+
+        Parameters
+        ----------
+        fname : str
+            The file name (e.g. 'my_layout.lout')
+        """
+        x = self.pos[:, 0]
+        y = self.pos[:, 1]
+        width = self.pos[:, 2]
+        height = self.pos[:, 3]
+        if fname.endswith('.lout'):
+            out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
+        elif fname.endswith('.lay'):
+            out_str = ''
+        else:
+            raise ValueError('Unknown layout type. Should be of type '
+                             '.lout or .lay.')
+
+        for ii in range(x.shape[0]):
+            out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
+                        x[ii], y[ii], width[ii], height[ii], self.names[ii]))
+
+        f = open(fname, 'w')
+        f.write(out_str)
+        f.close()
+
+
+def _read_lout(fname):
+    """Aux function"""
+    with open(fname) as f:
+        box_line = f.readline()  # first line contains box dimension
+        box = tuple(map(float, box_line.split()))
+        names, pos, ids = [], [], []
+        for line in f:
+            splits = line.split()
+            if len(splits) == 7:
+                cid, x, y, dx, dy, chkind, nb = splits
+                name = chkind + ' ' + nb
+            else:
+                cid, x, y, dx, dy, name = splits
+            pos.append(np.array([x, y, dx, dy], dtype=np.float))
+            names.append(name)
+            ids.append(int(cid))
+
+    pos = np.array(pos)
+
+    return box, pos, names, ids
+
+
+def _read_lay(fname):
+    """Aux function"""
+    with open(fname) as f:
+        box = None
+        names, pos, ids = [], [], []
+        for line in f:
+            splits = line.split()
+            cid, x, y, dx, dy, name = splits
+            pos.append(np.array([x, y, dx, dy], dtype=np.float))
+            names.append(name)
+            ids.append(int(cid))
+
+    pos = np.array(pos)
+
+    return box, pos, names, ids
+
+
+def read_layout(kind, path=None, scale=True):
+    """Read layout from a file
+
+    Parameters
+    ----------
+    kind : str
+        The name of the .lout file (e.g. kind='Vectorview-all' for
+        'Vectorview-all.lout')
+
+    path : str | None
+        The path of the folder containing the Layout file
+
+    scale : bool
+        Apply useful scaling for out-of-the-box plotting using layout.pos
+
+    Returns
+    -------
+    layout : instance of Layout
+        The layout
+    """
+    if path is None:
+        path = op.dirname(__file__)
+
+    if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
+        kind += '.lout'
+    elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
+        kind += '.lay'
+
+    if kind.endswith('.lout'):
+        fname = op.join(path, kind)
+        kind = kind[:-5]
+        box, pos, names, ids = _read_lout(fname)
+    elif kind.endswith('.lay'):
+        fname = op.join(path, kind)
+        kind = kind[:-4]
+        box, pos, names, ids = _read_lay(fname)
+    else:
+        raise ValueError('Unknown layout type. Should be of type '
+                         '.lout or .lay.')
+
+    if scale:
+        pos[:, 0] -= np.min(pos[:, 0])
+        pos[:, 1] -= np.min(pos[:, 1])
+        scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
+        pos /= scaling
+        pos[:, :2] += 0.03
+        pos[:, :2] *= 0.97 / 1.03
+        pos[:, 2:] *= 0.94
+
+    return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
+
+
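For instance, a bundled layout can be loaded by name and written back to disk. This is only a usage sketch built on the function above; the output file name 'my_layout.lout' is an arbitrary example:

    from mne.layouts import read_layout

    # Load the bundled Vectorview gradiometer layout by kind; this
    # resolves to Vectorview-grad.lout in the mne/layouts folder.
    layout = read_layout('Vectorview-grad')
    assert layout.pos.shape[1] == 4  # (x, y, width, height) per channel

    # Round-trip the layout through Layout.save()
    layout.save('my_layout.lout')  # arbitrary example file name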
+def make_eeg_layout(info, radius=20, width=5, height=4):
+    """Create .lout file from EEG electrode digitization
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info (e.g., raw.info)
+    radius : float
+        Viewport radius
+    width : float
+        Viewport width
+    height : float
+        Viewport height
+
+    Returns
+    -------
+    layout : Layout
+        The generated Layout
+    """
+    radius_head, origin_head, origin_device = fit_sphere_to_headshape(info)
+    inds = pick_types(info, meg=False, eeg=True, exclude='bads')
+    hsp = [info['chs'][ii]['eeg_loc'][:, 0] for ii in inds]
+    names = [info['chs'][ii]['ch_name'] for ii in inds]
+    if len(hsp) <= 0:
+        raise ValueError('No EEG digitization points found')
+
+    if not len(hsp) == len(names):
+        raise ValueError('Channel names don\'t match digitization values')
+    hsp = np.array(hsp)
+
+    # Move points to origin
+    hsp -= origin_head / 1e3  # origin_head is in mm; convert to meters
+
+    # Calculate angles
+    r = np.sqrt(np.sum(hsp ** 2, axis=-1))
+    theta = np.arccos(hsp[:, 2] / r)
+    phi = np.arctan2(hsp[:, 1], hsp[:, 0])
+
+    # Mark the points that might have caused bad angle estimates
+    iffy = np.nonzero(np.sum(hsp[:, :2] ** 2, axis=-1) ** (1. / 2)
+                      < np.finfo(np.float).eps * 10)
+    theta[iffy] = 0
+    phi[iffy] = 0
+
+    # Do the azimuthal equidistant projection
+    x = radius * (2.0 * theta / np.pi) * np.cos(phi)
+    y = radius * (2.0 * theta / np.pi) * np.sin(phi)
+
+    n_channels = len(x)
+    pos = np.c_[x, y, width * np.ones(n_channels),
+                height * np.ones(n_channels)]
+
+    box = (x.min() - 0.1 * width, x.max() + 1.1 * width,
+           y.min() - 0.1 * width, y.max() + 1.1 * height)
+    ids = 1 + np.arange(n_channels)
+    layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
+    return layout
+
+
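A minimal sketch of how the function above might be used, assuming a raw FIF file whose info contains digitized EEG electrode positions ('sample_raw.fif' and 'custom_eeg.lout' are placeholder names):

    from mne.fiff import Raw
    from mne.layouts import make_eeg_layout

    raw = Raw('sample_raw.fif')  # placeholder file name
    layout = make_eeg_layout(raw.info)
    layout.save('custom_eeg.lout')  # placeholder output name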
+def make_grid_layout(info, picks=None):
+    """ Generate .lout file for custom data, i.e., ICA sources
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info (e.g., raw.info) containing the misc channels
+        for which the layout is created.
+    picks : array-like | None
+        The indices of the channels to be included. If None, all misc
+        channels will be included.
+
+    Returns
+    -------
+    layout : Layout
+        The generated layout.
+    """
+    if picks is None:
+        picks = pick_types(info, misc=True, exclude='bads')
+
+    names = [info['chs'][k]['ch_name'] for k in picks]
+
+    if not names:
+        raise ValueError('No misc data channels found.')
+
+    ids = range(len(picks))
+    size = len(picks)
+
+    # prepare square-like layout
+    ht = wd = np.sqrt(size)  # try square
+    if wd % 1:
+        wd, ht = int(wd + 1), int(ht)  # try n * (n-1) rectangle
+
+    if wd * ht < size:  # jump to the next full square
+        ht += 1
+
+    # setup position grid and fill up
+    x, y = np.meshgrid(np.linspace(0, 1, wd), np.linspace(0, 1, ht))
+
+    # scale boxes depending on size such that square is always filled
+    width = size * .15  # value depends on mne default full-view size
+    spacing = (width * ht)
+
+    # XXX : width and height are here assumed to be equal. Could be improved.
+    x, y = (x.ravel()[:size] * spacing, y.ravel()[:size] * spacing)
+
+    # calculate pos
+    pos = np.c_[x, y, width * np.ones(size), width * np.ones(size)]
+
+    # calculate box
+    box = (x.min() - 0.1 * width, x.max() + 1.1 * width,
+           y.min() - 0.1 * width, y.max() + 1.1 * width)
+
+    layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
+    return layout
+
+
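Since the default picks select misc channels, the function above pairs naturally with ICA sources stored as misc channels. A sketch, where ica_info is a placeholder for such a measurement-info dict:

    from mne.layouts import make_grid_layout

    # ica_info: placeholder for an info dict whose misc channels
    # hold ICA source traces.
    grid = make_grid_layout(ica_info)
    grid.save('ica.lout')  # placeholder output name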
+def find_layout(chs):
+    """Choose a layout based on the channels in the chs parameter
+
+    Parameters
+    ----------
+    chs : list
+        A list of channels as contained in the info['chs'] entry.
+
+    Returns
+    -------
+    layout : Layout instance | None
+        None if layout not found.
+    """
+
+    coil_types = np.unique([ch['coil_type'] for ch in chs])
+    has_vv_mag = FIFF.FIFFV_COIL_VV_MAG_T3 in coil_types
+    has_vv_grad = FIFF.FIFFV_COIL_VV_PLANAR_T1 in coil_types
+    has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
+    if has_vv_mag and has_vv_grad:
+        layout_name = 'Vectorview-all'
+    elif has_vv_mag:
+        layout_name = 'Vectorview-mag'
+    elif has_vv_grad:
+        layout_name = 'Vectorview-grad'
+    elif has_4D_mag:
+        layout_name = 'magnesWH3600'
+    else:
+        return None
+
+    return read_layout(layout_name)
+
+
+def _find_topomap_coords(chs, layout=None):
+    """Try to guess the MEG system and return appropriate topomap coordinates
+
+    Parameters
+    ----------
+    chs : list
+        A list of channels as contained in the info['chs'] entry.
+    layout : None | instance of Layout
+        Enforce using a specific layout. With None, a layout is
+        chosen automatically based on the channels in the chs
+        parameter.
+
+    Returns
+    -------
+    coords : array, shape = (n_chs, 2)
+        2 dimensional coordinates for each sensor for a topomap plot.
+    """
+    if len(chs) == 0:
+        raise ValueError("Need more than 0 channels.")
+
+    if layout is not None:
+        pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
+        pos = np.asarray(pos)
+    else:
+        pos = _auto_topomap_coords(chs)
+
+    return pos
+
+
+def _auto_topomap_coords(chs):
+    """Make a 2 dimensional sensor map from sensor positions in an info dict
+
+    Parameters
+    ----------
+    chs : list
+        A list of channels as contained in the info['chs'] entry.
+
+    Returns
+    -------
+    locs : array, shape = (n_sensors, 2)
+        An array of positions of the 2 dimensional map.
+    """
+    locs3d = np.array([ch['loc'][:3] for ch in chs])
+
+    # fit the 3d sensor locations to a sphere with center (cx, cy, cz)
+    # and radius r
+
+    # error function
+    def err(params):
+        r, cx, cy, cz = params
+        return np.sum((locs3d - [cx, cy, cz]) ** 2, 1) - r ** 2
+
+    (r, cx, cy, cz), _ = leastsq(err, (1, 0, 0, 0))
+
+    # center the sensor locations based on the sphere and scale to
+    # radius 1
+    sphere_center = np.array((cx, cy, cz))
+    locs3d -= sphere_center
+    locs3d /= r
+
+    # implement projection
+    locs2d = np.copy(locs3d[:, :2])
+    z = max(locs3d[:, 2]) - locs3d[:, 2]  # distance from top
+    r = np.sqrt(z)  # desired 2d radius
+    r_xy = np.sqrt(np.sum(locs3d[:, :2] ** 2, 1))  # current radius in xy
+    idx = (r_xy != 0)  # avoid zero division
+    F = r[idx] / r_xy[idx]  # stretching factor accounting for current r
+    locs2d[idx, :] *= F[:, None]
+
+    return locs2d
+
+
+def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads'):
+    """Find the picks for pairing grad channels
+
+    Parameters
+    ----------
+    info : dict
+        An info dictionary containing channel information.
+    layout : Layout
+        The layout if available.
+    topomap_coords : bool
+        Return the coordinates for a topomap plot along with the picks. If
+        False, only picks are returned.
+    exclude : list of str | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in info['bads'].
+
+    Returns
+    -------
+    picks : list of int
+        Picks for the grad channels, ordered in pairs.
+    coords : array, shape = (n_grad_channels, 3)
+        Coordinates for a topomap plot (optional, only returned if
+        topomap_coords == True).
+    """
+    # find all complete pairs of grad channels
+    pairs = defaultdict(list)
+    grad_picks = pick_types(info, meg='grad', exclude=exclude)
+    for i in grad_picks:
+        ch = info['chs'][i]
+        name = ch['ch_name']
+        if name.startswith('MEG'):
+            if name.endswith(('2', '3')):
+                key = name[-4:-1]
+                pairs[key].append(ch)
+    pairs = [p for p in pairs.values() if len(p) == 2]
+    if len(pairs) == 0:
+        raise ValueError("No 'grad' channel pairs found.")
+
+    # find the picks corresponding to the grad channels
+    grad_chs = sum(pairs, [])
+    ch_names = info['ch_names']
+    picks = [ch_names.index(ch['ch_name']) for ch in grad_chs]
+
+    if topomap_coords:
+        shape = (len(pairs), 2, -1)
+        coords = _find_topomap_coords(grad_chs, layout).reshape(shape).mean(axis=1)
+        return picks, coords
+    else:
+        return picks
+
+
+def _pair_grad_sensors_from_ch_names(ch_names):
+    """Find the indexes for pairing grad channels
+
+    Parameters
+    ----------
+    ch_names : list of str
+        A list of channel names.
+
+    Returns
+    -------
+    indexes : list of int
+        Indexes of the grad channels, ordered in pairs.
+    """
+    pairs = defaultdict(list)
+    for i, name in enumerate(ch_names):
+        if name.startswith('MEG'):
+            if name.endswith(('2', '3')):
+                key = name[-4:-1]
+                pairs[key].append(i)
+
+    pairs = [p for p in pairs.values() if len(p) == 2]
+
+    grad_chs = sum(pairs, [])
+    return grad_chs
+
+
+def _merge_grad_data(data):
+    """Merge data from channel pairs using the RMS
+
+    Parameters
+    ----------
+    data : array, shape = (n_channels, n_times)
+        Data for channels, ordered in pairs.
+
+    Returns
+    -------
+    data : array, shape = (n_channels / 2, n_times)
+        The root mean square for each pair.
+    """
+    data = data.reshape((len(data) // 2, 2, -1))
+    data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
+    return data
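The two private helpers above are meant to be chained: pick the planar-gradiometer pairs, extract their data, then collapse each pair to a single RMS trace. A rough sketch, assuming a loaded Raw object named raw (a placeholder):

    from mne.layouts.layout import _pair_grad_sensors, _merge_grad_data

    # Picks come back ordered in pairs; coords give one topomap
    # position per pair (the mean of the two sensors' positions).
    picks, coords = _pair_grad_sensors(raw.info, topomap_coords=True)
    data, _ = raw[picks, :]       # shape (n_grad_channels, n_times)
    rms = _merge_grad_data(data)  # shape (n_grad_channels / 2, n_times)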
diff --git a/mne/layouts/magnesWH3600.lout b/mne/layouts/magnesWH3600.lout
new file mode 100755
index 0000000..577e953
--- /dev/null
+++ b/mne/layouts/magnesWH3600.lout
@@ -0,0 +1,249 @@
+  -42.19    43.52   -41.70    28.71
+001    -1.28    -5.13     4.00     3.00 MEG 001
+002    -1.22    -1.43     4.00     3.00 MEG 002
+003    -1.37     2.53     4.00     3.00 MEG 003
+004    -1.36     5.90     4.00     3.00 MEG 004
+005    -1.45     9.27     4.00     3.00 MEG 005
+006    -4.89     9.36     4.00     3.00 MEG 006
+007    -5.20     5.86     4.00     3.00 MEG 007
+008    -5.26     2.40     4.00     3.00 MEG 008
+009    -5.34    -1.29     4.00     3.00 MEG 009
+010    -5.12    -5.08     4.00     3.00 MEG 010
+011    -4.73    -8.47     4.00     3.00 MEG 011
+012    -1.31    -8.81     4.00     3.00 MEG 012
+013     2.04    -8.49     4.00     3.00 MEG 013
+014     2.54    -5.16     4.00     3.00 MEG 014
+015     2.69    -1.43     4.00     3.00 MEG 015
+016     2.62     2.56     4.00     3.00 MEG 016
+017     2.50     5.89     4.00     3.00 MEG 017
+018     2.10     9.34     4.00     3.00 MEG 018
+019    -1.45    12.55     4.00     3.00 MEG 019
+020    -5.76    12.42     4.00     3.00 MEG 020
+021    -8.30     9.98     4.00     3.00 MEG 021
+022    -9.16     5.97     4.00     3.00 MEG 022
+023    -9.32     2.49     4.00     3.00 MEG 023
+024    -9.42    -1.32     4.00     3.00 MEG 024
+025    -9.13    -5.11     4.00     3.00 MEG 025
+026    -8.43    -9.18     4.00     3.00 MEG 026
+027    -5.45   -12.10     4.00     3.00 MEG 027
+028    -1.40   -12.51     4.00     3.00 MEG 028
+029     2.64   -12.08     4.00     3.00 MEG 029
+030     5.77    -9.29     4.00     3.00 MEG 030
+031     6.50    -5.19     4.00     3.00 MEG 031
+032     6.85    -1.37     4.00     3.00 MEG 032
+033     6.70     2.65     4.00     3.00 MEG 033
+034     6.46     6.18     4.00     3.00 MEG 034
+035     5.61    10.08     4.00     3.00 MEG 035
+036     2.95    12.49     4.00     3.00 MEG 036
+037    -1.47    15.77     4.00     3.00 MEG 037
+038    -5.48    15.52     4.00     3.00 MEG 038
+039    -8.97    13.31     4.00     3.00 MEG 039
+040   -11.91    10.42     4.00     3.00 MEG 040
+041   -12.96     6.84     4.00     3.00 MEG 041
+042   -13.39     3.21     4.00     3.00 MEG 042
+043   -13.58    -0.70     4.00     3.00 MEG 043
+044   -13.08    -4.42     4.00     3.00 MEG 044
+045   -12.52    -8.05     4.00     3.00 MEG 045
+046   -11.13   -11.34     4.00     3.00 MEG 046
+047    -8.45   -14.21     4.00     3.00 MEG 047
+048    -5.08   -15.56     4.00     3.00 MEG 048
+049    -1.60   -16.17     4.00     3.00 MEG 049
+050     2.22   -15.61     4.00     3.00 MEG 050
+051     5.63   -14.28     4.00     3.00 MEG 051
+052     8.38   -11.70     4.00     3.00 MEG 052
+053     9.89    -8.24     4.00     3.00 MEG 053
+054    10.43    -4.42     4.00     3.00 MEG 054
+055    10.94    -0.62     4.00     3.00 MEG 055
+056    10.72     3.35     4.00     3.00 MEG 056
+057    10.22     7.01     4.00     3.00 MEG 057
+058     9.04    10.61     4.00     3.00 MEG 058
+059     6.20    13.42     4.00     3.00 MEG 059
+060     2.52    15.65     4.00     3.00 MEG 060
+061    -1.53    18.91     4.00     3.00 MEG 061
+062    -5.68    18.61     4.00     3.00 MEG 062
+063    -9.46    16.89     4.00     3.00 MEG 063
+064   -12.95    14.48     4.00     3.00 MEG 064
+065   -15.67    11.24     4.00     3.00 MEG 065
+066   -17.06     7.05     4.00     3.00 MEG 066
+067   -17.65     3.16     4.00     3.00 MEG 067
+068   -17.98    -1.20     4.00     3.00 MEG 068
+069   -17.13    -5.53     4.00     3.00 MEG 069
+070   -16.60    -9.33     4.00     3.00 MEG 070
+071   -14.32   -12.91     4.00     3.00 MEG 071
+072   -11.85   -15.75     4.00     3.00 MEG 072
+073    -8.78   -17.93     4.00     3.00 MEG 073
+074    -5.30   -19.40     4.00     3.00 MEG 074
+075    -1.58   -19.85     4.00     3.00 MEG 075
+076     2.41   -19.42     4.00     3.00 MEG 076
+077     5.94   -18.13     4.00     3.00 MEG 077
+078     9.16   -15.98     4.00     3.00 MEG 078
+079    11.79   -13.08     4.00     3.00 MEG 079
+080    13.62    -9.59     4.00     3.00 MEG 080
+081    14.57    -5.64     4.00     3.00 MEG 081
+082    15.42    -1.35     4.00     3.00 MEG 082
+083    15.05     3.30     4.00     3.00 MEG 083
+084    14.29     7.20     4.00     3.00 MEG 084
+085    12.81    11.43     4.00     3.00 MEG 085
+086     9.96    14.67     4.00     3.00 MEG 086
+087     6.46    17.06     4.00     3.00 MEG 087
+088     2.60    18.73     4.00     3.00 MEG 088
+089    -1.60    22.21     4.00     3.00 MEG 089
+090    -5.83    21.82     4.00     3.00 MEG 090
+091    -9.75    20.43     4.00     3.00 MEG 091
+092   -13.45    18.45     4.00     3.00 MEG 092
+093   -16.67    15.62     4.00     3.00 MEG 093
+094   -19.33    12.13     4.00     3.00 MEG 094
+095   -20.94     7.82     4.00     3.00 MEG 095
+096   -21.81     3.65     4.00     3.00 MEG 096
+097   -22.23    -1.27     4.00     3.00 MEG 097
+098   -21.14    -5.87     4.00     3.00 MEG 098
+099   -20.30    -9.97     4.00     3.00 MEG 099
+100   -18.46   -13.84     4.00     3.00 MEG 100
+101   -16.07   -17.08     4.00     3.00 MEG 101
+102   -12.88   -19.71     4.00     3.00 MEG 102
+103    -9.34   -21.89     4.00     3.00 MEG 103
+104    -5.64   -23.02     4.00     3.00 MEG 104
+105    -1.72   -23.54     4.00     3.00 MEG 105
+106     2.48   -23.24     4.00     3.00 MEG 106
+107     6.42   -22.00     4.00     3.00 MEG 107
+108     9.86   -20.19     4.00     3.00 MEG 108
+109    13.22   -17.32     4.00     3.00 MEG 109
+110    15.75   -14.15     4.00     3.00 MEG 110
+111    17.67   -10.19     4.00     3.00 MEG 111
+112    18.65    -6.08     4.00     3.00 MEG 112
+113    19.69    -1.27     4.00     3.00 MEG 113
+114    19.27     3.70     4.00     3.00 MEG 114
+115    18.30     8.05     4.00     3.00 MEG 115
+116    16.46    12.48     4.00     3.00 MEG 116
+117    13.74    15.93     4.00     3.00 MEG 117
+118    10.41    18.72     4.00     3.00 MEG 118
+119     6.64    20.69     4.00     3.00 MEG 119
+120     2.67    22.02     4.00     3.00 MEG 120
+121    -1.74    25.41     4.00     3.00 MEG 121
+122    -6.59    24.84     4.00     3.00 MEG 122
+123   -11.16    23.37     4.00     3.00 MEG 123
+124   -15.46    21.07     4.00     3.00 MEG 124
+125   -19.25    17.84     4.00     3.00 MEG 125
+126   -22.45    13.89     4.00     3.00 MEG 126
+127   -24.89     8.96     4.00     3.00 MEG 127
+128   -26.13     4.36     4.00     3.00 MEG 128
+129   -26.65    -1.22     4.00     3.00 MEG 129
+130   -25.30    -6.36     4.00     3.00 MEG 130
+131   -24.16   -11.45     4.00     3.00 MEG 131
+132   -21.98   -15.88     4.00     3.00 MEG 132
+133   -18.81   -19.82     4.00     3.00 MEG 133
+134   -15.20   -22.99     4.00     3.00 MEG 134
+135   -11.11   -25.29     4.00     3.00 MEG 135
+136    -6.51   -26.74     4.00     3.00 MEG 136
+137    -1.86   -27.28     4.00     3.00 MEG 137
+138     3.17   -26.90     4.00     3.00 MEG 138
+139     7.79   -25.55     4.00     3.00 MEG 139
+140    12.07   -23.15     4.00     3.00 MEG 140
+141    15.93   -20.09     4.00     3.00 MEG 141
+142    19.04   -16.25     4.00     3.00 MEG 142
+143    21.39   -11.67     4.00     3.00 MEG 143
+144    22.75    -6.58     4.00     3.00 MEG 144
+145    23.99    -1.23     4.00     3.00 MEG 145
+146    23.36     4.49     4.00     3.00 MEG 146
+147    22.02     9.37     4.00     3.00 MEG 147
+148    19.51    14.31     4.00     3.00 MEG 148
+149    16.20    18.23     4.00     3.00 MEG 149
+150    12.16    21.54     4.00     3.00 MEG 150
+151     7.85    23.69     4.00     3.00 MEG 151
+152     3.16    25.01     4.00     3.00 MEG 152
+153   -23.01    18.82     4.00     3.00 MEG 153
+154   -26.06    15.31     4.00     3.00 MEG 154
+155   -28.76    10.18     4.00     3.00 MEG 155
+156   -31.71     3.39     4.00     3.00 MEG 156
+157   -32.05    -2.89     4.00     3.00 MEG 157
+158   -31.42    -8.67     4.00     3.00 MEG 158
+159   -26.22   -15.24     4.00     3.00 MEG 159
+160   -23.31   -19.72     4.00     3.00 MEG 160
+161   -19.33   -23.66     4.00     3.00 MEG 161
+162   -14.75   -26.73     4.00     3.00 MEG 162
+163    -9.92   -28.91     4.00     3.00 MEG 163
+164    -4.52   -30.10     4.00     3.00 MEG 164
+165     1.25   -30.15     4.00     3.00 MEG 165
+166     6.17   -29.40     4.00     3.00 MEG 166
+167    11.43   -27.39     4.00     3.00 MEG 167
+168    16.20   -24.37     4.00     3.00 MEG 168
+169    20.37   -20.27     4.00     3.00 MEG 169
+170    23.54   -15.56     4.00     3.00 MEG 170
+171    28.66    -8.94     4.00     3.00 MEG 171
+172    29.46    -3.00     4.00     3.00 MEG 172
+173    29.04     3.51     4.00     3.00 MEG 173
+174    25.94    10.77     4.00     3.00 MEG 174
+175    23.08    15.80     4.00     3.00 MEG 175
+176    19.78    19.54     4.00     3.00 MEG 176
+177   -26.70    20.52     4.00     3.00 MEG 177
+178   -29.66    16.81     4.00     3.00 MEG 178
+179   -32.55    11.68     4.00     3.00 MEG 179
+180   -32.47   -13.23     4.00     3.00 MEG 180
+181   -27.63   -19.12     4.00     3.00 MEG 181
+182   -23.75   -23.89     4.00     3.00 MEG 182
+183   -18.94   -27.77     4.00     3.00 MEG 183
+184   -13.64   -30.59     4.00     3.00 MEG 184
+185    -7.93   -32.70     4.00     3.00 MEG 185
+186    -2.12   -33.31     4.00     3.00 MEG 186
+187     4.06   -32.74     4.00     3.00 MEG 187
+188    10.04   -31.14     4.00     3.00 MEG 188
+189    15.57   -28.41     4.00     3.00 MEG 189
+190    20.44   -24.69     4.00     3.00 MEG 190
+191    24.62   -19.81     4.00     3.00 MEG 191
+192    29.49   -13.87     4.00     3.00 MEG 192
+193    29.48    12.54     4.00     3.00 MEG 193
+194    26.49    17.54     4.00     3.00 MEG 194
+195    23.28    21.40     4.00     3.00 MEG 195
+196   -36.84     4.15     4.00     3.00 MEG 196
+197   -37.22    -3.16     4.00     3.00 MEG 197
+198   -36.14    -9.68     4.00     3.00 MEG 198
+199   -28.42   -23.63     4.00     3.00 MEG 199
+200   -23.68   -28.05     4.00     3.00 MEG 200
+201   -18.03   -31.89     4.00     3.00 MEG 201
+202   -11.97   -34.42     4.00     3.00 MEG 202
+203    -5.32   -35.88     4.00     3.00 MEG 203
+204     1.03   -36.08     4.00     3.00 MEG 204
+205     7.92   -35.00     4.00     3.00 MEG 205
+206    13.99   -32.64     4.00     3.00 MEG 206
+207    19.78   -29.06     4.00     3.00 MEG 207
+208    24.79   -24.52     4.00     3.00 MEG 208
+209    33.39   -10.13     4.00     3.00 MEG 209
+210    34.62    -3.11     4.00     3.00 MEG 210
+211    34.23     4.57     4.00     3.00 MEG 211
+212   -32.38    19.14     4.00     3.00 MEG 212
+213   -35.90    13.21     4.00     3.00 MEG 213
+214   -36.70   -14.70     4.00     3.00 MEG 214
+215   -32.93   -22.44     4.00     3.00 MEG 215
+216   -28.17   -28.07     4.00     3.00 MEG 216
+217   -22.65   -32.41     4.00     3.00 MEG 217
+218   -16.53   -35.71     4.00     3.00 MEG 218
+219    -9.52   -37.92     4.00     3.00 MEG 219
+220    -2.58   -38.82     4.00     3.00 MEG 220
+221     4.65   -38.54     4.00     3.00 MEG 221
+222    11.78   -36.65     4.00     3.00 MEG 222
+223    18.43   -33.60     4.00     3.00 MEG 223
+224    24.26   -29.21     4.00     3.00 MEG 224
+225    29.52   -23.44     4.00     3.00 MEG 225
+226    33.73   -15.36     4.00     3.00 MEG 226
+227    33.02    14.20     4.00     3.00 MEG 227
+228    29.24    19.93     4.00     3.00 MEG 228
+229   -36.80    18.24     4.00     3.00 MEG 229
+230   -40.03    12.76     4.00     3.00 MEG 230
+231   -41.35     5.03     4.00     3.00 MEG 231
+232   -41.79    -3.17     4.00     3.00 MEG 232
+233   -40.48   -10.59     4.00     3.00 MEG 233
+234   -32.92   -26.79     4.00     3.00 MEG 234
+235   -27.40   -32.12     4.00     3.00 MEG 235
+236   -20.92   -36.72     4.00     3.00 MEG 236
+237   -14.11   -39.49     4.00     3.00 MEG 237
+238    -6.76   -41.18     4.00     3.00 MEG 238
+239     1.45   -41.40     4.00     3.00 MEG 239
+240     8.96   -40.25     4.00     3.00 MEG 240
+241    16.27   -37.84     4.00     3.00 MEG 241
+242    22.75   -33.68     4.00     3.00 MEG 242
+243    29.08   -28.20     4.00     3.00 MEG 243
+244    37.59   -11.05     4.00     3.00 MEG 244
+245    39.12    -3.16     4.00     3.00 MEG 245
+246    38.59     5.47     4.00     3.00 MEG 246
+247    37.16    13.60     4.00     3.00 MEG 247
+248    33.62    18.93     4.00     3.00 MEG 248
diff --git a/mne/layouts/tests/__init__.py b/mne/layouts/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/layouts/tests/test_layout.py b/mne/layouts/tests/test_layout.py
new file mode 100644
index 0000000..d6c7598
--- /dev/null
+++ b/mne/layouts/tests/test_layout.py
@@ -0,0 +1,107 @@
+import os.path as op
+import numpy as np
+from nose.tools import assert_true
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+
+from mne.layouts import make_eeg_layout, make_grid_layout, read_layout
+from mne.fiff import Raw
+from mne.utils import _TempDir
+
+fif_fname = op.join(op.dirname(__file__), '..', '..', 'fiff',
+                    'tests', 'data', 'test_raw.fif')
+
+lout_path = op.join(op.dirname(__file__), '..', '..', 'fiff',
+                    'tests', 'data')
+
+test_info = {'ch_names': ['ICA 001', 'ICA 002', 'EOG 061'],
+ 'chs': [{'cal': 1,
+   'ch_name': 'ICA 001',
+   'coil_trans': None,
+   'coil_type': 0,
+   'coord_frame': 0,
+   'eeg_loc': None,
+   'kind': 502,
+   'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                   dtype=np.float32),
+   'logno': 1,
+   'range': 1.0,
+   'scanno': 1,
+   'unit': -1,
+   'unit_mul': 0},
+  {'cal': 1,
+   'ch_name': 'ICA 002',
+   'coil_trans': None,
+   'coil_type': 0,
+   'coord_frame': 0,
+   'eeg_loc': None,
+   'kind': 502,
+   'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                    dtype=np.float32),
+   'logno': 2,
+   'range': 1.0,
+   'scanno': 2,
+   'unit': -1,
+   'unit_mul': 0},
+  {'cal': 0.002142000012099743,
+   'ch_name': 'EOG 061',
+   'coil_trans': None,
+   'coil_type': 1,
+   'coord_frame': 0,
+   'eeg_loc': None,
+   'kind': 202,
+   'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                    dtype=np.float32),
+   'logno': 61,
+   'range': 1.0,
+   'scanno': 376,
+   'unit': 107,
+   'unit_mul': 0}],
+   'nchan': 3}
+
+tempdir = _TempDir()
+
+
+def test_io_layout_lout():
+    """Test IO with .lout files"""
+    layout = read_layout('Vectorview-all', scale=False)
+    layout.save(op.join(tempdir, 'foobar.lout'))
+    layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
+                              scale=False)
+    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
+    assert_true(layout.names == layout_read.names)
+
+
+def test_io_layout_lay():
+    """Test IO with .lay files"""
+    layout = read_layout('CTF151', scale=False)
+    layout.save(op.join(tempdir, 'foobar.lay'))
+    layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
+                              scale=False)
+    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
+    assert_true(layout.names == layout_read.names)
+
+
+def test_make_eeg_layout():
+    """ Test creation of EEG layout """
+    tmp_name = 'foo'
+    lout_name = 'test_raw'
+    lout_orig = read_layout(kind=lout_name, path=lout_path)
+    layout = make_eeg_layout(Raw(fif_fname).info)
+    layout.save(op.join(tempdir, tmp_name + '.lout'))
+    lout_new = read_layout(kind=tmp_name, path=tempdir)
+    assert_array_equal(lout_new.kind, tmp_name)
+    assert_array_equal(lout_orig.pos, lout_new.pos)
+    assert_array_equal(lout_orig.names, lout_new.names)
+
+
+def test_make_grid_layout():
+    """ Test creation of grid layout """
+    tmp_name = 'bar'
+    lout_name = 'test_ica'
+    lout_orig = read_layout(kind=lout_name, path=lout_path)
+    layout = make_grid_layout(test_info)
+    layout.save(op.join(tempdir, tmp_name + '.lout'))
+    lout_new = read_layout(kind=tmp_name, path=tempdir)
+    assert_array_equal(lout_new.kind, tmp_name)
+    assert_array_equal(lout_orig.pos, lout_new.pos)
+    assert_array_equal(lout_orig.names, lout_new.names)
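As the imports suggest, the suite relies on nose; from the repository root this file can be run on its own, for example:

    nosetests mne/layouts/tests/test_layout.py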
diff --git a/mne/minimum_norm/__init__.py b/mne/minimum_norm/__init__.py
new file mode 100644
index 0000000..fefeaab
--- /dev/null
+++ b/mne/minimum_norm/__init__.py
@@ -0,0 +1,8 @@
+"""Linear inverse solvers based on L2 Minimum Norm Estimates (MNE)"""
+
+from .inverse import read_inverse_operator, apply_inverse, \
+                     apply_inverse_raw, make_inverse_operator, \
+                     apply_inverse_epochs, write_inverse_operator, \
+                     compute_rank_inverse
+from .time_frequency import source_band_induced_power, source_induced_power, \
+                            compute_source_psd, compute_source_psd_epochs
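These functions compose into the usual workflow: read an inverse operator, choose a regularization from an assumed SNR, and apply it to evoked data. A sketch with placeholder file names ('sample-ave.fif', 'sample-inv.fif'):

    from mne.fiff import Evoked
    from mne.minimum_norm import read_inverse_operator, apply_inverse

    snr = 3.0                 # assumed SNR of the evoked data
    lambda2 = 1.0 / snr ** 2  # recommended regularization (see inverse.py)

    evoked = Evoked('sample-ave.fif', setno=0)     # placeholder name
    inv = read_inverse_operator('sample-inv.fif')  # placeholder name
    stc = apply_inverse(evoked, inv, lambda2, method='dSPM')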
diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py
new file mode 100644
index 0000000..c788399
--- /dev/null
+++ b/mne/minimum_norm/inverse.py
@@ -0,0 +1,1342 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import warnings
+from copy import deepcopy
+from math import sqrt
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..fiff.constants import FIFF
+from ..fiff.open import fiff_open
+from ..fiff.tag import find_tag
+from ..fiff.matrix import _read_named_matrix, _transpose_named_matrix, \
+                          write_named_matrix
+from ..fiff.proj import read_proj, make_projector, write_proj
+from ..fiff.tree import dir_tree_find
+from ..fiff.write import write_int, write_float_matrix, start_file, \
+                         start_block, end_block, end_file, write_float, \
+                         write_coord_trans
+
+from ..fiff.cov import read_cov, write_cov
+from ..fiff.pick import channel_type, pick_info
+from ..cov import prepare_noise_cov
+from ..forward import compute_depth_prior, read_forward_meas_info, \
+                      write_forward_meas_info, is_fixed_orient, \
+                      compute_orient_prior, _to_fixed_ori
+from ..source_space import read_source_spaces_from_tree, \
+                           find_source_space_hemi, _get_vertno, \
+                           write_source_spaces_to_fid, label_src_vertno_sel
+from ..transforms import invert_transform, transform_source_space_to
+from ..source_estimate import SourceEstimate
+from .. import verbose
+
+
+def _pick_channels_inverse_operator(ch_names, inv):
+    """Gives the indices of the data channel to be used knowing
+    an inverse operator
+    """
+    sel = []
+    for name in inv['noise_cov']['names']:
+        if name in ch_names:
+            sel.append(ch_names.index(name))
+        else:
+            raise ValueError('The inverse operator was computed with '
+                             'channel %s which is not present in '
+                             'the data. You should compute a new inverse '
+                             'operator restricted to the good data '
+                             'channels.' % name)
+    return sel
+
+
+@verbose
+def read_inverse_operator(fname, verbose=None):
+    """Read the inverse operator decomposition from a FIF file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the FIF file.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    inv : dict
+        The inverse operator.
+    """
+    #
+    #   Open the file, create directory
+    #
+    logger.info('Reading inverse operator decomposition from %s...'
+                % fname)
+    fid, tree, _ = fiff_open(fname, preload=True)
+    #
+    #   Find all inverse operators
+    #
+    invs = dir_tree_find(tree, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
+    if invs is None or len(invs) < 1:
+        fid.close()
+        raise Exception('No inverse solutions in %s' % fname)
+
+    invs = invs[0]
+    #
+    #   Parent MRI data
+    #
+    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    if len(parent_mri) == 0:
+        fid.close()
+        raise Exception('No parent MRI information in %s' % fname)
+    parent_mri = parent_mri[0]  # take only first one
+
+    logger.info('    Reading inverse operator info...')
+    #
+    #   Methods and source orientations
+    #
+    tag = find_tag(fid, invs, FIFF.FIFF_MNE_INCLUDED_METHODS)
+    if tag is None:
+        fid.close()
+        raise Exception('Modalities not found')
+
+    inv = dict()
+    inv['methods'] = int(tag.data)
+
+    tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
+    if tag is None:
+        fid.close()
+        raise Exception('Source orientation constraints not found')
+
+    inv['source_ori'] = int(tag.data)
+
+    tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+    if tag is None:
+        fid.close()
+        raise Exception('Number of sources not found')
+
+    inv['nsource'] = int(tag.data)
+    inv['nchan'] = 0
+    #
+    #   Coordinate frame
+    #
+    tag = find_tag(fid, invs, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        fid.close()
+        raise Exception('Coordinate frame tag not found')
+
+    inv['coord_frame'] = tag.data
+    #
+    #   The actual source orientation vectors
+    #
+    tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS)
+    if tag is None:
+        fid.close()
+        raise Exception('Source orientation information not found')
+
+    inv['source_nn'] = tag.data
+    logger.info('    [done]')
+    #
+    #   The SVD decomposition...
+    #
+    logger.info('    Reading inverse operator decomposition...')
+    tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SING)
+    if tag is None:
+        fid.close()
+        raise Exception('Singular values not found')
+
+    inv['sing'] = tag.data
+    inv['nchan'] = len(inv['sing'])
+    #
+    #   The eigenleads and eigenfields
+    #
+    inv['eigen_leads_weighted'] = False
+    eigen_leads = _read_named_matrix(fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS)
+    if eigen_leads is None:
+        inv['eigen_leads_weighted'] = True
+        eigen_leads = _read_named_matrix(fid, invs,
+                                         FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED)
+    if eigen_leads is None:
+        raise ValueError('Eigen leads not found in inverse operator.')
+    #
+    #   Having the eigenleads as columns is better for the inverse calculations
+    #
+    inv['eigen_leads'] = _transpose_named_matrix(eigen_leads, copy=False)
+    inv['eigen_fields'] = _read_named_matrix(fid, invs,
+                                             FIFF.FIFF_MNE_INVERSE_FIELDS)
+    logger.info('    [done]')
+    #
+    #   Read the covariance matrices
+    #
+    inv['noise_cov'] = read_cov(fid, invs, FIFF.FIFFV_MNE_NOISE_COV)
+    logger.info('    Noise covariance matrix read.')
+
+    inv['source_cov'] = read_cov(fid, invs, FIFF.FIFFV_MNE_SOURCE_COV)
+    logger.info('    Source covariance matrix read.')
+    #
+    #   Read the various priors
+    #
+    inv['orient_prior'] = read_cov(fid, invs, FIFF.FIFFV_MNE_ORIENT_PRIOR_COV)
+    if inv['orient_prior'] is not None:
+        logger.info('    Orientation priors read.')
+
+    inv['depth_prior'] = read_cov(fid, invs, FIFF.FIFFV_MNE_DEPTH_PRIOR_COV)
+    if inv['depth_prior'] is not None:
+        logger.info('    Depth priors read.')
+
+    inv['fmri_prior'] = read_cov(fid, invs, FIFF.FIFFV_MNE_FMRI_PRIOR_COV)
+    if inv['fmri_prior'] is not None:
+        logger.info('    fMRI priors read.')
+
+    #
+    #   Read the source spaces
+    #
+    inv['src'] = read_source_spaces_from_tree(fid, tree, add_geom=False)
+
+    for s in inv['src']:
+        s['id'] = find_source_space_hemi(s)
+
+    #
+    #   Get the MRI <-> head coordinate transformation
+    #
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+    if tag is None:
+        fid.close()
+        raise Exception('MRI/head coordinate transformation not found')
+    else:
+        mri_head_t = tag.data
+        if mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or \
+                        mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD:
+            mri_head_t = invert_transform(mri_head_t)
+            if mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or \
+                        mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD:
+                fid.close()
+                raise Exception('MRI/head coordinate transformation '
+                                'not found')
+
+    inv['mri_head_t'] = mri_head_t
+
+    #
+    # get parent MEG info
+    #
+    inv['info'] = read_forward_meas_info(tree, fid)
+
+    #
+    #   Transform the source spaces to the correct coordinate frame
+    #   if necessary
+    #
+    if inv['coord_frame'] != FIFF.FIFFV_COORD_MRI and \
+            inv['coord_frame'] != FIFF.FIFFV_COORD_HEAD:
+        fid.close()
+        raise Exception('Only inverse solutions computed in MRI or '
+                        'head coordinates are acceptable')
+
+    #
+    #  Number of averages is initially one
+    #
+    inv['nave'] = 1
+    #
+    #  We also need the SSP operator
+    #
+    inv['projs'] = read_proj(fid, tree)
+    #
+    #  Some empty fields to be filled in later
+    #
+    inv['proj'] = []       # This is the projector to apply to the data
+    inv['whitener'] = []   # This whitens the data
+    inv['reginv'] = []     # This is the diagonal matrix implementing
+                           # regularization and the inverse
+    inv['noisenorm'] = []  # These are the noise-normalization factors
+    #
+    nuse = 0
+    for k in range(len(inv['src'])):
+        try:
+            inv['src'][k] = transform_source_space_to(inv['src'][k],
+                                                      inv['coord_frame'],
+                                                      mri_head_t)
+        except Exception as inst:
+            fid.close()
+            raise Exception('Could not transform source space (%s)' % inst)
+
+        nuse += inv['src'][k]['nuse']
+
+    logger.info('    Source spaces transformed to the inverse solution '
+                'coordinate frame')
+    #
+    #   Done!
+    #
+    fid.close()
+
+    return inv
+
+
+@verbose
+def write_inverse_operator(fname, inv, verbose=None):
+    """Write an inverse operator to a FIF file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the FIF file.
+    inv : dict
+        The inverse operator.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    #
+    #   Open the file, create directory
+    #
+    logger.info('Write inverse operator decomposition in %s...' % fname)
+
+    # Create the file and save the essentials
+    fid = start_file(fname)
+
+    start_block(fid, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
+
+    logger.info('    Writing inverse operator info...')
+
+    write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, inv['methods'])
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, inv['source_ori'])
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, inv['nsource'])
+    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, inv['coord_frame'])
+    write_float_matrix(fid, FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS,
+                       inv['source_nn'])
+    write_float(fid, FIFF.FIFF_MNE_INVERSE_SING, inv['sing'])
+
+    #
+    #   The eigenleads and eigenfields
+    #
+    if inv['eigen_leads_weighted']:
+        write_named_matrix(fid, FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED,
+                           _transpose_named_matrix(inv['eigen_leads']))
+    else:
+        write_named_matrix(fid, FIFF.FIFF_MNE_INVERSE_LEADS,
+                           _transpose_named_matrix(inv['eigen_leads']))
+
+    write_named_matrix(fid, FIFF.FIFF_MNE_INVERSE_FIELDS, inv['eigen_fields'])
+    logger.info('    [done]')
+    #
+    #   write the covariance matrices
+    #
+    logger.info('    Writing noise covariance matrix.')
+    write_cov(fid, inv['noise_cov'])
+
+    logger.info('    Writing source covariance matrix.')
+    write_cov(fid, inv['source_cov'])
+    #
+    #   write the various priors
+    #
+    logger.info('    Writing orientation priors.')
+    if inv['orient_prior'] is not None:
+        write_cov(fid, inv['orient_prior'])
+    if inv['depth_prior'] is not None:
+        write_cov(fid, inv['depth_prior'])
+    if inv['fmri_prior'] is not None:
+        write_cov(fid, inv['fmri_prior'])
+
+    #
+    #   Parent MRI data
+    #
+    start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    #   write the MRI <-> head coordinate transformation
+    write_coord_trans(fid, inv['mri_head_t'])
+    end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+
+    #
+    #   Parent MEG measurement info
+    #
+    write_forward_meas_info(fid, inv['info'])
+
+    #
+    #   Write the source spaces
+    #
+    if 'src' in inv:
+        write_source_spaces_to_fid(fid, inv['src'])
+
+    #
+    #  We also need the SSP operator
+    #
+    write_proj(fid, inv['projs'])
+    #
+    #   Done!
+    #
+
+    end_block(fid, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
+    end_file(fid)
+
+    fid.close()
+
+
+###############################################################################
+# Compute inverse solution
+
+
+def combine_xyz(vec, square=False):
+    """Compute the three Cartesian components of a vector or matrix together
+
+    Parameters
+    ----------
+    vec : 2d array of shape [3 n x p]
+        Input [ x1 y1 z1 ... x_n y_n z_n ] where x1 ... z_n
+        can be vectors
+
+    Returns
+    -------
+    comb : array
+        Output vector [sqrt(x1^2+y1^2+z1^2), ..., sqrt(x_n^2+y_n^2+z_n^2)]
+    """
+    if vec.ndim != 2:
+        raise ValueError('Input must be 2D')
+    if (vec.shape[0] % 3) != 0:
+        raise ValueError('Input must have 3N rows')
+
+    n, p = vec.shape
+    if np.iscomplexobj(vec):
+        vec = np.abs(vec)
+    comb = vec[0::3] ** 2
+    comb += vec[1::3] ** 2
+    comb += vec[2::3] ** 2
+    if not square:
+        comb = np.sqrt(comb)
+    return comb
+
+
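A quick worked check of the norm computed above:

    import numpy as np

    vec = np.array([[1.], [2.], [2.]])  # one source: x=1, y=2, z=2
    comb = combine_xyz(vec)  # array([ 3.]) since sqrt(1 + 4 + 4) = 3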
+def _check_ch_names(inv, info):
+    """Check that channels in inverse operator are measurements"""
+
+    inv_ch_names = inv['eigen_fields']['col_names']
+
+    if inv['noise_cov']['names'] != inv_ch_names:
+        raise ValueError('Channels in inverse operator eigen fields do not '
+                         'match noise covariance channels.')
+    data_ch_names = info['ch_names']
+
+    missing_ch_names = list()
+    for ch_name in inv_ch_names:
+        if ch_name not in data_ch_names:
+            missing_ch_names.append(ch_name)
+    n_missing = len(missing_ch_names)
+    if n_missing > 0:
+        raise ValueError('%d channels in inverse operator ' % n_missing +
+                         'are not present in the data (%s)' % missing_ch_names)
+
+
+@verbose
+def prepare_inverse_operator(orig, nave, lambda2, method, verbose=None):
+    """Prepare an inverse operator for actually computing the inverse
+
+    Parameters
+    ----------
+    orig : dict
+        The inverse operator structure read from a file.
+    nave : int
+        Number of averages (scales the noise covariance).
+    lambda2 : float
+        The regularization factor. Recommended to be 1 / SNR**2.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use mininum norm, dSPM or sLORETA.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    inv : dict
+        Prepared inverse operator.
+    """
+    if nave <= 0:
+        raise ValueError('The number of averages should be positive')
+
+    logger.info('Preparing the inverse operator for use...')
+    inv = deepcopy(orig)
+    #
+    #   Scale some of the stuff
+    #
+    scale = float(inv['nave']) / nave
+    inv['noise_cov']['data'] = scale * inv['noise_cov']['data']
+    # deal with diagonal case
+    if inv['noise_cov']['data'].ndim == 1:
+        logger.info('    Diagonal noise covariance found')
+        inv['noise_cov']['eig'] = inv['noise_cov']['data']
+        inv['noise_cov']['eigvec'] = np.eye(len(inv['noise_cov']['data']))
+
+    inv['noise_cov']['eig'] = scale * inv['noise_cov']['eig']
+    inv['source_cov']['data'] = scale * inv['source_cov']['data']
+    #
+    if inv['eigen_leads_weighted']:
+        inv['eigen_leads']['data'] = sqrt(scale) * inv['eigen_leads']['data']
+
+    logger.info('    Scaled noise and source covariance from nave = %d to'
+                ' nave = %d' % (inv['nave'], nave))
+    inv['nave'] = nave
+    #
+    #   Create the diagonal matrix for computing the regularized inverse
+    #
+    sing = np.array(inv['sing'], dtype=np.float64)
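+    # Tikhonov-style regularized inverse of the singular values,
+    # s / (s ** 2 + lambda2), of the whitened, weighted lead field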
+    inv['reginv'] = sing / (sing ** 2 + lambda2)
+    logger.info('    Created the regularized inverter')
+    #
+    #   Create the projection operator
+    #
+    inv['proj'], ncomp, _ = make_projector(inv['projs'],
+                                           inv['noise_cov']['names'])
+    if ncomp > 0:
+        logger.info('    Created an SSP operator (subspace dimension = %d)'
+                    % ncomp)
+    else:
+        logger.info('    The projection vectors do not apply to these '
+                    'channels.')
+
+    #
+    #   Create the whitener
+    #
+    if not inv['noise_cov']['diag']:
+        inv['whitener'] = np.zeros((inv['noise_cov']['dim'],
+                                    inv['noise_cov']['dim']))
+        #
+        #   Omit the zeroes due to projection
+        #
+        eig = inv['noise_cov']['eig']
+        nzero = (eig > 0)
+        inv['whitener'][nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+        #
+        #   Rows of eigvec are the eigenvectors
+        #
+        inv['whitener'] = np.dot(inv['whitener'], inv['noise_cov']['eigvec'])
+        logger.info('    Created the whitener using a full noise '
+                    'covariance matrix (%d small eigenvalues omitted)'
+                    % (inv['noise_cov']['dim'] - np.sum(nzero)))
+    else:
+        #
+        #   No need to omit the zeroes due to projection
+        #
+        inv['whitener'] = np.diag(1.0 /
+                                  np.sqrt(inv['noise_cov']['data'].ravel()))
+        logger.info('    Created the whitener using a diagonal noise '
+                    'covariance matrix (%d small eigenvalues discarded)'
+                    % ncomp)
+
+    #
+    #   Finally, compute the noise-normalization factors
+    #
+    if method in ["dSPM", 'sLORETA']:
+        if method == "dSPM":
+            logger.info('    Computing noise-normalization factors '
+                        '(dSPM)...')
+            noise_weight = inv['reginv']
+        else:
+            logger.info('    Computing noise-normalization factors '
+                        '(sLORETA)...')
+            noise_weight = (inv['reginv'] *
+                            np.sqrt((1. + inv['sing'] ** 2 / lambda2)))
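+            # the extra sqrt(1 + s ** 2 / lambda2) factor is what
+            # distinguishes the sLORETA noise normalization from dSPM above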
+        noise_norm = np.zeros(inv['eigen_leads']['nrow'])
+        nrm2, = linalg.get_blas_funcs(('nrm2',), (noise_norm,))
+        if inv['eigen_leads_weighted']:
+            for k in range(inv['eigen_leads']['nrow']):
+                one = inv['eigen_leads']['data'][k, :] * noise_weight
+                noise_norm[k] = nrm2(one)
+        else:
+            for k in range(inv['eigen_leads']['nrow']):
+                one = (sqrt(inv['source_cov']['data'][k]) *
+                       inv['eigen_leads']['data'][k, :] * noise_weight)
+                noise_norm[k] = nrm2(one)
+
+        #
+        #   Compute the final result
+        #
+        if inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            #
+            #   The three-component case is a little bit more involved
+            #   The variances at three consecutive entries must be squared and
+            #   added together
+            #
+            #   Even in this case return only one noise-normalization factor
+            #   per source location
+            #
+            noise_norm = combine_xyz(noise_norm[:, None]).ravel()
+
+        inv['noisenorm'] = 1.0 / np.abs(noise_norm)
+        logger.info('[done]')
+    else:
+        inv['noisenorm'] = []
+
+    return inv
+
+
+@verbose
+def _assemble_kernel(inv, label, method, pick_normal, verbose=None):
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    eigen_leads = inv['eigen_leads']['data']
+    source_cov = inv['source_cov']['data'][:, None]
+    if method != "MNE":
+        noise_norm = inv['noisenorm'][:, None]
+
+    src = inv['src']
+    vertno = _get_vertno(src)
+
+    if label is not None:
+        vertno, src_sel = label_src_vertno_sel(label, inv['src'])
+
+        if method != "MNE":
+            noise_norm = noise_norm[src_sel]
+
+        if inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            src_sel = 3 * src_sel
+            src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2]
+            src_sel = src_sel.ravel()
+
+        eigen_leads = eigen_leads[src_sel]
+        source_cov = source_cov[src_sel]
+
+    if pick_normal:
+        if not inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            raise ValueError('Pick normal can only be used with a free '
+                             'orientation inverse operator.')
+
+        is_loose = 0 < inv['orient_prior']['data'][0] < 1
+        if not is_loose:
+            raise ValueError('The pick_normal parameter is only valid '
+                             'when working with loose orientations.')
+
+        # keep only the normal components
+        eigen_leads = eigen_leads[2::3]
+        source_cov = source_cov[2::3]
+
+    trans = inv['reginv'][:, None] * reduce(np.dot,
+                                            [inv['eigen_fields']['data'],
+                                             inv['whitener'],
+                                             inv['proj']])
+    #
+    #   Transformation into current distributions by weighting the eigenleads
+    #   with the weights computed above
+    #
+    if inv['eigen_leads_weighted']:
+        #
+        #     R^0.5 has been already factored in
+        #
+        logger.info('(eigenleads already weighted)...')
+        K = np.dot(eigen_leads, trans)
+    else:
+        #
+        #     R^0.5 has to be factored in
+        #
+        logger.info('(eigenleads need to be weighted)...')
+        K = np.sqrt(source_cov) * np.dot(eigen_leads, trans)
+
+    if method == "MNE":
+        noise_norm = None
+
+    return K, noise_norm, vertno
+
+
+def _check_method(method, dSPM):
+    if dSPM is not None:
+        warnings.warn('DEPRECATION: The dSPM parameter has been changed to '
+                      'method. Please update your code.')
+        method = dSPM
+    if method is True:
+        warnings.warn('DEPRECATION: Inverse method should now be "MNE" or '
+                      '"dSPM" or "sLORETA".')
+        method = "dSPM"
+    if method is False:
+        warnings.warn('DEPRECATION: Inverse method should now be "MNE" or '
+                      '"dSPM" or "sLORETA".')
+        method = "MNE"
+
+    if method not in ["MNE", "dSPM", "sLORETA"]:
+        raise ValueError('method parameter should be "MNE" or "dSPM" '
+                         'or "sLORETA".')
+    return method
+
+
+def _subject_from_inverse(inverse_operator):
+    """Get subject id from inverse operator"""
+    return inverse_operator['src'][0].get('subject_his_id', None)
+
+
+@verbose
+def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
+                  pick_normal=False, dSPM=None, verbose=None):
+    """Apply inverse operator to evoked data
+
+    Computes an L2-norm inverse solution. Actual code using these principles
+    might be different because the inverse operator is often reused across
+    data sets.
+
+    Parameters
+    ----------
+    evoked : Evoked object
+        Evoked data.
+    inverse_operator : dict
+        Inverse operator read with mne.read_inverse_operator.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    pick_normal : bool
+        If True, rather than pooling the orientations by taking the norm,
+        only the component normal to the cortex is kept. This is only
+        implemented when working with loose orientations.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The source estimates.
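+
+    Examples
+    --------
+    A minimal sketch; the file names are hypothetical and lambda2 = 1 / 9
+    corresponds to the conventional SNR of 3:
+
+    >>> from mne import fiff
+    >>> from mne.minimum_norm import read_inverse_operator
+    >>> evoked = fiff.Evoked('sample-ave.fif', setno=0, baseline=(None, 0))
+    >>> inv = read_inverse_operator('sample-meg-inv.fif')
+    >>> stc = apply_inverse(evoked, inv, lambda2=1. / 9., method='dSPM')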
+    """
+    method = _check_method(method, dSPM)
+    #
+    #   Set up the inverse according to the parameters
+    #
+    nave = evoked.nave
+
+    _check_ch_names(inverse_operator, evoked.info)
+
+    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(evoked.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    K, noise_norm, _ = _assemble_kernel(inv, None, method, pick_normal)
+    sol = np.dot(K, evoked.data[sel])  # apply imaging kernel
+
+    is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+                   and not pick_normal)
+
+    if is_free_ori:
+        logger.info('combining the current components...')
+        sol = combine_xyz(sol)
+
+    if noise_norm is not None:
+        logger.info('(dSPM)...')
+        sol *= noise_norm
+
+    tstep = 1.0 / evoked.info['sfreq']
+    tmin = float(evoked.first) / evoked.info['sfreq']
+    vertno = _get_vertno(inv['src'])
+    subject = _subject_from_inverse(inverse_operator)
+    stc = SourceEstimate(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                         subject=subject)
+    logger.info('[done]')
+
+    return stc
+
+
+@verbose
+def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
+                      label=None, start=None, stop=None, nave=1,
+                      time_func=None, pick_normal=False,
+                      buffer_size=None, dSPM=None, verbose=None):
+    """Apply inverse operator to Raw data
+
+    Computes an L2-norm inverse solution. Actual code using these principles
+    might be different because the inverse operator is often reused across
+    data sets.
+
+    Parameters
+    ----------
+    raw : Raw object
+        Raw data.
+    inverse_operator : dict
+        Inverse operator read with mne.read_inverse_operator.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    label : Label | None
+        Restricts the source estimates to a given label. If None,
+        source estimates will be computed for the entire source space.
+    start : int
+        Index of the first time sample (an index, not a time in seconds).
+    stop : int
+        Index of the first time sample not to include (an index, not a time
+        in seconds).
+    nave : int
+        Number of averages used to regularize the solution.
+        Set to 1 for raw data.
+    time_func : callable
+        Linear function applied to sensor space time series.
+    pick_normal : bool
+        If True, rather than pooling the orientations by taking the norm,
+        only the component normal to the cortex is kept. This is only
+        implemented when working with loose orientations.
+    buffer_size : int (or None)
+        If not None, the computation of the inverse and the combination of the
+        current components is performed in segments of length buffer_size
+        samples. While slightly slower, this is useful for long datasets as it
+        reduces the memory requirements by approx. a factor of 3 (assuming
+        buffer_size << data length).
+        Note that this setting has no effect for fixed-orientation inverse
+        operators.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The source estimates.
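+
+    Examples
+    --------
+    A minimal sketch; the file names are hypothetical. Using buffer_size
+    processes the free-orientation solution in segments to save memory:
+
+    >>> from mne import fiff
+    >>> from mne.minimum_norm import read_inverse_operator
+    >>> raw = fiff.Raw('sample-raw.fif')
+    >>> inv = read_inverse_operator('sample-meg-inv.fif')
+    >>> stc = apply_inverse_raw(raw, inv, lambda2=1. / 9., method='dSPM',
+    ...                         start=0, stop=1000, buffer_size=200)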
+    """
+    method = _check_method(method, dSPM)
+
+    _check_ch_names(inverse_operator, raw.info)
+
+    #
+    #   Set up the inverse according to the parameters
+    #
+    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(raw.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+
+    data, times = raw[sel, start:stop]
+
+    if time_func is not None:
+        data = time_func(data)
+
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_normal)
+
+    is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+                   and not pick_normal)
+
+    if buffer_size is not None and is_free_ori:
+        # Process the data in segments to conserve memory
+        n_seg = int(np.ceil(data.shape[1] / float(buffer_size)))
+        logger.info('computing inverse and combining the current '
+                    'components (using %d segments)...' % (n_seg))
+
+        # Allocate space for inverse solution
+        n_times = data.shape[1]
+        sol = np.empty((K.shape[0] / 3, n_times),
+                       dtype=(K[0, 0] * data[0, 0]).dtype)
+
+        for pos in xrange(0, n_times, buffer_size):
+            sol[:, pos:pos + buffer_size] = \
+                combine_xyz(np.dot(K, data[:, pos:pos + buffer_size]))
+
+            logger.info('segment %d / %d done..'
+                        % (pos / buffer_size + 1, n_seg))
+    else:
+        sol = np.dot(K, data)
+        if is_free_ori:
+            logger.info('combining the current components...')
+            sol = combine_xyz(sol)
+
+    if noise_norm is not None:
+        sol *= noise_norm
+
+    tmin = float(times[0])
+    tstep = 1.0 / raw.info['sfreq']
+    subject = _subject_from_inverse(inverse_operator)
+    stc = SourceEstimate(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                         subject=subject)
+    logger.info('[done]')
+
+    return stc
+
+
+def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method="dSPM",
+                              label=None, nave=1, pick_normal=False, dSPM=None,
+                              verbose=None):
+    """ see apply_inverse_epochs """
+    method = _check_method(method, dSPM)
+
+    _check_ch_names(inverse_operator, epochs.info)
+
+    #
+    #   Set up the inverse according to the parameters
+    #
+    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(epochs.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_normal)
+
+    tstep = 1.0 / epochs.info['sfreq']
+    tmin = epochs.times[0]
+
+    is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+                   and not pick_normal)
+
+    if not is_free_ori and noise_norm is not None:
+        # premultiply kernel with noise normalization
+        K *= noise_norm
+
+    subject = _subject_from_inverse(inverse_operator)
+    for k, e in enumerate(epochs):
+        logger.info('Processing epoch : %d' % (k + 1))
+        if is_free_ori:
+            # Compute solution and combine current components (non-linear)
+            sol = np.dot(K, e[sel])  # apply imaging kernel
+            logger.info('combining the current components...')
+            sol = combine_xyz(sol)
+            if noise_norm is not None:
+                sol *= noise_norm
+        else:
+            # Linear inverse: do computation here or delayed
+            if len(sel) < K.shape[0]:
+                sol = (K, e[sel])
+            else:
+                sol = np.dot(K, e[sel])
+
+        stc = SourceEstimate(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                             subject=subject)
+
+        yield stc
+
+    logger.info('[done]')
+
+
+@verbose
+def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM",
+                         label=None, nave=1, pick_normal=False, dSPM=None,
+                         return_generator=False, verbose=None):
+    """Apply inverse operator to Epochs
+
+    Computes an L2-norm inverse solution on each epoch and returns
+    single-trial source estimates.
+
+    Parameters
+    ----------
+    epochs : Epochs object
+        Single trial epochs.
+    inverse_operator : dict
+        Inverse operator read with mne.read_inverse_operator.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    label : Label | None
+        Restricts the source estimates to a given label. If None,
+        source estimates will be computed for the entire source space.
+    nave : int
+        Number of averages used to regularize the solution.
+        Set to 1 for single-epoch data by default.
+    pick_normal : bool
+        If True, rather than pooling the orientations by taking the norm,
+        only the component normal to the cortex is kept. This is only
+        implemented when working with loose orientations.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : list of SourceEstimate
+        The source estimates for all epochs.
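+
+    Examples
+    --------
+    A minimal sketch; the file name is hypothetical and ``epochs`` is
+    assumed to be an Epochs instance. The generator form avoids holding
+    all source estimates in memory at once:
+
+    >>> from mne.minimum_norm import read_inverse_operator
+    >>> inv = read_inverse_operator('sample-meg-inv.fif')
+    >>> stcs = apply_inverse_epochs(epochs, inv, lambda2=1. / 9.,
+    ...                             method='dSPM', return_generator=True)
+    >>> for stc in stcs:
+    ...     pass  # process each single-trial source estimate here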
+    """
+
+    stcs = _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2,
+                                     method=method, label=label, nave=nave,
+                                     pick_normal=pick_normal, dSPM=dSPM,
+                                     verbose=verbose)
+
+    if not return_generator:
+        # return a list
+        stcs = [stc for stc in stcs]
+
+    return stcs
+
+
+def _xyz2lf(Lf_xyz, normals):
+    """Reorient leadfield to one component matching the normal to the cortex
+
+    This function takes a lead field matrix computed for dipole components
+    pointing in the x, y, and z directions, and outputs a new lead field
+    matrix for dipole components pointing in the normal direction of the
+    cortical surfaces and in the two directions tangential to the cortex
+    (i.e., in the tangent space of the cortical surface). The two tangential
+    dipole components are uniquely determined by the SVD (reduction of
+    variance).
+
+    Parameters
+    ----------
+    Lf_xyz : array of shape [n_sensors, n_positions x 3]
+        Leadfield
+    normals : array of shape [n_positions, 3]
+        Normals to the cortex
+
+    Returns
+    -------
+    Lf_cortex : array of shape [n_sensors, n_positions x 3]
+        Lf_cortex is a leadfield matrix for dipoles in rotated orientations, so
+        that the first column is the gain vector for the cortical normal dipole
+        and the following two column vectors are the gain vectors for the
+        tangential orientations (tangent space of cortical surface).
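+
+    Examples
+    --------
+    A shape-only sketch with random data (values are illustrative only):
+
+    >>> import numpy as np
+    >>> Lf_xyz = np.random.randn(60, 30)  # 60 sensors, 10 positions x 3
+    >>> normals = np.random.randn(10, 3)
+    >>> normals /= np.sqrt((normals ** 2).sum(axis=1))[:, None]
+    >>> Lf_cortex = _xyz2lf(Lf_xyz, normals)  # same shape as Lf_xyz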
+    """
+    n_sensors, n_dipoles = Lf_xyz.shape
+    n_positions = n_dipoles / 3
+    Lf_xyz = Lf_xyz.reshape(n_sensors, n_positions, 3)
+    n_sensors, n_positions, _ = Lf_xyz.shape
+    Lf_cortex = np.zeros_like(Lf_xyz)
+
+    for k in range(n_positions):
+        lf_normal = np.dot(Lf_xyz[:, k, :], normals[k])
+        lf_normal_n = lf_normal[:, None] / linalg.norm(lf_normal)
+        P = np.eye(n_sensors, n_sensors) - np.dot(lf_normal_n, lf_normal_n.T)
+        lf_p = np.dot(P, Lf_xyz[:, k, :])
+        U, s, Vh = linalg.svd(lf_p)
+        Lf_cortex[:, k, 0] = lf_normal
+        Lf_cortex[:, k, 1:] = np.c_[U[:, 0] * s[0], U[:, 1] * s[1]]
+
+    Lf_cortex = Lf_cortex.reshape(n_sensors, n_dipoles)
+    return Lf_cortex
+
+
+###############################################################################
+# Assemble the inverse operator
+
+@verbose
+def _prepare_forward(forward, info, noise_cov, pca=False, verbose=None):
+    """Util function to prepare forward solution for inverse solvers
+    """
+    fwd_ch_names = [c['ch_name'] for c in forward['info']['chs']]
+    ch_names = [c['ch_name'] for c in info['chs']
+                if (c['ch_name'] not in info['bads']
+                    and c['ch_name'] not in noise_cov['bads'])
+                and (c['ch_name'] in fwd_ch_names
+                     and c['ch_name'] in noise_cov.ch_names)]
+
+    if not len(info['bads']) == len(noise_cov['bads']) or \
+            not all([b in noise_cov['bads'] for b in info['bads']]):
+        logger.info('info["bads"] and noise_cov["bads"] do not match, '
+                    'excluding bad channels from both')
+
+    n_chan = len(ch_names)
+    logger.info("Computing inverse operator with %d channels." % n_chan)
+
+    #
+    #   Handle noise cov
+    #
+    noise_cov = prepare_noise_cov(noise_cov, info, ch_names)
+
+    #   Omit the zeroes due to projection
+    eig = noise_cov['eig']
+    nzero = (eig > 0)
+    n_nzero = sum(nzero)
+
+    if pca:
+        #   Rows of eigvec are the eigenvectors
+        whitener = noise_cov['eigvec'][nzero] / np.sqrt(eig[nzero])[:, None]
+        logger.info('Reducing data rank to %d' % n_nzero)
+    else:
+        whitener = np.zeros((n_chan, n_chan), dtype=np.float)
+        whitener[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+        #   Rows of eigvec are the eigenvectors
+        whitener = np.dot(whitener, noise_cov['eigvec'])
+
+    gain = forward['sol']['data']
+
+    fwd_idx = [fwd_ch_names.index(name) for name in ch_names]
+    gain = gain[fwd_idx]
+    info_idx = [info['ch_names'].index(name) for name in ch_names]
+    fwd_info = pick_info(info, info_idx)
+
+    logger.info('Total rank is %d' % n_nzero)
+
+    return fwd_info, gain, noise_cov, whitener, n_nzero
+
+
+@verbose
+def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
+                          fixed=False, limit_depth_chs=True, verbose=None):
+    """Assemble inverse operator
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info to specify the channels to include.
+        Bad channels in info['bads'] are not used.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance matrix.
+    loose : None | float in [0, 1]
+        Value that weights the source variances of the dipole components
+        defining the tangent space of the cortical surfaces. Requires surface-
+        based, free orientation forward solutions.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    fixed : bool
+        Use fixed source orientations normal to the cortical mantle. If True,
+        the loose parameter is ignored.
+    limit_depth_chs : bool
+        If True, use only grad channels in depth weighting (equivalent to MNE
+        C code). If grad channels aren't present, only mag channels will be
+        used (if no mag, then eeg). If False, use all channels.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    inv : dict
+        Inverse operator.
+
+    Notes
+    -----
+    For different sets of options (**loose**, **depth**, **fixed**) to work,
+    the forward operator must have been loaded using a certain configuration
+    (i.e., with **force_fixed** and **surf_ori** set appropriately). For
+    example, given the desired inverse type, the table below lists the
+    allowed forward parameters (with the representative choices of
+    **loose** = 0.2 and **depth** = 0.8, the defaults for those parameters):
+
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | Inverse desired                             | Forward parameters allowed                 |
+        +=====================+===========+===========+===========+=================+==============+
+        |                     | **loose** | **depth** | **fixed** | **force_fixed** | **surf_ori** |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Loose constraint, | 0.2       | 0.8       | False     | False           | True         |
+        | | Depth weighted    |           |           |           |                 |              |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Loose constraint  | 0.2       | None      | False     | False           | True         |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Free orientation, | None      | 0.8       | False     | False           | True         |
+        | | Depth weighted    |           |           |           |                 |              |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Free orientation  | None      | None      | False     | False           | True | False |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Fixed constraint, | None      | 0.8       | True      | False           | True         |
+        | | Depth weighted    |           |           |           |                 |              |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Fixed constraint  | None      | None      | True      | True            | False        |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+
+    Also note that, if the source space (as stored in the forward solution)
+    has patch statistics computed, these are used to improve the depth
+    weighting. Thus slightly different results are to be expected with
+    and without this information.
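+
+    Examples
+    --------
+    A minimal sketch for the default loose, depth-weighted inverse; the
+    file names are hypothetical, ``evoked`` is assumed to be loaded, and
+    the forward solution must be read with surf_ori=True (see the table
+    above):
+
+    >>> from mne import read_forward_solution, read_cov
+    >>> fwd = read_forward_solution('sample-fwd.fif', surf_ori=True)
+    >>> cov = read_cov('sample-cov.fif')
+    >>> inv = make_inverse_operator(evoked.info, fwd, cov,
+    ...                             loose=0.2, depth=0.8)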
+    """
+    is_fixed_ori = is_fixed_orient(forward)
+
+    if fixed and loose is not None:
+        warnings.warn("When invoking make_inverse_operator with fixed=True, "
+                      "the loose parameter is ignored.")
+        loose = None
+
+    if is_fixed_ori and not fixed:
+        raise ValueError('Forward operator has fixed orientation and can only '
+                         'be used to make a fixed-orientation inverse '
+                         'operator.')
+    if fixed:
+        if depth is not None:
+            if is_fixed_ori or not forward['surf_ori']:
+                raise ValueError('For a fixed orientation inverse solution '
+                                 'with depth weighting, the forward solution '
+                                 'must be free-orientation and in surface '
+                                 'orientation')
+        elif forward['surf_ori'] is True:
+            raise ValueError('For a fixed orientation inverse solution '
+                             'without depth weighting, the forward solution '
+                             'must not be in surface orientation')
+
+    # depth=None can use fixed fwd, depth=0<x<1 must use free ori
+    if depth is not None:
+        if not (0 < depth <= 1):
+            raise ValueError('depth should be a scalar between 0 and 1')
+        if is_fixed_ori or not forward['surf_ori']:
+            raise ValueError('You need a free-orientation, surface-oriented '
+                             'forward solution to do depth weighting even '
+                             'when calculating a fixed-orientation inverse.')
+
+    if loose is not None:
+        if not (0 <= loose <= 1):
+            raise ValueError('loose value should be between 0 and 1, '
+                             'or None for non-loose orientations.')
+        if loose < 1 and not forward['surf_ori']:
+            raise ValueError('Forward operator is not oriented in surface '
+                             'coordinates. A loose inverse operator requires '
+                             'a surface-based, free orientation forward '
+                             'operator.')
+
+    #
+    # 1. Read the bad channels
+    # 2. Read the necessary data from the forward solution matrix file
+    # 3. Load the projection data
+    # 4. Load the sensor noise covariance matrix and attach it to the forward
+    #
+
+    gain_info, gain, noise_cov, whitener, n_nzero = \
+        _prepare_forward(forward, info, noise_cov)
+
+    #
+    # 5. Compose the depth-weighting matrix
+    #
+
+    if depth is not None:
+        patch_areas = forward.get('patch_areas', None)
+        depth_prior = compute_depth_prior(gain, gain_info, is_fixed_ori,
+                                          exp=depth, patch_areas=patch_areas,
+                                          limit_depth_chs=limit_depth_chs)
+    else:
+        depth_prior = np.ones(gain.shape[1], dtype=gain.dtype)
+
+    # Deal with fixed orientation forward / inverse
+    if fixed:
+        if depth is not None:
+            # Convert the depth prior into a fixed-orientation one
+            logger.info('    Picked elements from a free-orientation '
+                        'depth-weighting prior into the fixed-orientation one')
+        if not is_fixed_ori:
+            # Convert to the fixed orientation forward solution now
+            depth_prior = depth_prior[2::3]
+            forward = deepcopy(forward)
+            _to_fixed_ori(forward)
+            is_fixed_ori = is_fixed_orient(forward)
+            gain_info, gain, noise_cov, whitener, n_nzero = \
+                _prepare_forward(forward, info, noise_cov, verbose=False)
+
+    logger.info("Computing inverse operator with %d channels."
+                % len(gain_info['ch_names']))
+
+    #
+    # 6. Compose the source covariance matrix
+    #
+
+    logger.info('Creating the source covariance matrix')
+    source_cov = depth_prior.copy()
+    depth_prior = dict(data=depth_prior, kind=FIFF.FIFFV_MNE_DEPTH_PRIOR_COV,
+                       bads=[], diag=True, names=[], eig=None,
+                       eigvec=None, dim=depth_prior.size, nfree=1,
+                       projs=[])
+
+    # apply loose orientations
+    if not is_fixed_ori:
+        orient_prior = compute_orient_prior(forward, loose=loose)
+        source_cov *= orient_prior
+        orient_prior = dict(data=orient_prior,
+                            kind=FIFF.FIFFV_MNE_ORIENT_PRIOR_COV,
+                            bads=[], diag=True, names=[], eig=None,
+                            eigvec=None, dim=orient_prior.size, nfree=1,
+                            projs=[])
+    else:
+        orient_prior = None
+
+    # 7. Apply fMRI weighting (not done)
+
+    #
+    # 8. Apply the linear projection to the forward solution
+    # 9. Apply whitening to the forward computation matrix
+    #
+    logger.info('Whitening the forward solution.')
+    gain = np.dot(whitener, gain)
+
+    # 10. Exclude the source space points within the labels (not done)
+
+    #
+    # 11. Do appropriate source weighting to the forward computation matrix
+    #
+
+    # Adjusting Source Covariance matrix to make trace of G*R*G' equal
+    # to number of sensors.
+    logger.info('Adjusting source covariance matrix.')
+    source_std = np.sqrt(source_cov)
+    gain *= source_std[None, :]
+    trace_GRGT = linalg.norm(gain, ord='fro') ** 2
+    scaling_source_cov = n_nzero / trace_GRGT
+    source_cov *= scaling_source_cov
+    gain *= sqrt(scaling_source_cov)
+
+    source_cov = dict(data=source_cov, dim=source_cov.size,
+                      kind=FIFF.FIFFV_MNE_SOURCE_COV, diag=True,
+                      names=[], projs=[], eig=None, eigvec=None,
+                      nfree=1, bads=[])
+
+    # now np.trace(np.dot(gain, gain.T)) == n_nzero
+    # logger.info(np.trace(np.dot(gain, gain.T)), n_nzero)
+
+    #
+    # 12. Decompose the combined matrix
+    #
+
+    logger.info('Computing SVD of whitened and weighted lead field '
+                'matrix.')
+    eigen_fields, sing, eigen_leads = linalg.svd(gain, full_matrices=False)
+    logger.info('    largest singular value = %g' % np.max(sing))
+    logger.info('    scaling factor to adjust the trace = %g' % trace_GRGT)
+
+    eigen_fields = dict(data=eigen_fields.T, col_names=gain_info['ch_names'],
+                        row_names=[], nrow=eigen_fields.shape[1],
+                        ncol=eigen_fields.shape[0])
+    eigen_leads = dict(data=eigen_leads.T, nrow=eigen_leads.shape[1],
+                       ncol=eigen_leads.shape[0], row_names=[],
+                       col_names=[])
+    nave = 1.0
+
+    # Handle methods
+    has_meg = False
+    has_eeg = False
+    ch_idx = [k for k, c in enumerate(info['chs'])
+              if c['ch_name'] in gain_info['ch_names']]
+    for idx in ch_idx:
+        ch_type = channel_type(info, idx)
+        if ch_type == 'eeg':
+            has_eeg = True
+        if (ch_type == 'mag') or (ch_type == 'grad'):
+            has_meg = True
+    if has_eeg and has_meg:
+        methods = FIFF.FIFFV_MNE_MEG_EEG
+    elif has_meg:
+        methods = FIFF.FIFFV_MNE_MEG
+    else:
+        methods = FIFF.FIFFV_MNE_EEG
+
+    # We set this for consistency with inverses written by the MNE C code
+    if depth is None:
+        depth_prior = None
+    inv_op = dict(eigen_fields=eigen_fields, eigen_leads=eigen_leads,
+                  sing=sing, nave=nave, depth_prior=depth_prior,
+                  source_cov=source_cov, noise_cov=noise_cov,
+                  orient_prior=orient_prior, projs=deepcopy(info['projs']),
+                  eigen_leads_weighted=False, source_ori=forward['source_ori'],
+                  mri_head_t=deepcopy(forward['mri_head_t']),
+                  methods=methods, nsource=forward['nsource'],
+                  coord_frame=forward['coord_frame'],
+                  source_nn=forward['source_nn'].copy(),
+                  src=deepcopy(forward['src']), fmri_prior=None)
+    inv_info = deepcopy(forward['info'])
+    inv_info['bads'] = deepcopy(info['bads'])
+    inv_op['info'] = inv_info
+
+    return inv_op
+
+
+def compute_rank_inverse(inv):
+    """Compute the rank of a linear inverse operator (MNE, dSPM, etc.)
+
+    Parameters
+    ----------
+    inv : dict
+        The inverse operator.
+
+    Returns
+    -------
+    rank : int
+        The rank of the inverse operator.
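+
+    Examples
+    --------
+    A minimal sketch; the file name is hypothetical. For 306 MEG channels
+    with 4 SSP vectors applied, the expected rank is 306 - 4 = 302:
+
+    >>> from mne.minimum_norm import read_inverse_operator
+    >>> inv = read_inverse_operator('sample-meg-inv.fif')
+    >>> rank = compute_rank_inverse(inv)  # e.g. 302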
+    """
+    # this code shortened from prepare_inverse_operator
+    eig = inv['noise_cov']['eig']
+    if not inv['noise_cov']['diag']:
+        rank = np.sum(eig > 0)
+    else:
+        ncomp = make_projector(inv['projs'], inv['noise_cov']['names'])[1]
+        rank = inv['noise_cov']['dim'] - ncomp
+    return rank
diff --git a/mne/minimum_norm/tests/__init__.py b/mne/minimum_norm/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py
new file mode 100644
index 0000000..7351399
--- /dev/null
+++ b/mne/minimum_norm/tests/test_inverse.py
@@ -0,0 +1,398 @@
+import os.path as op
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_equal
+from scipy import sparse
+from nose.tools import assert_true, assert_raises
+import copy
+
+from mne.datasets import sample
+from mne.label import read_label, label_sign_flip
+from mne.event import read_events
+from mne.epochs import Epochs
+from mne.source_estimate import read_source_estimate
+from mne import fiff, read_cov, read_forward_solution
+from mne.minimum_norm.inverse import apply_inverse, read_inverse_operator, \
+    apply_inverse_raw, apply_inverse_epochs, make_inverse_operator, \
+    write_inverse_operator, compute_rank_inverse
+from mne.utils import _TempDir
+
+s_path = op.join(sample.data_path(), 'MEG', 'sample')
+fname_inv = op.join(s_path, 'sample_audvis-meg-oct-6-meg-inv.fif')
+fname_inv_fixed = op.join(s_path, 'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
+fname_inv_nodepth = op.join(s_path,
+                           'sample_audvis-meg-oct-6-meg-nodepth-fixed-inv.fif')
+fname_inv_diag = op.join(s_path,
+                         'sample_audvis-meg-oct-6-meg-diagnoise-inv.fif')
+fname_vol_inv = op.join(s_path, 'sample_audvis-meg-vol-7-meg-inv.fif')
+fname_data = op.join(s_path, 'sample_audvis-ave.fif')
+fname_cov = op.join(s_path, 'sample_audvis-cov.fif')
+fname_fwd = op.join(s_path, 'sample_audvis-meg-oct-6-fwd.fif')
+fname_raw = op.join(s_path, 'sample_audvis_filt-0-40_raw.fif')
+fname_event = op.join(s_path, 'sample_audvis_filt-0-40_raw-eve.fif')
+fname_label = op.join(s_path, 'labels', '%s.label')
+
+inverse_operator = read_inverse_operator(fname_inv)
+label_lh = read_label(fname_label % 'Aud-lh')
+label_rh = read_label(fname_label % 'Aud-rh')
+noise_cov = read_cov(fname_cov)
+raw = fiff.Raw(fname_raw)
+evoked = fiff.Evoked(fname_data, setno=0, baseline=(None, 0))
+evoked.crop(0, 0.2)
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+
+tempdir = _TempDir()
+last_keys = [None] * 10
+
+
+def _compare(a, b):
+    global last_keys
+    skip_types = ['whitener', 'proj', 'reginv', 'noisenorm', 'nchan',
+                  'command_line', 'working_dir', 'mri_file', 'mri_id']
+    try:
+        if isinstance(a, dict):
+            assert_true(isinstance(b, dict))
+            for k, v in a.iteritems():
+                if k not in b and k not in skip_types:
+                    raise ValueError('First one had a key the second one '
+                                     'didn\'t:\n%s not in %s' % (k, b.keys()))
+                if k not in skip_types:
+                    last_keys.pop()
+                    last_keys = [k] + last_keys
+                    _compare(v, b[k])
+            for k, v in b.iteritems():
+                if k not in a and k not in skip_types:
+                    raise ValueError('Second one had a key the first one '
+                                     'didn\'t:\n%s not in %s' % (k, a.keys()))
+        elif isinstance(a, list):
+            assert_true(len(a) == len(b))
+            for i, j in zip(a, b):
+                _compare(i, j)
+        elif isinstance(a, sparse.csr.csr_matrix):
+            assert_array_almost_equal(a.data, b.data)
+            assert_equal(a.indices, b.indices)
+            assert_equal(a.indptr, b.indptr)
+        elif isinstance(a, np.ndarray):
+            assert_array_almost_equal(a, b)
+        else:
+            assert_true(a == b)
+    except Exception as exptn:
+        print last_keys
+        raise exptn
+
+
+def _compare_inverses_approx(inv_1, inv_2, evoked, stc_decimals,
+                             check_depth=True):
+    # depth prior
+    if check_depth:
+        if inv_1['depth_prior'] is not None:
+            assert_array_almost_equal(inv_1['depth_prior']['data'],
+                                      inv_2['depth_prior']['data'])
+        else:
+            assert_true(inv_2['depth_prior'] is None)
+    # orient prior
+    if inv_1['orient_prior'] is not None:
+        assert_array_almost_equal(inv_1['orient_prior']['data'],
+                                  inv_2['orient_prior']['data'])
+    else:
+        assert_true(inv_2['orient_prior'] is None)
+    # source cov
+    assert_array_almost_equal(inv_1['source_cov']['data'],
+                              inv_2['source_cov']['data'])
+
+    # These are not as close as we'd like XXX
+    assert_array_almost_equal(np.abs(inv_1['eigen_fields']['data']),
+                              np.abs(inv_2['eigen_fields']['data']), 0)
+    assert_array_almost_equal(np.abs(inv_1['eigen_leads']['data']),
+                              np.abs(inv_2['eigen_leads']['data']), 0)
+
+    stc_1 = apply_inverse(evoked, inv_1, lambda2, "dSPM")
+    stc_2 = apply_inverse(evoked, inv_2, lambda2, "dSPM")
+
+    assert_true(stc_1.subject == stc_2.subject)
+    assert_equal(stc_1.times, stc_2.times)
+    assert_array_almost_equal(stc_1.data, stc_2.data, stc_decimals)
+
+
+def _compare_io(inv_op, out_file_ext='.fif'):
+    if out_file_ext == '.fif':
+        out_file = op.join(tempdir, 'test-inv.fif')
+    elif out_file_ext == '.gz':
+        out_file = op.join(tempdir, 'test-inv.fif.gz')
+    else:
+        raise ValueError('IO test could not complete')
+    # Test io operations
+    inv_init = copy.deepcopy(inv_op)
+    write_inverse_operator(out_file, inv_op)
+    read_inv_op = read_inverse_operator(out_file)
+    _compare(inv_init, read_inv_op)
+    _compare(inv_init, inv_op)
+
+
+def test_apply_inverse_operator():
+    """Test MNE inverse computation (precomputed and non-precomputed)
+    """
+
+    # Test old version of inverse computation starting from forward operator
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
+                                      loose=0.2, depth=0.8,
+                                      limit_depth_chs=False)
+    _compare_io(my_inv_op)
+    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 2,
+                             check_depth=False)
+    # Inverse has 306 channels - 4 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator) == 302)
+
+    # Test MNE inverse computation starting from forward operator
+    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
+                                      loose=0.2, depth=0.8)
+    _compare_io(my_inv_op)
+    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 2)
+    # Inverse has 306 channels - 4 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator) == 302)
+
+    stc = apply_inverse(evoked, inverse_operator, lambda2, "MNE")
+    assert_true(stc.subject == 'sample')
+    assert_true(stc.data.min() > 0)
+    assert_true(stc.data.max() < 10e-10)
+    assert_true(stc.data.mean() > 1e-11)
+
+    stc = apply_inverse(evoked, inverse_operator, lambda2, "sLORETA")
+    assert_true(stc.subject == 'sample')
+    assert_true(stc.data.min() > 0)
+    assert_true(stc.data.max() < 10.0)
+    assert_true(stc.data.mean() > 0.1)
+
+    stc = apply_inverse(evoked, inverse_operator, lambda2, "dSPM")
+    assert_true(stc.subject == 'sample')
+    assert_true(stc.data.min() > 0)
+    assert_true(stc.data.max() < 35)
+    assert_true(stc.data.mean() > 0.1)
+
+    my_stc = apply_inverse(evoked, my_inv_op, lambda2, "dSPM")
+
+    assert_true('dev_head_t' in my_inv_op['info'])
+    assert_true('mri_head_t' in my_inv_op)
+
+    assert_true(my_stc.subject == 'sample')
+    assert_equal(stc.times, my_stc.times)
+    assert_array_almost_equal(stc.data, my_stc.data, 2)
+
+
+def test_make_inverse_operator_fixed():
+    """Test MNE inverse computation (fixed orientation)
+    """
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    fwd_1 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=False)
+    fwd_2 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=True)
+
+    # can't make depth-weighted fixed inv without surf ori fwd
+    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_1,
+                  noise_cov, depth=0.8, loose=None, fixed=True)
+    # can't make fixed inv with depth weighting without free ori fwd
+    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_2,
+                  noise_cov, depth=0.8, loose=None, fixed=True)
+    # can't make non-depth-weighted fixed inv with surf_ori fwd
+    # (otherwise the average normal could be employed)
+    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_op,
+                  noise_cov, depth=None, loose=None, fixed=True)
+
+    # compare to C solution w/fixed
+    inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=0.8,
+                                   loose=None, fixed=True)
+    _compare_io(inv_op)
+    inverse_operator_fixed = read_inverse_operator(fname_inv_fixed)
+    _compare_inverses_approx(inverse_operator_fixed, inv_op, evoked, 2)
+    # Inverse has 306 channels - 4 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator_fixed) == 302)
+
+    # now compare to C solution
+    # note that the forward solution must not be surface-oriented
+    # to get equivalency (surf_ori=True changes the normals)
+    inv_op = make_inverse_operator(evoked.info, fwd_2, noise_cov, depth=None,
+                                   loose=None, fixed=True)
+    inverse_operator_nodepth = read_inverse_operator(fname_inv_nodepth)
+    _compare_inverses_approx(inverse_operator_nodepth, inv_op, evoked, 2)
+    # Inverse has 306 channels - 4 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator_fixed) == 302)
+
+
+def test_make_inverse_operator_free():
+    """Test MNE inverse computation (free orientation)
+    """
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    fwd_1 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=False)
+    fwd_2 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=True)
+
+    # can't make free inv with fixed fwd
+    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_2,
+                  noise_cov, depth=None)
+
+    # for free ori inv, loose=None and loose=1 should be equivalent
+    inv_1 = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=None)
+    inv_2 = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=1)
+    _compare_inverses_approx(inv_1, inv_2, evoked, 2)
+
+    # for depth=None, surf_ori of the fwd should not matter
+    inv_3 = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=None,
+                                  loose=None)
+    inv_4 = make_inverse_operator(evoked.info, fwd_1, noise_cov, depth=None,
+                                  loose=None)
+    _compare_inverses_approx(inv_3, inv_4, evoked, 2)
+
+
+def test_make_inverse_operator_diag():
+    """Test MNE inverse computation with diagonal noise cov
+    """
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov.as_diag(),
+                                   loose=0.2, depth=0.8)
+    _compare_io(inv_op)
+    inverse_operator_diag = read_inverse_operator(fname_inv_diag)
+    # This one's only good to zero decimal places, roundoff error (?)
+    _compare_inverses_approx(inverse_operator_diag, inv_op, evoked, 0)
+    # Inverse has 306 channels - 4 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator_diag) == 302)
+
+
+def test_inverse_operator_volume():
+    """Test MNE inverse computation on volume source space
+    """
+    inverse_operator_vol = read_inverse_operator(fname_vol_inv)
+    _compare_io(inverse_operator_vol)
+    stc = apply_inverse(evoked, inverse_operator_vol, lambda2, "dSPM")
+    # volume inverses don't have associated subject IDs
+    assert_true(stc.subject is None)
+    stc.save(op.join(tempdir, 'tmp-vl.stc'))
+    stc2 = read_source_estimate(op.join(tempdir, 'tmp-vl.stc'))
+    assert_true(np.all(stc.data > 0))
+    assert_true(np.all(stc.data < 35))
+    assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.times, stc2.times)
+
+
+def test_io_inverse_operator():
+    """Test IO of inverse_operator with GZip
+    """
+    # just do one example for .gz, as it should generalize
+    _compare_io(inverse_operator, '.gz')
+
+
+def test_apply_mne_inverse_raw():
+    """Test MNE with precomputed inverse operator on Raw
+    """
+    start = 3
+    stop = 10
+    _, times = raw[0, start:stop]
+    for pick_normal in [False, True]:
+        stc = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
+                                label=label_lh, start=start, stop=stop, nave=1,
+                                pick_normal=pick_normal, buffer_size=None)
+
+        stc2 = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
+                                 label=label_lh, start=start, stop=stop,
+                                 nave=1, pick_normal=pick_normal,
+                                 buffer_size=3)
+
+        if not pick_normal:
+            assert_true(np.all(stc.data > 0))
+            assert_true(np.all(stc2.data > 0))
+
+        assert_true(stc.subject == 'sample')
+        assert_true(stc2.subject == 'sample')
+        assert_array_almost_equal(stc.times, times)
+        assert_array_almost_equal(stc2.times, times)
+        assert_array_almost_equal(stc.data, stc2.data)
+
+
+def test_apply_mne_inverse_fixed_raw():
+    """Test MNE with fixed-orientation inverse operator on Raw
+    """
+    start = 3
+    stop = 10
+    _, times = raw[0, start:stop]
+
+    # create a fixed-orientation inverse operator
+    fwd = read_forward_solution(fname_fwd, force_fixed=False, surf_ori=True)
+    inv_op = make_inverse_operator(raw.info, fwd, noise_cov,
+                                   loose=None, depth=0.8, fixed=True)
+
+    stc = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
+                            label=label_lh, start=start, stop=stop, nave=1,
+                            pick_normal=False, buffer_size=None)
+
+    stc2 = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
+                             label=label_lh, start=start, stop=stop, nave=1,
+                             pick_normal=False, buffer_size=3)
+
+    assert_true(stc.subject == 'sample')
+    assert_true(stc2.subject == 'sample')
+    assert_array_almost_equal(stc.times, times)
+    assert_array_almost_equal(stc2.times, times)
+    assert_array_almost_equal(stc.data, stc2.data)
+
+
+def test_apply_mne_inverse_epochs():
+    """Test MNE with precomputed inverse operator on Epochs
+    """
+    event_id, tmin, tmax = 1, -0.2, 0.5
+
+    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
+                            ecg=True, eog=True, include=['STI 014'],
+                            exclude='bads')
+    reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+    flat = dict(grad=1e-15, mag=1e-15)
+
+    events = read_events(fname_event)[:15]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject, flat=flat)
+    stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                label=label_lh, pick_normal=True)
+
+    assert_true(len(stcs) == 4)
+    assert_true(3 < stcs[0].data.max() < 10)
+    assert_true(stcs[0].subject == 'sample')
+
+    data = sum(stc.data for stc in stcs) / len(stcs)
+    flip = label_sign_flip(label_lh, inverse_operator['src'])
+
+    label_mean = np.mean(data, axis=0)
+    label_mean_flip = np.mean(flip[:, np.newaxis] * data, axis=0)
+
+    assert_true(label_mean.max() < label_mean_flip.max())
+
+    # test extracting a BiHemiLabel
+    stcs_rh = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                   label=label_rh, pick_normal=True)
+    stcs_bh = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                   label=label_lh + label_rh, pick_normal=True)
+
+    n_lh = len(stcs[0].data)
+    assert_array_almost_equal(stcs[0].data, stcs_bh[0].data[:n_lh])
+    assert_array_almost_equal(stcs_rh[0].data, stcs_bh[0].data[n_lh:])
+
+    # test without using a label (so delayed computation is used)
+    stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                pick_normal=True)
+    assert_true(stcs[0].subject == 'sample')
+    label_stc = stcs[0].in_label(label_rh)
+    assert_true(label_stc.subject == 'sample')
+    assert_array_almost_equal(stcs_rh[0].data, label_stc.data)
+
+
+def test_make_inverse_operator_bads():
+    """Test MNE inverse computation given a mismatch of bad channels
+    """
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+
+    # test bads
+    bad = evoked.info['bads'].pop()
+    inv_ = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=None)
+    union_good = set(noise_cov['names']) & set(evoked.ch_names)
+    union_bads = set(noise_cov['bads']) & set(evoked.info['bads'])
+    evoked.info['bads'].append(bad)
+
+    assert_true(len(set(inv_['info']['ch_names']) - union_good) == 0)
+
+    assert_true(len(set(inv_['info']['bads']) - union_bads) == 0)
diff --git a/mne/minimum_norm/tests/test_time_frequency.py b/mne/minimum_norm/tests/test_time_frequency.py
new file mode 100644
index 0000000..1136c0f
--- /dev/null
+++ b/mne/minimum_norm/tests/test_time_frequency.py
@@ -0,0 +1,157 @@
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne.datasets import sample
+from mne import fiff, find_events, Epochs
+from mne.label import read_label
+from mne.minimum_norm.inverse import read_inverse_operator, \
+                                     apply_inverse_epochs
+from mne.minimum_norm.time_frequency import source_band_induced_power, \
+                            source_induced_power, compute_source_psd, \
+                            compute_source_psd_epochs
+
+
+from mne.time_frequency import multitaper_psd
+
+data_path = sample.data_path()
+fname_inv = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-meg-oct-6-meg-inv.fif')
+fname_data = op.join(data_path, 'MEG', 'sample',
+                     'sample_audvis_raw.fif')
+fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label')
+
+
+def test_tfr_with_inverse_operator():
+    """Test time freq with MNE inverse computation"""
+
+    tmin, tmax, event_id = -0.2, 0.5, 1
+
+    # Setup for reading the raw data
+    raw = fiff.Raw(fname_data)
+    events = find_events(raw, stim_channel='STI 014')
+    inverse_operator = read_inverse_operator(fname_inv)
+
+    raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG gradiometers
+    picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+                            stim=False, exclude='bads')
+
+    # Load condition 1
+    event_id = 1
+    events3 = events[:3]  # take 3 events to keep the computation time low
+    epochs = Epochs(raw, events3, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True)
+
+    # Compute a source estimate per frequency band
+    bands = dict(alpha=[10, 10])
+    label = read_label(fname_label)
+
+    stcs = source_band_induced_power(epochs, inverse_operator, bands,
+                                     n_cycles=2, use_fft=False, pca=True,
+                                     label=label)
+
+    stc = stcs['alpha']
+    assert_true(len(stcs) == len(bands.keys()))
+    assert_true(np.all(stc.data > 0))
+    assert_array_almost_equal(stc.times, epochs.times)
+
+    stcs_no_pca = source_band_induced_power(epochs, inverse_operator, bands,
+                                            n_cycles=2, use_fft=False,
+                                            pca=False, label=label)
+
+    assert_array_almost_equal(stcs['alpha'].data, stcs_no_pca['alpha'].data)
+
+    # Compute a source estimate per frequency band
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True)
+
+    frequencies = np.arange(7, 30, 2)  # define frequencies of interest
+    power, phase_lock = source_induced_power(epochs, inverse_operator,
+                            frequencies, label, baseline=(-0.1, 0),
+                            baseline_mode='percent', n_cycles=2, n_jobs=1)
+    assert_true(np.all(phase_lock > 0))
+    assert_true(np.all(phase_lock <= 1))
+    assert_true(np.max(power) > 10)
+
+
+def test_source_psd():
+    """Test source PSD computation in label"""
+    raw = fiff.Raw(fname_data)
+    inverse_operator = read_inverse_operator(fname_inv)
+    label = read_label(fname_label)
+    tmin, tmax = 0, 20  # seconds
+    fmin, fmax = 55, 65  # Hz
+    NFFT = 2048
+    stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9.,
+                             method="dSPM", tmin=tmin, tmax=tmax,
+                             fmin=fmin, fmax=fmax, pick_normal=True,
+                             NFFT=NFFT, label=label, overlap=0.1)
+    assert_true(stc.times[0] >= fmin * 1e-3)
+    assert_true(stc.times[-1] <= fmax * 1e-3)
+    # Time max at line frequency (60 Hz in US)
+    assert_true(59e-3 <= stc.times[np.argmax(np.sum(stc.data, axis=0))]
+                      <= 61e-3)
+
+
+def test_source_psd_epochs():
+    """Test multi-taper source PSD computation in label from epochs"""
+
+    raw = fiff.Raw(fname_data)
+    inverse_operator = read_inverse_operator(fname_inv)
+    label = read_label(fname_label)
+
+    event_id, tmin, tmax = 1, -0.2, 0.5
+    lambda2, method = 1. / 9., 'dSPM'
+    bandwidth = 8.
+    fmin, fmax = 0, 100
+
+    picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
+                            ecg=True, eog=True, include=['STI 014'],
+                            exclude='bads')
+    reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+
+    events = find_events(raw, stim_channel='STI 014')
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject)
+
+    # only look at one epoch
+    epochs.drop_bad_epochs()
+    one_epochs = epochs[:1]
+
+    # return list
+    stc_psd = compute_source_psd_epochs(one_epochs, inverse_operator,
+                                        lambda2=lambda2, method=method,
+                                        pick_normal=True, label=label,
+                                        bandwidth=bandwidth,
+                                        fmin=fmin, fmax=fmax)[0]
+
+    # return generator
+    stcs = compute_source_psd_epochs(one_epochs, inverse_operator,
+                                     lambda2=lambda2, method=method,
+                                     pick_normal=True, label=label,
+                                     bandwidth=bandwidth,
+                                     fmin=fmin, fmax=fmax,
+                                     return_generator=True)
+
+    for stc in stcs:
+        stc_psd_gen = stc
+
+    assert_array_almost_equal(stc_psd.data, stc_psd_gen.data)
+
+    # compare with direct computation
+    stc = apply_inverse_epochs(one_epochs, inverse_operator,
+                               lambda2=lambda2, method=method,
+                               pick_normal=True, label=label)[0]
+
+    sfreq = epochs.info['sfreq']
+    psd, freqs = multitaper_psd(stc.data, sfreq=sfreq, bandwidth=bandwidth,
+                                fmin=fmin, fmax=fmax)
+
+    assert_array_almost_equal(psd, stc_psd.data)
+    assert_array_almost_equal(freqs, stc_psd.times)
diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py
new file mode 100644
index 0000000..601aef4
--- /dev/null
+++ b/mne/minimum_norm/time_frequency.py
@@ -0,0 +1,667 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from warnings import warn
+
+import numpy as np
+from scipy import linalg, signal, fftpack
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..fiff.constants import FIFF
+from ..source_estimate import SourceEstimate
+from ..time_frequency.tfr import cwt, morlet
+from ..time_frequency.multitaper import dpss_windows, _psd_from_mt,\
+                                        _psd_from_mt_adaptive, _mt_spectra
+from ..baseline import rescale
+from .inverse import combine_xyz, prepare_inverse_operator, _assemble_kernel, \
+                     _pick_channels_inverse_operator, _check_method, \
+                     _subject_from_inverse
+from ..parallel import parallel_func
+from .. import verbose
+
+
+@verbose
+def source_band_induced_power(epochs, inverse_operator, bands, label=None,
+                              lambda2=1.0 / 9.0, method="dSPM", nave=1,
+                              n_cycles=5, df=1, use_fft=False, decim=1,
+                              baseline=None, baseline_mode='logratio',
+                              pca=True, n_jobs=1, dSPM=None, verbose=None):
+    """Compute source space induced power in given frequency bands
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    inverse_operator : instance of inverse operator
+        The inverse operator.
+    bands : dict
+        Example: bands = dict(alpha=[8, 9]).
+    label : Label
+        Restricts the source estimates to a given label.
+    lambda2 : float
+        The regularization parameter of the minimum norm.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    df : float
+        Frequency step between successive frequencies within each band.
+    decim : int
+        Temporal decimation factor.
+    use_fft : bool
+        If True, do the convolutions in the frequency domain with FFT;
+        otherwise do them in the time domain.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    baseline_mode : None | 'logratio' | 'zscore'
+        Do baseline correction with log-ratio (power is divided by the
+        mean power during baseline and log-transformed) or zscore (power
+        is divided by the standard deviation of power during baseline
+        after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+    pca : bool
+        If True, the true dimension of data is estimated before running
+        the time frequency transforms. It reduces the computation times
+        e.g. with a dataset that was maxfiltered (true dim is 64).
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    method = _check_method(method, dSPM)
+
+    frequencies = np.concatenate([np.arange(band[0], band[1] + df / 2.0, df)
+                                 for _, band in bands.iteritems()])
+
+    powers, _, vertno = _source_induced_power(epochs,
+                                      inverse_operator, frequencies,
+                                      label=label,
+                                      lambda2=lambda2, method=method,
+                                      nave=nave, n_cycles=n_cycles,
+                                      decim=decim, use_fft=use_fft, pca=pca,
+                                      n_jobs=n_jobs, with_plv=False)
+
+    Fs = epochs.info['sfreq']  # sampling in Hz
+    stcs = dict()
+
+    subject = _subject_from_inverse(inverse_operator)
+    for name, band in bands.iteritems():
+        idx = [k for k, f in enumerate(frequencies) if band[0] <= f <= band[1]]
+
+        # average power in band + mean over epochs
+        power = np.mean(powers[:, idx, :], axis=1)
+
+        # Run baseline correction
+        power = rescale(power, epochs.times[::decim], baseline, baseline_mode,
+                        copy=False)
+
+        tmin = epochs.times[0]
+        tstep = float(decim) / Fs
+        stc = SourceEstimate(power, vertices=vertno, tmin=tmin, tstep=tstep,
+                             subject=subject)
+        stcs[name] = stc
+
+        logger.info('[done]')
+
+    return stcs
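+
+# Usage sketch (illustrative, not part of upstream): assuming `epochs` and
+# `inverse_operator` are already in scope, per-band induced power comes back
+# as a dict of SourceEstimate objects keyed by band name:
+#
+#     bands = dict(alpha=[8, 12], beta=[13, 30])
+#     stcs = source_band_induced_power(epochs, inverse_operator, bands,
+#                                      n_cycles=2, baseline=(None, 0),
+#                                      baseline_mode='logratio')
+#     alpha_power = stcs['alpha'].data  # array, (n_sources, n_times)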
+
+
+@verbose
+def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh, with_plv,
+                     pick_normal, decim, verbose=None):
+    """Aux function for source_induced_power"""
+    n_times = data[:, :, ::decim].shape[2]
+    n_freqs = len(Ws)
+    n_sources = K.shape[0]
+    is_free_ori = False
+    if (source_ori == FIFF.FIFFV_MNE_FREE_ORI and not pick_normal):
+        is_free_ori = True
+        n_sources /= 3
+
+    shape = (n_sources, n_freqs, n_times)
+    power = np.zeros(shape, dtype=np.float)  # power
+    if with_plv:
+        shape = (n_sources, n_freqs, n_times)
+        plv = np.zeros(shape, dtype=np.complex)  # phase lock
+    else:
+        plv = None
+
+    for e in data:
+        e = e[sel]  # keep only selected channels
+
+        if Vh is not None:
+            e = np.dot(Vh, e)  # reducing data rank
+
+        for f, w in enumerate(Ws):
+            tfr = cwt(e, [w], use_fft=use_fft, decim=decim)
+            tfr = np.asfortranarray(tfr.reshape(len(e), -1))
+
+            # phase lock and power at freq f
+            if with_plv:
+                plv_f = np.zeros((n_sources, n_times), dtype=np.complex)
+            pow_f = np.zeros((n_sources, n_times), dtype=np.float)
+
+            for k, t in enumerate([np.real(tfr), np.imag(tfr)]):
+                sol = np.dot(K, t)
+
+                sol_pick_normal = sol
+                if is_free_ori:
+                    sol_pick_normal = sol[2::3]
+
+                if with_plv:
+                    if k == 0:  # real
+                        plv_f += sol_pick_normal
+                    else:  # imag
+                        plv_f += 1j * sol_pick_normal
+
+                if is_free_ori:
+                    logger.debug('combining the current components...')
+                    sol = combine_xyz(sol, square=True)
+                else:
+                    np.power(sol, 2, sol)
+                pow_f += sol
+                del sol
+
+            power[:, f, :] += pow_f
+            del pow_f
+
+            if with_plv:
+                plv_f /= np.abs(plv_f)
+                plv[:, f, :] += plv_f
+                del plv_f
+
+    return power, plv
+
+
+@verbose
+def _source_induced_power(epochs, inverse_operator, frequencies, label=None,
+                          lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
+                          decim=1, use_fft=False, pca=True, pick_normal=True,
+                          n_jobs=1, with_plv=True, zero_mean=False,
+                          verbose=None):
+    """Aux function for source_induced_power
+    """
+    parallel, my_compute_pow_plv, n_jobs = parallel_func(_compute_pow_plv,
+                                                         n_jobs)
+    #
+    #   Set up the inverse according to the parameters
+    #
+    epochs_data = epochs.get_data()
+
+    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(epochs.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   three current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_normal)
+
+    if pca:
+        U, s, Vh = linalg.svd(K, full_matrices=False)
+        rank = np.sum(s > 1e-8 * s[0])
+        K = s[:rank] * U[:, :rank]
+        Vh = Vh[:rank]
+        logger.info('Reducing data rank to %d' % rank)
+    else:
+        Vh = None
+
+    Fs = epochs.info['sfreq']  # sampling in Hz
+
+    logger.info('Computing source power ...')
+
+    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    n_jobs = min(n_jobs, len(epochs_data))
+    out = parallel(my_compute_pow_plv(data, K, sel, Ws,
+                                      inv['source_ori'], use_fft, Vh,
+                                      with_plv, pick_normal, decim)
+                        for data in np.array_split(epochs_data, n_jobs))
+    power = sum(o[0] for o in out)
+    power /= len(epochs_data)  # average power over epochs
+
+    if with_plv:
+        plv = sum(o[1] for o in out)
+        plv = np.abs(plv)
+        plv /= len(epochs_data)  # average phase lock over epochs
+    else:
+        plv = None
+
+    if method != "MNE":
+        power *= noise_norm.ravel()[:, None, None] ** 2
+
+    return power, plv, vertno
+
+
+@verbose
+def source_induced_power(epochs, inverse_operator, frequencies, label=None,
+                         lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
+                         decim=1, use_fft=False, pick_normal=False,
+                         baseline=None, baseline_mode='logratio', pca=True,
+                         n_jobs=1, dSPM=None, zero_mean=False, verbose=None):
+    """Compute induced power and phase lock
+
+    Computation can optionally be restricted to a label.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    inverse_operator : instance of inverse operator
+        The inverse operator.
+    frequencies : array
+        Array of frequencies of interest.
+    label : Label
+        Restricts the source estimates to a given label.
+    lambda2 : float
+        The regularization parameter of the minimum norm.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    decim : int
+        Temporal decimation factor.
+    use_fft : bool
+        If True, do the convolutions in the frequency domain with FFT;
+        otherwise do them in the time domain.
+    pick_normal : bool
+        If True, rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    baseline_mode : None | 'logratio' | 'zscore'
+        Do baseline correction with log-ratio (power is divided by the
+        mean power during baseline and log-transformed) or zscore (power
+        is divided by the standard deviation of power during baseline
+        after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+    pca : bool
+        If True, the true dimension of data is estimated before running
+        the time frequency transforms. It reduces the computation times
+        e.g. with a dataset that was maxfiltered (true dim is 64).
+    n_jobs : int
+        Number of jobs to run in parallel.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    method = _check_method(method, dSPM)
+
+    power, plv, vertno = _source_induced_power(epochs,
+                            inverse_operator, frequencies,
+                            label=label, lambda2=lambda2, method=method,
+                            nave=nave, n_cycles=n_cycles, decim=decim,
+                            use_fft=use_fft, pick_normal=pick_normal,
+                            pca=pca, n_jobs=n_jobs)
+
+    # Run baseline correction
+    if baseline is not None:
+        power = rescale(power, epochs.times[::decim], baseline, baseline_mode,
+                        copy=False)
+
+    return power, plv
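+
+# Usage sketch (illustrative, not part of upstream): power and phase locking
+# values for a set of frequencies, restricted to a label; both returned
+# arrays have shape (n_sources, n_frequencies, n_times):
+#
+#     frequencies = np.arange(7, 30, 2)
+#     power, plv = source_induced_power(epochs, inverse_operator, frequencies,
+#                                       label=label, baseline=(-0.1, 0),
+#                                       baseline_mode='percent', n_cycles=2)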
+
+
+@verbose
+def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
+                       tmin=None, tmax=None, fmin=0., fmax=200.,
+                       NFFT=2048, overlap=0.5, pick_normal=False, label=None,
+                       nave=1, pca=True, verbose=None):
+    """Compute source power spectrum density (PSD)
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    inverse_operator : dict
+        The inverse operator.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    tmin : float | None
+        The beginning of the time interval of interest (in seconds). If None
+        start from the beginning of the file.
+    tmax : float | None
+        The end of the time interval of interest (in seconds). If None
+        stop at the end of the file.
+    fmin : float
+        The lower frequency of interest.
+    fmax : float
+        The upper frequency of interest.
+    NFFT : int
+        Window size for the FFT. Should be a power of 2.
+    overlap : float
+        The overlap fraction between windows. Should be between 0 and 1.
+        0 means no overlap.
+    pick_normal : bool
+        If True, rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    label : Label
+        Restricts the source estimates to a given label.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    pca : bool
+        If True, the true dimension of data is estimated before running
+        the time frequency transforms. It reduces the computation times
+        e.g. with a dataset that was maxfiltered (true dim is 64).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The PSD (in dB) of each of the sources.
+    """
+
+    logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
+
+    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(raw.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   three current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_normal)
+
+    if pca:
+        U, s, Vh = linalg.svd(K, full_matrices=False)
+        rank = np.sum(s > 1e-8 * s[0])
+        K = s[:rank] * U[:, :rank]
+        Vh = Vh[:rank]
+        logger.info('Reducing data rank to %d' % rank)
+    else:
+        Vh = None
+
+    start, stop = 0, raw.last_samp + 1 - raw.first_samp
+    if tmin is not None:
+        start = raw.time_as_index(tmin)[0]
+    if tmax is not None:
+        stop = raw.time_as_index(tmax)[0] + 1
+    NFFT = int(NFFT)
+    Fs = raw.info['sfreq']
+    window = signal.hanning(NFFT)
+    freqs = fftpack.fftfreq(NFFT, 1. / Fs)
+    freqs_mask = (freqs >= 0) & (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[freqs_mask]
+    fstep = np.mean(np.diff(freqs))
+    psd = np.zeros((noise_norm.size, np.sum(freqs_mask)))
+    n_windows = 0
+
+    for this_start in np.arange(start, stop, int(NFFT * (1. - overlap))):
+        data, _ = raw[sel, this_start:this_start + NFFT]
+        if data.shape[1] < NFFT:
+            logger.info("Skipping last buffer")
+            break
+
+        if Vh is not None:
+            data = np.dot(Vh, data)  # reducing data rank
+
+        data *= window[None, :]
+
+        data_fft = fftpack.fft(data)[:, freqs_mask]
+        sol = np.dot(K, data_fft)
+
+        if is_free_ori and not pick_normal:
+            sol = combine_xyz(sol, square=True)
+        else:
+            sol = np.abs(sol) ** 2
+
+        if method != "MNE":
+            sol *= noise_norm ** 2
+
+        psd += sol
+        n_windows += 1
+
+    psd /= n_windows
+
+    psd = 10 * np.log10(psd)
+
+    subject = _subject_from_inverse(inverse_operator)
+    stc = SourceEstimate(psd, vertices=vertno, tmin=fmin * 1e-3,
+                         tstep=fstep * 1e-3, subject=subject)
+    return stc
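+
+# Usage sketch (illustrative, not part of upstream): PSD between 55 and 65 Hz
+# restricted to a label; note the frequency axis is stored in stc.times
+# scaled by 1e-3, as done above:
+#
+#     stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9.,
+#                              method="dSPM", tmin=0, tmax=20, fmin=55,
+#                              fmax=65, NFFT=2048, label=label, overlap=0.1)
+#     freqs_hz = stc.times * 1e3  # recover frequencies in Hz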
+
+
+@verbose
+def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
+                              method="dSPM", fmin=0., fmax=200.,
+                              pick_normal=False, label=None, nave=1,
+                              pca=True, inv_split=None, bandwidth=4.,
+                              adaptive=False, low_bias=True, n_jobs=1,
+                              verbose=None):
+    """ Generator for compute_source_psd_epochs """
+
+    logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
+
+    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(epochs.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   three current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_normal)
+
+    if pca:
+        U, s, Vh = linalg.svd(K, full_matrices=False)
+        rank = np.sum(s > 1e-8 * s[0])
+        K = s[:rank] * U[:, :rank]
+        Vh = Vh[:rank]
+        logger.info('Reducing data rank to %d' % rank)
+    else:
+        Vh = None
+
+    # split the inverse operator
+    if inv_split is not None:
+        K_split = np.array_split(K, inv_split)
+    else:
+        K_split = [K]
+
+    # compute DPSS windows
+    n_times = len(epochs.times)
+    sfreq = epochs.info['sfreq']
+
+    # compute standardized half-bandwidth
+    half_nbw = float(bandwidth) * n_times / (2 * sfreq)
+    n_tapers_max = int(2 * half_nbw)
+
+    dpss, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+                                 low_bias=low_bias)
+    n_tapers = len(dpss)
+
+    logger.info('Using %d tapers with bandwidth %0.1fHz'
+                % (n_tapers, bandwidth))
+
+    if adaptive and len(eigvals) < 3:
+        warn('Not adaptively combining the spectral estimators '
+             'due to a low number of tapers.')
+        adaptive = False
+
+    if adaptive:
+        parallel, my_psd_from_mt_adaptive, n_jobs = \
+            parallel_func(_psd_from_mt_adaptive, n_jobs)
+    else:
+        weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
+
+    subject = _subject_from_inverse(inverse_operator)
+    for k, e in enumerate(epochs):
+        logger.info("Processing epoch : %d" % (k + 1))
+        data = e[sel]
+
+        if Vh is not None:
+            data = np.dot(Vh, data)  # reducing data rank
+
+        # compute tapered spectra in sensor space
+        x_mt, freqs = _mt_spectra(data, dpss, sfreq)
+
+        if k == 0:
+            freq_mask = (freqs >= fmin) & (freqs <= fmax)
+            fstep = np.mean(np.diff(freqs))
+
+        # allocate space for output
+        psd = np.empty((K.shape[0], np.sum(freq_mask)))
+
+        # Optionally, we split the inverse operator into parts to save memory.
+        # Without splitting the tapered spectra in source space have size
+        # (n_vertices x n_tapers x n_times / 2)
+        pos = 0
+        for K_part in K_split:
+            # allocate space for tapered spectra in source space
+            x_mt_src = np.empty((K_part.shape[0], x_mt.shape[1],
+                                x_mt.shape[2]), dtype=x_mt.dtype)
+
+            # apply inverse to each taper
+            for i in range(n_tapers):
+                x_mt_src[:, i, :] = np.dot(K_part, x_mt[:, i, :])
+
+            # compute the psd
+            if adaptive:
+                out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
+                       for x in np.array_split(x_mt_src, n_jobs))
+                this_psd = np.concatenate(out)
+            else:
+                x_mt_src = x_mt_src[:, :, freq_mask]
+                this_psd = _psd_from_mt(x_mt_src, weights)
+
+            psd[pos:pos + K_part.shape[0], :] = this_psd
+            pos += K_part.shape[0]
+
+        # combine orientations
+        if is_free_ori and not pick_normal:
+            psd = combine_xyz(psd, square=False)
+
+        if method != "MNE":
+            psd *= noise_norm ** 2
+
+        stc = SourceEstimate(psd, tmin=fmin, tstep=fstep, vertices=vertno,
+                             subject=subject)
+
+        # we return a generator object for "stream processing"
+        yield stc
+
+
+@verbose
+def compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
+                              method="dSPM", fmin=0., fmax=200.,
+                              pick_normal=False, label=None, nave=1,
+                              pca=True, inv_split=None, bandwidth=4.,
+                              adaptive=False, low_bias=True,
+                              return_generator=False, n_jobs=1,
+                              verbose=None):
+    """Compute source power spectrum density (PSD) from Epochs using
+       multi-taper method
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    inverse_operator : dict
+        The inverse operator.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use mininum norm, dSPM or sLORETA.
+    fmin : float
+        The lower frequency of interest.
+    fmax : float
+        The upper frequency of interest.
+    pick_normal : bool
+        If True, rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    label : Label
+        Restricts the source estimates to a given label.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    pca : bool
+        If True, the true dimension of data is estimated before running
+        the time frequency transforms. It reduces the computation times
+        e.g. with a dataset that was maxfiltered (true dim is 64).
+    inv_split : int or None
+        Split inverse operator into inv_split parts in order to save memory.
+    bandwidth : float
+        The bandwidth of the multi taper windowing function in Hz.
+    adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD
+        (slow, use n_jobs >> 1 to speed up computation).
+    low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    n_jobs : int
+        Number of parallel jobs to use (only used if adaptive=True).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stcs : list (or generator object) of SourceEstimate
+        The source space PSDs for each epoch.
+    """
+
+    # use an auxiliary function so we can either return a generator or a list
+    stcs_gen = _compute_source_psd_epochs(epochs, inverse_operator,
+                              lambda2=lambda2, method=method, fmin=fmin,
+                              fmax=fmax, pick_normal=pick_normal, label=label,
+                              nave=nave, pca=pca, inv_split=inv_split,
+                              bandwidth=bandwidth, adaptive=adaptive,
+                              low_bias=low_bias, n_jobs=n_jobs)
+
+    if return_generator:
+        # return generator object
+        return stcs_gen
+    else:
+        # return a list
+        stcs = list()
+        for stc in stcs_gen:
+            stcs.append(stc)
+
+        return stcs
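+
+# Usage sketch (illustrative, not part of upstream): per-epoch multi-taper
+# PSDs; with return_generator=True the estimates are consumed one at a time
+# instead of being held in memory simultaneously:
+#
+#     stcs = compute_source_psd_epochs(epochs, inverse_operator,
+#                                      lambda2=1. / 9., method='dSPM',
+#                                      bandwidth=8., fmin=0, fmax=100,
+#                                      return_generator=True)
+#     for stc in stcs:
+#         do_something(stc)  # hypothetical per-epoch processing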
diff --git a/mne/misc.py b/mne/misc.py
new file mode 100644
index 0000000..36dffd7
--- /dev/null
+++ b/mne/misc.py
@@ -0,0 +1,100 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Scott Burns <sburns at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+
+def parse_config(fname):
+    """Parse a config file (like .ave and .cov files)
+
+    Parameters
+    ----------
+    fname : string
+        config file name
+
+    Returns
+    -------
+    conditions : list of dict
+        Each condition is indexed by the event type.
+        A condition contains as keys:
+            tmin, tmax, name, grad_reject, mag_reject,
+            eeg_reject, eog_reject
+    """
+    reject_params = read_reject_parameters(fname)
+
+    try:
+        with open(fname, 'r') as f:
+            lines = f.readlines()
+    except Exception:
+        raise ValueError("Error while reading %s" % fname)
+
+    cat_ind = [i for i, x in enumerate(lines) if "category {" in x]
+    event_dict = dict()
+    for ind in cat_ind:
+        for k in range(ind + 1, ind + 7):
+            words = lines[k].split()
+            if len(words) >= 2:
+                key = words[0]
+                if key == 'event':
+                    event = int(words[1])
+                    break
+        else:
+            raise ValueError('Could not find event id.')
+        event_dict[event] = dict(**reject_params)
+        for k in range(ind + 1, ind + 7):
+            words = lines[k].split()
+            if len(words) >= 2:
+                key = words[0]
+                if key == 'name':
+                    name = ' '.join(words[1:])
+                    if name[0] == '"':
+                        name = name[1:]
+                    if name[-1] == '"':
+                        name = name[:-1]
+                    event_dict[event]['name'] = name
+                if key in ['tmin', 'tmax', 'basemin', 'basemax']:
+                    event_dict[event][key] = float(words[1])
+    return event_dict
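+
+# Illustrative sketch (hypothetical snippet, not from the repository) of the
+# config block format this parser expects:
+#
+#     category {
+#         name  "Auditory left"
+#         event 1
+#         tmin  -0.2
+#         tmax  0.5
+#     }
+#
+# parse_config('audvis.ave') would then return a dict keyed by the event id,
+# e.g. {1: {'name': 'Auditory left', 'tmin': -0.2, 'tmax': 0.5, ...}}, with
+# the rejection parameters from read_reject_parameters merged in.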
+
+
+def read_reject_parameters(fname):
+    """Read rejection parameters from .cov or .ave config file"""
+
+    try:
+        with open(fname, 'r') as f:
+            lines = f.readlines()
+    except Exception:
+        raise ValueError("Error while reading %s" % fname)
+
+    reject_names = ['gradReject', 'magReject', 'eegReject', 'eogReject',
+                    'ecgReject']
+    reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
+    reject = dict()
+    for line in lines:
+        words = line.split()
+        if len(words) > 0 and words[0] in reject_names:
+            reject[reject_pynames[reject_names.index(words[0])]] = \
+                                                                float(words[1])
+
+    return reject
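+
+# Illustrative sketch (hypothetical values): a config containing lines like
+#
+#     gradReject 4000e-13
+#     magReject  4e-12
+#     eogReject  150e-6
+#
+# yields {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}.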
+
+
+def read_flat_parameters(fname):
+    """Read flat channel rejection parameters from .cov or .ave config file"""
+
+    try:
+        with open(fname, 'r') as f:
+            lines = f.readlines()
+    except Exception:
+        raise ValueError("Error while reading %s" % fname)
+
+    reject_names = ['gradFlat', 'magFlat', 'eegFlat', 'eogFlat', 'ecgFlat']
+    reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
+    flat = dict()
+    for line in lines:
+        words = line.split()
+        if len(words) > 0 and words[0] in reject_names:
+            flat[reject_pynames[reject_names.index(words[0])]] = \
+                                                                float(words[1])
+
+    return flat
diff --git a/mne/mixed_norm/__init__.py b/mne/mixed_norm/__init__.py
new file mode 100644
index 0000000..ec14c8a
--- /dev/null
+++ b/mne/mixed_norm/__init__.py
@@ -0,0 +1,7 @@
+from ..utils import deprecated
+from ..inverse_sparse import mxne_inverse
+
+dec = deprecated('Use the function from mne.inverse_sparse')
+
+mixed_norm = dec(mxne_inverse.mixed_norm)
+tf_mixed_norm = dec(mxne_inverse.tf_mixed_norm)
diff --git a/mne/parallel.py b/mne/parallel.py
new file mode 100644
index 0000000..eea73cd
--- /dev/null
+++ b/mne/parallel.py
@@ -0,0 +1,96 @@
+"""Parallel util function
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+import logging
+logger = logging.getLogger('mne')
+
+from . import verbose
+
+
+@verbose
+def parallel_func(func, n_jobs, verbose=None):
+    """Return parallel instance with delayed function
+
+    Util function to use joblib only if available
+
+    Parameters
+    ----------
+    func : callable
+        A function.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        INFO or DEBUG will print parallel status, others will not.
+
+    Returns
+    -------
+    parallel : instance of joblib.Parallel or list
+        The parallel object.
+    my_func : callable
+        func if not parallel or delayed(func).
+    n_jobs : int
+        Number of jobs >= 0.
+    """
+    try:
+        from sklearn.externals.joblib import Parallel, delayed
+    except ImportError:
+        try:
+            from joblib import Parallel, delayed
+        except ImportError:
+            logger.warn("joblib not installed. Cannot run in parallel.")
+            n_jobs = 1
+            my_func = func
+            parallel = list
+            return parallel, my_func, n_jobs
+
+    parallel_verbose = 5 if logger.level <= logging.INFO else 0
+    parallel = Parallel(n_jobs, verbose=parallel_verbose)
+    my_func = delayed(func)
+    n_jobs = check_n_jobs(n_jobs)
+    return parallel, my_func, n_jobs
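+
+# Usage sketch (illustrative; `my_fun` and `data_list` are hypothetical): the
+# returned triple works identically with or without joblib installed:
+#
+#     parallel, p_fun, n_jobs = parallel_func(my_fun, n_jobs=2)
+#     out = parallel(p_fun(d) for d in data_list)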
+
+
+def check_n_jobs(n_jobs, allow_cuda=False):
+    """Check n_jobs in particular for negative values
+
+    Parameters
+    ----------
+    n_jobs : int
+        The number of jobs.
+    allow_cuda : bool
+        Allow n_jobs to be 'cuda'. Default: False.
+
+    Returns
+    -------
+    n_jobs : int
+        The checked number of jobs. Always positive (or 'cuda' if
+        applicable.)
+    """
+    if not isinstance(n_jobs, int):
+        if not allow_cuda:
+            raise ValueError('n_jobs must be an integer')
+        elif not isinstance(n_jobs, basestring) or n_jobs != 'cuda':
+            raise ValueError('n_jobs must be an integer, or "cuda"')
+        #else, we have n_jobs='cuda' and this is okay, so do nothing
+    elif n_jobs <= 0:
+        try:
+            import multiprocessing
+            n_cores = multiprocessing.cpu_count()
+            n_jobs = n_cores + n_jobs
+            if n_jobs <= 0:
+                raise ValueError('If n_jobs has a negative value it must not '
+                                 'be less than the number of CPUs present. '
+                                 'You\'ve got %s CPUs' % n_cores)
+        except ImportError:
+            # only warn if they tried to use something other than 1 job
+            if n_jobs != 1:
+                logger.warn('multiprocessing not installed. Cannot run in '
+                             'parallel.')
+                n_jobs = 1
+
+    return n_jobs
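+
+# Illustrative behaviour on a hypothetical 4-core machine, following the
+# arithmetic above (n_cores + n_jobs for non-positive values):
+#
+#     check_n_jobs(2)   # -> 2
+#     check_n_jobs(0)   # -> 4
+#     check_n_jobs(-1)  # -> 3
+#     check_n_jobs(-5)  # -> ValueError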
diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py
new file mode 100644
index 0000000..30c6f41
--- /dev/null
+++ b/mne/preprocessing/__init__.py
@@ -0,0 +1,15 @@
+"""Preprocessing with artifact detection, SSP, and ICA"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from .maxfilter import apply_maxfilter
+from .ssp import compute_proj_ecg, compute_proj_eog
+from .eog import find_eog_events
+from .ecg import find_ecg_events
+from .ica import ICA, ica_find_eog_events, ica_find_ecg_events, score_funcs, \
+                 read_ica, run_ica
diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py
new file mode 100644
index 0000000..871cbbb
--- /dev/null
+++ b/mne/preprocessing/ecg.py
@@ -0,0 +1,168 @@
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+from .. import fiff, verbose
+from ..filter import band_pass_filter
+
+
+def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
+                 l_freq=5, h_freq=35, tstart=0, filter_length='10s'):
+    """Detect QRS component in ECG channels.
+
+    QRS is the main wave on the heart beat.
+
+    Parameters
+    ----------
+    sfreq : float
+        Sampling rate.
+    ecg : array
+        ECG signal.
+    thresh_value : float
+        QRS detection threshold.
+    levels : float
+        Number of standard deviations from the mean to include for detection.
+    n_thresh : int
+        Maximum number of crossings.
+    l_freq : float
+        Low cutoff frequency of the band-pass filter.
+    h_freq : float
+        High cutoff frequency of the band-pass filter.
+    tstart : float
+        Start detection after tstart seconds.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+
+    Returns
+    -------
+    events : array
+        Indices of ECG peaks
+    """
+    win_size = int(round((60.0 * sfreq) / 120.0))
+
+    filtecg = band_pass_filter(ecg, sfreq, l_freq, h_freq,
+                               filter_length=filter_length)
+
+    absecg = np.abs(filtecg)
+    init = int(sfreq)
+
+    n_samples_start = int(init * tstart)
+    absecg = absecg[n_samples_start:]
+
+    n_points = len(absecg)
+
+    maxpt = np.empty(3)
+    maxpt[0] = np.max(absecg[:init])
+    maxpt[1] = np.max(absecg[init:init * 2])
+    maxpt[2] = np.max(absecg[init * 2:init * 3])
+
+    init_max = np.mean(maxpt)
+
+    thresh1 = init_max * thresh_value
+
+    numcross = []
+    time = []
+    rms = []
+    i = 0
+    while i < (n_points - win_size):
+        window = absecg[i:i + win_size]
+        if window[0] > thresh1:
+            maxTime = np.argmax(window)
+            time.append(i + maxTime)
+            numcross.append(np.sum(np.diff((window > thresh1).astype(np.int)
+                                           == 1)))
+            rms.append(np.sqrt(np.mean(window ** 2)))
+            i += win_size
+        else:
+            i += 1
+
+    time = np.array(time)
+    rms_mean = np.mean(rms)
+    rms_std = np.std(rms)
+    rms_thresh = rms_mean + (rms_std * levels)
+    b = np.where(rms < rms_thresh)[0]
+    a = np.array(numcross)[b]
+    clean_events = time[b[a < n_thresh]]
+
+    clean_events += n_samples_start
+
+    return clean_events
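+
+# Usage sketch (illustrative; `ecg_trace` is a hypothetical 1-D array): the
+# detector can be run directly on an ECG trace sampled at sfreq Hz:
+#
+#     peaks = qrs_detector(raw.info['sfreq'], ecg_trace, thresh_value=0.6)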
+
+
+@verbose
+def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
+                    l_freq=5, h_freq=35, qrs_threshold=0.6,
+                    filter_length='10s', verbose=None):
+    """Find ECG peaks
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data
+    event_id : int
+        The index to assign to found events
+    ch_name : str
+        The name of the channel to use for ECG peak detection.
+        The argument is mandatory if the dataset contains no ECG
+        channels.
+    tstart : float
+        Start detection after tstart seconds. Useful when beginning
+        of run is noisy.
+    l_freq : float
+        Low cutoff frequency of the band-pass filter.
+    h_freq : float
+        High cutoff frequency of the band-pass filter.
+    qrs_threshold : float
+        QRS detection threshold, between 0 and 1.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    ecg_events : array
+        Events.
+    ch_ECG : array of int
+        Index of the channel used for detection.
+    average_pulse : float
+        Estimated average pulse.
+    """
+    info = raw.info
+
+    # Getting ECG channel
+    if ch_name is None:
+        ch_ECG = fiff.pick_types(info, meg=False, eeg=False, stim=False,
+                                 eog=False, ecg=True, emg=False,
+                                 exclude='bads')
+    else:
+        ch_ECG = fiff.pick_channels(raw.ch_names, include=[ch_name])
+        if len(ch_ECG) == 0:
+            raise ValueError('%s not in channel list (%s)' %
+                             (ch_name, raw.ch_names))
+
+    if len(ch_ECG) == 0 and ch_name is None:
+        raise Exception('No ECG channel found. Please specify the ch_name '
+                        'parameter, e.g. "MEG 1531".')
+
+    assert len(ch_ECG) == 1
+
+    logger.info('Using channel %s to identify heart beats'
+                % raw.ch_names[ch_ECG[0]])
+
+    ecg, times = raw[ch_ECG, :]
+
+    # detecting QRS and generating event file
+    ecg_events = qrs_detector(info['sfreq'], ecg.ravel(), tstart=tstart,
+                              thresh_value=qrs_threshold, l_freq=l_freq,
+                              h_freq=h_freq, filter_length=filter_length)
+
+    n_events = len(ecg_events)
+    average_pulse = n_events * 60.0 / (times[-1] - times[0])
+    logger.info("Number of ECG events detected : %d (average pulse %d / "
+                "min.)" % (n_events, average_pulse))
+
+    ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
+                       event_id * np.ones(n_events)]
+    return ecg_events, ch_ECG, average_pulse
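+
+# Usage sketch (illustrative): events come back in the standard 3-column MNE
+# event format (sample index, 0, event_id):
+#
+#     ecg_events, ch_ecg, pulse = find_ecg_events(raw, event_id=999)
+#     r_peak_samples = ecg_events[:, 0]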
diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py
new file mode 100644
index 0000000..80e603d
--- /dev/null
+++ b/mne/preprocessing/eog.py
@@ -0,0 +1,123 @@
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+from .peak_finder import peak_finder
+from .. import fiff, verbose
+from ..filter import band_pass_filter
+
+
+@verbose
+def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
+                    filter_length='10s', ch_name=None, tstart=0,
+                    verbose=None):
+    """Locate EOG artifacts
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    event_id : int
+        The index to assign to found events.
+    l_freq : float
+        Low cutoff frequency of the band-pass filter.
+    h_freq : float
+        High cutoff frequency of the band-pass filter.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    ch_name : str | None
+        If not None, use the specified channel(s) for EOG.
+    tstart : float
+        Start detection after tstart seconds.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eog_events : array
+        Events.
+    """
+
+    info = raw.info
+
+    # Getting EOG Channel
+    if ch_name is None:
+        ch_eog = fiff.pick_types(info, meg=False, eeg=False, stim=False,
+                                 eog=True, ecg=False, emg=False,
+                                 exclude='bads')
+        if len(ch_eog) == 0:
+            logger.info('No EOG channels found')
+            logger.info('Trying with EEG 061 and EEG 062')
+            ch_eog = fiff.pick_channels(raw.ch_names,
+                                    include=['EEG 061', 'EEG 062'])
+            if len(ch_eog) != 2:
+                raise ValueError('EEG 061 or EEG 062 channel not found.')
+
+    else:
+
+        # Check if multiple EOG Channels
+        if ',' in ch_name:
+            ch_name = ch_name.split(',')
+        else:
+            ch_name = [ch_name]
+
+        ch_eog = fiff.pick_channels(raw.ch_names, include=ch_name)
+
+        if len(ch_eog) == 0:
+            raise ValueError('%s not in channel list' % ch_name)
+        else:
+            logger.info('Using channel %s as EOG channel%s' % (
+                   " and ".join(ch_name), '' if len(ch_eog) < 2 else 's'))
+
+    logger.info('EOG channel index for this subject is: %s' % ch_eog)
+
+    eog, _ = raw[ch_eog, :]
+
+    eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq,
+                                  h_freq=h_freq,
+                                  sampling_rate=raw.info['sfreq'],
+                                  first_samp=raw.first_samp,
+                                  filter_length=filter_length,
+                                  tstart=tstart)
+
+    return eog_events
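+
+# Usage sketch (illustrative): blink onsets in the same 3-column event format
+# as find_ecg_events:
+#
+#     eog_events = find_eog_events(raw, event_id=998)
+#     blink_samples = eog_events[:, 0]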
+
+
+def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
+                     filter_length='10s', tstart=0.):
+    """Helper function"""
+
+    logger.info('Filtering the data to remove DC offset to help '
+                'distinguish blinks from saccades')
+
+    # filter to remove the DC offset so that blinks can be distinguished
+    # from saccades
+    filteog = np.array([band_pass_filter(x, sampling_rate, 2, 45,
+                                         filter_length=filter_length)
+                        for x in eog])
+    temp = np.sqrt(np.sum(filteog ** 2, axis=1))
+
+    indexmax = np.argmax(temp)
+
+    # easier to detect peaks with filtering.
+    filteog = band_pass_filter(eog[indexmax], sampling_rate, l_freq, h_freq,
+                               filter_length=filter_length)
+
+    # detecting eog blinks and generating event file
+
+    logger.info('Now detecting blinks and generating corresponding events')
+
+    temp = filteog - np.mean(filteog)
+    n_samples_start = int(sampling_rate * tstart)
+    if np.abs(np.max(temp)) > np.abs(np.min(temp)):
+        eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1)
+    else:
+        eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1)
+
+    eog_events += n_samples_start
+    n_events = len(eog_events)
+    logger.info("Number of EOG events detected : %d" % n_events)
+    eog_events = np.c_[eog_events + first_samp, np.zeros(n_events),
+                       event_id * np.ones(n_events)]
+
+    return eog_events
\ No newline at end of file
diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py
new file mode 100644
index 0000000..41deb11
--- /dev/null
+++ b/mne/preprocessing/ica.py
@@ -0,0 +1,1624 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+import inspect
+import warnings
+from inspect import getargspec, isfunction
+from collections import namedtuple
+
+import os
+import json
+import logging
+logger = logging.getLogger('mne')
+
+import numpy as np
+from scipy import stats
+from scipy.spatial import distance
+from scipy import linalg
+
+from .ecg import qrs_detector
+from .eog import _find_eog_events
+
+from ..cov import compute_whitener
+from .. import Covariance
+from ..fiff.pick import pick_types, pick_channels
+from ..fiff.write import write_double_matrix, write_string, \
+                         write_name_list, write_int, start_block, \
+                         end_block
+from ..fiff.tree import dir_tree_find
+from ..fiff.open import fiff_open
+from ..fiff.tag import read_tag
+from ..fiff.constants import Bunch, FIFF
+from ..viz import plot_ica_panel
+from .. import verbose
+from ..fiff.write import start_file, end_file
+
+
+def _make_xy_sfunc(func, ndim_output=False):
+    """Aux function"""
+    if ndim_output:
+        sfunc = lambda x, y: np.array([func(a, y.ravel()) for a in x])[:, 0]
+    else:
+        sfunc = lambda x, y: np.array([func(a, y.ravel()) for a in x])
+    sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
+    sfunc.__doc__ = func.__doc__
+    return sfunc
+
+# makes score funcs attr accessible for users
+score_funcs = Bunch()
+
+xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items() if isfunction(f)
+                     and not n.startswith('_')]
+
+xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items() if isfunction(f)
+                      and not n.startswith('_')]
+
+score_funcs.update(dict((n, _make_xy_sfunc(f)) for n, f in xy_arg_dist_funcs
+                   if getargspec(f).args == ['u', 'v']))
+
+score_funcs.update(dict((n, _make_xy_sfunc(f, ndim_output=True))
+                   for n, f in xy_arg_stats_funcs
+                   if getargspec(f).args == ['x', 'y']))
+
+
+__all__ = ['ICA', 'ica_find_ecg_events', 'ica_find_eog_events', 'score_funcs',
+           'read_ica', 'run_ica']
+
+
+class ICA(object):
+    """M/EEG signal decomposition using Independent Component Analysis (ICA)
+
+    This object can be used to estimate ICA components and then
+    remove some from Raw or Epochs for data exploration or artifact
+    correction.
+
+    Caveat! If supplying a noise covariance, keep track of the projections
+    available in the cov or in the raw object. For example, if you are
+    interested in EOG or ECG artifacts, EOG and ECG projections should be
+    temporarily removed before fitting the ICA. You can say::
+
+        >> projs, raw.info['projs'] = raw.info['projs'], []
+        >> ica.decompose_raw(raw)
+        >> raw.info['projs'] = projs
+
+    Parameters
+    ----------
+    n_components : int | float | None
+        The number of components used for ICA decomposition. If int, it must
+        be smaller than max_pca_components. If None, all PCA components will
+        be used. If a float between 0 and 1, components will be selected by
+        the cumulative percentage of explained variance.
+    max_pca_components : int | None
+        The number of components used for PCA decomposition. If None, no
+        dimension reduction will be applied and max_pca_components will equal
+        the number of channels supplied on decomposing data.
+    n_pca_components : int
+        The number of PCA components used after ICA recomposition. This
+        attribute allows balancing noise reduction against potential loss of
+        features due to dimensionality reduction. If greater than
+        `self.n_components_`, the next `n_pca_components` minus
+        `n_components_` PCA components will be added before restoring the
+        sensor space data. The attribute gets updated each time the
+        corresponding parameter of .pick_sources_raw or .pick_sources_epochs
+        is changed.
+    noise_cov : None | instance of mne.cov.Covariance
+        Noise covariance used for whitening. If None, channels are just
+        z-scored.
+    random_state : None | int | instance of np.random.RandomState
+        np.random.RandomState to initialize the FastICA estimation.
+        As the estimation is non-deterministic it can be useful to
+        fix the seed to have reproducible results.
+    algorithm : {'parallel', 'deflation'}
+        Apply parallel or deflational algorithm for FastICA.
+    fun : string or function, optional. Default: 'logcosh'
+        The functional form of the G function used in the
+        approximation to neg-entropy. Could be either 'logcosh', 'exp',
+        or 'cube'.
+        You can also provide your own function. It should return a tuple
+        containing the value of the function, and of its derivative, in the
+        point.
+    fun_args: dictionary, optional
+        Arguments to send to the functional form.
+        If empty and if fun='logcosh', fun_args will take value
+        {'alpha' : 1.0}
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    current_fit : str
+        Flag informing about which data type (raw or epochs) was used for
+        the fit.
+    ch_names : list-like
+        Channel names resulting from initial picking.
+    `n_components_` : int
+        If fit, the actual number of components used for ICA decomposition.
+    n_pca_components : int
+        See above.
+    max_pca_components : int
+        The number of components used for PCA dimensionality reduction.
+    verbose : bool, str, int, or None
+        See above.
+    `pca_components_` : ndarray
+        If fit, the PCA components
+    `pca_mean_` : ndarray
+        If fit, the mean vector used to center the data before doing the PCA.
+    `pca_explained_variance_` : ndarray
+        If fit, the variance explained by each PCA component
+    `mixing_matrix_` : ndarray
+        If fit, the mixing matrix to restore observed data, else None.
+    `unmixing_matrix_` : ndarray
+        If fit, the matrix to unmix observed data, else None.
+    exclude : list
+        List of source indices to exclude, i.e. artifact components
+        identified throughout the ICA session. Indices added to this list
+        will be dispatched to the .pick_sources methods. Source indices
+        passed to the .pick_sources methods via the 'exclude' argument are
+        added to the .exclude attribute. The indices are saved with the ICA
+        and restored on reading, so artifact components, once identified,
+        don't have to be added again. To clear this 'artifact memory', set
+        ica.exclude = [].
+
+    """
+    @verbose
+    def __init__(self, n_components, max_pca_components=100,
+                 n_pca_components=64, noise_cov=None, random_state=None,
+                 algorithm='parallel', fun='logcosh', fun_args=None,
+                 verbose=None):
+        self.noise_cov = noise_cov
+
+        if max_pca_components is not None and \
+                n_components > max_pca_components:
+            raise ValueError('n_components must be smaller than '
+                             'max_pca_components')
+
+        if isinstance(n_components, float) \
+                and not 0 < n_components <= 1:
+            raise ValueError('Selecting ICA components by explained variance '
+                             'requires values between 0.0 and 1.0')
+
+        self.current_fit = 'unfitted'
+        self.verbose = verbose
+        self.n_components = n_components
+        self.max_pca_components = max_pca_components
+        self.n_pca_components = n_pca_components
+        self.ch_names = None
+        self.random_state = random_state
+        self.algorithm = algorithm
+        self.fun = fun
+        self.fun_args = fun_args
+        self.exclude = []
+
+    def __repr__(self):
+        """ICA fit information"""
+        if self.current_fit == 'unfitted':
+            s = 'no'
+        elif self.current_fit == 'raw':
+            s = 'raw data'
+        else:
+            s = 'epochs'
+        s += ' decomposition, '
+        s += ('%s components' % str(self.n_components_) if
+              hasattr(self, 'n_components_') else
+              'no dimension reduction')
+        if self.exclude:
+            s += ', %i sources marked for exclusion' % len(self.exclude)
+
+        return '<ICA  |  %s>' % s
+
+    @verbose
+    def decompose_raw(self, raw, picks=None, start=None, stop=None,
+                      verbose=None):
+        """Run the ICA decomposition on raw data
+
+        Caveat! If supplying a noise covariance, keep track of the projections
+        available in the cov, the raw or the epochs object. For example,
+        if you are interested in EOG or ECG artifacts, EOG and ECG projections
+        should be temporarily removed before fitting the ICA.
+
+        Parameters
+        ----------
+        raw : instance of mne.fiff.Raw
+            Raw measurements to be decomposed.
+        picks : array-like
+            Channels to be included. This selection remains throughout the
+            initialized ICA session. If None, only good data channels are
+            used.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            End sample (exclusive). If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        self : instance of ICA
+            Returns the modified instance.
+        """
+        if self.current_fit != 'unfitted':
+            raise RuntimeError('ICA decomposition has already been fitted. '
+                               'Please start a new ICA session.')
+
+        logger.info('Computing signal decomposition on raw data. '
+                    'Please be patient, this may take some time')
+
+        if picks is None:  # just use good data channels
+            picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
+                               ecg=False, misc=False, stim=False,
+                               exclude='bads')
+
+        if self.max_pca_components is None:
+            self.max_pca_components = len(picks)
+            logger.info('Inferring max_pca_components from picks.')
+
+        self.ch_names = [raw.ch_names[k] for k in picks]
+        start, stop = _check_start_stop(raw, start, stop)
+        data, self._pre_whitener = self._pre_whiten(raw[picks, start:stop][0],
+                                                    raw.info, picks)
+
+        self._decompose(data, self.max_pca_components, 'raw')
+
+        return self
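+
+    # Usage sketch (illustrative, not part of upstream): a typical session,
+    # assuming `raw` is an instance of Raw:
+    #
+    #     ica = ICA(n_components=0.90, max_pca_components=100)
+    #     ica.decompose_raw(raw, start=0., stop=60.)
+    #     sources = ica.get_sources_raw(raw)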
+
+    @verbose
+    def decompose_epochs(self, epochs, picks=None, verbose=None):
+        """Run the ICA decomposition on epochs
+
+        Caveat! If supplying a noise covariance, keep track of the projections
+        available in the cov, the raw or the epochs object. For example,
+        if you are interested in EOG or ECG artifacts, EOG and ECG projections
+        should be temporarily removed before fitting the ICA.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs. The ICA is estimated on the concatenated epochs.
+        picks : array-like
+            Channels to be included, relative to the channels already picked
+            during epochs initialization. This selection remains throughout
+            the initialized ICA session.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        self : instance of ICA
+            Returns the modified instance.
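+
+        Example (a minimal sketch; ``epochs`` stands for any mne.Epochs
+        instance with preloaded data)::
+
+            ica = ICA(n_components=0.90, max_pca_components=None)
+            ica.decompose_epochs(epochs)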
+        """
+        if self.current_fit != 'unfitted':
+            raise RuntimeError('ICA decomposition has already been fitted. '
+                               'Please start a new ICA session.')
+
+        logger.info('Computing signal decomposition on epochs. '
+                    'Please be patient, this may take some time')
+
+        if picks is None:
+            # just use epochs good data channels and avoid double picking
+            picks = pick_types(epochs.info, include=epochs.ch_names,
+                               exclude='bads')
+
+        meeg_picks = pick_types(epochs.info, meg=True, eeg=True, eog=False,
+                                ecg=False, misc=False, stim=False,
+                                exclude='bads')
+
+        # keep only M/EEG channels among those picked at epochs initialization
+        picks = np.intersect1d(meeg_picks, picks)
+
+        self.ch_names = [epochs.ch_names[k] for k in picks]
+
+        if self.max_pca_components is None:
+            self.max_pca_components = len(picks)
+            logger.info('Inferring max_pca_components from picks.')
+
+        data, self._pre_whitener = \
+            self._pre_whiten(np.hstack(epochs.get_data()[:, picks]),
+                             epochs.info, picks)
+
+        self._decompose(data, self.max_pca_components, 'epochs')
+
+        return self
+
+    def get_sources_raw(self, raw, start=None, stop=None):
+        """Estimate raw sources given the unmixing matrix
+
+        Parameters
+        ----------
+        raw : instance of Raw
+            Raw object to draw sources from.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, the entire data will be used.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, the entire data will be used.
+
+        Returns
+        -------
+        sources : array, shape = (n_components, n_times)
+            The ICA sources time series.
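+
+        Example (a hedged sketch; ``ica`` is assumed to have been fitted
+        on ``raw``)::
+
+            sources = ica.get_sources_raw(raw, start=0., stop=30.)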
+        """
+        if not hasattr(self, 'mixing_matrix_'):
+            raise RuntimeError('No fit available. Please first fit ICA '
+                               'decomposition.')
+        start, stop = _check_start_stop(raw, start, stop)
+        return self._get_sources_raw(raw, start, stop)[0]
+
+    def _get_sources_raw(self, raw, start, stop):
+        """Aux function"""
+
+        picks = [raw.ch_names.index(k) for k in self.ch_names]
+        data, _ = self._pre_whiten(raw[picks, start:stop][0], raw.info, picks)
+        pca_data = self._transform_pca(data.T)
+        n_components = self.n_components_
+        raw_sources = self._transform_ica(pca_data[:, :n_components]).T
+        return raw_sources, pca_data
+
+    def get_sources_epochs(self, epochs, concatenate=False):
+        """Estimate epochs sources given the unmixing matrix
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            Epochs object to draw sources from.
+        concatenate : bool
+            If True, epochs and time slices will be concatenated along the
+            time axis.
+
+        Returns
+        -------
+        epochs_sources : ndarray, shape (n_epochs, n_sources, n_times)
+            The sources for each epoch (concatenated along the time axis if
+            ``concatenate`` is True).
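+
+        Example (a minimal sketch; ``ica`` is assumed to have been fitted)::
+
+            epochs_sources = ica.get_sources_epochs(epochs)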
+        """
+        if not hasattr(self, 'mixing_matrix_'):
+            raise RuntimeError('No fit available. Please first fit ICA '
+                               'decomposition.')
+
+        return self._get_sources_epochs(epochs, concatenate)[0]
+
+    def _get_sources_epochs(self, epochs, concatenate):
+
+        picks = pick_types(epochs.info, include=self.ch_names, exclude=[])
+
+        # special case where epochs come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide Epochs compatible with '
+                               'ica.ch_names' % (len(self.ch_names),
+                                                 len(picks)))
+
+        data, _ = self._pre_whiten(np.hstack(epochs.get_data()[:, picks]),
+                                   epochs.info, picks)
+
+        pca_data = self._transform_pca(data.T)
+        sources = self._transform_ica(pca_data[:, :self.n_components_]).T
+        sources = np.array(np.split(sources, len(epochs.events), 1))
+
+        if concatenate:
+            sources = np.hstack(sources)
+
+        return sources, pca_data
+
+    @verbose
+    def save(self, fname):
+        """Store ICA session into a fiff file.
+
+        Parameters
+        ----------
+        fname : str
+            The absolute path of the file to save the ICA session into.
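+
+        Example (a minimal sketch; the file name is illustrative)::
+
+            ica.save('sample-ica.fif')
+            ica_restored = read_ica('sample-ica.fif')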
+        """
+        if self.current_fit == 'unfitted':
+            raise RuntimeError('No fit available. Please first fit ICA '
+                               'decomposition.')
+
+        logger.info('Writing ICA session to %s...' % fname)
+        fid = start_file(fname)
+
+        try:
+            _write_ica(fid, self)
+        except Exception as inst:
+            os.remove(fname)
+            raise inst
+        end_file(fid)
+
+        return self
+
+    def sources_as_raw(self, raw, picks=None, start=None, stop=None):
+        """Export sources as raw object
+
+        Parameters
+        ----------
+        raw : instance of Raw
+            Raw object to export sources from.
+        picks : array-like
+            Channels to be included in addition to the sources. If None,
+            artifact and stimulus channels will be included.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+
+        Returns
+        -------
+        out : instance of mne.fiff.Raw
+            Container object for the ICA sources.
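+
+        Example (a minimal sketch; keeps the reference channels next to the
+        sources for visual inspection)::
+
+            sources_raw = ica.sources_as_raw(raw)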
+        """
+        # include 'reference' channels for comparison with ICA
+        if picks is None:
+            picks = pick_types(raw.info, meg=False, eeg=False, misc=True,
+                               ecg=True, eog=True, stim=True, exclude='bads')
+
+        # merge copied instance and picked data with sources
+
+        start, stop = _check_start_stop(raw, start, stop)
+        sources = self.get_sources_raw(raw, start=start, stop=stop)
+        if raw._preloaded:
+            data, times = raw._data, raw._times
+            del raw._data
+            del raw._times
+
+        out = raw.copy()
+        if raw._preloaded:
+            raw._data, raw._times = data, times
+
+        out.fids = []
+        data_, times_ = raw[picks, start:stop]
+
+        out._data = np.r_[sources, data_]
+        out._times = times_
+        out._preloaded = True
+
+        # update first and last samples
+        out.first_samp = raw.first_samp + (start if start else 0)
+        out.last_samp = out.first_samp + stop if stop else raw.last_samp
+
+        self._export_info(out.info, raw, picks)
+        out._projector = None
+
+        return out
+
+    def _export_info(self, info, container, picks):
+        """Aux function
+        """
+        # set channel names and info
+        ch_names = info['ch_names'] = []
+        ch_info = info['chs'] = []
+        for ii in xrange(self.n_components_):
+            this_source = 'ICA %03d' % (ii + 1)
+            ch_names.append(this_source)
+            ch_info.append(dict(ch_name=this_source, cal=1,
+                logno=ii + 1, coil_type=FIFF.FIFFV_COIL_NONE,
+                kind=FIFF.FIFFV_MISC_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
+                loc=np.array([0., 0., 0., 1.] * 3, dtype='f4'),
+                unit=FIFF.FIFF_UNIT_NONE, eeg_loc=None, range=1.0,
+                scanno=ii + 1, unit_mul=0, coil_trans=None))
+
+        # re-append additionally picked ch_names
+        ch_names += [container.ch_names[k] for k in picks]
+        # re-append additionally picked ch_info
+        ch_info += [container.info['chs'][k] for k in picks]
+
+        # update number of channels
+        info['nchan'] = len(picks) + self.n_components_
+        info['bads'] = [ch_names[k] for k in self.exclude]
+        info['projs'] = []  # make sure projections are removed.
+        info['filenames'] = []
+
+    def sources_as_epochs(self, epochs, picks=None):
+        """Create epochs in ICA space from epochs object
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            Epochs object to draw sources from.
+        picks : array-like
+            Channels to be included in addition to the sources. If None,
+            artifact channels will be included.
+
+        Returns
+        -------
+        ica_epochs : instance of Epochs
+            The epochs in ICA space.
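+
+        Example (a minimal sketch)::
+
+            ica_epochs = ica.sources_as_epochs(epochs)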
+        """
+
+        out = epochs.copy()
+        sources = self.get_sources_epochs(epochs)
+        if picks is None:
+            picks = pick_types(epochs.info, meg=False, eeg=False, misc=True,
+                               ecg=True, eog=True, stim=True, exclude='bads')
+
+        out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
+                                    axis=1) if len(picks) > 0 else sources
+
+        self._export_info(out.info, epochs, picks)
+        out.preload = True
+        out.raw = None
+        out._projector = None
+
+        return out
+
+    def plot_sources_raw(self, raw, order=None, start=None, stop=None,
+                         n_components=None, source_idx=None, ncol=3, nrow=10,
+                         title=None, show=True):
+        """Create panel plots of ICA sources. Wrapper around viz.plot_ica_panel
+
+        Parameters
+        ----------
+        raw : instance of mne.fiff.Raw
+            Raw object to plot the sources from.
+        order : ndarray | None
+            Index array of length `n_components_`. If None, the plot shows
+            the sources in the order in which they were fitted.
+            Example::
+
+                order = np.argsort(np.var(sources, axis=1))
+
+        start : int
+            X-axis start index. If None, plot from the beginning.
+        stop : int
+            X-axis stop index. If None, plot to the end.
+        n_components : int
+            Number of components fitted.
+        source_idx : array-like
+            Indices for subsetting the sources.
+        ncol : int | None
+            Number of panel columns. If None, a default layout is used.
+        nrow : int | None
+            Number of panel rows. If None, a default layout is used.
+        title : str
+            The figure title. If None a default is provided.
+        show : bool
+            If True, plot will be shown, else just the figure is returned.
+
+        Returns
+        -------
+        fig : instance of pyplot.Figure
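+
+        Example (a minimal sketch; plots the first 1000 samples)::
+
+            fig = ica.plot_sources_raw(raw, start=0, stop=1000)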
+        """
+        start, stop = _check_start_stop(raw, start, stop)
+        sources = self.get_sources_raw(raw, start=start, stop=stop)
+
+        if order is not None:
+            if len(order) != sources.shape[0]:
+                raise ValueError('order and sources have to be of the '
+                                 'same length.')
+            else:
+                sources = sources[order]
+
+        fig = plot_ica_panel(sources, start=0 if start is not None else start,
+                             stop=(stop - start) if stop is not None else stop,
+                             n_components=n_components, source_idx=source_idx,
+                             ncol=ncol, nrow=nrow, title=title)
+        if show:
+            import matplotlib.pylab as pl
+            pl.show()
+
+        return fig
+
+    def plot_sources_epochs(self, epochs, epoch_idx=None, order=None,
+                            start=None, stop=None, n_components=None,
+                            source_idx=None, ncol=3, nrow=10, show=True):
+        """Create panel plots of ICA sources. Wrapper around viz.plot_ica_panel
+
+        Parameters
+        ----------
+        epochs : instance of mne.Epochs
+            Epochs object to plot the sources from.
+        epoch_idx : int
+            Index of the particular epoch to plot.
+        order : ndarray | None
+            Index array of length n_components. If None, the plot shows the
+            sources in the order in which they were fitted.
+            Example: order = np.argsort(np.var(sources, axis=1)).
+        start : int | None
+            First sample to include. If None, data will be shown from the first
+            sample.
+        stop : int | None
+            Last sample to not include. If None, data will be shown to the last
+            sample.
+        n_components : int
+            Number of components fitted.
+        source_idx : array-like
+            Indices for subsetting the sources.
+        ncol : int
+            Number of panel-columns.
+        nrow : int
+            Number of panel-rows.
+        show : bool
+            If True, plot will be shown, else just the figure is returned.
+
+        Returns
+        -------
+        fig : instance of pyplot.Figure
+        """
+        sources = self.get_sources_epochs(epochs,
+                                          concatenate=epoch_idx is None)
+        source_dim = 1 if sources.ndim > 2 else 0
+        if order is not None:
+            if len(order) != sources.shape[source_dim]:
+                raise ValueError('order and sources have to be of the '
+                                 'same length.')
+            else:
+                sources = (sources[:, order] if source_dim
+                           else sources[order])
+
+        fig = plot_ica_panel(sources[epoch_idx], start=start, stop=stop,
+                             n_components=n_components, source_idx=source_idx,
+                             ncol=ncol, nrow=nrow, show=show)
+
+        return fig
+
+    def find_sources_raw(self, raw, target=None, score_func='pearsonr',
+                         start=None, stop=None):
+        """Find sources based on own distribution or based on similarity to
+        other sources or between source and target.
+
+        Parameters
+        ----------
+        raw : instance of Raw
+            Raw object to draw sources from.
+        target : array-like | ch_name | None
+            Signal to which the sources shall be compared. It has to be of
+            the same shape as the sources. If a string is supplied, the
+            routine will try to find a matching channel. If None, a score
+            function expecting only one input-array argument must be used,
+            for instance, scipy.stats.skew (default).
+        score_func : callable | str label
+            Callable taking as arguments either two input arrays
+            (e.g. Pearson correlation) or one input array (e.g. skewness)
+            and returning a float. For convenience, the most common
+            score_funcs are available via string labels: currently, all
+            distance metrics from scipy.spatial and all functions from
+            scipy.stats taking compatible input arguments are supported.
+            These functions have been adapted to support iteration over the
+            rows of a 2D array.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+
+        Returns
+        -------
+        scores : ndarray
+            Scores for each source as returned from score_func.
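+
+        Example (a hedged sketch; the channel name is hypothetical)::
+
+            scores = ica.find_sources_raw(raw, target='EOG 061',
+                                          score_func='pearsonr')
+            most_eog_like = int(np.abs(scores).argmax())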
+        """
+        start, stop = _check_start_stop(raw, start, stop)
+        sources = self.get_sources_raw(raw=raw, start=start, stop=stop)
+
+        # auto target selection
+        if target is not None:
+            if hasattr(target, 'ndim'):
+                if target.ndim < 2:
+                    target = target.reshape(1, target.shape[-1])
+            if isinstance(target, basestring):
+                pick = _get_target_ch(raw, target)
+                target, _ = raw[pick, start:stop]
+            if sources.shape[1] != target.shape[1]:
+                raise ValueError('Sources and target do not have the same '
+                                 'number of time slices.')
+            target = target.ravel()
+
+        return _find_sources(sources, target, score_func)
+
+    def find_sources_epochs(self, epochs, target=None, score_func='pearsonr'):
+        """Find sources based on relations between source and target
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            Epochs object to draw sources from.
+        target : array-like | ch_name | None
+            Signal to which the sources shall be compared. It has to be of
+            the same shape as the sources. If a string is supplied, the
+            routine will try to find a matching channel. If None, a score
+            function expecting only one input-array argument must be used,
+            for instance, scipy.stats.skew (default).
+        score_func : callable | str label
+            Callable taking as arguments either two input arrays
+            (e.g. Pearson correlation) or one input array (e.g. skewness)
+            and returning a float. For convenience, the most common
+            score_funcs are available via string labels: currently, all
+            distance metrics from scipy.spatial and all functions from
+            scipy.stats taking compatible input arguments are supported.
+            These functions have been adapted to support iteration over the
+            rows of a 2D array.
+
+        Returns
+        -------
+        scores : ndarray
+            Scores for each source as returned from score_func.
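+
+        Example (a hedged sketch; the channel name is hypothetical)::
+
+            scores = ica.find_sources_epochs(epochs, target='EOG 061',
+                                             score_func='pearsonr')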
+        """
+        sources = self.get_sources_epochs(epochs=epochs)
+        # auto target selection
+        if target is not None:
+            if hasattr(target, 'ndim'):
+                if target.ndim < 3:
+                    target = target.reshape(1, 1, target.shape[-1])
+            if isinstance(target, basestring):
+                pick = _get_target_ch(epochs, target)
+                target = epochs.get_data()[:, pick]
+            if sources.shape[2] != target.shape[2]:
+                raise ValueError('Sources and target do not have the same '
+                                 'number of time slices.')
+            target = target.ravel()
+
+        return _find_sources(np.hstack(sources), target, score_func)
+
+    def pick_sources_raw(self, raw, include=None, exclude=None,
+                         n_pca_components=None, start=None, stop=None,
+                         copy=True):
+        """Recompose raw data including or excluding some sources
+
+        Parameters
+        ----------
+        raw : instance of Raw
+            Raw object to pick to remove ICA components from.
+        include : list-like | None
+            The source indices to use. If None, all sources are used.
+        exclude : list-like | None
+            The source indices to remove. If None, only sources already in
+            self.exclude are removed.
+        n_pca_components : int
+            The number of PCA components to be unwhitened, where
+            `n_components_` is the lower bound and `max_pca_components`
+            the upper bound. If greater than `self.n_components_`, the next
+            `n_pca_components` minus `n_components_` PCA components will
+            be added before restoring the sensor space data. This can be used
+            to take back the PCA dimension reduction.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        copy : bool
+            If True, return a modified copy; if False, modify the raw
+            instance in place.
+
+        Returns
+        -------
+        raw : instance of Raw
+            Raw instance with the selected ICA components removed.
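+
+        Example (a hedged sketch; the component indices are illustrative)::
+
+            raw_clean = ica.pick_sources_raw(raw, exclude=[0, 7])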
+        """
+        if not raw._preloaded:
+            raise ValueError('raw data must be preloaded for this to work. '
+                             'Please read the raw data with preload=True.')
+
+        if self.current_fit != 'raw':
+            raise ValueError('Currently no raw data fitted. '
+                             'Please fit raw data first.')
+
+        if exclude is None:
+            self.exclude = list(set(self.exclude))
+        else:
+            self.exclude = list(set(self.exclude + exclude))
+            logger.info('Adding sources %s to .exclude' % ', '.join(
+                        [str(i) for i in exclude if i not in self.exclude]))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        start, stop = _check_start_stop(raw, start, stop)
+        sources, pca_data = self._get_sources_raw(raw, start=start, stop=stop)
+        recomposed = self._pick_sources(sources, pca_data, include,
+                                        self.exclude)
+
+        if copy is True:
+            raw = raw.copy()
+
+        picks = [raw.ch_names.index(k) for k in self.ch_names]
+        raw[picks, start:stop] = recomposed
+        return raw
+
+    def pick_sources_epochs(self, epochs, include=None, exclude=None,
+                            n_pca_components=None, copy=True):
+        """Recompose epochs
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            Epochs object to pick to remove ICA components from.
+            Data must be preloaded.
+        include : list-like | None
+            The source indices to use. If None, all sources are used.
+        exclude : list-like | None
+            The source indices to remove. If None, only sources already in
+            self.exclude are removed.
+        n_pca_components : int
+            The number of PCA components to be unwhitened, where
+            `n_components_` is the lower bound and max_pca_components
+            the upper bound. If greater than `self.n_components_`, the next
+            `n_pca_components` minus `n_components_` PCA components will
+            be added before restoring the sensor space data. This can be used
+            to take back the PCA dimension reduction.
+        copy : bool
+            Modify Epochs instance in place or return modified copy.
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            Epochs with selected ICA components removed.
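+
+        Example (a hedged sketch; the component indices are illustrative)::
+
+            epochs_clean = ica.pick_sources_epochs(epochs, exclude=[0, 7])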
+        """
+        if not epochs.preload:
+            raise ValueError('epochs must be preloaded for this to work. '
+                             'Please read the epochs with preload=True.')
+
+        sources, pca_data = self._get_sources_epochs(epochs, True)
+        picks = pick_types(epochs.info, include=self.ch_names,
+                           exclude='bads')
+
+        if copy is True:
+            epochs = epochs.copy()
+
+        if exclude is None:
+            self.exclude = list(set(self.exclude))
+        else:
+            self.exclude = list(set(self.exclude + exclude))
+            logger.info('Adding sources %s to .exclude' % ', '.join(
+                        [str(i) for i in exclude if i not in self.exclude]))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        # put sources-dimension first for selection
+        recomposed = self._pick_sources(sources, pca_data, include,
+                                        self.exclude)
+
+        # restore epochs, channels, tsl order
+        epochs._data[:, picks] = np.array(np.split(recomposed,
+                                          len(epochs.events), 1))
+        epochs.preload = True
+
+        return epochs
+
+    def detect_artifacts(self, raw, start_find=None, stop_find=None,
+                ecg_ch=None, ecg_score_func='pearsonr', ecg_criterion=0.1,
+                eog_ch=None, eog_score_func='pearsonr', eog_criterion=0.1,
+                skew_criterion=-1, kurt_criterion=-1, var_criterion=0,
+                add_nodes=None):
+        """Run ICA artifacts detection workflow.
+
+        Hints and caveats:
+        - It is highly recommended to band-pass filter ECG and EOG data and
+          pass the filtered time series instead of the channel names as the
+          ecg_ch and eog_ch arguments.
+        - Please check your results: detection by kurtosis and variance
+          may be powerful, but misclassification of brain signals as
+          noise cannot be precluded.
+        - Consider using shorter times for start_find and stop_find than
+          for start and stop. This can save a lot of computation time.
+
+        Example invocation (taking advantage of the defaults)::
+
+            ica.detect_artifacts(ecg_ch='MEG 1531', eog_ch='EOG 061')
+
+        Parameters
+        ----------
+        raw : instance of Raw
+            Raw object to draw sources from.
+        start_find : int | float | None
+            First sample to include for artifact search. If float, data will be
+            interpreted as time in seconds. If None, data will be used from the
+            first sample.
+        stop_find : int | float | None
+            Last sample to not include for artifact search. If float, data will
+            be interpreted as time in seconds. If None, data will be used to
+            the last sample.
+        ecg_ch : str | ndarray | None
+            The `target` argument passed to ica.find_sources_raw. Either the
+            name of the ECG channel or the ECG time series. If None, this step
+            will be skipped.
+        ecg_score_func : str | callable
+            The `score_func` argument passed to ica.find_sources_raw. Either
+            the name of a function supported by ICA or a custom function.
+        ecg_criterion : float | int | list-like | slice
+            The criterion used to select sources. If float, sources with an
+            absolute score greater than the criterion will be marked for
+            exclusion. Else, the scores sorted in ascending order of absolute
+            value will be indexed accordingly; e.g. -1 selects the source
+            with the highest score. If None, this step will be skipped.
+        eog_ch : list | str | ndarray | None
+            The `target` argument or the list of target arguments subsequently
+            passed to ica.find_sources_raw. Either the name of the vertical EOG
+            channel or the corresponding EOG time series. If None, this step
+            will be skipped.
+        eog_score_func : str | callable
+            The `score_func` argument passed to ica.find_sources_raw. Either
+            the name of a function supported by ICA or a custom function.
+        eog_criterion : float | int | list-like | slice
+            The criterion used to select sources. If float, sources with an
+            absolute score greater than the criterion will be marked for
+            exclusion. Else, the scores sorted in ascending order of absolute
+            value will be indexed accordingly; e.g. -1 selects the source
+            with the highest score. If None, this step will be skipped.
+        skew_criterion : float | int | list-like | slice
+            The criterion applied to the skewness scores. If float, sources
+            with an absolute score greater than the criterion will be marked
+            for exclusion. Else, the scores sorted in ascending order of
+            absolute value will be indexed accordingly; e.g. -1 selects the
+            source with the highest score. If None, this step will be skipped.
+        kurt_criterion : float | int | list-like | slice
+            The criterion applied to the kurtosis scores. If float, sources
+            with an absolute score greater than the criterion will be marked
+            for exclusion. Else, the scores sorted in ascending order of
+            absolute value will be indexed accordingly; e.g. -1 selects the
+            source with the highest score. If None, this step will be skipped.
+        var_criterion : float | int | list-like | slice
+            The criterion applied to the variance scores. If float, sources
+            with an absolute score greater than the criterion will be marked
+            for exclusion. Else, the scores sorted in ascending order of
+            absolute value will be indexed accordingly; e.g. -1 selects the
+            source with the highest score. If None, this step will be skipped.
+        add_nodes : list of ica_nodes
+            Additional list of tuples carrying the following parameters:
+            (name : str, target : str | array, score_func : callable,
+            criterion : float | int | list-like | slice). This parameter is a
+            generalization of the artifact-specific parameters above and has
+            the same structure. Example::
+
+                add_nodes=[('ECG phase lock', 'ECG 01',
+                            my_phase_lock_function, 0.5)]
+
+        Returns
+        -------
+        self : instance of ICA
+            The ICA object with the detected artifact indices marked for
+            exclusion.
+        """
+
+        logger.info('    Searching for artifacts...')
+        _detect_artifacts(self, raw=raw, start_find=start_find,
+                    stop_find=stop_find, ecg_ch=ecg_ch,
+                    ecg_score_func=ecg_score_func, ecg_criterion=ecg_criterion,
+                    eog_ch=eog_ch, eog_score_func=eog_score_func,
+                    eog_criterion=eog_criterion, skew_criterion=skew_criterion,
+                    kurt_criterion=kurt_criterion, var_criterion=var_criterion,
+                    add_nodes=add_nodes)
+
+        return self
+
+    def _pre_whiten(self, data, info, picks):
+        """Aux function"""
+        if self.noise_cov is None:  # use standardization as whitener
+            pre_whitener = np.atleast_1d(np.std(data)) ** -1
+            data *= pre_whitener
+        elif not hasattr(self, '_pre_whitener'):  # pick cov
+            ncov = deepcopy(self.noise_cov)
+            if data.shape[0] != ncov['data'].shape[0]:
+                ncov['data'] = ncov['data'][picks][:, picks]
+                assert data.shape[0] == ncov['data'].shape[0]
+
+            pre_whitener, _ = compute_whitener(ncov, info, picks)
+            data = np.dot(pre_whitener, data)
+        else:
+            data = np.dot(self._pre_whitener, data)
+            pre_whitener = self._pre_whitener
+
+        return data, pre_whitener
+
+    def _decompose(self, data, max_pca_components, fit_type):
+        """Aux function """
+        from sklearn.decomposition import RandomizedPCA
+
+        # sklearn < 0.11 does not support random_state argument
+        kwargs = {'n_components': max_pca_components, 'whiten': False}
+
+        aspec = inspect.getargspec(RandomizedPCA.__init__)
+        if 'random_state' not in aspec.args:
+            warnings.warn('RandomizedPCA does not support random_state '
+                          'argument. Update scikit-learn to version 0.11 '
+                          'or newer to get reproducible results.')
+        else:
+            kwargs['random_state'] = 0
+
+        pca = RandomizedPCA(**kwargs)
+        pca_data = pca.fit_transform(data.T)
+
+        if isinstance(self.n_components, float):
+            logger.info('Selecting PCA components by explained variance.')
+            n_components_ = np.sum(pca.explained_variance_ratio_.cumsum()
+                                   < self.n_components)
+            to_ica = pca_data[:, :n_components_]
+        else:
+            logger.info('Selecting PCA components by number.')
+            if self.n_components is not None:  # normal n case
+                to_ica = pca_data[:, :self.n_components]
+            else:  # None case
+                logger.info('Using all PCA components.')
+                to_ica = pca_data
+
+        # the things to store for PCA
+        self.pca_components_ = pca.components_
+        self.pca_mean_ = pca.mean_
+        self.pca_explained_variance_ = pca.explained_variance_
+        # and store number of components as it may be smaller than
+        # pca.components_.shape[1]
+        self.n_components_ = to_ica.shape[1]
+
+        # Take care of ICA
+        try:
+            from sklearn.decomposition import FastICA  # to avoid strong dep.
+        except ImportError:
+            raise Exception('the scikit-learn package is missing and '
+                            'required for ICA')
+
+        # sklearn < 0.11 does not support random_state argument for FastICA
+        kwargs = {'algorithm': self.algorithm, 'fun': self.fun,
+                  'fun_args': self.fun_args}
+
+        if self.random_state is not None:
+            aspec = inspect.getargspec(FastICA.__init__)
+            if 'random_state' not in aspec.args:
+                warnings.warn('random_state argument ignored, update '
+                              'scikit-learn to version 0.11 or newer')
+            else:
+                kwargs['random_state'] = self.random_state
+
+        ica = FastICA(**kwargs)
+        ica.fit(to_ica)
+
+        # For ICA the only thing to store is the unmixing matrix; its
+        # attribute name differs across scikit-learn versions (older FastICA
+        # versions, which expose `sources_`, call it `unmixing_matrix_`,
+        # newer ones `components_`)
+        if not hasattr(ica, 'sources_'):
+            self.unmixing_matrix_ = ica.unmixing_matrix_
+        else:
+            self.unmixing_matrix_ = ica.components_
+
+        self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_).T
+        self.current_fit = fit_type
+
+    def _pick_sources(self, sources, pca_data, include, exclude):
+        """Aux function"""
+
+        _n_pca_comp = self.n_pca_components
+        if not (self.n_components_ <= _n_pca_comp <= self.max_pca_components):
+            raise ValueError('n_pca_components must be between '
+                             'n_components_ and max_pca_components.')
+
+        if include not in (None, []):
+            mute = [i for i in xrange(len(sources)) if i not in include]
+            sources[mute, :] = 0.  # include via exclusion
+        elif exclude not in (None, []):
+            sources[exclude, :] = 0.  # just exclude
+
+        # restore pca data
+        pca_restored = np.dot(sources.T, self.mixing_matrix_)
+
+        # re-append deselected pca dimension if desired
+        if _n_pca_comp > self.n_components_:
+            pca_reappend = pca_data[:, self.n_components_:_n_pca_comp]
+            pca_restored = np.c_[pca_restored, pca_reappend]
+
+        # restore sensor space data
+        out = self._inverse_transform_pca(pca_restored)
+
+        # restore scaling
+        if self.noise_cov is None:  # revert standardization
+            out /= self._pre_whitener
+        else:
+            out = np.dot(out, linalg.pinv(self._pre_whitener))
+
+        return out.T
+
+    def _transform_pca(self, data):
+        """Apply decorrelation / dimensionality reduction on MEEG data.
+        """
+        X = np.atleast_2d(data)
+        if self.pca_mean_ is not None:
+            X = X - self.pca_mean_
+
+        X = np.dot(X, self.pca_components_.T)
+        return X
+
+    def _transform_ica(self, data):
+        """Apply ICA unmixing matrix to recover the latent sources.
+        """
+        return np.dot(np.atleast_2d(data), self.unmixing_matrix_.T)
+
+    def _inverse_transform_pca(self, X):
+        """Aux function"""
+        components = self.pca_components_[:X.shape[1]]
+        X_orig = np.dot(X, components)
+
+        if self.pca_mean_ is not None:
+            X_orig += self.pca_mean_
+
+        return X_orig
+
+
+def _check_start_stop(raw, start, stop):
+    """Aux function"""
+    return [c if (isinstance(c, int) or c is None) else
+            raw.time_as_index(c)[0] for c in (start, stop)]
+
+
+@verbose
+def ica_find_ecg_events(raw, ecg_source, event_id=999,
+                        tstart=0.0, l_freq=5, h_freq=35, qrs_threshold=0.6,
+                        verbose=None):
+    """Find ECG peaks from one selected ICA source
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw object to draw sources from.
+    ecg_source : ndarray
+        ICA source resembling ECG to find peaks from.
+    event_id : int
+        The index to assign to found events.
+    tstart : float
+        Start detection after tstart seconds. Useful when the beginning
+        of the run is noisy.
+    l_freq : float
+        Low cutoff frequency of the band-pass filter.
+    h_freq : float
+        High cutoff frequency of the band-pass filter.
+    qrs_threshold : float
+        QRS detection threshold; between 0 and 1.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    ecg_events : array
+        Events.
+    """
+    logger.info('Using ICA source to identify heart beats')
+
+    # detecting QRS and generating event file
+    ecg_events = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),
+                              tstart=tstart, thresh_value=qrs_threshold,
+                              l_freq=l_freq, h_freq=h_freq)
+
+    n_events = len(ecg_events)
+
+    ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
+                       event_id * np.ones(n_events)]
+
+    return ecg_events
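+
+
+# Example pipeline for ica_find_ecg_events (a hedged sketch; the source
+# index is hypothetical):
+#
+#     sources = ica.get_sources_raw(raw)
+#     ecg_events = ica_find_ecg_events(raw, sources[3])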
+
+
+@verbose
+def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
+                        h_freq=10, verbose=None):
+    """Locate EOG artifacts from one selected ICA source
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    eog_source : ndarray
+        ICA source resembling EOG to find peaks from.
+    event_id : int
+        The index to assign to found events.
+    l_freq : float
+        Low cutoff frequency of the band-pass filter.
+    h_freq : float
+        High cutoff frequency of the band-pass filter.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eog_events : array
+        Events.
+    """
+    eog_events = _find_eog_events(eog_source[np.newaxis], event_id=event_id,
+                                  l_freq=l_freq, h_freq=h_freq,
+                                  sampling_rate=raw.info['sfreq'],
+                                  first_samp=raw.first_samp)
+    return eog_events
+
+
+def _get_target_ch(container, target):
+    """Aux function"""
+    # auto target selection
+    pick = pick_channels(container.ch_names, include=[target])
+    if len(pick) == 0:
+        raise ValueError('%s not in channel list (%s)' %
+                        (target, container.ch_names))
+    return pick
+
+
+def _find_sources(sources, target, score_func):
+    """Aux function"""
+    if isinstance(score_func, basestring):
+        score_func = score_funcs.get(score_func, score_func)
+
+    if not callable(score_func):
+        raise ValueError('%s is not a valid score_func.' % score_func)
+
+    scores = (score_func(sources, target) if target is not None
+              else score_func(sources, 1))
+
+    return scores
+
+
+def _serialize(dict_, outer_sep=';', inner_sep=':'):
+    """Aux function"""
+
+    s = []
+    for k, v in dict_.items():
+        if callable(v):
+            v = v.__name__
+        for cls in (np.random.RandomState, Covariance):
+            if isinstance(v, cls):
+                v = cls.__name__
+
+        s.append(k + inner_sep + json.dumps(v))
+
+    return outer_sep.join(s)
+
+
+def _deserialize(str_, outer_sep=';', inner_sep=':'):
+    """Aux Function"""
+    out = {}
+    for mapping in str_.split(outer_sep):
+        k, v = mapping.split(inner_sep)
+        vv = json.loads(v)
+        out[k] = vv if not isinstance(vv, unicode) else str(vv)
+
+    return out
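+
+
+# Round-trip example for the two helpers above (a hedged sketch; the key
+# names mirror those written by _write_ica and dict ordering may vary):
+#
+#     s = _serialize({'n_components': 0.9, 'algorithm': 'parallel'})
+#     # s == 'n_components:0.9;algorithm:"parallel"'
+#     params = _deserialize(s)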
+
+
+def _write_ica(fid, ica):
+    """Write an ICA object
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+    ica : instance of ICA
+        The instance of ICA to write.
+    """
+    ica_interface = dict(noise_cov=ica.noise_cov,
+                         n_components=ica.n_components,
+                         n_pca_components=ica.n_pca_components,
+                         max_pca_components=ica.max_pca_components,
+                         current_fit=ica.current_fit,
+                         algorithm=ica.algorithm,
+                         fun=ica.fun,
+                         fun_args=ica.fun_args)
+
+    start_block(fid, FIFF.FIFFB_ICA)
+
+    #   ICA interface params
+    write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
+                 _serialize(ica_interface))
+
+    #   Channel names
+    if ica.ch_names is not None:
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)
+
+    #   Whitener
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica._pre_whitener)
+
+    #   PCA components_
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,
+                        ica.pca_components_)
+
+    #   PCA mean_
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)
+
+    #   PCA explained_variance_
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
+                        ica.pca_explained_variance_)
+
+    #   ICA unmixing
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)
+
+    #   Write bad components
+
+    write_int(fid, FIFF.FIFF_MNE_ICA_BADS, ica.exclude)
+
+    # Done!
+    end_block(fid, FIFF.FIFFB_ICA)
+
+
+@verbose
+def read_ica(fname):
+    """Restore ICA sessions from fif file.
+
+    Parameters
+    ----------
+    fname : str
+        Absolute path to fif file containing ICA matrices.
+
+    Returns
+    -------
+    ica : instance of ICA
+        The ICA estimator.
+    """
+
+    logger.info('Reading %s ...' % fname)
+    fid, tree, _ = fiff_open(fname)
+    ica_data = dir_tree_find(tree, FIFF.FIFFB_ICA)
+
+    if len(ica_data) == 0:
+        fid.close()
+        raise ValueError('Could not find ICA data')
+
+    my_ica_data = ica_data[0]
+    for d in my_ica_data['directory']:
+        kind = d.kind
+        pos = d.pos
+        if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:
+            tag = read_tag(fid, pos)
+            ica_interface = tag.data
+        elif kind == FIFF.FIFF_MNE_ROW_NAMES:
+            tag = read_tag(fid, pos)
+            ch_names = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_WHITENER:
+            tag = read_tag(fid, pos)
+            pre_whitener = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:
+            tag = read_tag(fid, pos)
+            pca_components = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:
+            tag = read_tag(fid, pos)
+            pca_explained_variance = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:
+            tag = read_tag(fid, pos)
+            pca_mean = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_MATRIX:
+            tag = read_tag(fid, pos)
+            unmixing_matrix = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_BADS:
+            tag = read_tag(fid, pos)
+            exclude = tag.data
+
+    fid.close()
+
+    interface = _deserialize(ica_interface)
+    current_fit = interface.pop('current_fit')
+    if interface['noise_cov'] == Covariance.__name__:
+        logger.info('Reading whitener drawn from noise covariance ...')
+
+    logger.info('Now restoring ICA session ...')
+    ica = ICA(**interface)
+    ica.current_fit = current_fit
+    ica.ch_names = ch_names.split(':')
+    ica._pre_whitener = pre_whitener
+    ica.pca_mean_ = pca_mean
+    ica.pca_components_ = pca_components
+    ica.n_components_ = unmixing_matrix.shape[0]
+    ica.pca_explained_variance_ = pca_explained_variance
+    ica.unmixing_matrix_ = unmixing_matrix
+    ica.mixing_matrix_ = linalg.pinv(ica.unmixing_matrix_).T
+    ica.exclude = [] if exclude is None else list(exclude)
+    logger.info('Ready.')
+
+    return ica
+
+
+_ica_node = namedtuple('Node', 'name target score_func criterion')
+
+
+def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
+                    ecg_criterion, eog_ch, eog_score_func, eog_criterion,
+                    skew_criterion, kurt_criterion, var_criterion, add_nodes):
+    """Aux Function"""
+
+    nodes = []
+    if ecg_ch is not None:
+        nodes += [_ica_node('ECG', ecg_ch, ecg_score_func, ecg_criterion)]
+
+    if eog_ch not in [None, []]:
+        if not isinstance(eog_ch, list):
+            eog_ch = [eog_ch]
+        for idx, ch in enumerate(eog_ch):
+            nodes += [_ica_node('EOG %02d' % idx, ch, eog_score_func,
+                      eog_criterion)]
+
+    if skew_criterion is not None:
+        nodes += [_ica_node('skewness', None, stats.skew, skew_criterion)]
+
+    if kurt_criterion is not None:
+        nodes += [_ica_node('kurtosis', None, stats.kurtosis, kurt_criterion)]
+
+    if var_criterion is not None:
+        nodes += [_ica_node('variance', None, np.var, var_criterion)]
+
+    if add_nodes is not None:
+        nodes.extend(add_nodes)
+
+    for node in nodes:
+        scores = ica.find_sources_raw(raw, start=start_find, stop=stop_find,
+                                      target=node.target,
+                                      score_func=node.score_func)
+        if isinstance(node.criterion, float):
+            found = list(np.where(np.abs(scores) > node.criterion)[0])
+        else:
+            found = list(np.atleast_1d(abs(scores).argsort()[node.criterion]))
+
+        case = (len(found), 's' if len(found) > 1 else '', node.name)
+        logger.info('    found %s artifact%s by %s' % case)
+        ica.exclude += found
+
+    logger.info('Artifact indices found:\n    ' + str(ica.exclude).strip('[]'))
+    if len(set(ica.exclude)) != len(ica.exclude):
+        logger.info('    Removing duplicate indices...')
+        ica.exclude = list(set(ica.exclude))
+
+    logger.info('Ready.')
+
+
+@verbose
+def run_ica(raw, n_components, max_pca_components=100,
+            n_pca_components=64, noise_cov=None, random_state=None,
+            algorithm='parallel', fun='logcosh', fun_args=None,
+            verbose=None, picks=None, start=None, stop=None, start_find=None,
+            stop_find=None, ecg_ch=None, ecg_score_func='pearsonr',
+            ecg_criterion=0.1, eog_ch=None, eog_score_func='pearsonr',
+            eog_criterion=0.1, skew_criterion=-1, kurt_criterion=-1,
+            var_criterion=0, add_nodes=None):
+    """Run ICA decomposition on raw data and identify artifact sources
+
+    This function implements an automated artifact removal work flow.
+
+    Hints and caveats:
+    - It is highly recommended to band-pass filter ECG and EOG data and
+      pass the filtered time series instead of the channel names as the
+      ecg_ch and eog_ch arguments.
+    - Please check your results: detection by kurtosis and variance
+      can be powerful, but misclassification of brain signals as
+      noise cannot be precluded. If you are not sure, set those criteria
+      to None.
+    - Consider using shorter times for start_find and stop_find than
+      for start and stop. This can save a lot of computation time.
+
+    Example invocation (taking advantage of the defaults)::
+
+        ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,
+                      ecg_ch='MEG 1531', eog_ch='EOG 061')
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data to decompose.
+    n_components : int | float | None
+        The number of components used for ICA decomposition. If int, it must
+        be smaller than max_pca_components. If None, all PCA components will
+        be used. If float between 0 and 1, components will be selected by
+        the cumulative percentage of explained variance.
+    n_pca_components : int
+        The number of PCA components used after ICA recomposition. The
+        ensuing attribute allows one to balance noise reduction against
+        potential loss of features due to dimensionality reduction. If
+        greater than self.n_components_, the next n_pca_components minus
+        n_components_ PCA components will be added before restoring the
+        sensor space data. The attribute gets updated each time the
+        corresponding parameter in .pick_sources_raw or .pick_sources_epochs
+        is changed.
+    max_pca_components : int | None
+        The number of components used for PCA decomposition. If None, no
+        dimension reduction will be applied and max_pca_components will equal
+        the number of channels supplied on decomposing data.
+    noise_cov : None | instance of mne.cov.Covariance
+        Noise covariance used for whitening. If None, channels are just
+        z-scored.
+    random_state : None | int | instance of np.random.RandomState
+        np.random.RandomState to initialize the FastICA estimation.
+        As the estimation is non-deterministic it can be useful to
+        fix the seed to have reproducible results.
+    algorithm : {'parallel', 'deflation'}
+        Apply parallel or deflational algorithm for FastICA
+    fun : string or function, optional. Default: 'logcosh'
+        The functional form of the G function used in the
+        approximation to neg-entropy. Could be either 'logcosh', 'exp',
+        or 'cube'.
+        You can also provide your own function. It should return a tuple
+        containing the value of the function, and of its derivative, in the
+        point.
+    fun_args : dict, optional
+        Arguments to send to the functional form.
+        If empty and if fun='logcosh', fun_args will take value
+        {'alpha' : 1.0}.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    picks : array-like
+        Channels to be included. This selection remains throughout the
+        initialized ICA session. If None only good data channels are used.
+    start : int | float | None
+        First sample to include for decomposition. If float, data will be
+        interpreted as time in seconds. If None, data will be used from the
+        first sample.
+    stop : int | float | None
+        Last sample to not include for decomposition. If float, data will be
+        interpreted as time in seconds. If None, data will be used to the
+        last sample.
+    start_find : int | float | None
+        First sample to include for artifact search. If float, data will be
+        interpreted as time in seconds. If None, data will be used from the
+        first sample.
+    stop_find : int | float | None
+        Last sample to not include for artifact search. If float, data will be
+        interpreted as time in seconds. If None, data will be used to the last
+        sample.
+    ecg_ch : str | ndarray | None
+        The `target` argument passed to ica.find_sources_raw. Either the
+        name of the ECG channel or the ECG time series. If None, this step
+        will be skipped.
+    ecg_score_func : str | callable
+        The `score_func` argument passed to ica.find_sources_raw. Either
+        the name of a function supported by ICA or a custom function.
+    ecg_criterion : float | int | list-like | slice
+        The criterion used to select sources. If float, sources with an
+        absolute score greater than the criterion will be marked for
+        exclusion. Else, the scores sorted in ascending order of absolute
+        value will be indexed accordingly; e.g. -1 selects the source with
+        the highest score. If None, this step will be skipped.
+    eog_ch : list | str | ndarray | None
+        The `target` argument or the list of target arguments subsequently
+        passed to ica.find_sources_raw. Either the name of the vertical EOG
+        channel or the corresponding EOG time series. If None, this step
+        will be skipped.
+    eog_score_func : str | callable
+        The `score_func` argument passed to ica.find_sources_raw. Either
+        the name of a function supported by ICA or a custom function.
+    eog_criterion : float | int | list-like | slice
+        The criterion used to select sources. If float, sources with an
+        absolute score greater than the criterion will be marked for
+        exclusion. Else, the scores sorted in ascending order of absolute
+        value will be indexed accordingly; e.g. -1 selects the source with
+        the highest score. If None, this step will be skipped.
+    skew_criterion : float | int | list-like | slice
+        The criterion applied to the skewness scores. If float, sources with
+        an absolute score greater than the criterion will be marked for
+        exclusion. Else, the scores sorted in ascending order of absolute
+        value will be indexed accordingly; e.g. -1 selects the source with
+        the highest score. If None, this step will be skipped.
+    kurt_criterion : float | int | list-like | slice
+        The criterion applied to the kurtosis scores. If float, sources with
+        an absolute score greater than the criterion will be marked for
+        exclusion. Else, the scores sorted in ascending order of absolute
+        value will be indexed accordingly; e.g. -1 selects the source with
+        the highest score. If None, this step will be skipped.
+    var_criterion : float | int | list-like | slice
+        The criterion applied to the variance scores. If float, sources with
+        an absolute score greater than the criterion will be marked for
+        exclusion. Else, the scores sorted in ascending order of absolute
+        value will be indexed accordingly; e.g. -1 selects the source with
+        the highest score. If None, this step will be skipped.
+    add_nodes : list of ica_nodes
+        Additional list of tuples carrying the following parameters:
+        (name : str, target : str | array, score_func : callable,
+        criterion : float | int | list-like | slice). This parameter is a
+        generalization of the artifact-specific parameters above and has
+        the same structure. Example::
+
+            add_nodes=[('ECG phase lock', 'ECG 01',
+                        my_phase_lock_function, 0.5)]
+
+    Returns
+    -------
+    ica : instance of ICA
+        The ICA object with detected artifact sources marked for exclusion.
+    """
+    ica = ICA(n_components=n_components, max_pca_components=max_pca_components,
+              n_pca_components=n_pca_components, noise_cov=noise_cov,
+              random_state=random_state, algorithm=algorithm, fun=fun,
+              fun_args=fun_args, verbose=verbose)
+
+    ica.decompose_raw(raw, start=start, stop=stop, picks=picks)
+    logger.info('%s' % ica)
+    logger.info('    Now searching for artifacts...')
+
+    _detect_artifacts(ica=ica, raw=raw, start_find=start_find,
+                      stop_find=stop_find, ecg_ch=ecg_ch,
+                      ecg_score_func=ecg_score_func,
+                      ecg_criterion=ecg_criterion, eog_ch=eog_ch,
+                      eog_score_func=eog_score_func,
+                      eog_criterion=eog_criterion,
+                      skew_criterion=skew_criterion,
+                      kurt_criterion=kurt_criterion,
+                      var_criterion=var_criterion,
+                      add_nodes=add_nodes)
+
+    return ica
diff --git a/mne/preprocessing/maxfilter.py b/mne/preprocessing/maxfilter.py
new file mode 100644
index 0000000..cee2f69
--- /dev/null
+++ b/mne/preprocessing/maxfilter.py
@@ -0,0 +1,281 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os
+from warnings import warn
+
+import numpy as np
+from scipy import optimize, linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..fiff import Raw
+from ..fiff.constants import FIFF
+from .. import verbose
+
+
+@verbose
+def fit_sphere_to_headshape(info, verbose=None):
+    """ Fit a sphere to the headshape points to determine head center for
+        maxfilter.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    radius : float
+        Sphere radius in mm.
+    origin_head : ndarray
+        Head center in head coordinates (mm).
+    origin_device : ndarray
+        Head center in device coordinates (mm).
+
+    """
+    # get head digitization points, excluding some frontal points (nose etc.)
+    hsp = [p['r'] for p in info['dig'] if p['kind'] == FIFF.FIFFV_POINT_EXTRA
+           and not (p['r'][2] < 0 and p['r'][1] > 0)]
+
+    if len(hsp) == 0:
+        raise ValueError('No head digitization points found')
+
+    hsp = 1e3 * np.array(hsp)
+
+    # initial guess for center and radius
+    xradius = (np.max(hsp[:, 0]) - np.min(hsp[:, 0])) / 2
+    yradius = (np.max(hsp[:, 1]) - np.min(hsp[:, 1])) / 2
+
+    radius_init = (xradius + yradius) / 2
+    center_init = np.array([0.0, 0.0, np.max(hsp[:, 2]) - radius_init])
+
+    # optimization
+    x0 = np.r_[center_init, radius_init]
+    cost_fun = lambda x, hsp:\
+        np.sum((np.sqrt(np.sum((hsp - x[:3]) ** 2, axis=1)) - x[3]) ** 2)
+
+    disp = logger.level <= logging.INFO
+    x_opt = optimize.fmin_powell(cost_fun, x0, args=(hsp,), disp=disp)
+
+    origin_head = x_opt[:3]
+    radius = x_opt[3]
+
+    # compute origin in device coordinates
+    trans = info['dev_head_t']
+    if trans['from'] != FIFF.FIFFV_COORD_DEVICE\
+        or trans['to'] != FIFF.FIFFV_COORD_HEAD:
+            raise RuntimeError('device to head transform not found')
+
+    head_to_dev = linalg.inv(trans['trans'])
+    origin_device = 1e3 * np.dot(head_to_dev,
+                                 np.r_[1e-3 * origin_head, 1.0])[:3]
+
+    logger.info('Fitted sphere: r = %0.1f mm' % radius)
+    logger.info('Origin head coordinates: %0.1f %0.1f %0.1f mm' %
+                (origin_head[0], origin_head[1], origin_head[2]))
+    logger.info('Origin device coordinates: %0.1f %0.1f %0.1f mm' %
+                (origin_device[0], origin_device[1], origin_device[2]))
+
+    return radius, origin_head, origin_device
+
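+# A minimal usage sketch, assuming ``fname`` is a hypothetical raw FIF file
+# that contains head digitization points:
+#
+#     >>> raw = Raw(fname)
+#     >>> radius, o_head, o_dev = fit_sphere_to_headshape(raw.info)
+#     >>> raw.close()
+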
+
+def _mxwarn(msg):
+    warn('Possible MaxFilter bug: %s, more info: '
+          'http://imaging.mrc-cbu.cam.ac.uk/meg/maxbugs' % msg)
+
+
+@verbose
+def apply_maxfilter(in_fname, out_fname, origin=None, frame='device',
+                    bad=None, autobad='off', skip=None, force=False,
+                    st=False, st_buflen=16.0, st_corr=0.96, mv_trans=None,
+                    mv_comp=False, mv_headpos=False, mv_hp=None,
+                    mv_hpistep=None, mv_hpisubt=None, mv_hpicons=True,
+                    linefreq=None, mx_args='', overwrite=True,
+                    verbose=None):
+
+    """ Apply NeuroMag MaxFilter to raw data.
+
+        Requires a MaxFilter license; the maxfilter binary must be on the
+        PATH.
+
+    Parameters
+    ----------
+    in_fname : string
+        Input file name
+
+    out_fname : string
+        Output file name
+
+    origin : array-like or string
+        Head origin in mm. If None it will be estimated from headshape points.
+
+    frame : string ('device' or 'head')
+        Coordinate frame for head center
+
+    bad : string, list (or None)
+        List of static bad channels. Can be a list with channel names, or a
+        string with channels (names or logical channel numbers)
+
+    autobad : string ('on', 'off', 'n')
+        Sets automated bad channel detection on or off
+
+    skip : string or a list of float-tuples (or None)
+        Skips raw data sequences; time-interval pairs in seconds,
+        e.g.: 0 30 120 150
+
+    force : bool
+        Ignore program warnings
+
+    st : bool
+        Apply the time-domain MaxST extension
+
+    st_buflen : float
+        MaxSt buffer length in sec (disabled if st is False)
+
+    st_corr : float
+        MaxSt subspace correlation limit (disabled if st is False)
+
+    mv_trans : string (filename or 'default') (or None)
+        Transforms the data into the coil definitions of in_fname, or into the
+        default frame (None: don't use option)
+
+    mv_comp : bool (or 'inter')
+        Estimates and compensates head movements in continuous raw data
+
+    mv_headpos : bool
+        Estimates and stores head position parameters, but does not compensate
+        movements (disabled if mv_comp is False)
+
+    mv_hp : string (or None)
+        Stores head position data in an ascii file
+        (disabled if mv_comp is False)
+
+    mv_hpistep : float (or None)
+        Sets head position update interval in ms (disabled if mv_comp is False)
+
+    mv_hpisubt : string ('amp', 'base', 'off') (or None)
+        Subtracts hpi signals: sine amplitudes, amp + baseline, or switch off
+        (disabled if mv_comp is False)
+
+    mv_hpicons : bool
+        Check initial consistency isotrak vs hpifit
+        (disabled if mv_comp is False)
+
+    linefreq : int (50, 60) (or None)
+        Sets the basic line interference frequency (50 or 60 Hz)
+        (None: do not use line filter)
+
+    mx_args : string
+        Additional command line arguments to pass to MaxFilter
+
+    overwrite : bool
+        Overwrite output file if it already exists
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    origin : string
+        Head origin in selected coordinate frame
+    """
+
+    # check for possible maxfilter bugs
+    if mv_trans is not None and mv_comp:
+        _mxwarn("Don't use '-trans' with head-movement compensation "
+                "'-movecomp'")
+
+    if autobad != 'off' and (mv_headpos or mv_comp):
+        _mxwarn("Don't use '-autobad' with head-position estimation "
+                "'-headpos' or movement compensation '-movecomp'")
+
+    if st and autobad != 'off':
+        _mxwarn("Don't use '-autobad' with '-st' option")
+
+    # determine the head origin if necessary
+    if origin is None:
+        logger.info('Estimating head origin from headshape points...')
+        raw = Raw(in_fname)
+        r, o_head, o_dev = fit_sphere_to_headshape(raw.info)
+        raw.close()
+        logger.info('[done]')
+        if frame == 'head':
+            origin = o_head
+        elif frame == 'device':
+            origin = o_dev
+        else:
+            raise RuntimeError('invalid frame for origin')
+
+    if not isinstance(origin, basestring):
+        origin = '%0.1f %0.1f %0.1f' % (origin[0], origin[1], origin[2])
+
+    # format command
+    cmd = ('maxfilter -f %s -o %s -frame %s -origin %s '
+           % (in_fname, out_fname, frame, origin))
+
+    if bad is not None:
+        # format the channels
+        if not isinstance(bad, list):
+            bad = bad.split()
+        bad = map(str, bad)
+        bad_logic = [ch[3:] if ch.startswith('MEG') else ch for ch in bad]
+        bad_str = ' '.join(bad_logic)
+
+        cmd += '-bad %s ' % bad_str
+
+    cmd += '-autobad %s ' % autobad
+
+    if skip is not None:
+        if isinstance(skip, list):
+            skip = ' '.join(['%0.3f %0.3f' % (s[0], s[1]) for s in skip])
+        cmd += '-skip %s ' % skip
+
+    if force:
+        cmd += '-force '
+
+    if st:
+        cmd += '-st %d ' % st_buflen
+        cmd += '-corr %0.4f ' % st_corr
+
+    if mv_trans is not None:
+        cmd += '-trans %s ' % mv_trans
+
+    if mv_comp:
+        cmd += '-movecomp '
+        if mv_comp == 'inter':
+            cmd += ' inter '
+
+        if mv_headpos:
+            cmd += '-headpos '
+
+        if mv_hp is not None:
+            cmd += '-hp %s ' % mv_hp
+
+        if mv_hpisubt is not None:
+            cmd += '-hpisubt %s ' % mv_hpisubt
+
+        if mv_hpicons:
+            cmd += '-hpicons '
+
+    if linefreq is not None:
+        cmd += '-linefreq %d ' % linefreq
+
+    cmd += mx_args
+
+    if overwrite and os.path.exists(out_fname):
+        os.remove(out_fname)
+
+    logger.info('Running MaxFilter: %s ' % cmd)
+    status = os.system(cmd)
+    if status != 0:
+        raise RuntimeError('MaxFilter returned non-zero exit status %d'
+                           % status)
+    logger.info('[done]')
+
+    return origin
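+
+# A minimal usage sketch (assumptions: the ``maxfilter`` binary is on the
+# PATH, and ``in_fname`` / ``out_fname`` are hypothetical file names):
+#
+#     >>> origin = apply_maxfilter(in_fname, out_fname, st=True,
+#     ...                          st_corr=0.96, linefreq=50)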
diff --git a/mne/preprocessing/peak_finder.py b/mne/preprocessing/peak_finder.py
new file mode 100644
index 0000000..9235197
--- /dev/null
+++ b/mne/preprocessing/peak_finder.py
@@ -0,0 +1,172 @@
+import numpy as np
+from math import ceil
+
+import logging
+logger = logging.getLogger('mne')
+
+from .. import verbose
+from .. utils import deprecated
+
+
+@verbose
+def peak_finder(x0, thresh=None, extrema=1, verbose=None):
+    """Noise tolerant fast peak finding algorithm
+
+    Parameters
+    ----------
+    x0 : 1d array
+        A real vector from which the maxima will be found (required).
+    thresh : float
+        The amount above surrounding data for a peak to be
+        identified (default = (max(x0)-min(x0))/4). Larger values mean
+        the algorithm is more selective in finding peaks.
+    extrema : {-1, 1}
+        1 if maxima are desired, -1 if minima are desired
+        (default = maxima, 1).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    peak_loc : array
+        The indices of the identified peaks in x0
+    peak_mag : array
+        The magnitude of the identified peaks
+
+    Note
+    ----
+    If repeated values are found, the first is identified as the peak.
+    Converted from the original Matlab code by
+    Nathanael C. Yoder (ncyoder at purdue.edu)
+
+    Example
+    -------
+    t = np.arange(0, 10.0001, 1e-4)
+    x = (12 * np.sin(10 * 2 * np.pi * t) - 3 * np.sin(0.1 * 2 * np.pi * t)
+         + np.random.randn(len(t)))
+    x[1249:1255] = np.max(x)
+    peak_finder(x)
+    """
+
+    x0 = np.asanyarray(x0)
+
+    if x0.ndim >= 2:
+        raise ValueError('The input data must be a 1D vector')
+
+    s = x0.size
+
+    if thresh is None:
+        thresh = (np.max(x0) - np.min(x0)) / 4
+
+    assert extrema in [-1, 1]
+
+    if extrema == -1:
+        x0 = extrema * x0  # Make it so we are finding maxima regardless
+
+    dx0 = np.diff(x0)  # Find derivative
+    # This is so we find the first of repeated values
+    dx0[dx0 == 0] = -np.finfo(float).eps
+    # Find where the derivative changes sign
+    ind = np.where(dx0[:-1:] * dx0[1::] < 0)[0] + 1
+
+    # Include endpoints in potential peaks and valleys
+    x = np.concatenate((x0[:1], x0[ind], x0[-1:]))
+    ind = np.concatenate(([0], ind, [s - 1]))
+
+    #  x only has the peaks, valleys, and endpoints
+    length = x.size
+    min_mag = np.min(x)
+
+    if length > 2:  # Function with peaks and valleys
+
+        # Set initial parameters for loop
+        temp_mag = min_mag
+        found_peak = False
+        left_min = min_mag
+
+        # Deal with the first point a little differently since we tacked
+        # it on. Calculate the sign of the derivative: since we tacked the
+        # first point on, it does not necessarily alternate like the rest.
+        signDx = np.sign(np.diff(x[:3]))
+        if signDx[0] <= 0:  # The first point is larger or equal to the second
+            ii = -1
+            if signDx[0] == signDx[1]:  # Want alternating signs
+                x = np.concatenate((x[:1], x[2:]))
+                ind = np.concatenate((ind[:1], ind[2:]))
+                length -= 1
+
+        else:  # First point is smaller than the second
+            ii = 0
+            if signDx[0] == signDx[1]:  # Want alternating signs
+                x = x[1:]
+                ind = ind[1:]
+                length -= 1
+
+        # Preallocate max number of maxima
+        maxPeaks = ceil(length / 2.0)
+        peak_loc = np.zeros(maxPeaks, dtype=np.int)
+        peak_mag = np.zeros(maxPeaks)
+        c_ind = 0
+        # Loop through extrema which should be peaks and then valleys
+        while ii < (length - 1):
+            ii += 1  # This is a peak
+            # Reset peak finding if we had a peak and the next peak is bigger
+            # than the last or the left min was small enough to reset.
+            if found_peak and ((x[ii] > peak_mag[-1])
+                              or (left_min < peak_mag[-1] - thresh)):
+                temp_mag = min_mag
+                found_peak = False
+
+            # Make sure we don't iterate past the length of our vector
+            if ii == length - 1:
+                break  # We assign the last point differently out of the loop
+
+            # Found a new peak that is larger than temp_mag and at least
+            # thresh above the minimum to its left.
+            if (x[ii] > temp_mag) and (x[ii] > left_min + thresh):
+                temp_loc = ii
+                temp_mag = x[ii]
+
+            ii += 1  # Move onto the valley
+            # Come down at least thresh from peak
+            if not found_peak and (temp_mag > (thresh + x[ii])):
+                found_peak = True  # We have found a peak
+                left_min = x[ii]
+                peak_loc[c_ind] = temp_loc  # Add peak to index
+                peak_mag[c_ind] = temp_mag
+                c_ind += 1
+            elif x[ii] < left_min:  # New left minima
+                left_min = x[ii]
+
+        # Check end point
+        if (x[-1] > temp_mag) and (x[-1] > (left_min + thresh)):
+            peak_loc[c_ind] = length - 1
+            peak_mag[c_ind] = x[-1]
+            c_ind += 1
+        elif not found_peak and temp_mag > min_mag:
+            # Check if we still need to add the last point
+            peak_loc[c_ind] = temp_loc
+            peak_mag[c_ind] = temp_mag
+            c_ind += 1
+
+        # Create output
+        peak_inds = ind[peak_loc[:c_ind]]
+        peak_mags = peak_mag[:c_ind]
+    else:  # This is a monotone function where an endpoint is the only peak
+        x_ind = np.argmax(x)
+        peak_mags = x[x_ind]
+        if peak_mags > (min_mag + thresh):
+            peak_inds = ind[x_ind]
+        else:
+            peak_mags = np.array([])
+            peak_inds = np.array([], dtype=int)
+
+    # Change the sign back if we were finding minima
+    if extrema < 0:
+        peak_mags *= -1.0
+        x0 = -x0
+
+    # Report if no significant peaks were found
+    if len(peak_inds) == 0:
+        logger.info('No significant peaks found')
+
+    return peak_inds, peak_mags
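+
+# A minimal usage sketch: passing extrema=-1 finds minima instead of
+# maxima (the returned magnitudes keep the original sign):
+#
+#     >>> x = np.array([0., 2., 5., 0., 6., -1.])
+#     >>> trough_inds, trough_mags = peak_finder(x, extrema=-1)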
diff --git a/mne/preprocessing/ssp.py b/mne/preprocessing/ssp.py
new file mode 100644
index 0000000..56a3301
--- /dev/null
+++ b/mne/preprocessing/ssp.py
@@ -0,0 +1,381 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+from warnings import warn
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+from .. import Epochs, compute_proj_evoked, compute_proj_epochs, verbose
+from ..fiff import pick_types, make_eeg_average_ref_proj
+from .ecg import find_ecg_events
+from .eog import find_eog_events
+
+
+def _safe_del_key(dict_, key):
+    """ Aux function
+
+    Use this function when preparing rejection parameters
+    instead of directly deleting keys.
+    """
+    if key in dict_:
+        del dict_[key]
+
+
+@verbose
+def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
+                      n_grad, n_mag, n_eeg, l_freq, h_freq,
+                      average, filter_length, n_jobs, ch_name,
+                      reject, flat, bads, avg_ref, no_proj, event_id,
+                      exg_l_freq, exg_h_freq, tstart, qrs_threshold,
+                      filter_method, iir_params=dict(order=4, ftype='butter'),
+                      verbose=None):
+    """Compute SSP/PCA projections for ECG or EOG artifacts
+
+    Note: raw has to be constructed with preload=True (or string)
+    Warning: raw will be modified by this function
+
+    Parameters
+    ----------
+    mode : string ('ECG', or 'EOG')
+        What type of events to detect.
+    raw : mne.fiff.Raw
+        Raw input file.
+    raw_event : mne.fiff.Raw or None
+        Raw file to use for event detection (if None, raw is used).
+    tmin : float
+        Time before event in seconds.
+    tmax : float
+        Time after event in seconds.
+    n_grad : int
+        Number of SSP vectors for gradiometers.
+    n_mag : int
+        Number of SSP vectors for magnetometers.
+    n_eeg : int
+        Number of SSP vectors for EEG.
+    l_freq : float | None
+        Filter low cut-off frequency in Hz.
+    h_freq : float | None
+        Filter high cut-off frequency in Hz.
+    average : bool
+        Compute SSP after averaging.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    ch_name : string (or None)
+        Channel to use for ECG event detection.
+    reject : dict
+        Epoch rejection configuration (see Epochs).
+    flat : dict
+        Epoch flat configuration (see Epochs).
+    bads : list
+        List with (additional) bad channels.
+    avg_ref : bool
+        Add EEG average reference proj.
+    no_proj : bool
+        Exclude the SSP projectors currently in the fiff file.
+    event_id : int
+        ID to use for events.
+    exg_l_freq : float
+        Low pass frequency applied for filtering EXG channel.
+    exg_h_freq : float
+        High pass frequency applied for filtering EXG channel.
+    tstart : float
+        Start artifact detection after tstart seconds.
+    qrs_threshold : float
+        Between 0 and 1. qrs detection threshold (only for ECG).
+    filter_method : str
+        Method for filtering ('iir' or 'fft').
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    proj : list
+        Computed SSP projectors.
+    events : ndarray
+        Detected events.
+    """
+    if not raw._preloaded:
+        raise ValueError('raw needs to be preloaded, '
+                         'use preload=True in constructor')
+
+    if no_proj:
+        projs = []
+    else:
+        projs = cp.deepcopy(raw.info['projs'])
+        logger.info('Including %d SSP projectors from raw file'
+                    % len(projs))
+
+    if avg_ref:
+        eeg_proj = make_eeg_average_ref_proj(raw.info)
+        projs.append(eeg_proj)
+
+    if raw_event is None:
+        raw_event = raw
+
+    if mode == 'ECG':
+        logger.info('Running ECG SSP computation')
+        events, _, _ = find_ecg_events(raw_event, ch_name=ch_name,
+                                       event_id=event_id, l_freq=exg_l_freq,
+                                       h_freq=exg_h_freq, tstart=tstart,
+                                       qrs_threshold=qrs_threshold,
+                                       filter_length=filter_length)
+    elif mode == 'EOG':
+        logger.info('Running EOG SSP computation')
+        events = find_eog_events(raw_event, event_id=event_id,
+                           l_freq=exg_l_freq, h_freq=exg_h_freq,
+                           filter_length=filter_length, ch_name=ch_name,
+                           tstart=tstart)
+    else:
+        raise ValueError("mode must be 'ECG' or 'EOG'")
+
+    # Check to make sure we actually got at least one usable event
+    if events.shape[0] < 1:
+        warn('No %s events found, returning None for projs' % mode)
+        return None, events
+
+    logger.info('Computing projector')
+    my_info = cp.deepcopy(raw.info)
+    my_info['bads'] += bads
+
+    # Handle rejection parameters
+    if reject is not None:  # make sure they didn't pass None
+        if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
+                          exclude='bads')) == 0:
+            _safe_del_key(reject, 'grad')
+        if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
+                          exclude='bads')) == 0:
+            _safe_del_key(reject, 'mag')
+        if len(pick_types(my_info, meg=False, eeg=True, eog=False,
+                          exclude='bads')) == 0:
+            _safe_del_key(reject, 'eeg')
+        if len(pick_types(my_info, meg=False, eeg=False, eog=True,
+                          exclude='bads')) == 0:
+            _safe_del_key(reject, 'eog')
+    if flat is not None:  # make sure they didn't pass None
+        if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
+                          exclude='bads')) == 0:
+            _safe_del_key(flat, 'grad')
+        if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
+                          exclude='bads')) == 0:
+            _safe_del_key(flat, 'mag')
+        if len(pick_types(my_info, meg=False, eeg=True, eog=False,
+                          exclude='bads')) == 0:
+            _safe_del_key(flat, 'eeg')
+        if len(pick_types(my_info, meg=False, eeg=False, eog=True,
+                          exclude='bads')) == 0:
+            _safe_del_key(flat, 'eog')
+
+    # exclude bad channels from projection
+    picks = pick_types(my_info, meg=True, eeg=True, eog=True, exclude='bads')
+    raw.filter(l_freq, h_freq, picks=picks, filter_length=filter_length,
+               n_jobs=n_jobs, method=filter_method, iir_params=iir_params)
+
+    epochs = Epochs(raw, events, None, tmin, tmax, baseline=None, preload=True,
+                    picks=picks, reject=reject, flat=flat, proj=True)
+
+    epochs.drop_bad_epochs()
+    if epochs.events.shape[0] < 1:
+        warn('No good epochs found, returning None for projs')
+        return None, events
+
+    if average:
+        evoked = epochs.average()
+        ev_projs = compute_proj_evoked(evoked, n_grad=n_grad, n_mag=n_mag,
+                                       n_eeg=n_eeg)
+    else:
+        ev_projs = compute_proj_epochs(epochs, n_grad=n_grad, n_mag=n_mag,
+                                       n_eeg=n_eeg, n_jobs=n_jobs)
+
+    for p in ev_projs:
+        p['desc'] = mode + "-" + p['desc']
+
+    projs.extend(ev_projs)
+
+    logger.info('Done.')
+
+    return projs, events
+
+
+@verbose
+def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
+                     n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
+                     average=False, filter_length='10s', n_jobs=1, ch_name=None,
+                     reject=dict(grad=2000e-13, mag=3000e-15, eeg=50e-6,
+                     eog=250e-6), flat=None, bads=[], avg_ref=False,
+                     no_proj=False, event_id=999, ecg_l_freq=5, ecg_h_freq=35,
+                     tstart=0., qrs_threshold=0.6, filter_method='fft',
+                     iir_params=dict(order=4, ftype='butter'), verbose=None):
+    """Compute SSP/PCA projections for ECG artifacts
+
+    Note: raw has to be constructed with preload=True (or string)
+    Warning: raw will be modified by this function
+
+    Parameters
+    ----------
+    raw : mne.fiff.Raw
+        Raw input file.
+    raw_event : mne.fiff.Raw or None
+        Raw file to use for event detection (if None, raw is used).
+    tmin : float
+        Time before event in seconds.
+    tmax : float
+        Time after event in seconds.
+    n_grad : int
+        Number of SSP vectors for gradiometers.
+    n_mag : int
+        Number of SSP vectors for magnetometers.
+    n_eeg : int
+        Number of SSP vectors for EEG.
+    l_freq : float | None
+        Filter low cut-off frequency in Hz.
+    h_freq : float | None
+        Filter high cut-off frequency in Hz.
+    average : bool
+        Compute SSP after averaging.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    ch_name : string (or None)
+        Channel to use for ECG detection (Required if no ECG found).
+    reject : dict
+        Epoch rejection configuration (see Epochs).
+    flat : dict
+        Epoch flat configuration (see Epochs).
+    bads : list
+        List with (additional) bad channels.
+    avg_ref : bool
+        Add EEG average reference proj.
+    no_proj : bool
+        Exclude the SSP projectors currently in the fiff file.
+    event_id : int
+        ID to use for events.
+    ecg_l_freq : float
+        Low pass frequency applied for filtering ECG channel.
+    ecg_h_freq : float
+        High pass frequency applied for filtering ECG channel.
+    tstart : float
+        Start artifact detection after tstart seconds.
+    qrs_threshold : float
+        Between 0 and 1. qrs detection threshold.
+    filter_method : str
+        Method for filtering ('iir' or 'fft').
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    proj : list
+        Computed SSP projectors.
+    ecg_events : ndarray
+        Detected ECG events.
+    """
+
+    projs, ecg_events = _compute_exg_proj('ECG', raw, raw_event, tmin, tmax,
+                        n_grad, n_mag, n_eeg, l_freq, h_freq,
+                        average, filter_length, n_jobs, ch_name,
+                        reject, flat, bads, avg_ref, no_proj, event_id,
+                        ecg_l_freq, ecg_h_freq, tstart, qrs_threshold,
+                        filter_method, iir_params)
+
+    return projs, ecg_events
+
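+# A minimal usage sketch, assuming a preloaded Raw instance ``raw`` (the
+# ECG channel name 'MEG 1531' is an assumption; a magnetometer close to
+# the heart often works):
+#
+#     >>> projs, ecg_events = compute_proj_ecg(raw, ch_name='MEG 1531',
+#     ...                                      average=True)
+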
+
+@verbose
+def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
+                     n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
+                     average=False, filter_length='10s', n_jobs=1,
+                     reject=dict(grad=2000e-13, mag=3000e-15, eeg=500e-6,
+                     eog=np.inf), flat=None, bads=[], avg_ref=False,
+                     no_proj=False, event_id=998, eog_l_freq=1, eog_h_freq=10,
+                     tstart=0., filter_method='fft',
+                     iir_params=dict(order=4, ftype='butter'), ch_name=None,
+                     verbose=None):
+    """Compute SSP/PCA projections for EOG artifacts
+
+    Note: raw has to be constructed with preload=True (or string)
+    Warning: raw will be modified by this function
+
+    Parameters
+    ----------
+    raw : mne.fiff.Raw
+        Raw input file.
+    raw_event : mne.fiff.Raw or None
+        Raw file to use for event detection (if None, raw is used).
+    tmin : float
+        Time before event in seconds.
+    tmax : float
+        Time after event in seconds.
+    n_grad : int
+        Number of SSP vectors for gradiometers.
+    n_mag : int
+        Number of SSP vectors for magnetometers.
+    n_eeg : int
+        Number of SSP vectors for EEG.
+    l_freq : float | None
+        Filter low cut-off frequency in Hz.
+    h_freq : float | None
+        Filter high cut-off frequency in Hz.
+    average : bool
+        Compute SSP after averaging.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    reject : dict
+        Epoch rejection configuration (see Epochs).
+    flat : dict
+        Epoch flat configuration (see Epochs).
+    bads : list
+        List with (additional) bad channels.
+    avg_ref : bool
+        Add EEG average reference proj.
+    no_proj : bool
+        Exclude the SSP projectors currently in the fiff file.
+    event_id : int
+        ID to use for events.
+    eog_l_freq : float
+        Low pass frequency applied for filtering the EOG channel.
+    eog_h_freq : float
+        High pass frequency applied for filtering the EOG channel.
+    tstart : float
+        Start artifact detection after tstart seconds.
+    filter_method : str
+        Method for filtering ('iir' or 'fft').
+    ch_name : str | None
+        If not None, specify EOG channel name.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    proj : list
+        Computed SSP projectors.
+    eog_events : ndarray
+        Detected EOG events.
+    """
+
+    projs, eog_events = _compute_exg_proj('EOG', raw, raw_event, tmin, tmax,
+                        n_grad, n_mag, n_eeg, l_freq, h_freq,
+                        average, filter_length, n_jobs, ch_name,
+                        reject, flat, bads, avg_ref, no_proj, event_id,
+                        eog_l_freq, eog_h_freq, tstart, qrs_threshold=0.6,
+                        filter_method=filter_method, iir_params=iir_params)
+
+    return projs, eog_events
\ No newline at end of file
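+
+# A minimal usage sketch, assuming a preloaded Raw instance ``raw``; the
+# EOG channel is picked automatically unless ch_name is given:
+#
+#     >>> projs, eog_events = compute_proj_eog(raw, n_grad=2, n_mag=2,
+#     ...                                      n_eeg=2, average=True)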
diff --git a/mne/preprocessing/stim.py b/mne/preprocessing/stim.py
new file mode 100644
index 0000000..eb54afc
--- /dev/null
+++ b/mne/preprocessing/stim.py
@@ -0,0 +1,66 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy import signal, interpolate
+
+from ..fiff import pick_types
+
+
+def eliminate_stim_artifact(raw, events, event_id, tmin=-0.005,
+                            tmax=0.01, mode='linear'):
+    """Eliminates stimulations artifacts from raw data
+
+    The raw object will be modified in place (no copy)
+
+    Parameters
+    ----------
+    raw : Raw object
+        raw data object.
+    events : array, shape (n_events, 3)
+        The list of events.
+    event_id : int
+        The id of the events generating the stimulation artifacts.
+    tmin : float
+        Start time before event in seconds.
+    tmax : float
+        End time after event in seconds.
+    mode : 'linear' | 'window'
+        Way to fill the artifact-contaminated time interval:
+        'linear' does linear interpolation,
+        'window' applies a (1 - Hanning) window.
+
+    Returns
+    -------
+    raw : Raw object
+        raw data object.
+    """
+    if not raw._preloaded:
+        raise RuntimeError('Modifying data of Raw is only supported '
+                           'when preloading is used. Use preload=True '
+                           '(or string) in the constructor.')
+    events_sel = (events[:, 2] == event_id)
+    event_start = events[events_sel, 0]
+    s_start = int(np.ceil(raw.info['sfreq'] * np.abs(tmin)))
+    s_end = int(np.ceil(raw.info['sfreq'] * tmax))
+
+    picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
+
+    if mode == 'window':
+        window = 1 - np.r_[signal.hann(4)[:2], np.ones(s_end + s_start - 4),
+                           signal.hann(4)[-2:]].T
+
+    for k in range(len(event_start)):
+        first_samp = int(event_start[k]) - raw.first_samp - s_start
+        last_samp = int(event_start[k]) - raw.first_samp + s_end
+        data, _ = raw[picks, first_samp:last_samp]
+        if mode == 'linear':
+            x = np.array([first_samp, last_samp])
+            f = interpolate.interp1d(x, data[:, (0, -1)])
+            xnew = np.arange(first_samp, last_samp)
+            interp_data = f(xnew)
+            raw[picks, first_samp:last_samp] = interp_data
+        elif mode == 'window':
+            raw[picks, first_samp:last_samp] = data * window[np.newaxis, :]
+    return raw
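+
+# A minimal usage sketch, assuming a preloaded Raw instance ``raw`` and an
+# events array whose stimulation events carry id 1:
+#
+#     >>> raw = eliminate_stim_artifact(raw, events, event_id=1,
+#     ...                               tmin=-0.005, tmax=0.01,
+#     ...                               mode='window')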
diff --git a/mne/preprocessing/tests/__init__.py b/mne/preprocessing/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/preprocessing/tests/test_ecg.py b/mne/preprocessing/tests/test_ecg.py
new file mode 100644
index 0000000..7aeb87b
--- /dev/null
+++ b/mne/preprocessing/tests/test_ecg.py
@@ -0,0 +1,21 @@
+import os.path as op
+
+from nose.tools import assert_true
+
+from mne.fiff import Raw
+from mne.preprocessing.ecg import find_ecg_events
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+event_fname = op.join(data_path, 'test-eve.fif')
+proj_fname = op.join(data_path, 'test_proj.fif')
+
+
+def test_find_ecg():
+    """Test find ECG peaks"""
+    raw = Raw(raw_fname)
+    events, ch_ECG, average_pulse = find_ecg_events(raw, event_id=999,
+                                                    ch_name='MEG 1531')
+    n_events = len(events)
+    _, times = raw[0, :]
+    assert_true(55 < average_pulse < 60)
diff --git a/mne/preprocessing/tests/test_eog.py b/mne/preprocessing/tests/test_eog.py
new file mode 100644
index 0000000..d594045
--- /dev/null
+++ b/mne/preprocessing/tests/test_eog.py
@@ -0,0 +1,18 @@
+import os.path as op
+from nose.tools import assert_true
+
+from mne.fiff import Raw
+from mne.preprocessing.eog import find_eog_events
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+event_fname = op.join(data_path, 'test-eve.fif')
+proj_fname = op.join(data_path, 'test_proj.fif')
+
+
+def test_find_eog():
+    """Test find EOG peaks"""
+    raw = Raw(raw_fname)
+    events = find_eog_events(raw)
+    n_events = len(events)
+    assert_true(n_events == 4)
diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py
new file mode 100644
index 0000000..30ce0bd
--- /dev/null
+++ b/mne/preprocessing/tests/test_ica.py
@@ -0,0 +1,318 @@
+# Author: Denis Engemann <d.engemann at fz-juelich.de>
+#         Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import warnings
+
+from nose.tools import assert_true, assert_raises
+from copy import deepcopy
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from scipy import stats
+from itertools import product
+
+from mne import fiff, Epochs, read_events, cov
+from mne.preprocessing import ICA, ica_find_ecg_events, ica_find_eog_events,\
+                              read_ica, run_ica
+from mne.preprocessing.ica import score_funcs
+from mne.utils import _TempDir, requires_sklearn
+
+tempdir = _TempDir()
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+evoked_nf_name = op.join(data_dir, 'test-nf-ave.fif')
+test_cov_name = op.join(data_dir, 'test-cov.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+start, stop = 0, 8  # if stop is too small pca may fail in some cases, but
+                    # we're okay on this file
+raw = fiff.Raw(raw_fname, preload=True).crop(0, stop, False)
+
+events = read_events(event_name)
+picks = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False,
+                        exclude='bads')
+
+# for testing eog functionality
+picks2 = fiff.pick_types(raw.info, meg=True, stim=False, ecg=False, eog=True,
+                         exclude='bads')
+
+reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+flat = dict(grad=1e-15, mag=1e-15)
+
+test_cov = cov.read_cov(test_cov_name)
+epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                baseline=(None, 0), preload=True)
+
+epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
+                baseline=(None, 0), preload=True)
+
+score_funcs_unsuited = ['pointbiserialr', 'ansari']
+
+
+@requires_sklearn
+def test_ica_core():
+    """Test ICA on raw and epochs
+    """
+    # setup parameter
+    # XXX. The None cases helped reveal bugs but are time-consuming.
+    noise_cov = [None, test_cov]
+    # removed None cases to speed up...
+    n_components = [3, 1.0]  # for future dbg add cases
+    max_pca_components = [4]
+    picks_ = [picks]
+    iter_ica_params = product(noise_cov, n_components, max_pca_components,
+                              picks_)
+
+    # # test init catchers
+    assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
+    assert_raises(ValueError, ICA, n_components=1.3, max_pca_components=2)
+
+    # test essential core functionality
+    for n_cov, n_comp, max_n, pcks in iter_ica_params:
+        # Test ICA raw
+        ica = ICA(noise_cov=n_cov, n_components=n_comp,
+                  max_pca_components=max_n, n_pca_components=max_n,
+                  random_state=0)
+
+        print ica  # to test repr
+
+        # test fit checker
+        assert_raises(RuntimeError, ica.get_sources_raw, raw)
+        assert_raises(RuntimeError, ica.get_sources_epochs, epochs)
+
+        # test decomposition
+        ica.decompose_raw(raw, picks=pcks, start=start, stop=stop)
+        print ica  # to test repr
+        # test re-init exception
+        assert_raises(RuntimeError, ica.decompose_raw, raw, picks=picks)
+
+        sources = ica.get_sources_raw(raw)
+        assert_true(sources.shape[0] == ica.n_components_)
+
+        # test preload filter
+        raw3 = raw.copy()
+        raw3._preloaded = False
+        assert_raises(ValueError, ica.pick_sources_raw, raw3,
+                      include=[1, 2])
+
+        for excl, incl in (([], []), ([], [1, 2]), ([1, 2], [])):
+            raw2 = ica.pick_sources_raw(raw, exclude=excl, include=incl,
+                                        copy=True)
+
+            assert_array_almost_equal(raw2[:, :][1], raw[:, :][1])
+
+        #######################################################################
+        # test epochs decomposition
+
+        # test re-init exception
+        assert_raises(RuntimeError, ica.decompose_epochs, epochs, picks=picks)
+        ica = ICA(noise_cov=n_cov, n_components=n_comp,
+                  max_pca_components=max_n, n_pca_components=max_n,
+                  random_state=0)
+
+        ica.decompose_epochs(epochs, picks=picks)
+        print ica  # to test repr
+        # test pick block after epochs fit
+        assert_raises(ValueError, ica.pick_sources_raw, raw)
+
+        sources = ica.get_sources_epochs(epochs)
+        assert_true(sources.shape[1] == ica.n_components_)
+
+        assert_raises(ValueError, ica.find_sources_epochs, epochs,
+                      target=np.arange(1))
+
+        # test preload filter
+        epochs3 = epochs.copy()
+        epochs3.preload = False
+        assert_raises(ValueError, ica.pick_sources_epochs, epochs3,
+                      include=[1, 2])
+
+        # test source picking
+        for excl, incl in (([], []), ([], [1, 2]), ([1, 2], [])):
+            epochs2 = ica.pick_sources_epochs(epochs, exclude=excl,
+                                      include=incl, copy=True)
+
+            assert_array_almost_equal(epochs2.get_data(),
+                                      epochs.get_data())
+
+
+@requires_sklearn
+def test_ica_additional():
+    """Test additional functionality
+    """
+    stop2 = 500
+
+    test_cov2 = deepcopy(test_cov)
+    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
+              n_pca_components=4)
+    ica.decompose_raw(raw, picks[:5])
+    assert_true(ica.n_components_ < 5)
+
+    ica = ICA(n_components=3, max_pca_components=4,
+              n_pca_components=4)
+    assert_raises(RuntimeError, ica.save, '')
+    ica.decompose_raw(raw, picks=None, start=start, stop=stop2)
+
+    # epochs extraction from raw fit
+    assert_raises(RuntimeError, ica.get_sources_epochs, epochs)
+
+    # test reading and writing
+    test_ica_fname = op.join(op.dirname(tempdir), 'ica_test.fif')
+    for cov in (None, test_cov):
+        ica = ICA(noise_cov=cov, n_components=3, max_pca_components=4,
+                  n_pca_components=4)
+        ica.decompose_raw(raw, picks=picks, start=start, stop=stop2)
+        sources = ica.get_sources_epochs(epochs)
+        assert_true(sources.shape[1] == ica.n_components_)
+
+        for exclude in [[], [0]]:
+            ica.exclude = [0]
+            ica.save(test_ica_fname)
+            ica_read = read_ica(test_ica_fname)
+            assert_true(ica.exclude == ica_read.exclude)
+            # test pick merge -- add components
+            ica.pick_sources_raw(raw, exclude=[1])
+            assert_true(ica.exclude == [0, 1])
+            #                 -- only as arg
+            ica.exclude = []
+            ica.pick_sources_raw(raw, exclude=[0, 1])
+            assert_true(ica.exclude == [0, 1])
+            #                 -- remove duplicates
+            ica.exclude += [1]
+            ica.pick_sources_raw(raw, exclude=[0, 1])
+            assert_true(ica.exclude == [0, 1])
+
+            ica_raw = ica.sources_as_raw(raw)
+            assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
+                                        ica_raw.info['bads']])
+
+        ica.n_pca_components = 2
+        ica.save(test_ica_fname)
+        ica_read = read_ica(test_ica_fname)
+        assert_true(ica.n_pca_components ==
+                    ica_read.n_pca_components)
+        ica.n_pca_components = 4
+        ica_read.n_pca_components = 4
+
+        ica.exclude = []
+        ica.save(test_ica_fname)
+        ica_read = read_ica(test_ica_fname)
+
+        assert_true(ica.ch_names == ica_read.ch_names)
+
+        assert_true(np.allclose(ica.mixing_matrix_, ica_read.mixing_matrix_,
+                                rtol=1e-16, atol=1e-32))
+        assert_array_equal(ica.pca_components_,
+                           ica_read.pca_components_)
+        assert_array_equal(ica.pca_mean_, ica_read.pca_mean_)
+        assert_array_equal(ica.pca_explained_variance_,
+                           ica_read.pca_explained_variance_)
+        assert_array_equal(ica._pre_whitener, ica_read._pre_whitener)
+
+        # assert_raises(RuntimeError, ica_read.decompose_raw, raw)
+        sources = ica.get_sources_raw(raw)
+        sources2 = ica_read.get_sources_raw(raw)
+        assert_array_almost_equal(sources, sources2)
+
+        _raw1 = ica.pick_sources_raw(raw, exclude=[1])
+        _raw2 = ica_read.pick_sources_raw(raw, exclude=[1])
+        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
+
+    os.remove(test_ica_fname)
+    # check score funcs
+    for name, func in score_funcs.items():
+        if name in score_funcs_unsuited:
+            continue
+        scores = ica.find_sources_raw(raw, target='EOG 061', score_func=func,
+                                      start=0, stop=10)
+        assert_true(ica.n_components_ == len(scores))
+
+    # check univariate stats
+    scores = ica.find_sources_raw(raw, score_func=stats.skew)
+    # check exception handling
+    assert_raises(ValueError, ica.find_sources_raw, raw,
+                  target=np.arange(1))
+
+    params = []
+    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
+    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
+    for idx, ch_name in product(*params):
+        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
+                             eog_ch=ch_name, skew_criterion=idx,
+                             var_criterion=idx, kurt_criterion=idx)
+    ## score funcs epochs ##
+
+    # check score funcs
+    for name, func in score_funcs.items():
+        if name in score_funcs_unsuited:
+            continue
+        scores = ica.find_sources_epochs(epochs_eog, target='EOG 061',
+                                         score_func=func)
+        assert_true(ica.n_components_ == len(scores))
+
+    # check univariate stats
+    scores = ica.find_sources_epochs(epochs, score_func=stats.skew)
+
+    # check exception handling
+    assert_raises(ValueError, ica.find_sources_epochs, epochs,
+                  target=np.arange(1))
+
+    # ecg functionality
+    ecg_scores = ica.find_sources_raw(raw, target='MEG 1531',
+                                      score_func='pearsonr')
+
+    ecg_events = ica_find_ecg_events(raw, sources[np.abs(ecg_scores).argmax()])
+
+    assert_true(ecg_events.ndim == 2)
+
+    # eog functionality
+    eog_scores = ica.find_sources_raw(raw, target='EOG 061',
+                                      score_func='pearsonr')
+    eog_events = ica_find_eog_events(raw, sources[np.abs(eog_scores).argmax()])
+
+    assert_true(eog_events.ndim == 2)
+
+    # Test ica fiff export
+    ica_raw = ica.sources_as_raw(raw, start=0, stop=100)
+    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
+    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
+    assert_true(ica.n_components_ == len(ica_chans))
+    test_ica_fname = op.join(op.abspath(op.curdir), 'test_ica.fif')
+    ica_raw.save(test_ica_fname)
+    ica_raw2 = fiff.Raw(test_ica_fname, preload=True)
+    assert_array_almost_equal(ica_raw._data, ica_raw2._data)
+    ica_raw2.close()
+    os.remove(test_ica_fname)
+
+    # Test ica epochs export
+    ica_epochs = ica.sources_as_epochs(epochs)
+    assert_true(ica_epochs.events.shape == epochs.events.shape)
+    sources_epochs = ica.get_sources_epochs(epochs)
+    assert_array_equal(ica_epochs.get_data(), sources_epochs)
+    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
+    assert_true(ica.n_components_ == len(ica_chans))
+    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
+    assert_true(ica_epochs.raw is None)
+    assert_true(ica_epochs.preload == True)
+
+    # regression test for plot method
+    assert_raises(ValueError, ica.plot_sources_raw, raw,
+                  order=np.arange(50))
+    assert_raises(ValueError, ica.plot_sources_epochs, epochs,
+                  order=np.arange(50))
+
+
+def test_run_ica():
+    """Test run_ica function"""
+    params = []
+    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx
+    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
+    for idx, ch_name in product(*params):
+        run_ica(raw, n_components=.9, start=0, stop=100, start_find=0,
+                stop_find=50, ecg_ch=ch_name, eog_ch=ch_name,
+                skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
diff --git a/mne/preprocessing/tests/test_peak_finder.py b/mne/preprocessing/tests/test_peak_finder.py
new file mode 100644
index 0000000..56dbb2f
--- /dev/null
+++ b/mne/preprocessing/tests/test_peak_finder.py
@@ -0,0 +1,10 @@
+from numpy.testing import assert_array_equal
+
+from mne.preprocessing.peak_finder import peak_finder
+
+
+def test_peak_finder():
+    """Test the peak detection method"""
+    x = [0, 2, 5, 0, 6, -1]
+    peak_inds, peak_mags = peak_finder(x)
+    assert_array_equal(peak_inds, [2, 4])
diff --git a/mne/preprocessing/tests/test_ssp.py b/mne/preprocessing/tests/test_ssp.py
new file mode 100644
index 0000000..32758f1
--- /dev/null
+++ b/mne/preprocessing/tests/test_ssp.py
@@ -0,0 +1,94 @@
+import os.path as op
+import warnings
+
+from nose.tools import assert_true, assert_equal
+from numpy.testing import assert_array_almost_equal
+import numpy as np
+
+from ...fiff import Raw
+from ...fiff.proj import make_projector, activate_proj
+from ..ssp import compute_proj_ecg, compute_proj_eog
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+dur_use = 5.0
+eog_times = np.array([0.5, 2.3, 3.6, 14.5])
+raw_0 = Raw(raw_fname, preload=True).crop(0, 10, False)
+raw_0.close()
+
+
+def test_compute_proj_ecg():
+    """Test computation of ECG SSP projectors"""
+    for average in [False, True]:
+        raw = raw_0.copy()
+        # For speed, let's not filter here (must also not reject then)
+        projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                    ch_name='MEG 1531', bads=['MEG 2443'],
+                                    average=average, avg_ref=True,
+                                    no_proj=True, l_freq=None, h_freq=None,
+                                    reject=None, tmax=dur_use,
+                                    qrs_threshold=0.5)
+        assert_true(len(projs) == 7)
+        # heart rate at least 0.5 Hz, but less than 3 Hz
+        assert_true(events.shape[0] > 0.5 * dur_use and
+                    events.shape[0] < 3 * dur_use)
+        #XXX: better tests
+
+        # without setting a bad channel, this should throw a warning (only
+        # thrown once, so it's for average == True)
+        with warnings.catch_warnings(record=True) as w:
+            projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                            ch_name='MEG 1531', bads=[],
+                                            average=average, avg_ref=True,
+                                            no_proj=True, l_freq=None,
+                                            h_freq=None, tmax=dur_use)
+            assert_equal(len(w), 0 if average else 1)
+        assert_equal(projs, None)
+
+
+def test_compute_proj_eog():
+    """Test computation of EOG SSP projectors"""
+    for average in [False, True]:
+        raw = raw_0.copy()
+        n_projs_init = len(raw.info['projs'])
+        projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                     bads=['MEG 2443'], average=average,
+                                     avg_ref=True, no_proj=False, l_freq=None,
+                                     h_freq=None, reject=None, tmax=dur_use)
+        assert_true(len(projs) == (7 + n_projs_init))
+        assert_true(np.abs(events.shape[0] -
+                    np.sum(np.less(eog_times, dur_use))) <= 1)
+        #XXX: better tests
+
+        # This will not throw a warning (?)
+        with warnings.catch_warnings(record=True) as w:
+            projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                         average=average, bads=[],
+                                         avg_ref=True, no_proj=False,
+                                         l_freq=None, h_freq=None,
+                                         tmax=dur_use)
+            assert_equal(len(w), 0)
+        assert_equal(projs, None)
+
+
+def test_compute_proj_parallel():
+    """Test computation of ExG projectors using parallelization"""
+    raw = raw_0.copy()
+    projs, _ = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                bads=['MEG 2443'], average=False,
+                                avg_ref=True, no_proj=False, n_jobs=1,
+                                l_freq=None, h_freq=None, reject=None,
+                                tmax=dur_use)
+    raw_2 = raw_0.copy()
+    projs_2, _ = compute_proj_eog(raw_2, n_mag=2, n_grad=2, n_eeg=2,
+                                  bads=['MEG 2443'], average=False,
+                                  avg_ref=True, no_proj=False, n_jobs=2,
+                                  l_freq=None, h_freq=None, reject=None,
+                                  tmax=dur_use)
+    projs = activate_proj(projs)
+    projs_2 = activate_proj(projs_2)
+    projs, _, _ = make_projector(projs, raw_2.info['ch_names'],
+                                 bads=['MEG 2443'])
+    projs_2, _, _ = make_projector(projs_2, raw_2.info['ch_names'],
+                                   bads=['MEG 2443'])
+    assert_array_almost_equal(projs, projs_2, 10)
diff --git a/mne/preprocessing/tests/test_stim.py b/mne/preprocessing/tests/test_stim.py
new file mode 100644
index 0000000..57ecf5e
--- /dev/null
+++ b/mne/preprocessing/tests/test_stim.py
@@ -0,0 +1,36 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne.fiff import Raw
+from mne.event import read_events
+from mne.preprocessing.stim import eliminate_stim_artifact
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+event_fname = op.join(data_path, 'test-eve.fif')
+
+
+def test_stim_elim():
+    """Test eliminate stim artifact"""
+    raw = Raw(raw_fname, preload=True)
+    events = read_events(event_fname)
+    event_idx = np.where(events[:, 2] == 1)[0][0]
+    tidx = events[event_idx, 0] - raw.first_samp
+
+    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=-0.005,
+                                  tmax=0.01, mode='linear')
+    data, times = raw[:, tidx - 3:tidx + 5]
+    diff_data0 = np.diff(data[0])
+    diff_data0 -= np.mean(diff_data0)
+    assert_array_almost_equal(diff_data0, np.zeros(len(diff_data0)))
+    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=-0.005,
+                                  tmax=0.01, mode='window')
+    data, times = raw[:, tidx:tidx + 1]
+    assert_true(np.all(data) == 0.)
diff --git a/mne/proj.py b/mne/proj.py
new file mode 100644
index 0000000..9708f34
--- /dev/null
+++ b/mne/proj.py
@@ -0,0 +1,357 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from . import fiff, Epochs, verbose
+from .fiff.pick import pick_types, pick_types_forward
+from .fiff.proj import Projection
+from .event import make_fixed_length_events
+from .parallel import parallel_func
+from .cov import _check_n_samples
+from .forward import is_fixed_orient, _subject_from_forward
+from .source_estimate import SourceEstimate
+from .fiff.proj import make_projector, make_eeg_average_ref_proj
+from .fiff import FIFF
+
+
+def read_proj(fname):
+    """Read projections from a FIF file.
+
+    Parameters
+    ----------
+    fname : string
+        The name of file containing the projections vectors.
+
+    Returns
+    -------
+    projs : list
+        The list of projection vectors.
+    """
+    fid, tree, _ = fiff.fiff_open(fname)
+    projs = fiff.proj.read_proj(fid, tree)
+    return projs
+
+
+def write_proj(fname, projs):
+    """Write projections to a FIF file.
+
+    Parameters
+    ----------
+    fname : string
+        The name of file containing the projections vectors.
+
+    projs : list
+        The list of projection vectors.
+    """
+    fid = fiff.write.start_file(fname)
+    fiff.proj.write_proj(fid, projs)
+    fiff.write.end_file(fid)
+
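+# A minimal round-trip sketch, using a hypothetical file name:
+#
+#     >>> write_proj('sample-proj.fif', projs)
+#     >>> projs_read = read_proj('sample-proj.fif')
+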
+
+@verbose
+def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix, verbose=None):
+    mag_ind = pick_types(info, meg='mag', exclude='bads')
+    grad_ind = pick_types(info, meg='grad', exclude='bads')
+    eeg_ind = pick_types(info, meg=False, eeg=True, exclude='bads')
+
+    if (n_grad > 0) and len(grad_ind) == 0:
+        logger.info("No gradiometers found. Forcing n_grad to 0")
+        n_grad = 0
+    if (n_mag > 0) and len(mag_ind) == 0:
+        logger.info("No magnetometers found. Forcing n_mag to 0")
+        n_mag = 0
+    if (n_eeg > 0) and len(eeg_ind) == 0:
+        logger.info("No EEG channels found. Forcing n_eeg to 0")
+        n_eeg = 0
+
+    ch_names = info['ch_names']
+    grad_names, mag_names, eeg_names = ([ch_names[k] for k in ind]
+                                     for ind in [grad_ind, mag_ind, eeg_ind])
+
+    projs = []
+    for n, ind, names, desc in zip([n_grad, n_mag, n_eeg],
+                                   [grad_ind, mag_ind, eeg_ind],
+                                   [grad_names, mag_names, eeg_names],
+                                   ['planar', 'axial', 'eeg']):
+        if n == 0:
+            continue
+        data_ind = data[ind][:, ind]
+        U = linalg.svd(data_ind, full_matrices=False,
+                       overwrite_a=True)[0][:, :n]
+        for k, u in enumerate(U.T):
+            proj_data = dict(col_names=names, row_names=None,
+                             data=u[np.newaxis, :], nrow=1, ncol=u.size)
+            this_desc = "%s-%s-PCA-%02d" % (desc, desc_prefix, k + 1)
+            logger.info("Adding projection: %s" % this_desc)
+            proj = Projection(active=False, data=proj_data,
+                              desc=this_desc, kind=1)
+            projs.append(proj)
+
+    return projs
+
+
+@verbose
+def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
+                        verbose=None):
+    """Compute SSP (spatial space projection) vectors on Epochs
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs containing the artifact
+    n_grad : int
+        Number of vectors for gradiometers
+    n_mag : int
+        Number of vectors for magnetometers
+    n_eeg : int
+        Number of vectors for EEG channels
+    n_jobs : int
+        Number of jobs to use to compute covariance
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        List of projection vectors
+    """
+    # compute data covariance
+    data = _compute_cov_epochs(epochs, n_jobs)
+    event_id = epochs.event_id
+    if event_id is None or len(event_id.keys()) == 0:
+        event_id = '0'
+    elif len(event_id.keys()) == 1:
+        event_id = str(event_id.values()[0])
+    else:
+        event_id = 'Multiple-events'
+    desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
+    return _compute_proj(data, epochs.info, n_grad, n_mag, n_eeg, desc_prefix)
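+
+# A minimal usage sketch (assumes `epochs` is an Epochs instance built around
+# an artifact, e.g. epochs time-locked to ECG events):
+#
+#     projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0)
+#     write_proj('ecg-proj.fif', projs)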
+
+
+def _compute_cov_epochs(epochs, n_jobs):
+    """Helper function for computing epochs covariance"""
+    parallel, p_fun, _ = parallel_func(np.dot, n_jobs)
+    data = parallel(p_fun(e, e.T) for e in epochs)
+    n_epochs = len(data)
+    if n_epochs == 0:
+        raise RuntimeError('No good epochs found')
+
+    n_chan, n_samples = epochs.__iter__().next().shape
+    _check_n_samples(n_samples * n_epochs, n_chan)
+    data = sum(data)
+    return data
+
+
+ at verbose
+def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, verbose=None):
+    """Compute SSP (spatial space projection) vectors on Evoked
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The Evoked obtained by averaging the artifact
+    n_grad : int
+        Number of vectors for gradiometers
+    n_mag : int
+        Number of vectors for gradiometers
+    n_eeg : int
+        Number of vectors for gradiometers
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        List of projection vectors
+    """
+    data = np.dot(evoked.data, evoked.data.T)  # compute data covariance
+    desc_prefix = "%-.3f-%-.3f" % (evoked.times[0], evoked.times[-1])
+    return _compute_proj(data, evoked.info, n_grad, n_mag, n_eeg, desc_prefix)
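+
+# A minimal usage sketch (assumes `evoked` is the average of artifact epochs):
+#
+#     projs = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)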
+
+
+ at verbose
+def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2,
+                     n_eeg=0, reject=None, flat=None, n_jobs=1, verbose=None):
+    """Compute SSP (spatial space projection) vectors on Raw
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        A raw object to use the data from
+    start : float
+        Time (in sec) to start computing SSP
+    stop : float
+        Time (in sec) to stop computing SSP
+        None will go to the end of the file
+    duration : float
+        Duration (in sec) to chunk data into for SSP
+        If duration is None, data will not be chunked.
+    n_grad : int
+        Number of vectors for gradiometers
+    n_mag : int
+        Number of vectors for gradiometers
+    n_eeg : int
+        Number of vectors for gradiometers
+    reject : dict
+        Epoch rejection configuration (see Epochs)
+    flat : dict
+        Epoch flat configuration (see Epochs)
+    n_jobs : int
+        Number of jobs to use to compute covariance
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs: list
+        List of projection vectors
+    """
+    if duration is not None:
+        events = make_fixed_length_events(raw, 999, start, stop, duration)
+        epochs = Epochs(raw, events, None, tmin=0., tmax=duration,
+                        picks=pick_types(raw.info, meg=True, eeg=True,
+                                         eog=True, ecg=True, emg=True,
+                                         exclude='bads'),
+                        reject=reject, flat=flat)
+        data = _compute_cov_epochs(epochs, n_jobs)
+        info = epochs.info
+        if not stop:
+            stop = raw.n_times / raw.info['sfreq']
+    else:
+        # convert to sample indices
+        start = max(raw.time_as_index(start)[0], 0)
+        stop = raw.time_as_index(stop)[0] if stop else raw.n_times
+        stop = min(stop, raw.n_times)
+        data, times = raw[:, start:stop]
+        _check_n_samples(stop - start, data.shape[0])
+        data = np.dot(data, data.T)  # compute data covariance
+        info = raw.info
+        # convert back to times
+        start = start / raw.info['sfreq']
+        stop = stop / raw.info['sfreq']
+
+    desc_prefix = "Raw-%-.3f-%-.3f" % (start, stop)
+    projs = _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix)
+    return projs
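+
+# A minimal usage sketch (assumes `raw` is a Raw instance; the values are
+# illustrative):
+#
+#     projs = compute_proj_raw(raw, start=0, stop=30., duration=1.,
+#                              n_grad=2, n_mag=2, n_eeg=0)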
+
+
+def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
+                    verbose=None):
+    """Compute sensitivity map
+
+    Such maps are used to know how much sources are visible by a type
+    of sensor, and how much projections shadow some sources.
+
+    Parameters
+    ----------
+    fwd : dict
+        The forward operator. Must be free- and surface-oriented.
+    projs : list
+        List of projection vectors.
+    ch_type : 'grad' | 'mag' | 'eeg'
+        The type of sensors to use.
+    mode : str
+        The type of sensitivity map computed. See manual. Should be 'free',
+        'fixed', 'ratio', 'radiality', 'angle', 'remaining', or 'dampening',
+        corresponding to the --map arguments 1 through 7 of the command
+        mne_sensitivity_map.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in fwd['info']['bads'].
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The sensitivity map as a SourceEstimate instance for
+        visualization.
+    """
+    # check strings
+    if ch_type not in ['eeg', 'grad', 'mag']:
+        raise ValueError("ch_type should be 'eeg', 'mag' or 'grad' (got %s)"
+                         % ch_type)
+    if mode not in ['free', 'fixed', 'ratio', 'radiality', 'angle',
+                    'remaining', 'dampening']:
+        raise ValueError('Unknown mode type (got %s)' % mode)
+
+    # check forward
+    if not fwd['surf_ori']:
+        raise ValueError('fwd should be surface oriented')
+    if is_fixed_orient(fwd):
+        raise ValueError('fwd should not have fixed orientation')
+
+    # limit forward
+    if ch_type == 'eeg':
+        fwd = pick_types_forward(fwd, meg=False, eeg=True, exclude=exclude)
+    else:
+        fwd = pick_types_forward(fwd, meg=ch_type, eeg=False, exclude=exclude)
+
+    gain = fwd['sol']['data']
+
+    # Make sure EEG has an average reference projection
+    if ch_type == 'eeg':
+        if projs is None or \
+                not any([p['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF
+                         for p in projs]):
+            # no average reference projection present: add one
+            eeg_ave = [make_eeg_average_ref_proj(fwd['info'])]
+            projs = eeg_ave if projs is None else projs + eeg_ave
+
+    # Construct the projector
+    if projs is not None:
+        proj, ncomp, U = make_projector(projs, fwd['sol']['row_names'],
+                                        include_active=True)
+        # do projection for most types
+        if mode not in ['angle', 'remaining', 'dampening']:
+            gain = np.dot(proj, gain)
+
+    # can only run the last couple methods if there are projectors
+    elif mode in ['angle', 'remaining', 'dampening']:
+        raise ValueError('No projectors used, cannot compute %s' % mode)
+
+    n_sensors, n_dipoles = gain.shape
+    n_locations = n_dipoles // 3
+    sensitivity_map = np.empty(n_locations)
+
+    for k in xrange(n_locations):
+        gg = gain[:, 3 * k:3 * (k + 1)]
+        if mode != 'fixed':
+            s = linalg.svd(gg, full_matrices=False, compute_uv=False)
+        if mode == 'free':
+            sensitivity_map[k] = s[0]
+        else:
+            gz = linalg.norm(gg[:, 2])  # the normal component
+            if mode == 'fixed':
+                sensitivity_map[k] = gz
+            elif mode == 'ratio':
+                sensitivity_map[k] = gz / s[0]
+            elif mode == 'radiality':
+                sensitivity_map[k] = 1. - (gz / s[0])
+            else:
+                if mode == 'angle':
+                    co = linalg.norm(np.dot(gg[:, 2], U))
+                    sensitivity_map[k] = co / gz
+                else:
+                    p = linalg.norm(np.dot(proj, gg[:, 2]))
+                    if mode == 'remaining':
+                        sensitivity_map[k] = p / gz
+                    elif mode == 'dampening':
+                        sensitivity_map[k] = 1. - p / gz
+                    else:
+                        raise ValueError('Unknown mode type (got %s)' % mode)
+
+    # only normalize fixed and free methods
+    if mode in ['fixed', 'free']:
+        sensitivity_map /= np.max(sensitivity_map)
+
+    vertices = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
+    subject = _subject_from_forward(fwd)
+    stc = SourceEstimate(sensitivity_map[:, np.newaxis],
+                         vertices=vertices, tmin=0, tstep=1,
+                         subject=subject)
+    return stc
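+
+# A minimal usage sketch (assumes `fwd` was read with free source
+# orientations and surf_ori=True, e.g. via mne.read_forward_solution):
+#
+#     stc = sensitivity_map(fwd, ch_type='grad', mode='fixed')
+#     stc.save('grad-sensitivity')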
diff --git a/mne/selection.py b/mne/selection.py
new file mode 100644
index 0000000..8ea7937
--- /dev/null
+++ b/mne/selection.py
@@ -0,0 +1,104 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from os import path
+
+import logging
+logger = logging.getLogger('mne')
+
+from . import verbose
+
+
+ at verbose
+def read_selection(name, fname=None, verbose=None):
+    """Read channel selection from file
+
+    By default, the selections used in mne_browse_raw are supported*.
+    Additional selections can be added by specifying a selection file (e.g.
+    produced using mne_browse_raw) using the fname parameter.
+
+    The name parameter can be a string or a list of strings. The returned
+    selection will be the combination of all selections in the file where
+    (at least) one element in name is a substring of the selection name in
+    the file. For example, "name = ['temporal', 'Right-frontal']" will produce
+    the combination of "Left-temporal", "Right-temporal", and "Right-frontal".
+
+    * The included selections are: "Vertex", "Left-temporal", "Right-temporal",
+    "Left-parietal", "Right-parietal", "Left-occipital", "Right-occipital",
+    "Left-frontal", and "Right-frontal"
+
+    Parameters
+    ----------
+    name : string or list of strings
+        Name of the selection. If it is a list, the selections are combined.
+    fname : string
+        Filename of the selection file (if None, built-in selections are used).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    sel : list of string
+        List with channel names in the selection.
+    """
+
+    # convert name to list of string
+    if isinstance(name, tuple):
+        name = list(name)
+
+    if not isinstance(name, list):
+        name = [name]
+
+    # use built-in selections by default
+    if fname is None:
+        fname = path.join(path.dirname(__file__), 'data', 'mne_analyze.sel')
+
+    if not path.exists(fname):
+        raise ValueError('The file %s does not exist.' % fname)
+
+    # use this to make sure we find at least one match for each name
+    name_found = {}
+    for n in name:
+        name_found[n] = False
+
+    fid = open(fname, 'r')
+    sel = []
+
+    for line in fid:
+        line = line.strip()
+
+        # skip blank lines and comments
+        if len(line) == 0 or line[0] == '#':
+            continue
+
+        # get the name of the selection in the file
+        pos = line.find(':')
+        if pos < 0:
+            logger.info('":" delimiter not found in selections file, '
+                        'skipping line')
+            continue
+
+        sel_name_file = line[:pos]
+
+        # search for substring match with name provided
+        for n in name:
+            if sel_name_file.find(n) >= 0:
+                sel.extend(line[pos + 1:].split('|'))
+                name_found[n] = True
+                break
+
+    fid.close()
+
+    # make sure we found at least one match for each name
+    for n, found in name_found.iteritems():
+        if not found:
+            raise ValueError('No match for selection name "%s" found' % n)
+
+    # make the selection a sorted list with unique elements
+    sel = list(set(sel))
+    sel.sort()
+
+    return sel
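+
+# A minimal usage sketch using the built-in selections (the returned channel
+# names can then be used to pick channels from Raw data):
+#
+#     sel = read_selection(['Left-temporal', 'Right-temporal'])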
diff --git a/mne/simulation/__init__.py b/mne/simulation/__init__.py
new file mode 100644
index 0000000..7076a90
--- /dev/null
+++ b/mne/simulation/__init__.py
@@ -0,0 +1,6 @@
+"""Data simulation code
+"""
+
+from .evoked import generate_evoked
+
+from .source import select_source_in_label, generate_sparse_stc
diff --git a/mne/simulation/evoked.py b/mne/simulation/evoked.py
new file mode 100644
index 0000000..f7b9895
--- /dev/null
+++ b/mne/simulation/evoked.py
@@ -0,0 +1,125 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+import copy
+
+import numpy as np
+from scipy import signal
+
+from ..fiff.pick import pick_channels_cov
+from ..utils import check_random_state
+from ..forward import apply_forward
+
+
+def generate_evoked(fwd, stc, evoked, cov, snr=3, tmin=None, tmax=None,
+                    iir_filter=None, random_state=None):
+    """Generate noisy evoked data
+
+    Parameters
+    ----------
+    fwd : dict
+        a forward solution
+    stc : SourceEstimate object
+        The source time courses
+    evoked : Evoked object
+        An instance of evoked used as template
+    cov : Covariance object
+        The noise covariance
+    snr : float
+        Signal-to-noise ratio in dB. It corresponds to
+        10 * log10( var(signal) / var(noise) ).
+    tmin : float | None
+        Start of time interval to estimate SNR. If None, the first time
+        point is used.
+    tmax : float | None
+        End of time interval to estimate SNR. If None, the last time
+        point is used.
+    iir_filter : None | array
+        IIR filter coefficients (denominator) e.g. [1, -1, 0.2]
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    evoked : Evoked object
+        The simulated evoked data
+    """
+    evoked = apply_forward(fwd, stc, evoked)
+    noise = generate_noise_evoked(evoked, cov, iir_filter, random_state)
+    evoked_noise = add_noise_evoked(evoked, noise, snr, tmin=tmin, tmax=tmax)
+    return evoked_noise
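+
+# A minimal usage sketch (assumes `fwd`, `stc`, `evoked_template` and `cov`
+# were loaded beforehand; snr and the IIR coefficients are illustrative):
+#
+#     evoked_sim = generate_evoked(fwd, stc, evoked_template, cov, snr=6,
+#                                  iir_filter=[1, -0.9])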
+
+
+def generate_noise_evoked(evoked, noise_cov, iir_filter=None,
+                          random_state=None):
+    """Creates noise as a multivariate Gaussian
+
+    The spatial covariance of the noise is given from the cov matrix.
+
+    Parameters
+    ----------
+    evoked : evoked object
+        an instance of evoked used as template
+    cov : Covariance object
+        The noise covariance
+    iir_filter : None | array
+        IIR filter coefficients (denominator)
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    noise : evoked object
+        an instance of evoked
+    """
+    noise = copy.deepcopy(evoked)
+    noise_cov = pick_channels_cov(noise_cov, include=noise.info['ch_names'])
+    rng = check_random_state(random_state)
+    zero_mean = np.zeros(noise.info['nchan'])  # mean of the Gaussian noise
+    n_samples = evoked.data.shape[1]
+    noise.data = rng.multivariate_normal(zero_mean, noise_cov.data,
+                                         n_samples).T
+    if iir_filter is not None:
+        noise.data = signal.lfilter([1], iir_filter, noise.data, axis=-1)
+    return noise
+
+
+def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
+    """Adds noise to evoked object with specified SNR.
+
+    SNR is computed in the interval from tmin to tmax.
+
+    Parameters
+    ----------
+    evoked : Evoked object
+        An instance of evoked with signal
+    noise : Evoked object
+        An instance of evoked with noise
+    snr : float
+        signal to noise ratio in dB. It corresponds to
+        10 * log10( var(signal) / var(noise) )
+    tmin : float
+        start time before event
+    tmax : float
+        end time after event
+
+    Returns
+    -------
+    evoked_noise : Evoked object
+        An instance of evoked corrupted by noise
+    """
+    evoked = copy.deepcopy(evoked)
+    times = evoked.times
+    if tmin is None:
+        tmin = np.min(times)
+    if tmax is None:
+        tmax = np.max(times)
+    tmask = (times >= tmin) & (times <= tmax)
+    tmp = np.mean((evoked.data[:, tmask] ** 2).ravel()) / \
+        np.mean((noise.data ** 2).ravel())
+    tmp = 10 * np.log10(tmp)  # actual SNR of the data in dB
+    # scale the noise amplitude so that the requested SNR is obtained
+    noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
+    evoked.data += noise.data
+    return evoked
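+
+# A minimal usage sketch combining the two helpers above (assumes `evoked`
+# and `cov` are an Evoked instance and a Covariance):
+#
+#     noise = generate_noise_evoked(evoked, cov, iir_filter=[1, -0.9])
+#     evoked_noisy = add_noise_evoked(evoked, noise, snr=3.)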
diff --git a/mne/simulation/source.py b/mne/simulation/source.py
new file mode 100644
index 0000000..1cc4792
--- /dev/null
+++ b/mne/simulation/source.py
@@ -0,0 +1,196 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from ..source_estimate import SourceEstimate
+from ..utils import check_random_state
+
+
+def select_source_in_label(src, label, random_state=None):
+    """Select source positions using a label
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    label : Label
+        the label (read with mne.read_label)
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    lh_vertno : list
+        Selected source vertices on the left hemisphere.
+    rh_vertno : list
+        Selected source vertices on the right hemisphere.
+    """
+    lh_vertno = list()
+    rh_vertno = list()
+
+    rng = check_random_state(random_state)
+
+    if label.hemi == 'lh':
+        src_sel_lh = np.intersect1d(src[0]['vertno'], label.vertices)
+        idx_select = rng.randint(0, len(src_sel_lh), 1)
+        lh_vertno.append(src_sel_lh[idx_select][0])
+    else:
+        src_sel_rh = np.intersect1d(src[1]['vertno'], label.vertices)
+        idx_select = rng.randint(0, len(src_sel_rh), 1)
+        rh_vertno.append(src_sel_rh[idx_select][0])
+
+    return lh_vertno, rh_vertno
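+
+# A minimal usage sketch (assumes `src` and `label` were read beforehand,
+# e.g. with mne.read_source_spaces and mne.read_label):
+#
+#     lh_vertno, rh_vertno = select_source_in_label(src, label, random_state=0)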
+
+
+def generate_sparse_stc(src, labels, stc_data, tmin, tstep, random_state=None):
+    """Generate sparse sources time courses from waveforms and labels
+
+    This function randomly selects a single vertex in each label and assigns
+    a waveform from stc_data to it.
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    labels : list of Labels
+        The labels
+    stc_data : array (shape: len(labels) x n_times)
+        The waveforms
+    tmin : float
+        The beginning of the timeseries
+    tstep : float
+        The time step (1 / sampling frequency)
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+    """
+    if len(labels) != len(stc_data):
+        raise ValueError('labels and stc_data must have the same length')
+
+    rng = check_random_state(random_state)
+    vertno = [[], []]
+    lh_data = list()
+    rh_data = list()
+    for label_data, label in zip(stc_data, labels):
+        lh_vertno, rh_vertno = select_source_in_label(src, label, rng)
+        vertno[0] += lh_vertno
+        vertno[1] += rh_vertno
+        if len(lh_vertno) != 0:
+            lh_data.append(np.atleast_2d(label_data))
+        elif len(rh_vertno) != 0:
+            rh_data.append(np.atleast_2d(label_data))
+        else:
+            raise ValueError('No vertno found.')
+
+    vertno = map(np.array, vertno)
+
+    # the data is in the order left, right
+    data = list()
+    if len(vertno[0]) != 0:
+        idx = np.argsort(vertno[0])
+        vertno[0] = vertno[0][idx]
+        data.append(np.concatenate(lh_data)[idx])
+
+    if len(vertno[1]) != 0:
+        idx = np.argsort(vertno[1])
+        vertno[1] = vertno[1][idx]
+        data.append(np.concatenate(rh_data)[idx])
+
+    data = np.concatenate(data)
+
+    stc = SourceEstimate(data, vertices=vertno, tmin=tmin, tstep=tstep)
+
+    return stc
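+
+# A minimal usage sketch (assumes `src` and `labels` were loaded beforehand;
+# one waveform per label, with an illustrative amplitude):
+#
+#     stc_data = 1e-9 * np.ones((len(labels), 100))
+#     stc = generate_sparse_stc(src, labels, stc_data, tmin=0., tstep=1e-3,
+#                               random_state=0)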
+
+
+def generate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
+    """Generate sources time courses from waveforms and labels
+
+    This function generates a source estimate with extended sources by
+    filling the labels with the waveforms given in stc_data.
+
+    By default, the vertices within a label are assigned the same waveform.
+    The waveforms can be scaled for each vertex by using the label values
+    and value_fun. E.g.,
+
+    # create a source label where the values are the distance from the center
+    labels = circular_source_labels('sample', 0, 10, 0)
+
+    # sources with decaying strength (x will be the distance from the center)
+    fun = lambda x: exp(- x / 10)
+    stc = generate_stc(src, labels, stc_data, tmin, tstep, fun)
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    labels : list of Labels
+        The labels
+    stc_data : array (shape: len(labels) x n_times)
+        The waveforms
+    tmin : float
+        The beginning of the timeseries
+    tstep : float
+        The time step (1 / sampling frequency)
+    value_fun : function
+        Function to apply to the label values
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+    """
+
+    if len(labels) != len(stc_data):
+        raise ValueError('labels and stc_data must have the same length')
+
+    vertno = [[], []]
+    stc_data_extended = [[], []]
+    hemi_to_ind = {'lh': 0, 'rh': 1}
+    for i, label in enumerate(labels):
+        hemi_ind = hemi_to_ind[label.hemi]
+        src_sel = np.intersect1d(src[hemi_ind]['vertno'],
+                                 label.vertices)
+        if value_fun is not None:
+            idx_sel = np.searchsorted(label.vertices, src_sel)
+            values_sel = np.array([value_fun(v) for v in
+                                   label.values[idx_sel]])
+
+            data = np.outer(values_sel, stc_data[i])
+        else:
+            data = np.tile(stc_data[i], (len(src_sel), 1))
+
+        vertno[hemi_ind].append(src_sel)
+        stc_data_extended[hemi_ind].append(np.atleast_2d(data))
+
+    # format the vertno list
+    for idx in (0, 1):
+        if len(vertno[idx]) > 1:
+            vertno[idx] = np.concatenate(vertno[idx])
+        elif len(vertno[idx]) == 1:
+            vertno[idx] = vertno[idx][0]
+    vertno = map(np.array, vertno)
+
+    # the data is in the order left, right
+    data = list()
+    if len(vertno[0]) != 0:
+        idx = np.argsort(vertno[0])
+        vertno[0] = vertno[0][idx]
+        data.append(np.concatenate(stc_data_extended[0])[idx])
+
+    if len(vertno[1]) != 0:
+        idx = np.argsort(vertno[1])
+        vertno[1] = vertno[1][idx]
+        data.append(np.concatenate(stc_data_extended[1])[idx])
+
+    data = np.concatenate(data)
+
+    stc = SourceEstimate(data, vertices=vertno, tmin=tmin, tstep=tstep)
+    return stc
diff --git a/mne/simulation/tests/__init__.py b/mne/simulation/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/simulation/tests/test_evoked.py b/mne/simulation/tests/test_evoked.py
new file mode 100644
index 0000000..1417f0e
--- /dev/null
+++ b/mne/simulation/tests/test_evoked.py
@@ -0,0 +1,76 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true, assert_raises
+
+from mne.datasets import sample
+from mne import read_label, read_forward_solution
+from mne.time_frequency import morlet
+from mne.simulation import generate_sparse_stc, generate_evoked
+import mne
+from mne.fiff.pick import pick_types_evoked, pick_types_forward
+
+
+data_path = sample.data_path()
+fwd_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-meg-eeg-oct-6-fwd.fif')
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+                    'data', 'test_raw.fif')
+ave_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+                    'data', 'test-ave.fif')
+cov_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+                    'data', 'test-cov.fif')
+
+
+def test_simulate_evoked():
+    """ Test simulation of evoked data """
+
+    raw = mne.fiff.Raw(raw_fname)
+    fwd = read_forward_solution(fwd_fname, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
+    cov = mne.read_cov(cov_fname)
+    label_names = ['Aud-lh', 'Aud-rh']
+    labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
+                        '%s.label' % label)) for label in label_names]
+
+    evoked_template = mne.fiff.read_evoked(ave_fname, setno=0, baseline=None)
+    evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
+                                        exclude=raw.info['bads'])
+
+    snr = 6  # dB
+    tmin = -0.1
+    sfreq = 1000.  # Hz
+    tstep = 1. / sfreq
+    n_samples = 600
+    times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
+
+    # Generate times series from 2 Morlet wavelets
+    stc_data = np.zeros((len(labels), len(times)))
+    Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
+    stc_data[0][:len(Ws[0])] = np.real(Ws[0])
+    stc_data[1][:len(Ws[1])] = np.real(Ws[1])
+    stc_data *= 100 * 1e-9  # use nAm as unit
+
+    # time translation
+    stc_data[1] = np.roll(stc_data[1], 80)
+    stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
+                              random_state=0)
+
+    # Generate noisy evoked data
+    iir_filter = [1, -0.9]
+    evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
+                             tmin=0.0, tmax=0.2, iir_filter=iir_filter)
+    assert_array_almost_equal(evoked.times, stc.times)
+    assert_true(len(evoked.data) == len(fwd['sol']['data']))
+
+    # make a vertex that doesn't exist in fwd, should throw error
+    stc_bad = stc.copy()
+    mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
+    stc_bad.vertno[0][0] = mv + 1
+    assert_raises(RuntimeError, generate_evoked, fwd, stc_bad,
+                  evoked_template, cov, snr, tmin=0.0, tmax=0.2)
diff --git a/mne/simulation/tests/test_source.py b/mne/simulation/tests/test_source.py
new file mode 100644
index 0000000..0d2449e
--- /dev/null
+++ b/mne/simulation/tests/test_source.py
@@ -0,0 +1,205 @@
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true
+
+from mne.datasets import sample
+from mne import read_label, read_forward_solution
+from mne.label import Label
+from mne.simulation.source import generate_stc, generate_sparse_stc
+
+
+data_path = sample.data_path()
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-meg-oct-6-fwd.fif')
+fwd = read_forward_solution(fname_fwd, force_fixed=True)
+label_names = ['Aud-lh', 'Aud-rh', 'Vis-rh']
+labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
+                    '%s.label' % label)) for label in label_names]
+
+label_names_single_hemi = ['Aud-rh', 'Vis-rh']
+labels_single_hemi = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
+                    '%s.label' % label)) for label in label_names_single_hemi]
+
+
+def test_generate_stc():
+    """ Test generation of source estimate """
+    mylabels = []
+    for i, label in enumerate(labels):
+        new_label = Label(vertices=label.vertices,
+                          pos=label.pos,
+                          values=2 * i * np.ones(len(label.values)),
+                          hemi=label.hemi,
+                          comment=label.comment)
+        mylabels.append(new_label)
+
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+
+    stc_data = np.ones((len(labels), n_times))
+    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
+
+    for label in labels:
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertno[0])
+
+        assert_true(np.all(stc.data[idx] == 1.0))
+        assert_true(stc.data[idx].shape[1] == n_times)
+
+    # test with function
+    fun = lambda x: x ** 2
+    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
+
+    # the first label has value 0, the second value 2, the third value 4
+
+    for i, label in enumerate(labels):
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertno[0])
+
+        assert_array_almost_equal(stc.data[idx],
+                        ((2. * i) ** 2.) * np.ones((len(idx), n_times)))
+
+
+def test_generate_sparse_stc():
+    """ Test generation of sparse source estimate """
+
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+
+    stc_data = np.ones((len(labels), n_times))\
+                     * np.arange(len(labels))[:, None]
+    stc_1 = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep, 0)
+
+    for i, label in enumerate(labels):
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc_1.vertno[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc_1.vertno[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc_1.vertno[0])
+
+        assert_true(np.all(stc_1.data[idx] == float(i)))
+
+    assert_true(stc_1.data.shape[0] == len(labels))
+    assert_true(stc_1.data.shape[1] == n_times)
+
+    # make sure we get the same result when using the same seed
+    stc_2 = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep, 0)
+
+    assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
+    assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
+
+
+def test_generate_stc_single_hemi():
+    """ Test generation of source estimate """
+    mylabels = []
+    for i, label in enumerate(labels_single_hemi):
+        new_label = Label(vertices=label.vertices,
+                          pos=label.pos,
+                          values=2 * i * np.ones(len(label.values)),
+                          hemi=label.hemi,
+                          comment=label.comment)
+        mylabels.append(new_label)
+
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+
+    stc_data = np.ones((len(labels_single_hemi), n_times))
+    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
+
+    for label in labels_single_hemi:
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertno[0])
+
+        assert_true(np.all(stc.data[idx] == 1.0))
+        assert_true(stc.data[idx].shape[1] == n_times)
+
+    # test with function
+    fun = lambda x: x ** 2
+    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
+
+    # the first label has value 0, the second value 2
+
+    for i, label in enumerate(labels_single_hemi):
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertno[0])
+
+        assert_array_almost_equal(stc.data[idx],
+                        ((2. * i) ** 2.) * np.ones((len(idx), n_times)))
+
+
+def test_generate_sparse_stc_single_hemi():
+    """ Test generation of sparse source estimate """
+
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+
+    stc_data = np.ones((len(labels_single_hemi), n_times))\
+                     * np.arange(len(labels_single_hemi))[:, None]
+    stc_1 = generate_sparse_stc(fwd['src'], labels_single_hemi, stc_data,
+                                tmin, tstep, 0)
+
+    for i, label in enumerate(labels_single_hemi):
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc_1.vertno[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc_1.vertno[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc_1.vertno[0])
+
+        assert_true(np.all(stc_1.data[idx] == float(i)))
+
+    assert_true(stc_1.data.shape[0] == len(labels_single_hemi))
+    assert_true(stc_1.data.shape[1] == n_times)
+
+    # make sure we get the same result when using the same seed
+    stc_2 = generate_sparse_stc(fwd['src'], labels_single_hemi, stc_data,
+                                tmin, tstep, 0)
+
+    assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
+    assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
diff --git a/mne/source_estimate.py b/mne/source_estimate.py
new file mode 100644
index 0000000..aa19e6e
--- /dev/null
+++ b/mne/source_estimate.py
@@ -0,0 +1,2521 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import copy
+from math import ceil
+import numpy as np
+from scipy import linalg, sparse
+from scipy.sparse import csr_matrix, coo_matrix
+import warnings
+
+import logging
+logger = logging.getLogger('mne')
+
+from .filter import resample
+from .parallel import parallel_func
+from .surface import read_surface
+from .utils import get_subjects_dir, _check_subject, \
+                   _check_pandas_index_arguments, _check_pandas_installed, \
+                   deprecated
+from .viz import plot_source_estimates
+from . import verbose
+from . fixes import in1d
+
+
+ at deprecated('read_stc is deprecated and will be removed with version 0.7. '
+            'Please use read_source_estimate instead.')
+def read_stc(filename):
+    """Read an STC file and return as dict
+
+    STC files contain activations or source reconstructions.
+
+    Parameters
+    ----------
+    filename : string
+        The name of the STC file.
+
+    Returns
+    -------
+    data: dict
+        The STC structure. It has the following keys:
+           tmin           The first time point of the data in seconds
+           tstep          Time between frames in seconds
+           vertices       vertex indices (0 based)
+           data           The data matrix (nvert * ntime)
+
+    See Also
+    --------
+    read_source_estimate
+    """
+    return _read_stc(filename)
+
+
+def _read_stc(filename):
+    """ Aux Function
+    """
+    fid = open(filename, 'rb')
+
+    stc = dict()
+
+    fid.seek(0, 2)  # go to end of file
+    file_length = fid.tell()
+    fid.seek(0, 0)  # go to beginning of file
+
+    # read tmin in ms
+    stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1))
+    stc['tmin'] /= 1000.0
+
+    # read sampling rate in ms
+    stc['tstep'] = float(np.fromfile(fid, dtype=">f4", count=1))
+    stc['tstep'] /= 1000.0
+
+    # read number of vertices/sources
+    vertices_n = int(np.fromfile(fid, dtype=">u4", count=1))
+
+    # read the source vector
+    stc['vertices'] = np.fromfile(fid, dtype=">u4", count=vertices_n)
+
+    # read the number of timepts
+    data_n = int(np.fromfile(fid, dtype=">u4", count=1))
+
+    if (vertices_n and  # vertices_n can be 0 (empty stc)
+            ((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
+        raise ValueError('incorrect stc file size')
+
+    # read the data matrix
+    stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n * data_n)
+    stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
+
+    # close the file
+    fid.close()
+    return stc
+
+
+ at deprecated('write_stc is deprecated and will be removed with version 0.7. '
+            'Please use SourceEstimate.save instead.')
+def write_stc(filename, tmin, tstep, vertices, data):
+    """Write an STC file
+
+    Parameters
+    ----------
+    filename : string
+        The name of the STC file.
+    tmin : float
+        The first time point of the data in seconds.
+    tstep : float
+        Time between frames in seconds.
+    vertices : array of integers
+        Vertex indices (0 based).
+    data : 2D array
+        The data matrix (nvert * ntime).
+
+    See Also
+    --------
+    SourceEstimate.save (instance method)
+    """
+    return _write_stc(filename, tmin, tstep, vertices, data)
+
+
+def _write_stc(filename, tmin, tstep, vertices, data):
+    """Write an STC file
+
+    Parameters
+    ----------
+    filename : string
+        The name of the STC file.
+    tmin : float
+        The first time point of the data in seconds.
+    tstep : float
+        Time between frames in seconds.
+    vertices : array of integers
+        Vertex indices (0 based).
+    data : 2D array
+        The data matrix (nvert * ntime).
+    """
+    fid = open(filename, 'wb')
+
+    # write start time in ms
+    fid.write(np.array(1000 * tmin, dtype='>f4').tostring())
+    # write sampling rate in ms
+    fid.write(np.array(1000 * tstep, dtype='>f4').tostring())
+    # write number of vertices
+    fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())
+    # write the vertex indices
+    fid.write(np.array(vertices, dtype='>u4').tostring())
+
+    # write the number of timepts
+    fid.write(np.array(data.shape[1], dtype='>u4').tostring())
+    #
+    # write the data
+    #
+    fid.write(np.array(data.T, dtype='>f4').tostring())
+
+    # close the file
+    fid.close()
+
+
+def _read_3(fid):
+    """ Read 3 byte integer from file
+    """
+    data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
+
+    out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
+
+    return out
+
+
+ at deprecated('read_w is deprecated and will be removed with version 0.7. '
+            'Please use read_source_estimate instead.')
+def read_w(filename):
+    """Read a w file and return as dict
+
+    w files contain activations or source reconstructions for a single time
+    point.
+
+    Parameters
+    ----------
+    filename : string
+        The name of the w file.
+
+    Returns
+    -------
+    data: dict
+        The w structure. It has the following keys:
+           vertices       vertex indices (0 based)
+           data           The data matrix (nvert long)
+    """
+    return _read_w(filename)
+
+
+def _read_w(filename):
+    """Read a w file and return as dict
+
+    w files contain activations or source reconstructions for a single time
+    point.
+
+    Parameters
+    ----------
+    filename : string
+        The name of the w file.
+
+    Returns
+    -------
+    data: dict
+        The w structure. It has the following keys:
+           vertices       vertex indices (0 based)
+           data           The data matrix (nvert long)
+    """
+
+    fid = open(filename, 'rb')
+
+    # skip first 2 bytes
+    fid.read(2)
+
+    # read number of vertices/sources (3 byte integer)
+    vertices_n = int(_read_3(fid))
+
+    vertices = np.zeros((vertices_n), dtype=np.int32)
+    data = np.zeros((vertices_n), dtype=np.float32)
+
+    # read the vertices and data
+    for i in range(vertices_n):
+        vertices[i] = _read_3(fid)
+        data[i] = np.fromfile(fid, dtype='>f4', count=1)
+
+    w = dict()
+    w['vertices'] = vertices
+    w['data'] = data
+
+    # close the file
+    fid.close()
+    return w
+
+
+def _write_3(fid, val):
+    """ Write 3 byte integer to file
+    """
+
+    f_bytes = np.zeros((3), dtype=np.uint8)
+
+    f_bytes[0] = (val >> 16) & 255
+    f_bytes[1] = (val >> 8) & 255
+    f_bytes[2] = val & 255
+
+    fid.write(f_bytes.tostring())
+
+
+ at deprecated('write_w is deprecated and will be removed with version 0.7. '
+            'Please use SourceEstimate.save instead.')
+def write_w(filename, vertices, data):
+    """Read a w file
+
+    w files contain activations or source reconstructions for a single time
+    point.
+
+    Parameters
+    ----------
+    filename: string
+        The name of the w file.
+    vertices: array of int
+        Vertex indices (0 based).
+    data: 1D array
+        The data array (nvert).
+
+    See Also
+    --------
+    SourceEstimate.save (instance method)
+    """
+    return _write_w(filename, vertices, data)
+
+
+def _write_w(filename, vertices, data):
+    """Read a w file
+
+    w files contain activations or source reconstructions for a single time
+    point.
+
+    Parameters
+    ----------
+    filename: string
+        The name of the w file.
+    vertices: array of int
+        Vertex indices (0 based).
+    data: 1D array
+        The data array (nvert).
+    """
+
+    assert(len(vertices) == len(data))
+
+    fid = open(filename, 'wb')
+
+    # write 2 zero bytes
+    fid.write(np.zeros((2), dtype=np.uint8).tostring())
+
+    # write number of vertices/sources (3 byte integer)
+    vertices_n = len(vertices)
+    _write_3(fid, vertices_n)
+
+    # write the vertices and data
+    for i in range(vertices_n):
+        _write_3(fid, vertices[i])
+        #XXX: without float() endianness is wrong, not sure why
+        fid.write(np.array(float(data[i]), dtype='>f4').tostring())
+
+    # close the file
+    fid.close()
+
+
+def read_source_estimate(fname, subject=None):
+    """Returns a SourceEstimate object.
+
+    Parameters
+    ----------
+    fname : str
+        Path to (a) source-estimate file(s).
+    subject : str | None
+        Name of the subject the source estimate(s) is (are) from.
+        It is good practice to set this attribute to avoid combining
+        incompatible labels and SourceEstimates (e.g., ones from other
+        subjects). Note that due to file specification limitations, the
+        subject name isn't saved to or loaded from files written to disk.
+
+    Notes
+    -----
+     - for volume source estimates, ``fname`` should provide the path to a
+       single file named '*-vl.stc' or '*-vol.stc'
+     - for surface source estimates, ``fname`` should either provide the
+       path to the file corresponding to a single hemisphere ('*-lh.stc',
+       '*-rh.stc') or only specify the asterisk part in these patterns. In any
+       case, the function expects files for both hemispheres with names
+       following this pattern.
+     - for single time point .w files, ``fname`` should follow the same
+       pattern as for surface estimates, except that files are named
+       '*-lh.w' and '*-rh.w'.
+    """
+    fname_arg = fname
+
+    # make sure corresponding file(s) can be found
+    ftype = None
+    if os.path.exists(fname):
+        if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
+                fname.endswith('-vl.w') or fname.endswith('-vol.w'):
+            ftype = 'volume'
+        elif fname.endswith('.stc'):
+            ftype = 'surface'
+            if fname.endswith(('-lh.stc', '-rh.stc')):
+                fname = fname[:-7]
+            else:
+                err = ("Invalid .stc filename: %r; needs to end with "
+                       "hemisphere tag ('...-lh.stc' or '...-rh.stc')"
+                       % fname)
+                raise IOError(err)
+        elif fname.endswith('.w'):
+            ftype = 'w'
+            if fname.endswith(('-lh.w', '-rh.w')):
+                fname = fname[:-5]
+            else:
+                err = ("Invalid .w filename: %r; needs to end with "
+                       "hemisphere tag ('...-lh.w' or '...-rh.w')"
+                       % fname)
+                raise IOError(err)
+
+    if ftype != 'volume':
+        stc_exist = map(os.path.exists, (fname + '-rh.stc', fname + '-lh.stc'))
+        w_exist = map(os.path.exists, (fname + '-rh.w', fname + '-lh.w'))
+        if all(stc_exist) and (ftype != 'w'):
+            ftype = 'surface'
+        elif all(w_exist):
+            ftype = 'w'
+        elif any(stc_exist) or any(w_exist):
+            raise IOError("Hemisphere missing for %r" % fname_arg)
+        else:
+            raise IOError("SourceEstimate File(s) not found for: %r"
+                          % fname_arg)
+
+    # read the files
+    if ftype == 'volume':  # volume source space
+        if fname.endswith('.stc'):
+            kwargs = _read_stc(fname)
+        elif fname.endswith('.w'):
+            kwargs = _read_w(fname)
+            kwargs['data'] = kwargs['data'][:, np.newaxis]
+            kwargs['tmin'] = 0.0
+            kwargs['tstep'] = 0.0
+        else:
+            raise IOError('Volume source estimate must end with .stc or .w')
+    elif ftype == 'surface':  # stc file with surface source spaces
+        lh = _read_stc(fname + '-lh.stc')
+        rh = _read_stc(fname + '-rh.stc')
+        assert lh['tmin'] == rh['tmin']
+        assert lh['tstep'] == rh['tstep']
+        kwargs = lh.copy()
+        kwargs['data'] = np.r_[lh['data'], rh['data']]
+        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
+    elif ftype == 'w':  # w file with surface source spaces
+        lh = _read_w(fname + '-lh.w')
+        rh = _read_w(fname + '-rh.w')
+        kwargs = lh.copy()
+        kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
+        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
+        # w files only have a single time point
+        kwargs['tmin'] = 0.0
+        kwargs['tstep'] = 1.0
+
+    kwargs['subject'] = subject
+    return SourceEstimate(**kwargs)
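+
+# A minimal usage sketch (the file stem is hypothetical; for surface source
+# estimates the '-lh.stc' / '-rh.stc' suffixes are appended automatically):
+#
+#     stc = read_source_estimate('mydata-meg', subject='sample')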
+
+
+class _NotifyArray(np.ndarray):
+    """Array class that executes a callback when it is modified
+    """
+    def __new__(cls, input_array, modify_callback=None):
+        obj = np.asarray(input_array).view(cls)
+        obj.modify_callback = modify_callback
+        return obj
+
+    def __array_finalize__(self, obj):
+        if obj is None:
+            # an empty constructor was used
+            return
+
+        # try to copy the callback
+        self.modify_callback = getattr(obj, 'modify_callback', None)
+
+    def _modified_(self):
+        """Execute the callback if it is set"""
+        if self.modify_callback is not None:
+            self.modify_callback()
+
+    def __getattribute__(self, name):
+        # catch ndarray methods that modify the array in place; access to
+        # the method is conservatively treated as a modification, since we
+        # cannot tell here whether it will actually be called
+        if name in ['fill', 'itemset', 'resize', 'sort']:
+            self._modified_()
+
+        return object.__getattribute__(self, name)
+
+    def __setitem__(self, item, value):
+        self._modified_()
+        np.ndarray.__setitem__(self, item, value)
+
+    def __array_wrap__(self, out_arr, context=None):
+        # this method is called whenever a numpy ufunc (+, +=..) is called
+        # the last entry in context is the array that receives the result
+        if (context is not None and len(context[1]) == 3
+                and context[1][2] is self):
+            self._modified_()
+
+        return np.ndarray.__array_wrap__(self, out_arr, context)
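+
+# A minimal sketch of the callback mechanism (illustrative only):
+#
+#     a = _NotifyArray(np.zeros(3), modify_callback=lambda: logger.info('mod'))
+#     a[0] = 1.  # __setitem__ fires the callback before writing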
+
+
+def _verify_source_estimate_compat(a, b):
+    """Make sure two SourceEstimates are compatible for arith. operations"""
+    compat = False
+    if len(a.vertno) == len(b.vertno):
+        if all([np.array_equal(av, vv) for av, vv in zip(a.vertno, b.vertno)]):
+            compat = True
+    if not compat:
+        raise ValueError('Cannot combine SourceEstimates that do not have the '
+                         'same vertices. Consider using stc.expand().')
+    if a.subject != b.subject:
+        raise ValueError('source estimates do not have the same subject '
+                         'names, "%s" and "%s"' % (a.subject, b.subject))
+
+
+class SourceEstimate(object):
+    """SourceEstimate container
+
+    Can be saved and loaded from .stc or .w files.
+
+    Parameters
+    ----------
+    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
+        The data in source space. The data can either be a single array or
+        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
+        "sens_data" shape (n_sensors, n_times). In this case, the source
+        space data corresponds to "numpy.dot(kernel, sens_data)".
+    vertices : array | list of two arrays
+        Vertex numbers corresponding to the data.
+    tmin : scalar
+        Time point of the first sample in data.
+    tstep : scalar
+        Time step between successive samples in data.
+    subject : str | None
+        The subject name. While not necessary, it is safer to set the
+        subject parameter to avoid analysis errors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    subject : str | None
+        The subject name.
+    times : array of shape (n_times,)
+        The time vector.
+    vertno : array or list of array of shape (n_dipoles,)
+        The indices of the dipoles in the different source spaces. Can
+        be an array if there is only one source space (e.g., for volumes).
+    data : array of shape (n_dipoles, n_times)
+        The data in source space.
+    shape : tuple
+        The shape of the data. A tuple of int (n_dipoles, n_times).
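+
+    Examples
+    --------
+    A minimal sketch with arbitrary vertex numbers and random data:
+
+        >>> import numpy as np
+        >>> vertices = [np.array([0, 1, 2]), np.array([0, 5])]  # lh, rh
+        >>> data = np.random.randn(5, 10)  # 5 dipoles, 10 time points
+        >>> stc = SourceEstimate(data, vertices, tmin=0., tstep=1e-3)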
+    """
+    @verbose
+    def __init__(self, data, vertices=None, tmin=None, tstep=None,
+                 subject=None, verbose=None):
+        kernel, sens_data = None, None
+        if isinstance(data, tuple):
+            if len(data) != 2:
+                raise ValueError('If data is a tuple it has to be length 2')
+            kernel, sens_data = data
+            data = None
+            if kernel.shape[1] != sens_data.shape[0]:
+                raise ValueError('kernel and sens_data have invalid '
+                                 'dimensions')
+
+        if isinstance(vertices, list):
+            if not (len(vertices) == 2 or len(vertices) == 1) or \
+                    not all([isinstance(v, np.ndarray) for v in vertices]):
+                raise ValueError('Vertices, if a list, must contain one or '
+                                 'two numpy arrays')
+            n_src = sum([len(v) for v in vertices])
+        elif not isinstance(vertices, np.ndarray):
+            raise ValueError('Vertices must be a list or numpy array')
+        else:
+            n_src = len(vertices)
+        # safeguard the user against doing something silly
+        if data is not None and data.shape[0] != n_src:
+            raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) '
+                             'must match' % (n_src, data.shape[0]))
+
+        self._data = data
+        self.tmin = tmin
+        self.tstep = tstep
+        self.vertno = vertices
+        self.verbose = verbose
+        self._kernel = kernel
+        self._sens_data = sens_data
+        self.times = None
+        self._update_times()
+        self.subject = _check_subject(None, subject, False)
+
+    @verbose
+    def save(self, fname, ftype='stc', verbose=None):
+        """Save the source estimates to a file
+
+        Parameters
+        ----------
+        fname : string
+            The stem of the file name. The file names used for surface source
+            spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
+            and "-rh.w") to the stem provided, for the left and the right
+            hemisphere, respectively. For volume source spaces, the stem is
+            extended with "-vl.stc" or "-vl.w".
+        ftype : string
+            File format to use. Allowed values are "stc" (default) and "w".
+            The "w" format only supports a single time point.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+        """
+        if ftype not in ['stc', 'w']:
+            raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype)
+        if self.is_surface():
+            lh_data = self.data[:len(self.lh_vertno)]
+            rh_data = self.data[-len(self.rh_vertno):]
+
+            if ftype == 'stc':
+                logger.info('Writing STC to disk...')
+                _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
+                           vertices=self.lh_vertno, data=lh_data)
+                _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
+                           vertices=self.rh_vertno, data=rh_data)
+            elif ftype == 'w':
+                if self.shape[1] != 1:
+                    raise ValueError('w files can only contain a single time '
+                                     'point')
+                logger.info('Writing STC to disk (w format)...')
+                _write_w(fname + '-lh.w', vertices=self.lh_vertno,
+                         data=lh_data[:, 0])
+                _write_w(fname + '-rh.w', vertices=self.rh_vertno,
+                         data=rh_data[:, 0])
+        else:
+            if isinstance(self.vertno, list):
+                write_vertices = self.vertno[0]
+            else:
+                write_vertices = self.vertno
+            if ftype == 'stc':
+                logger.info('Writing STC to disk...')
+                if not (fname.endswith('-vl.stc')
+                        or fname.endswith('-vol.stc')):
+                    fname += '-vl.stc'
+                _write_stc(fname, tmin=self.tmin, tstep=self.tstep,
+                           vertices=write_vertices, data=self.data)
+            elif ftype == 'w':
+                logger.info('Writing STC to disk (w format)...')
+                if not (fname.endswith('-vl.w')
+                        or fname.endswith('-vol.w')):
+                    fname += '-vl.w'
+                _write_w(fname, vertices=write_vertices, data=self.data)
+
+        logger.info('[done]')
+
+    def _remove_kernel_sens_data_(self):
+        """Remove kernel and sensor space data
+
+        Note: self._data is also computed if it is None
+        """
+        if self._kernel is not None or self._sens_data is not None:
+            # we can no longer use the kernel and sens_data
+            logger.info('STC data modified: removing kernel and sensor data')
+            if self._data is None:
+                self._data = np.dot(self._kernel, self._sens_data)
+            self._kernel = None
+            self._sens_data = None
+
+    def __repr__(self):
+        if isinstance(self.vertno, list):
+            nv = sum([len(v) for v in self.vertno])
+        else:
+            nv = self.vertno.size
+        s = "%d vertices" % nv
+        if self.subject is not None:
+            s += ", subject : %s" % self.subject
+        s += ", tmin : %s (ms)" % (1e3 * self.tmin)
+        s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
+        s += ", tstep : %s (ms)" % (1e3 * self.tstep)
+        s += ", data size : %s x %s" % self.shape
+        return "<SourceEstimate  |  %s>" % s
+
+    def crop(self, tmin=None, tmax=None):
+        """Restrict SourceEstimate to a time interval
+
+        Parameters
+        ----------
+        tmin : float or None
+            The first time point in seconds. If None, the first time point
+            present is used.
+        tmax : float or None
+            The last time point in seconds. If None, the last time point
+            present is used.
+        """
+        mask = np.ones(len(self.times), dtype=np.bool)
+        if tmax is not None:
+            mask = mask & (self.times <= tmax)
+        if tmin is not None:
+            mask = mask & (self.times >= tmin)
+            self.tmin = tmin
+
+        if self._kernel is not None and self._sens_data is not None:
+            self._sens_data = self._sens_data[:, mask]
+            self._data = None  # will be recomputed when data is accessed
+        else:
+            self._data = self._data[:, mask]
+
+        self._update_times()
+
+    @verbose
+    def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
+                 verbose=None):
+        """Resample data
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use.
+        npad : int
+            Amount to pad the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        For some data, it may be more accurate to use npad=0 to reduce
+        artifacts. This is dataset dependent -- check your data!
+
+        Note that the sample rate of the original data is inferred from tstep.
+        """
+        # resampling in sensor instead of source space gives a somewhat
+        # different result, so we don't allow it
+        self._remove_kernel_sens_data_()
+
+        o_sfreq = 1.0 / self.tstep
+        self._data = resample(self._data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
+
+        # adjust indirectly affected variables
+        self.tstep = 1.0 / sfreq
+        self._update_times()
+
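+    # Illustrative usage sketch (assumes ``stc`` was sampled at 1000 Hz,
+    # i.e. tstep == 0.001; the original rate is inferred from tstep):
+    #
+    #     stc.resample(250.)  # tstep becomes 0.004 s; times are updated
+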
+    @property
+    def data(self):
+        if self._data is None:
+            # compute the solution the first time the data is accessed
+            # return a "notify array", so we can later remove the kernel
+            # and sensor data if the user modifies self._data
+            self._data = _NotifyArray(np.dot(self._kernel, self._sens_data),
+                modify_callback=self._remove_kernel_sens_data_)
+        return self._data
+
+    @property
+    def lh_data(self):
+        return self.data[:len(self.lh_vertno)]
+
+    @property
+    def rh_data(self):
+        return self.data[len(self.lh_vertno):]
+
+    @property
+    def lh_vertno(self):
+        return self.vertno[0]
+
+    @property
+    def rh_vertno(self):
+        return self.vertno[1]
+
+    @property
+    def shape(self):
+        if self._data is not None:
+            return self._data.shape
+        return (self._kernel.shape[0], self._sens_data.shape[1])
+
+    def is_surface(self):
+        """Returns True if source estimate is defined over surfaces
+        """
+        return isinstance(self.vertno, list) and len(self.vertno) == 2
+
+    def _update_times(self):
+        """Update the times attribute after changing tmin, tmax, or tstep"""
+        self.times = self.tmin + (self.tstep * np.arange(self.shape[1]))
+
+    def __add__(self, a):
+        stc = copy.deepcopy(self)
+        stc += a
+        return stc
+
+    def __iadd__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, SourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data += a.data
+        else:
+            self._data += a
+        return self
+
+    def __sub__(self, a):
+        stc = copy.deepcopy(self)
+        stc -= a
+        return stc
+
+    def __isub__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, SourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data -= a.data
+        else:
+            self._data -= a
+        return self
+
+    def __div__(self, a):
+        stc = copy.deepcopy(self)
+        stc /= a
+        return stc
+
+    def __idiv__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, SourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data /= a.data
+        else:
+            self._data /= a
+        return self
+
+    def __mul__(self, a):
+        stc = copy.deepcopy(self)
+        stc *= a
+        return stc
+
+    def __imul__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, SourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data *= a.data
+        else:
+            self._data *= a
+        return self
+
+    def __pow__(self, a):
+        stc = copy.deepcopy(self)
+        stc **= a
+        return stc
+
+    def __ipow__(self, a):
+        self._remove_kernel_sens_data_()
+        self._data **= a
+        return self
+
+    def __radd__(self, a):
+        return self + a
+
+    def __rsub__(self, a):
+        return self - a
+
+    def __rmul__(self, a):
+        return self * a
+
+    def __rdiv__(self, a):
+        return self / a
+
+    def __neg__(self):
+        stc = copy.deepcopy(self)
+        stc._remove_kernel_sens_data_()
+        stc._data *= -1
+        return stc
+
+    def __pos__(self):
+        return self
+
+    def sqrt(self):
+        """Return copy of SourceEstimate with sqrt(data)."""
+        return self ** (0.5)
+
+    def copy(self):
+        """Return copy of SourceEstimate instance"""
+        return copy.deepcopy(self)
+
+    def bin(self, width, tstart=None, tstop=None, func=np.mean):
+        """Returns a SourceEstimate object with data summarized over time bins
+
+        Time bins of ``width`` seconds. This method is intended for
+        visualization only. No filter is applied to the data before binning,
+        making the method inappropriate as a tool for downsampling data.
+
+        Parameters
+        ----------
+        width : scalar
+            Width of the individual bins in seconds.
+        tstart : scalar | None
+            Time point where the first bin starts. The default is the first
+            time point of the stc.
+        tstop : scalar | None
+            Last possible time point contained in a bin (if the last bin would
+            be shorter than width it is dropped). The default is the last time
+            point of the stc.
+        func : callable
+            Function that is applied to summarize the data. Needs to accept a
+            numpy.array as first input and an ``axis`` keyword argument.
+        """
+        if tstart is None:
+            tstart = self.tmin
+        if tstop is None:
+            tstop = self.times[-1]
+
+        times = np.arange(tstart, tstop + self.tstep, width)
+        nv, _ = self.shape
+        nt = len(times) - 1
+        data = np.empty((nv, nt), dtype=self.data.dtype)
+        for i in xrange(nt):
+            idx = (self.times >= times[i]) & (self.times < times[i + 1])
+            data[:, i] = func(self.data[:, idx], axis=1)
+
+        tmin = times[0] + width / 2.
+        stc = SourceEstimate(data, vertices=self.vertno,
+                             tmin=tmin, tstep=width, subject=self.subject)
+        return stc
+
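+    # Illustrative usage sketch: summarize ``stc`` in 50 ms bins for
+    # plotting; any reducing function with an ``axis`` keyword (e.g.
+    # np.max) can replace the default np.mean.
+    #
+    #     stc_binned = stc.bin(width=0.05, func=np.mean)
+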
+    def _hemilabel_stc(self, label):
+        is_surface = self.is_surface()
+
+        # find applicable SourceEstimate vertices
+        if is_surface:
+            if label.hemi == 'lh':
+                stc_vertices = self.vertno[0]
+            else:
+                stc_vertices = self.vertno[1]
+        else:
+            stc_vertices = self.vertno[0]
+
+        # find index of the Label's vertices
+        idx = np.nonzero(map(label.vertices.__contains__, stc_vertices))[0]
+
+        # find output vertices
+        vertices = stc_vertices[idx]
+
+        # find data
+        if is_surface and (label.hemi == 'rh'):
+            values = self.data[idx + len(self.vertno[0])]
+        else:
+            values = self.data[idx]
+
+        return vertices, values
+
+    def in_label(self, label):
+        """Returns a SourceEstimate object restricted to a label
+
+        The returned SourceEstimate contains the time courses of
+        activation of all sources inside the label.
+
+        Parameters
+        ----------
+        label : Label | BiHemiLabel
+            The label (as created for example by mne.read_label). If the label
+            does not match any sources in the SourceEstimate, a ValueError is
+            raised.
+        """
+        if not self.is_surface():
+            raise NotImplementedError
+        # make sure label and stc are compatible
+        if label.subject is not None and self.subject is not None \
+                and label.subject != self.subject:
+            raise RuntimeError('label and stc must have same subject names, '
+                               'currently "%s" and "%s"' % (label.subject,
+                                                            self.subject))
+
+        if label.hemi == 'both':
+            lh_vert, lh_val = self._hemilabel_stc(label.lh)
+            rh_vert, rh_val = self._hemilabel_stc(label.rh)
+            vertices = [lh_vert, rh_vert]
+            values = np.vstack((lh_val, rh_val))
+        elif label.hemi == 'lh':
+            lh_vert, values = self._hemilabel_stc(label)
+            vertices = [lh_vert, np.array([])]
+        elif label.hemi == 'rh':
+            rh_vert, values = self._hemilabel_stc(label)
+            vertices = [np.array([]), rh_vert]
+        else:
+            raise TypeError("Expected  Label or BiHemiLabel; got %r" % label)
+
+        if sum(map(len, vertices)) == 0:
+            raise ValueError('No vertices match the label in the stc file')
+
+        label_stc = SourceEstimate(values, vertices=vertices,
+                                   tmin=self.tmin, tstep=self.tstep,
+                                   subject=self.subject)
+        return label_stc
+
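+    # Illustrative usage sketch ('aud-lh.label' is a hypothetical label
+    # file name):
+    #
+    #     label = mne.read_label('aud-lh.label')
+    #     stc_label = stc.in_label(label)  # only sources inside the label
+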
+    def expand(self, vertno):
+        """Expand SourceEstimate to include more vertices
+
+        This will add rows to stc.data (zero-filled) and modify stc.vertno
+        to include all vertices in stc.vertno and the input vertno.
+
+        Parameters
+        ----------
+        vertno : list of array
+            New vertices to add. Can also contain old values.
+        """
+        if not isinstance(vertno, list):
+            raise TypeError('vertno must be a list')
+        if not len(self.vertno) == len(vertno):
+            raise ValueError('vertno must have the same length as stc.vertno')
+
+        # can no longer use kernel and sensor data
+        self._remove_kernel_sens_data_()
+
+        inserters = list()
+        offsets = [0]
+        for vi, (v_old, v_new) in enumerate(zip(self.vertno, vertno)):
+            v_new = np.setdiff1d(v_new, v_old)
+            inds = np.searchsorted(v_old, v_new)
+            # newer numpy might overwrite inds after np.insert, copy here
+            inserters += [inds.copy()]
+            offsets += [len(v_old)]
+            self.vertno[vi] = np.insert(v_old, inds, v_new)
+        inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
+        inds = np.concatenate(inds)
+        new_data = np.zeros((len(inds), self._data.shape[1]))
+        self._data = np.insert(self._data, inds, new_data, axis=0)
+        return self
+
+    @verbose
+    def extract_label_time_course(self, labels, src, mode='mean_flip',
+                                  allow_empty=False, verbose=None):
+        """Extract label time courses for lists of labels
+
+        This function will extract one time course for each label. The way the
+        time courses are extracted depends on the mode parameter.
+
+        Valid values for mode are:
+        'mean': Average within each label.
+        'mean_flip': Average within each label with sign flip depending on
+        source orientation.
+        'pca_flip': Apply an SVD to the time courses within each label and use
+        the scaled and sign-flipped first right-singular vector as the label
+        time course. The scaling is performed such that the power of the label
+        time course is the same as the average per-vertex time course power
+        within the label. The sign of the resulting time course is adjusted by
+        multiplying it with "sign(dot(u, flip))" where u is the first
+        left-singular vector, and flip is a sign-flip vector based on the
+        vertex normals. This procedure assures that the phase does not
+        randomly change by 180 degrees from one stc to the next.
+
+        See also mne.extract_label_time_course to extract time courses for a
+        list of SourceEstimates more efficiently.
+
+        Parameters
+        ----------
+        labels : Label | list of Label
+            The labels for which to extract the time courses.
+        src : list
+            Source spaces for left and right hemisphere.
+        mode : str
+            Extraction mode, see explanation above.
+        allow_empty : bool
+            Instead of emitting an error, return all-zero time course for
+            labels that do not have any vertices in the source estimate.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        label_tc : array, shape=(len(labels), n_times)
+            Extracted time course for each label.
+        """
+        label_tc = extract_label_time_course(self, labels, src, mode=mode,
+                                             return_generator=False,
+                                             allow_empty=allow_empty,
+                                             verbose=verbose)
+
+        return label_tc
+
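+    # Illustrative usage sketch (assumes ``labels`` is a list of Label
+    # objects and ``src`` the matching [lh, rh] source spaces):
+    #
+    #     label_tc = stc.extract_label_time_course(labels, src,
+    #                                              mode='mean_flip')
+    #     # label_tc.shape == (len(labels), n_times)
+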
+    def transform_data(self, transform_fun, fun_args=None,
+                       idx=None, tmin_idx=None, tmax_idx=None, **kwargs):
+        """Get data after a linear (time) transform has been applied
+
+        The transform is applied to each source time course independently.
+
+        Parameters
+        ----------
+        transform_fun : callable
+            The transform to be applied. The first parameter of the function
+            is the input data. The first return value is the transformed
+            data, remaining outputs are ignored. The first dimension of the
+            transformed data has to be the same as the first dimension of the
+            input data.
+        fun_args : tuple | None
+            Additional parameters to be passed to transform_fun.
+        idx : array | None
+            Indices of source time courses for which to compute transform.
+            If None, all time courses are used.
+        tmin_idx : int | None
+            Index of first time point to include. If None, the index of the
+            first time point is used.
+        tmax_idx : int | None
+            Index of the first time point not to include. If None, time points
+            up to (and including) the last time point are included.
+        **kwargs : dict
+            Keyword arguments to be passed to transform_fun.
+
+        Returns
+        -------
+        data_t : ndarray
+            The transformed data.
+
+        .. note::
+            Applying transforms can be significantly faster if the
+            SourceEstimate object was created using "(kernel, sens_data)"
+            for the "data" parameter, as the transform is applied in sensor
+            space. Inverse methods, e.g., "apply_inverse_epochs" or
+            "lcmv_epochs", do this automatically (if possible).
+        """
+
+        if idx is None:
+            # use all time courses by default
+            idx = slice(None, None)
+
+        if fun_args is None:
+            fun_args = tuple()
+
+        if self._kernel is None and self._sens_data is None:
+            # transform source space data directly
+            data_t = transform_fun(self.data[idx, tmin_idx:tmax_idx],
+                                   *fun_args, **kwargs)
+
+            if isinstance(data_t, tuple):
+                # use only first return value
+                data_t = data_t[0]
+        else:
+            # apply transform in sensor space
+            sens_data_t = transform_fun(self._sens_data[:, tmin_idx:tmax_idx],
+                                        *fun_args, **kwargs)
+
+            if isinstance(sens_data_t, tuple):
+                # use only first return value
+                sens_data_t = sens_data_t[0]
+
+            # apply inverse
+            data_shape = sens_data_t.shape
+            if len(data_shape) > 2:
+                # flatten the last dimensions
+                sens_data_t = sens_data_t.reshape(data_shape[0],
+                                                  np.prod(data_shape[1:]))
+
+            data_t = np.dot(self._kernel[idx, :], sens_data_t)
+
+            # restore original shape if necessary
+            if len(data_shape) > 2:
+                data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
+
+        return data_t
+
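+    # Illustrative usage sketch: the Hilbert transform is linear in time,
+    # so it can be applied per source time course to obtain amplitude
+    # envelopes.
+    #
+    #     from scipy.signal import hilbert
+    #     envelope = np.abs(stc.transform_data(hilbert))
+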
+    def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
+                       subjects_dir=None):
+        """Return the vertex on a given surface that is at the center of mass
+        of the activity in stc. Note that all activity must occur in a single
+        hemisphere, otherwise an error is raised. The "mass" of each point in
+        space for computing the spatial center of mass is computed by summing
+        across time, and vice-versa for each point in time in computing the
+        temporal center of mass. This is useful for quantifying spatio-temporal
+        cluster locations, especially when combined with the function
+        mne.source_space.vertex_to_mni().
+
+        Parameters
+        ----------
+        subject : string | None
+            The subject the stc is defined for.
+        hemi : int, or None
+            Calculate the center of mass for the left (0) or right (1)
+            hemisphere. If None, one of the hemispheres must be all zeroes,
+            and the center of mass will be calculated for the other
+            hemisphere (useful for getting COM for clusters).
+        restrict_vertices : bool, or array of int
+            If True, returned vertex will be one from stc. Otherwise, it could
+            be any vertex from surf. If an array of int, the returned vertex
+            will come from that array. For most accurate estimates, do not
+            restrict vertices.
+        subjects_dir : str, or None
+            Path to the SUBJECTS_DIR. If None, the path is obtained by using
+            the environment variable SUBJECTS_DIR.
+
+        Returns
+        -------
+        vertex : int
+            Vertex of the spatial center of mass for the inferred hemisphere,
+            with each vertex weighted by the sum of the stc across time. For a
+            boolean stc, then, this would be weighted purely by the duration
+            each vertex was active.
+        hemi : int
+            Hemisphere the vertex was taken from.
+        t : float
+            Time of the temporal center of mass (weighted by the sum across
+            source vertices).
+
+        References
+        ----------
+        Used in Larson and Lee, "The cortical dynamics underlying effective
+        switching of auditory spatial attention", NeuroImage 2012.
+        """
+        subject = _check_subject(self.subject, subject)
+
+        if not self.is_surface():
+            raise ValueError('Finding COM must be done on surface')
+
+        values = np.sum(self.data, axis=1)  # sum across time
+        vert_inds = [np.arange(len(self.vertno[0])),
+                     np.arange(len(self.vertno[1])) + len(self.vertno[0])]
+        if hemi is None:
+            hemi = np.where(np.array([np.sum(values[vi])
+                            for vi in vert_inds]))[0]
+            if not len(hemi) == 1:
+                raise ValueError('Could not infer hemisphere')
+            hemi = hemi[0]
+        if hemi not in [0, 1]:
+            raise ValueError('hemi must be 0 or 1')
+
+        subjects_dir = get_subjects_dir(subjects_dir)
+
+        values = values[vert_inds[hemi]]
+
+        hemis = ['lh', 'rh']
+        surf = os.path.join(subjects_dir, subject, 'surf',
+                            hemis[hemi] + '.sphere')
+
+        if isinstance(surf, basestring):  # read in surface
+            surf = read_surface(surf)
+
+        if restrict_vertices is False:
+            restrict_vertices = np.arange(surf[0].shape[0])
+        elif restrict_vertices is True:
+            restrict_vertices = self.vertno[hemi]
+
+        if np.any(self.data < 0):
+            raise ValueError('Cannot compute COM with negative values')
+
+        pos = surf[0][self.vertno[hemi], :].T
+        c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
+
+        # Find the vertex closest to the COM
+        vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
+                                            c_o_m) ** 2, axis=1)))
+        vertex = restrict_vertices[vertex]
+
+        # do time center of mass by using the values across space
+        masses = np.sum(self.data, axis=0).astype(float)
+        t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
+        t = self.tmin + self.tstep * t_ind
+        return vertex, hemi, t
+
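+    # Illustrative usage sketch ('sample' is a hypothetical subject name;
+    # the data must be non-negative, e.g. a boolean cluster stc):
+    #
+    #     vertex, hemi, t = stc.center_of_mass(subject='sample')
+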
+    def plot(self, subject=None, surface='inflated', hemi='lh',
+             colormap='hot', time_label='time=%0.2f ms',
+             smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
+             transparent=True, alpha=1.0, time_viewer=False,
+             config_opts={}, subjects_dir=None, figure=None):
+        """Plot SourceEstimates with PySurfer
+
+        Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
+        which will automatically be set by this function. Plotting multiple
+        SourceEstimates with different values for subjects_dir will cause
+        PySurfer to use the wrong FreeSurfer surfaces when using methods of
+        the returned Brain object. It is therefore recommended to set the
+        SUBJECTS_DIR environment variable or always use the same value for
+        subjects_dir (within the same Python session).
+
+        Parameters
+        ----------
+        subject : str | None
+            The subject name corresponding to FreeSurfer environment
+            variable SUBJECT. If None stc.subject will be used. If that
+            is None, the environment will be used.
+        surface : str
+            The type of surface (inflated, white etc.).
+        hemi : str, 'lh' | 'rh' | 'both'
+            The hemisphere to display. Using 'both' opens two separate figures,
+            one for each hemisphere.
+        colormap : str
+            The type of colormap to use.
+        time_label : str
+            How to print info about the time instant visualized.
+        smoothing_steps : int
+            The amount of smoothing.
+        fmin : float
+            The minimum value to display.
+        fmid : float
+            The middle value on the colormap.
+        fmax : float
+            The maximum value for the colormap.
+        transparent : bool
+            If True, use a linear transparency between fmin and fmid.
+        alpha : float
+            Alpha value to apply globally to the overlay.
+        time_viewer : bool
+            Display time viewer GUI.
+        config_opts : dict
+            Keyword arguments for Brain initialization.
+            See pysurfer.viz.Brain.
+        subjects_dir : str
+            The path to the FreeSurfer subjects reconstructions.
+            It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
+        figure : instance of mayavi.core.scene.Scene | None
+            If None, the last figure will be cleaned and a new figure will
+            be created.
+
+        Returns
+        -------
+        brain : Brain | list of Brain
+            An instance of surfer.viz.Brain from PySurfer. For hemi='both',
+            a list with Brain instances for the left and right hemisphere is
+            returned.
+        """
+        brain = plot_source_estimates(self, subject, surface=surface,
+                        hemi=hemi, colormap=colormap, time_label=time_label,
+                        smoothing_steps=smoothing_steps, fmin=fmin, fmid=fmid,
+                        fmax=fmax, transparent=transparent, alpha=alpha,
+                        time_viewer=time_viewer, config_opts=config_opts,
+                        subjects_dir=subjects_dir, figure=figure)
+        return brain
+
+    @verbose
+    def morph(self, subject_to, grade=5, smooth=None,
+              subjects_dir=None, buffer_size=64, n_jobs=1, subject_from=None,
+              verbose=None):
+        """Morph a source estimate from one subject to another
+
+        Parameters
+        ----------
+        subject_to : string
+            Name of the subject on which to morph as named in the SUBJECTS_DIR
+        grade : int, list (of two arrays), or None
+            Resolution of the icosahedral mesh (typically 5). If None, all
+            vertices will be used (potentially filling the surface). If a list,
+            then values will be morphed to the set of vertices specified
+            in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
+            grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+            standard grade 5 source space) can be substantially faster than
+            computing vertex locations. Note that if subject='fsaverage'
+            and grade=5, this set of vertices will automatically be used
+            (instead of computed) for speed, since this is a common morph.
+        smooth : int or None
+            Number of iterations for the smoothing of the surface data.
+            If None, smooth is automatically defined to fill the surface
+            with non-zero values.
+        subjects_dir : string, or None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        buffer_size : int
+            Morph data in chunks of `buffer_size` time instants.
+            Saves memory when morphing long time intervals.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        subject_from : string
+            Name of the original subject as named in the SUBJECTS_DIR.
+            If None, self.subject will be used.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        stc_to : SourceEstimate
+            Source estimate for the destination subject.
+        """
+        subject_from = _check_subject(self.subject, subject_from)
+        return morph_data(subject_from, subject_to, self, grade, smooth,
+                          subjects_dir, buffer_size, n_jobs, verbose)
+
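+    # Illustrative usage sketch: morph to the FreeSurfer fsaverage
+    # template on a standard grade 5 source space.
+    #
+    #     stc_avg = stc.morph('fsaverage', grade=5, smooth=5)
+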
+    def morph_precomputed(self, subject_to, vertices_to, morph_mat,
+                          subject_from=None):
+        """Morph source estimate between subjects using a precomputed matrix
+
+        Parameters
+        ----------
+        subject_to : string
+            Name of the subject on which to morph as named in the SUBJECTS_DIR.
+        vertices_to : list of array of int
+            The vertices on the destination subject's brain.
+        morph_mat : sparse matrix
+            The morphing matrix, usually from compute_morph_matrix.
+        subject_from : string | None
+            Name of the original subject as named in the SUBJECTS_DIR.
+            If None, self.subject will be used.
+
+        Returns
+        -------
+        stc_to : SourceEstimate
+            Source estimate for the destination subject.
+        """
+        subject_from = _check_subject(self.subject, subject_from)
+        return morph_data_precomputed(subject_from, subject_to, self,
+                                      vertices_to, morph_mat)
+
+    def as_data_frame(self, index=None, scale_time=1e3, copy=True):
+        """Represent source estimates as Pandas DataFrame
+
+        Export source estimates in tabular structure with vertices as columns
+        and two additional info columns 'subject' and 'time'.
+        This function is useful to visualize and analyze source time courses
+        with external statistical software such as statsmodels or R.
+
+        Parameters
+        ----------
+        index : tuple of str | None
+            Column to be used as index for the data. Valid string options
+            are 'subject' and 'time'. If None, both info columns
+            ('subject', 'time') are used as the index.
+        scale_time : float
+            Scaling to be applied to time units.
+        copy : bool
+            If True, data will be copied. Otherwise data may be modified
+            in place.
+
+        Returns
+        -------
+        df : instance of DataFrame
+            Source estimates exported into tabular data structure.
+        """
+        pd = _check_pandas_installed()
+
+        default_index = ['subject', 'time']
+        if index is not None:
+            _check_pandas_index_arguments(index, default_index)
+        else:
+            index = default_index
+
+        data = self.data.T
+        shape = data.shape
+        mindex = list()
+        mindex.append(('time', self.times * scale_time))
+        mindex.append(('subject', np.repeat(self.subject, shape[0])))
+
+        if copy:
+            data = data.copy()
+        assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
+
+        vert_names = ['%s %i' % ('LH' if ii < 1 else 'RH', vert)
+                      for ii, vertno in enumerate(self.vertno)
+                      for vert in vertno]
+        df = pd.DataFrame(data, columns=vert_names)
+        for i, (k, v) in enumerate(mindex):
+            df.insert(i, k, v)
+
+        if index is not None:
+            with warnings.catch_warnings(True):
+                df.set_index(index, inplace=True)
+            if 'time' in df.index.names and hasattr(df.index, 'levels'):
+                df.index.levels[1] = df.index.levels[1].astype(int)
+
+        return df
+
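+    # Illustrative usage sketch (requires pandas):
+    #
+    #     df = stc.as_data_frame()  # rows indexed by (subject, time),
+    #                               # one column per source vertex
+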
+###############################################################################
+# Morphing
+
+from .fiff.constants import FIFF
+from .fiff.tag import find_tag
+from .fiff.open import fiff_open
+from .fiff.tree import dir_tree_find
+from .surface import read_bem_surfaces
+
+
+@verbose
+def read_morph_map(subject_from, subject_to, subjects_dir=None,
+                   verbose=None):
+    """Read morph map generated with mne_make_morph_maps
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR.
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR.
+    subjects_dir : string
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    left_map, right_map : sparse matrix
+        The morph maps for the 2 hemispheres.
+    """
+
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    # Does the file exist
+    name = '%s/morph-maps/%s-%s-morph.fif' % (subjects_dir, subject_from,
+                                              subject_to)
+    if not os.path.exists(name):
+        name = '%s/morph-maps/%s-%s-morph.fif' % (subjects_dir, subject_to,
+                                                  subject_from)
+        if not os.path.exists(name):
+            raise ValueError('The requested morph map does not exist\n' +
+                             'Perhaps you need to run the MNE tool:\n' +
+                             '  mne_make_morph_maps --from %s --to %s'
+                             % (subject_from, subject_to))
+
+    fid, tree, _ = fiff_open(name)
+
+    # Locate all maps
+    maps = dir_tree_find(tree, FIFF.FIFFB_MNE_MORPH_MAP)
+    if len(maps) == 0:
+        fid.close()
+        raise ValueError('Morphing map data not found')
+
+    # Find the correct ones
+    left_map = None
+    right_map = None
+    for m in maps:
+        tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_FROM)
+        if tag.data == subject_from:
+            tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_TO)
+            if tag.data == subject_to:
+                #  Names match: which hemisphere is this?
+                tag = find_tag(fid, m, FIFF.FIFF_MNE_HEMI)
+                if tag.data == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
+                    tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
+                    left_map = tag.data
+                    logger.info('    Left-hemisphere map read.')
+                elif tag.data == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
+                    tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
+                    right_map = tag.data
+                    logger.info('    Right-hemisphere map read.')
+
+    fid.close()
+    if left_map is None:
+        raise ValueError('Left hemisphere map not found in %s' % name)
+
+    if right_map is None:
+        raise ValueError('Right hemisphere map not found in %s' % name)
+
+    return left_map, right_map
+
+
+def mesh_edges(tris):
+    """Returns sparse matrix with edges as an adjacency matrix
+
+    Parameters
+    ----------
+    tris : array of shape [n_triangles x 3]
+        The triangles.
+
+    Returns
+    -------
+    edges : sparse matrix
+        The adjacency matrix.
+    """
+    npoints = np.max(tris) + 1
+    ones_ntris = np.ones(3 * len(tris))
+    a, b, c = tris.T
+    x = np.concatenate((a, b, c))
+    y = np.concatenate((b, c, a))
+    edges = coo_matrix((ones_ntris, (x, y)), shape=(npoints, npoints))
+    edges = edges.tocsr()
+    edges = edges + edges.T
+    return edges
+
+
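+# Illustrative usage sketch: adjacency of a single triangle.
+#
+#     tris = np.array([[0, 1, 2]])
+#     adj = mesh_edges(tris)  # 3 x 3 CSR matrix, 6 nonzero entries
+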
+def mesh_dist(tris, vert):
+    """Compute adjacency matrix weighted by distances
+
+    It generates an adjacency matrix where the entries are the distances
+    between neighboring vertices.
+
+    Parameters
+    ----------
+    tris : array (n_tris x 3)
+        Mesh triangulation
+    vert : array (n_vert x 3)
+        Vertex locations
+
+    Returns
+    -------
+    dist_matrix : scipy.sparse.csr_matrix
+        Sparse matrix with distances between adjacent vertices
+    """
+    edges = mesh_edges(tris).tocoo()
+
+    # Euclidean distances between neighboring vertices
+    dist = np.sqrt(np.sum((vert[edges.row, :] - vert[edges.col, :]) ** 2,
+                          axis=1))
+
+    dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape)
+
+    return dist_matrix
+
+
+@verbose
+def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
+                  verbose=None):
+    """Morph data from one subject's source space to another
+
+    Parameters
+    ----------
+    data : array, or csr sparse matrix
+        An n_vertices x n_times (or other dimension) dataset to morph.
+    idx_use : array of int
+        Vertices from the original subject's data.
+    e : sparse matrix
+        The mesh edges of the "from" subject.
+    smooth : int
+        Number of smoothing iterations to perform. A hard limit of 100 is
+        also imposed.
+    n_vertices : int
+        Number of vertices.
+    nearest : array of int
+        Vertices on the destination surface to use.
+    maps : sparse matrix
+        Morph map from one subject to the other.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    data_morphed : array, or csr sparse matrix
+        The morphed data (same type as input).
+    """
+
+    n_iter = 99  # maximum number of smoothing iterations (minus one)
+    if smooth is not None:
+        smooth -= 1
+    # make sure we're in CSR format
+    e = e.tocsr()
+    if sparse.issparse(data):
+        use_sparse = True
+        if not isinstance(data, sparse.csr_matrix):
+            data = data.tocsr()
+    else:
+        use_sparse = False
+    done = False
+    # do the smoothing
+    for k in range(n_iter + 1):
+        # get the row sum
+        mult = np.zeros(e.shape[1])
+        mult[idx_use] = 1
+        idx_use_data = idx_use
+        data_sum = e * mult
+
+        # new indices are non-zero sums
+        idx_use = np.where(data_sum)[0]
+
+        # typically want to make the next iteration have these indices
+        idx_out = idx_use
+
+        # figure out if this is the last iteration
+        if smooth is None:
+            if k == n_iter or len(idx_use) >= n_vertices:
+                # stop when vertices filled
+                idx_out = None
+                done = True
+        elif k == smooth:
+            idx_out = None
+            done = True
+
+        # do standard smoothing multiplication
+        data = _morph_mult(data, e, use_sparse, idx_use_data, idx_out)
+
+        if done:
+            break
+
+        # do standard normalization
+        if use_sparse:
+            data.data /= data_sum[idx_use].repeat(np.diff(data.indptr))
+        else:
+            data /= data_sum[idx_use][:, None]
+
+    # do special normalization for last iteration
+    if use_sparse:
+        data_sum[data_sum == 0] = 1
+        data.data /= data_sum.repeat(np.diff(data.indptr))
+    else:
+        data[idx_use, :] /= data_sum[idx_use][:, None]
+
+    logger.info('    %d smooth iterations done.' % (k + 1))
+    data_morphed = maps[nearest, :] * data
+    return data_morphed
+
+
+def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None):
+    """Helper for morphing
+
+    Equivalent to "data = (e[:, idx_use_data] * data)[idx_use_out]"
+    but faster.
+    """
+    if len(idx_use_data) < e.shape[1]:
+        if use_sparse:
+            data = e[:, idx_use_data] * data
+        else:
+            # constructing a new sparse matrix is faster than sub-indexing
+            # e[:, idx_use_data]!
+            col, row = np.meshgrid(np.arange(data.shape[1]), idx_use_data)
+            d_sparse = sparse.csr_matrix((data.ravel(),
+                                          (row.ravel(), col.ravel())),
+                                         shape=(e.shape[1], data.shape[1]))
+            data = e * d_sparse
+            data = np.asarray(data.todense())
+    else:
+        data = e * data
+
+    # trim data
+    if idx_use_out is not None:
+        data = data[idx_use_out]
+    return data
+
+
+def _compute_nearest(xhs, rr, use_balltree=True):
+    """Find nearest neighbors
+
+    Note: The rows in xhs and rr must all be unit-length vectors, otherwise
+    the result will be incorrect.
+
+    Parameters
+    ----------
+    xhs : array, shape=(n_samples, n_dim)
+        Points of data set.
+    rr : array, shape=(n_query, n_dim)
+        Points to find nearest neighbors for.
+    use_balltree : bool
+        Use fast BallTree based search from scikit-learn. If scikit-learn
+        is not installed it will fall back to the slow brute force search.
+
+    Returns
+    -------
+    nearest : array, shape=(n_query,)
+        Index of nearest neighbor in xhs for every point in rr.
+    """
+    if use_balltree:
+        try:
+            from sklearn.neighbors import BallTree
+        except ImportError:
+            logger.info('Nearest-neighbor searches will be significantly '
+                        'faster if scikit-learn is installed.')
+            use_balltree = False
+
+    if use_balltree:
+        ball_tree = BallTree(xhs)
+        nearest = ball_tree.query(rr, k=1, return_distance=False)[:, 0]
+    else:
+        nearest = np.zeros(len(rr), dtype=np.int)
+        dr = 32
+        for k in range(0, len(rr), dr):
+            dots = np.dot(rr[k:k + dr], xhs.T)
+            nearest[k:k + dr] = np.argmax(dots, axis=1)
+
+    return nearest
+
+
+def _get_subject_sphere_tris(subject, subjects_dir):
+    spheres = [os.path.join(subjects_dir, subject, 'surf',
+                            xh + '.sphere.reg') for xh in ['lh', 'rh']]
+    tris = [read_surface(s)[1] for s in spheres]
+    return tris
+
+
+@verbose
+def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
+               subjects_dir=None, buffer_size=64, n_jobs=1, verbose=None):
+    """Morph a source estimate from one subject to another
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR
+    stc_from : SourceEstimate
+        Source estimates for subject "from" to morph
+    grade : int, list (of two arrays), or None
+        Resolution of the icosahedral mesh (typically 5). If None, all
+        vertices will be used (potentially filling the surface). If a list,
+        then values will be morphed to the set of vertices specified
+        in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
+        grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+        standard grade 5 source space) can be substantially faster than
+        computing vertex locations. Note that if subject='fsaverage'
+        and grade=5, this set of vertices will automatically be used
+        (instead of computed) for speed, since this is a common morph.
+    smooth : int or None
+        Number of iterations for the smoothing of the surface data.
+        If None, smooth is automatically defined to fill the surface
+        with non-zero values.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    buffer_size : int
+        Morph data in chunks of `buffer_size` time instants.
+        Saves memory when morphing long time intervals.
+    n_jobs : int
+        Number of jobs to run in parallel
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc_to : SourceEstimate
+        Source estimate for the destination subject.
+    """
+    if not stc_from.is_surface():
+        raise ValueError('Morphing is only possible with surface source '
+                         'estimates')
+
+    logger.info('Morphing data...')
+    subjects_dir = get_subjects_dir(subjects_dir)
+    nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs)
+    tris = _get_subject_sphere_tris(subject_from, subjects_dir)
+    maps = read_morph_map(subject_from, subject_to, subjects_dir)
+
+    # morph the data
+    data = [stc_from.lh_data, stc_from.rh_data]
+    data_morphed = [None, None]
+
+    n_chunks = ceil(stc_from.data.shape[1] / float(buffer_size))
+
+    parallel, my_morph_buffer, _ = parallel_func(_morph_buffer, n_jobs)
+
+    for hemi in [0, 1]:
+        e = mesh_edges(tris[hemi])
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        e = e + sparse.eye(n_vertices, n_vertices)
+        idx_use = stc_from.vertno[hemi]
+        if len(idx_use) == 0:
+            continue
+        data_morphed[hemi] = np.concatenate(
+            parallel(my_morph_buffer(data_buffer, idx_use, e, smooth,
+                                     n_vertices, nearest[hemi], maps[hemi])
+                     for data_buffer
+                     in np.array_split(data[hemi], n_chunks, axis=1)), axis=1)
+
+    vertices = [nearest[0], nearest[1]]
+    if data_morphed[0] is None:
+        if data_morphed[1] is None:
+            data = np.r_[[], []]
+            vertices = [np.array([], dtype=int), np.array([], dtype=int)]
+        else:
+            data = data_morphed[1]
+            vertices = [np.array([], dtype=int), vertices[1]]
+    elif data_morphed[1] is None:
+        data = data_morphed[0]
+        vertices = [vertices[0], np.array([], dtype=int)]
+    else:
+        data = np.r_[data_morphed[0], data_morphed[1]]
+
+    stc_to = SourceEstimate(data, vertices, stc_from.tmin, stc_from.tstep,
+                            subject=subject_to, verbose=stc_from.verbose)
+    logger.info('[done]')
+
+    return stc_to
+
+
+@verbose
+def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
+                         smooth=None, subjects_dir=None, verbose=None):
+    """Get a matrix that morphs data from one subject to another
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR
+    vertices_from : list of arrays of int
+        Vertices for each hemisphere (LH, RH) for subject_from
+    vertices_to : list of arrays of int
+        Vertices for each hemisphere (LH, RH) for subject_to
+    smooth : int or None
+        Number of iterations for the smoothing of the surface data.
+        If None, smooth is automatically defined to fill the surface
+        with non-zero values.
+    subjects_dir : string
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    morph_matrix : sparse matrix
+        matrix that morphs data from subject_from to subject_to
+    """
+    logger.info('Computing morph matrix...')
+    subjects_dir = get_subjects_dir(subjects_dir)
+    tris = _get_subject_sphere_tris(subject_from, subjects_dir)
+    maps = read_morph_map(subject_from, subject_to, subjects_dir)
+
+    morpher = [None] * 2
+    for hemi in [0, 1]:
+        e = mesh_edges(tris[hemi])
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        e = e + sparse.eye(n_vertices, n_vertices)
+        idx_use = vertices_from[hemi]
+        if len(idx_use) == 0:
+            morpher[hemi] = []
+            continue
+        m = sparse.eye(len(idx_use), len(idx_use), format='csr')
+        morpher[hemi] = _morph_buffer(m, idx_use, e, smooth, n_vertices,
+                                      vertices_to[hemi], maps[hemi])
+    # be careful about zero-length arrays
+    if isinstance(morpher[0], list):
+        morpher = morpher[1]
+    elif isinstance(morpher[1], list):
+        morpher = morpher[0]
+    else:
+        morpher = sparse_block_diag(morpher, format='csr')
+    logger.info('[done]')
+    return morpher
+
+
+@verbose
+def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
+                      verbose=None):
+    """Convert a grade to source space vertices for a given subject
+
+    Parameters
+    ----------
+    subject : str
+        Name of the subject
+    grade : int, list (of two arrays), or None
+        Resolution of the icosahedral mesh (typically 5). If None, all
+        vertices will be used (potentially filling the surface). If a list,
+        then values will be morphed to the set of vertices specified
+        in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
+        grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+        standard grade 5 source space) can be substantially faster than
+        computing vertex locations. Note that if subject='fsaverage'
+        and grade=5, this set of vertices will automatically be used
+        (instead of computed) for speed, since this is a common morph.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment
+    n_jobs : int
+        Number of jobs to run in parallel
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    vertices : list of arrays of int
+        Vertex numbers for LH and RH
+    """
+    # add special case for fsaverage for speed
+    if subject == 'fsaverage' and grade == 5:
+        return [np.arange(10242), np.arange(10242)]
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    spheres_to = [os.path.join(subjects_dir, subject, 'surf',
+                               xh + '.sphere.reg') for xh in ['lh', 'rh']]
+    lhs, rhs = [read_surface(s)[0] for s in spheres_to]
+
+    if grade is not None:  # fill a subset of vertices
+        if isinstance(grade, list):
+            if not len(grade) == 2:
+                raise ValueError('grade as a list must have two elements '
+                                 '(arrays of output vertices)')
+            vertices = grade
+        else:
+            # find which vertices to use in "to mesh"
+            ico = _get_ico_tris(grade, return_surf=True)
+            lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None]
+            rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None]
+
+            # Compute nearest vertices in high dim mesh
+            parallel, my_compute_nearest, _ = \
+                parallel_func(_compute_nearest, n_jobs)
+            lhs, rhs, rr = [a.astype(np.float32)
+                            for a in [lhs, rhs, ico['rr']]]
+            vertices = parallel(my_compute_nearest(xhs, rr)
+                                for xhs in [lhs, rhs])
+    else:  # potentially fill the surface
+        vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])]
+
+    return vertices
+
+
+def morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to,
+                           morph_mat):
+    """Morph source estimate between subjects using a precomputed matrix
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR.
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR.
+    stc_from : SourceEstimate
+        Source estimates for subject "from" to morph.
+    vertices_to : list of array of int
+        The vertices on the destination subject's brain.
+    morph_mat : sparse matrix
+        The morphing matrix, typically from compute_morph_matrix.
+
+    Returns
+    -------
+    stc_to : SourceEstimate
+        Source estimate for the destination subject.
+    """
+    if not sparse.issparse(morph_mat):
+        raise ValueError('morph_mat must be a sparse matrix')
+
+    if not isinstance(vertices_to, list) or not len(vertices_to) == 2:
+        raise ValueError('vertices_to must be a list of length 2')
+
+    if not sum(len(v) for v in vertices_to) == morph_mat.shape[0]:
+        raise ValueError('number of vertices in vertices_to must match '
+                         'morph_mat.shape[0]')
+    if not stc_from.data.shape[0] == morph_mat.shape[1]:
+        raise ValueError('stc_from.data.shape[0] must be the same as '
+                         'morph_mat.shape[1]')
+
+    if stc_from.subject is not None and stc_from.subject != subject_from:
+        raise ValueError('stc_from.subject and subject_from must match')
+    data = morph_mat * stc_from.data
+    stc_to = SourceEstimate(data, vertices_to, stc_from.tmin, stc_from.tstep,
+                            verbose=stc_from.verbose, subject=subject_to)
+    return stc_to
+
+
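+# Illustrative usage sketch: compute the morph matrix once and reuse it
+# for many source estimates ('sample' is a hypothetical subject name):
+#
+#     vertices_to = grade_to_vertices('fsaverage', grade=5)
+#     mmat = compute_morph_matrix('sample', 'fsaverage', stc.vertno,
+#                                 vertices_to)
+#     stc_avg = morph_data_precomputed('sample', 'fsaverage', stc,
+#                                      vertices_to, mmat)
+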
+@verbose
+def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):
+    """Compute connectivity for a source space activation over time
+
+    Parameters
+    ----------
+    src : source space
+        The source space.
+    n_times : int
+        Number of time instants.
+    dist : float, or None
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors. If None, immediate neighbors
+        are extracted from an ico surface.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatio-temporal
+        graph structure. If N is the number of vertices in the
+        source space, the first N nodes in the graph are the
+        vertices at time 1, the nodes from N+1 to 2N are the vertices
+        at time 2, etc.
+    """
+    if dist is None:
+        if src[0]['use_tris'] is None:
+            raise Exception("The source space does not appear to be an ico "
+                            "surface. Connectivity cannot be extracted from "
+                            "non-ico source spaces.")
+        used_verts = [np.unique(s['use_tris']) for s in src]
+        lh_tris = np.searchsorted(used_verts[0], src[0]['use_tris'])
+        rh_tris = np.searchsorted(used_verts[1], src[1]['use_tris'])
+        tris = np.concatenate((lh_tris, rh_tris + np.max(lh_tris) + 1))
+        connectivity = spatio_temporal_tris_connectivity(tris, n_times)
+
+        # deal with source space only using a subset of vertices
+        masks = [in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
+        if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:
+            raise ValueError('Used vertices do not match connectivity shape')
+        if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
+            raise ValueError('Vertex mask does not match number of vertices')
+        masks = np.concatenate(masks)
+        missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
+        if missing:
+            warnings.warn('%0.1f%% of original source space vertices have been'
+                          ' omitted, tri-based connectivity will have holes.\n'
+                          'Consider using distance-based connectivity or '
+                          'morphing data to all source space vertices.'
+                          % missing)
+            masks = np.tile(masks, n_times)
+            masks = np.where(masks)[0]
+            connectivity = connectivity.tocsr()
+            connectivity = connectivity[masks]
+            connectivity = connectivity[:, masks]
+            # return to original format
+            connectivity = connectivity.tocoo()
+
+        return connectivity
+    else:  # use distances computed and saved in the source space file
+        return spatio_temporal_dist_connectivity(src, n_times, dist)
+
+
+@verbose
+def grade_to_tris(grade, verbose=None):
+    """Get tris defined for a certain grade
+
+    Parameters
+    ----------
+    grade : int
+        Grade of an icosahedral mesh.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    tris : list
+        2-element list containing Nx3 arrays of tris, suitable for use in
+        spatio_temporal_tris_connectivity.
+    """
+    a = _get_ico_tris(grade, None, False)
+    tris = np.concatenate((a, a + (np.max(a) + 1)))
+    return tris
+
+
+@verbose
+def spatio_temporal_tris_connectivity(tris, n_times, verbose=None):
+    """Compute connectivity from triangles and time instants
+
+    Parameters
+    ----------
+    tris : array
+        N x 3 array defining triangles.
+    n_times : int
+        Number of time points
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatio-temporal
+        graph structure. If N is the number of vertices in the
+        source space, the first N nodes in the graph are the
+        vertices at time 1, the nodes from N+1 to 2N are the vertices
+        at time 2, etc.
+    """
+    edges = mesh_edges(tris).tocoo()
+    return _get_connectivity_from_edges(edges, n_times)
+
+
+@verbose
+def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):
+    """Compute connectivity from distances in a source space and time instants
+
+    Parameters
+    ----------
+    src : source space
+        The source space must have distances between vertices computed, such
+        that src['dist'] exists and is useful. This can be obtained using MNE
+        with a call to mne_add_patch_info with the --dist option.
+    n_times : int
+        Number of time points
+    dist : float
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatio-temporal
+        graph structure. If N is the number of vertices in the
+        source space, the first N nodes in the graph are the
+        vertices at time 1, the nodes from N+1 to 2N are the vertices
+        at time 2, etc.
+    """
+    if src[0]['dist'] is None:
+        raise RuntimeError('src must have distances included, consider using\n'
+                           'mne_add_patch_info with --dist argument')
+    edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]
+                              for s in src])
+    edges.data[:] = np.less_equal(edges.data, dist)
+    # clean it up and put it in coo format
+    edges = edges.tocsr()
+    edges.eliminate_zeros()
+    edges = edges.tocoo()
+    return _get_connectivity_from_edges(edges, n_times)
+
+
+@verbose
+def spatial_src_connectivity(src, dist=None, verbose=None):
+    """Compute connectivity for a source space activation
+
+    Parameters
+    ----------
+    src : source space
+        The source space.
+    dist : float, or None
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors. If None, immediate neighbors
+        are extracted from an ico surface.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatial graph structure.
+    """
+    return spatio_temporal_src_connectivity(src, 1, dist)
+
+
+@verbose
+def spatial_tris_connectivity(tris, verbose=None):
+    """Compute connectivity from triangles
+
+    Parameters
+    ----------
+    tris : array
+        N x 3 array defining triangles.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatial graph structure.
+    """
+    return spatio_temporal_tris_connectivity(tris, 1)
+
+
+@verbose
+def spatial_dist_connectivity(src, dist, verbose=None):
+    """Compute connectivity from distances in a source space
+
+    Parameters
+    ----------
+    src : source space
+        The source space must have distances between vertices computed, such
+        that src['dist'] exists and is not None. This can be obtained using
+        MNE with a call to mne_add_patch_info with the --dist option.
+    dist : float
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatial graph structure.
+    """
+    return spatio_temporal_dist_connectivity(src, 1, dist)
+
+
+def sparse_block_diag(mats, format=None, dtype=None):
+    """An implementation of scipy.sparse.block_diag since old versions of
+    scipy don't have it. Forms a sparse matrix by stacking matrices in block
+    diagonal form.
+
+    Parameters
+    ----------
+    mats : list of matrices
+        Input matrices.
+    format : str, optional
+        The sparse format of the result (e.g. "csr"). If not given, the
+        matrix is returned in "coo" format.
+    dtype : dtype specifier, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of blocks.
+
+    Returns
+    -------
+    res : sparse matrix
+    """
+    try:
+        return sparse.block_diag(mats, format=format, dtype=dtype)
+    except AttributeError:
+        nmat = len(mats)
+        rows = []
+        for ia, a in enumerate(mats):
+            row = [None] * nmat
+            row[ia] = a
+            rows.append(row)
+        return sparse.bmat(rows, format=format, dtype=dtype)
+
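+# Editor's note: a minimal sketch (not upstream code) showing that
+# sparse_block_diag matches scipy.sparse.block_diag on either code path:
+#
+#     >>> from scipy import sparse
+#     >>> a = sparse.eye(2).tocsr()
+#     >>> b = sparse.eye(3).tocsr()
+#     >>> sparse_block_diag([a, b]).shape
+#     (5, 5)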
+
+@verbose
+def _get_connectivity_from_edges(edges, n_times, verbose=None):
+    """Given edges sparse matrix, create connectivity matrix"""
+    n_vertices = edges.shape[0]
+    logger.info("-- number of connected vertices : %d" % n_vertices)
+    nnz = edges.col.size
+    aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int)
+    col = (edges.col[None, :] + aux).ravel()
+    row = (edges.row[None, :] + aux).ravel()
+    if n_times > 1:  # add temporal edges
+        o = (n_vertices * np.arange(n_times - 1)[:, None]
+             + np.arange(n_vertices)[None, :]).ravel()
+        d = (n_vertices * np.arange(1, n_times)[:, None]
+             + np.arange(n_vertices)[None, :]).ravel()
+        row = np.concatenate((row, o, d))
+        col = np.concatenate((col, d, o))
+    data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
+                   dtype=np.int)
+    connectivity = coo_matrix((data, (row, col)),
+                              shape=(n_times * n_vertices, ) * 2)
+    return connectivity
+
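+# Editor's note: a toy sketch (not upstream code). The spatial edges are
+# replicated once per time point, and extra temporal edges link each vertex
+# to itself at the neighboring time points:
+#
+#     >>> import numpy as np
+#     >>> from scipy.sparse import coo_matrix
+#     >>> edges = coo_matrix(np.array([[0, 1, 0],
+#     ...                              [1, 0, 1],
+#     ...                              [0, 1, 0]]))
+#     >>> conn = _get_connectivity_from_edges(edges, n_times=2)
+#     >>> conn.shape
+#     (6, 6)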
+
+@verbose
+def _get_ico_tris(grade, verbose=None, return_surf=False):
+    """Get triangles for ico surface."""
+    ico_file_name = os.path.join(os.path.dirname(__file__), 'data',
+                                 'icos.fif.gz')
+    ico = read_bem_surfaces(ico_file_name, s_id=9000 + grade)
+
+    if not return_surf:
+        return ico['tris']
+    else:
+        return ico
+
+
+def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
+    """Save a volume source estimate in a nifti file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the generated nifti file.
+    stc : instance of SourceEstimate
+        The source estimate
+    src : list
+        The list of source spaces (should be of length 1).
+    dest : 'mri' | 'surf'
+        If 'mri' the volume is defined in the coordinate system of
+        the original T1 image. If 'surf' the coordinate system
+        of the FreeSurfer surface is used (Surface RAS).
+    mri_resolution : bool
+        If True the image is saved in MRI resolution.
+        WARNING: if you have many time points the file produced can be
+        huge.
+
+    Returns
+    -------
+    img : instance of Nifti1Image
+        The image object.
+    """
+    if stc.is_surface():
+        raise Exception('Only volume source estimates can be saved as '
+                        'volumes')
+
+    n_times = stc.data.shape[1]
+    shape = src[0]['shape']
+    shape3d = (shape[2], shape[1], shape[0])
+    shape = (n_times, shape[2], shape[1], shape[0])
+    vol = np.zeros(shape)
+    mask3d = src[0]['inuse'].reshape(shape3d).astype(np.bool)
+
+    if mri_resolution:
+        mri_shape3d = (src[0]['mri_height'], src[0]['mri_depth'],
+                       src[0]['mri_width'])
+        mri_shape = (n_times, src[0]['mri_height'], src[0]['mri_depth'],
+                     src[0]['mri_width'])
+        mri_vol = np.zeros(mri_shape)
+        interpolator = src[0]['interpolator']
+
+    for k, v in enumerate(vol):
+        v[mask3d] = stc.data[:, k]
+        if mri_resolution:
+            mri_vol[k] = (interpolator * v.ravel()).reshape(mri_shape3d)
+
+    if mri_resolution:
+        vol = mri_vol
+
+    vol = vol.T
+
+    if mri_resolution:
+        affine = src[0]['vox_mri_t']['trans'].copy()
+    else:
+        affine = src[0]['src_mri_t']['trans'].copy()
+    if dest == 'mri':
+        affine = np.dot(src[0]['mri_ras_t']['trans'], affine)
+    affine[:3] *= 1e3
+
+    try:
+        import nibabel as nib  # lazy import to avoid dependency
+    except ImportError:
+        raise ImportError("nibabel is required to save volume images.")
+
+    header = nib.nifti1.Nifti1Header()
+    header.set_xyzt_units('mm', 'msec')
+    header['pixdim'][4] = 1e3 * stc.tstep
+    img = nib.Nifti1Image(vol, affine, header=header)
+    nib.save(img, fname)
+    return img
+
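+# Editor's note: a usage sketch with hypothetical names (not upstream code),
+# assuming `stc` is a volume SourceEstimate computed with the volume source
+# space in `src`; nibabel must be installed:
+#
+#     >>> src = read_source_spaces('sample-vol-src.fif')  # hypothetical
+#     >>> img = save_stc_as_volume('stc.nii.gz', stc, src,
+#     ...                          mri_resolution=True)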
+
+def _get_label_flip(labels, label_vertidx, src):
+    """Helper function to get sign-flip for labels"""
+    # do the import here to avoid circular dependency
+    from .label import label_sign_flip
+    # get the sign-flip vector for every label
+    label_flip = list()
+    for label, vertidx in zip(labels, label_vertidx):
+        if label.hemi == 'both':
+            raise ValueError('BiHemiLabel not supported when using sign-flip')
+        if vertidx is not None:
+            flip = label_sign_flip(label, src)[:, None]
+        else:
+            flip = None
+        label_flip.append(flip)
+
+    return label_flip
+
+
+@verbose
+def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
+                                   allow_empty=False, verbose=None):
+    """Generator for extract_label_time_course"""
+
+    n_labels = len(labels)
+
+    # get vertno from source space, they have to be the same as in the stcs
+    vertno = [s['vertno'] for s in src]
+    nvert = [len(vn) for vn in vertno]
+
+    # do the initialization
+    label_vertidx = list()
+    for label in labels:
+        if label.hemi == 'both':
+            # handle BiHemiLabel
+            sub_labels = [label.lh, label.rh]
+        else:
+            sub_labels = [label]
+        this_vertidx = list()
+        for slabel in sub_labels:
+            if slabel.hemi == 'lh':
+                this_vertno = np.intersect1d(vertno[0], slabel.vertices)
+                vertidx = np.searchsorted(vertno[0], this_vertno)
+            elif slabel.hemi == 'rh':
+                this_vertno = np.intersect1d(vertno[1], slabel.vertices)
+                vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno)
+            else:
+                raise ValueError('label %s has invalid hemi' % label.name)
+            this_vertidx.append(vertidx)
+
+        # convert it to an array
+        this_vertidx = np.concatenate(this_vertidx)
+        if len(this_vertidx) == 0:
+            msg = ('source space does not contain any vertices for label %s'
+                   % label.name)
+            if not allow_empty:
+                raise ValueError(msg)
+            else:
+                logger.warn(msg + '. Assigning all-zero time series to label.')
+            this_vertidx = None  # to later check if label is empty
+
+        label_vertidx.append(this_vertidx)
+
+    # mode-dependent initialization
+    if mode == 'mean':
+        pass  # we have this here to catch invalid values for mode
+    elif mode == 'mean_flip':
+        # get the sign-flip vector for every label
+        label_flip = _get_label_flip(labels, label_vertidx, src)
+    elif mode == 'pca_flip':
+        # get the sign-flip vector for every label
+        label_flip = _get_label_flip(labels, label_vertidx, src)
+    else:
+        raise ValueError('%s is an invalid mode' % mode)
+
+    # loop through source estimates and extract time series
+    for stc in stcs:
+
+        # make sure the stc is compatible with the source space
+        if len(stc.vertno[0]) != nvert[0] or len(stc.vertno[1]) != nvert[1]:
+            raise ValueError('stc not compatible with source space')
+        if any([np.any(svn != vn) for svn, vn in zip(stc.vertno, vertno)]):
+            raise ValueError('stc not compatible with source space')
+
+        logger.info('Extracting time courses for %d labels (mode: %s)'
+                    % (n_labels, mode))
+
+        # do the extraction
+        label_tc = np.zeros((n_labels, stc.data.shape[1]),
+                            dtype=stc.data.dtype)
+        if mode == 'mean':
+            for i, vertidx in enumerate(label_vertidx):
+                if vertidx is not None:
+                    label_tc[i] = np.mean(stc.data[vertidx, :], axis=0)
+        elif mode == 'mean_flip':
+            for i, (vertidx, flip) in enumerate(zip(label_vertidx,
+                                                    label_flip)):
+                if vertidx is not None:
+                    label_tc[i] = np.mean(flip * stc.data[vertidx, :], axis=0)
+        elif mode == 'pca_flip':
+            for i, (vertidx, flip) in enumerate(zip(label_vertidx,
+                                                    label_flip)):
+                if vertidx is not None:
+                    U, s, V = linalg.svd(stc.data[vertidx, :],
+                                         full_matrices=False)
+                    # determine sign-flip
+                    sign = np.sign(np.dot(U[:, 0], flip))
+
+                    # use average power in label for scaling
+                    scale = linalg.norm(s) / np.sqrt(len(vertidx))
+
+                    label_tc[i] = sign * scale * V[0]
+        else:
+            raise ValueError('%s is an invalid mode' % mode)
+
+        # this is a generator!
+        yield label_tc
+
+
+@verbose
+def extract_label_time_course(stcs, labels, src, mode='mean_flip',
+                              allow_empty=False, return_generator=False,
+                              verbose=None):
+    """Extract label time course for lists of labels and source estimates
+
+    This function will extract one time course for each label and source
+    estimate. The way the time courses are extracted depends on the mode
+    parameter.
+
+    Valid values for mode are:
+
+    'mean': Average within each label.
+    'mean_flip': Average within each label with sign flip depending on
+        source orientation.
+    'pca_flip': Apply an SVD to the time courses within each label and use
+        the scaled and sign-flipped first right-singular vector as the label
+        time course. The scaling is performed such that the power of the
+        label time course is the same as the average per-vertex time course
+        power within the label. The sign of the resulting time course is
+        adjusted by multiplying it with "sign(dot(u, flip))", where u is the
+        first left-singular vector and flip is a sign-flip vector based on
+        the vertex normals. This procedure ensures that the phase does not
+        randomly change by 180 degrees from one stc to the next.
+
+    Parameters
+    ----------
+    stcs : SourceEstimate | list (or generator) of SourceEstimate
+        The source estimates from which to extract the time course.
+    labels : Label | list of Label
+        The labels for which to extract the time course.
+    src : list
+        Source spaces for left and right hemisphere.
+    mode : str
+        Extraction mode, see explanation above.
+    allow_empty : bool
+        Instead of emitting an error, return all-zero time courses for labels
+        that do not have any vertices in the source estimate.
+    return_generator : bool
+        If True, a generator instead of a list is returned.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    label_tc : array | list (or generator) of array,
+               shape=(len(labels), n_times)
+        Extracted time course for each label and source estimate.
+    """
+    # convert inputs to lists
+    if isinstance(stcs, SourceEstimate):
+        stcs = [stcs]
+        return_several = False
+        return_generator = False
+    else:
+        return_several = True
+
+    if not isinstance(labels, list):
+        labels = [labels]
+
+    label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,
+                                              allow_empty=allow_empty)
+
+    if not return_generator:
+        # do the extraction and return a list
+        label_tc = list(label_tc)
+
+    if not return_several:
+        # input was a single SourceEstimate, return single array
+        label_tc = label_tc[0]
+
+    return label_tc
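+
+# Editor's note: a usage sketch with hypothetical names (not upstream code),
+# assuming `stcs` is a list of SourceEstimate, `labels` a list of Label, and
+# `src` the surface source spaces the estimates were computed with:
+#
+#     >>> label_tc = extract_label_time_course(stcs, labels, src,
+#     ...                                      mode='pca_flip')
+#     >>> label_tc[0].shape  # (len(labels), n_times) for the first stc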
diff --git a/mne/source_space.py b/mne/source_space.py
new file mode 100644
index 0000000..070ba6c
--- /dev/null
+++ b/mne/source_space.py
@@ -0,0 +1,766 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import os.path as op
+from scipy import sparse, linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from .fiff.constants import FIFF
+from .fiff.tree import dir_tree_find
+from .fiff.tag import find_tag, read_tag
+from .fiff.open import fiff_open
+from .fiff.write import start_block, end_block, write_int, \
+                        write_float_sparse_rcs, write_string, \
+                        write_float_matrix, write_int_matrix, \
+                        write_coord_trans, start_file, end_file, write_id
+from .surface import read_surface
+from .utils import get_subjects_dir, run_subprocess, has_freesurfer, \
+                   has_nibabel
+from . import verbose
+
+
+class SourceSpaces(list):
+    """Represent a list of source space
+
+    Currently implemented as a list of dictionaries containing the source
+    space information
+
+    Parameters
+    ----------
+    source_spaces : list
+        A list of dictionaries containing the source space information.
+    info : dict
+        Dictionary with information about the creation of the source space
+        file. Has keys 'working_dir' and 'command_line'.
+
+    Attributes
+    ----------
+    info : dict
+        Dictionary with information about the creation of the source space
+        file. Has keys 'working_dir' and 'command_line'.
+    """
+    def __init__(self, source_spaces, info=None):
+        super(SourceSpaces, self).__init__(source_spaces)
+        if info is None:
+            self.info = dict()
+        else:
+            self.info = dict(info)
+
+    def __repr__(self):
+        ss_repr = []
+        for ss in self:
+            ss_type = ss['type']
+            if ss_type == 'vol':
+                r = ("'vol', shape=%s, n_used=%i"
+                     % (repr(ss['shape']), ss['nuse']))
+            elif ss_type == 'surf':
+                r = "'surf', n_vertices=%i, n_used=%i" % (ss['np'], ss['nuse'])
+            else:
+                r = "%r" % ss_type
+            ss_repr.append('<%s>' % r)
+        ss_repr = ', '.join(ss_repr)
+        return "<SourceSpaces: [{ss}]>".format(ss=ss_repr)
+
+    def save(self, fname):
+        """Save the source spaces to a fif file
+
+        Parameters
+        ----------
+        fname : str
+            File to write.
+        """
+        write_source_spaces(fname, self)
+
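+# Editor's note (not upstream code): since SourceSpaces subclasses list, the
+# usual sequence operations apply directly to the per-space dicts, e.g.:
+#
+#     >>> len(src)        # 2 for a surface source space (lh, rh)
+#     >>> src[0]['nuse']  # dict access on the first space
+#     >>> src.save('out-src.fif')  # hypothetical output name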
+
+def _add_patch_info(s):
+    """Patch information in a source space
+
+    Generate the patch information from the 'nearest' vector in
+    a source space. For vertex in the source space it provides
+    the list of neighboring vertices in the high resolution
+    triangulation.
+
+    Parameters
+    ----------
+    s : dict
+        The source space.
+    """
+    nearest = s['nearest']
+    if nearest is None:
+        s['pinfo'] = None
+        s['patch_inds'] = None
+        return
+
+    logger.info('    Computing patch statistics...')
+
+    indn = np.argsort(nearest)
+    nearest_sorted = nearest[indn]
+
+    steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
+    starti = np.r_[[0], steps]
+    stopi = np.r_[steps, [len(nearest)]]
+
+    pinfo = list()
+    for start, stop in zip(starti, stopi):
+        pinfo.append(np.sort(indn[start:stop]))
+    s['pinfo'] = pinfo
+
+    # compute patch indices of the in-use source space vertices
+    patch_verts = nearest_sorted[steps - 1]
+    s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
+
+    logger.info('    Patch information added...')
+
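+# Editor's note: a worked toy example (not upstream code). With
+# nearest = [0, 0, 2, 2, 2], the argsort/diff bookkeeping above groups the
+# high-resolution vertices by their nearest source vertex, giving
+# s['pinfo'] = [array([0, 1]), array([2, 3, 4])]: vertices 0-1 form the
+# patch of source vertex 0, vertices 2-4 the patch of source vertex 2.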
+
+@verbose
+def read_source_spaces_from_tree(fid, tree, add_geom=False, verbose=None):
+    """Read the source spaces from a FIF file
+
+    Parameters
+    ----------
+    fid : file descriptor
+        An open file descriptor.
+    tree : dict
+        The FIF tree structure if source is a file id.
+    add_geom : bool, optional (default False)
+        Add geometry information to the surfaces.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : SourceSpaces
+        The source spaces.
+    """
+    #   Find all source spaces
+    spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
+    if len(spaces) == 0:
+        raise ValueError('No source spaces found')
+
+    src = list()
+    for s in spaces:
+        logger.info('    Reading a source space...')
+        this = _read_one_source_space(fid, s)
+        logger.info('    [done]')
+        if add_geom:
+            complete_source_space_info(this)
+
+        src.append(this)
+
+    src = SourceSpaces(src)
+    logger.info('    %d source spaces read' % len(spaces))
+
+    return src
+
+
+@verbose
+def read_source_spaces(fname, add_geom=False, verbose=None):
+    """Read the source spaces from a FIF file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file.
+    add_geom : bool, optional (default False)
+        Add geometry information to the surfaces.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : SourceSpaces
+        The source spaces.
+    """
+    fid, tree, _ = fiff_open(fname)
+    src = read_source_spaces_from_tree(fid, tree, add_geom=add_geom,
+                                       verbose=verbose)
+    src.info['fname'] = fname
+
+    node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
+    if node:
+        node = node[0]
+        for p in range(node['nent']):
+            kind = node['directory'][p].kind
+            pos = node['directory'][p].pos
+            tag = read_tag(fid, pos)
+            if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
+                src.info['working_dir'] = tag.data
+            elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
+                src.info['command_line'] = tag.data
+
+    return src
+
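+# Editor's note: a usage sketch with a hypothetical file name (not upstream
+# code), reading a source space file with geometry information added:
+#
+#     >>> src = read_source_spaces('sample-oct-6-src.fif', add_geom=True)
+#     >>> src.info['command_line']  # present if stored in the file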
+
+@verbose
+def _read_one_source_space(fid, this, verbose=None):
+    """Read one source space
+    """
+    FIFF_BEM_SURF_NTRI = 3104
+    FIFF_BEM_SURF_TRIANGLES = 3106
+
+    res = dict()
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
+    if tag is None:
+        res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
+    else:
+        res['id'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
+    if tag is None:
+        raise ValueError('Unknown source space type')
+    else:
+        src_type = int(tag.data)
+        if src_type == 1:
+            res['type'] = 'surf'
+        elif src_type == 2:
+            res['type'] = 'vol'
+        else:
+            raise ValueError('Unknown source space type (%d)' % src_type)
+
+    if res['type'] == 'vol':
+
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
+        if tag is not None:
+            res['shape'] = tuple(tag.data)
+
+        tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
+        if tag is not None:
+            res['src_mri_t'] = tag.data
+
+        parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        if len(parent_mri) == 0:
+            # MNE 2.7.3 (and earlier) didn't store necessary information
+            # about volume coordinate translations. Although there is a
+            # FIFF_COORD_TRANS in the higher level of the FIFF file, this
+            # doesn't contain all the info we need. Safer to return an
+            # error unless a user really wants us to add backward compat.
+            raise ValueError('Can not find parent MRI location. The volume '
+                             'source space may have been made with an MNE '
+                             'version that is too old (<= 2.7.3). Consider '
+                             'updating and regenerating the inverse.')
+
+        mri = parent_mri[0]
+        for d in mri['directory']:
+            if d.kind == FIFF.FIFF_COORD_TRANS:
+                tag = read_tag(fid, d.pos)
+                trans = tag.data
+                if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
+                    res['vox_mri_t'] = tag.data
+                if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
+                    res['mri_ras_t'] = tag.data
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
+        if tag is not None:
+            res['interpolator'] = tag.data
+        else:
+            logger.info("Interpolation matrix for MRI not found.")
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
+        if tag is not None:
+            res['mri_file'] = tag.data
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
+        if tag is not None:
+            res['mri_width'] = int(tag.data)
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
+        if tag is not None:
+            res['mri_height'] = int(tag.data)
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
+        if tag is not None:
+            res['mri_depth'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+    if tag is None:
+        raise ValueError('Number of vertices not found')
+
+    res['np'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF_BEM_SURF_NTRI)
+    if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
+        if tag is None:
+            res['ntri'] = 0
+        else:
+            res['ntri'] = int(tag.data)
+    else:
+        res['ntri'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        raise ValueError('Coordinate frame information not found')
+
+    res['coord_frame'] = tag.data
+
+    #   Vertices, normals, and triangles
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
+    if tag is None:
+        raise ValueError('Vertex data not found')
+
+    res['rr'] = tag.data.astype(np.float)  # double precision for mayavi
+    if res['rr'].shape[0] != res['np']:
+        raise ValueError('Vertex information is incorrect')
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
+    if tag is None:
+        raise ValueError('Vertex normals not found')
+
+    res['nn'] = tag.data
+    if res['nn'].shape[0] != res['np']:
+        raise ValueError('Vertex normal information is incorrect')
+
+    if res['ntri'] > 0:
+        tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
+        if tag is None:
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
+            if tag is None:
+                raise ValueError('Triangulation not found')
+            else:
+                res['tris'] = tag.data - 1  # index start at 0 in Python
+        else:
+            res['tris'] = tag.data - 1  # index start at 0 in Python
+
+        if res['tris'].shape[0] != res['ntri']:
+            raise ValueError('Triangulation information is incorrect')
+    else:
+        res['tris'] = None
+
+    #   Which vertices are active
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
+    if tag is None:
+        res['nuse'] = 0
+        res['inuse'] = np.zeros(res['nuse'], dtype=np.int)
+        res['vertno'] = None
+    else:
+        res['nuse'] = int(tag.data)
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
+        if tag is None:
+            raise ValueError('Source selection information missing')
+
+        res['inuse'] = tag.data.astype(np.int).T
+        if len(res['inuse']) != res['np']:
+            raise ValueError('Incorrect number of entries in source space '
+                             'selection')
+
+        res['vertno'] = np.where(res['inuse'])[0]
+
+    #   Use triangulation
+    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
+    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
+    if tag1 is None or tag2 is None:
+        res['nuse_tri'] = 0
+        res['use_tris'] = None
+    else:
+        res['nuse_tri'] = tag1.data
+        res['use_tris'] = tag2.data - 1  # index start at 0 in Python
+
+    #   Patch-related information
+    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
+    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
+
+    if tag1 is None or tag2 is None:
+        res['nearest'] = None
+        res['nearest_dist'] = None
+    else:
+        res['nearest'] = tag1.data
+        res['nearest_dist'] = tag2.data.T
+
+    _add_patch_info(res)
+
+    #   Distances
+    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
+    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
+    if tag1 is None or tag2 is None:
+        res['dist'] = None
+        res['dist_limit'] = None
+    else:
+        res['dist'] = tag1.data
+        res['dist_limit'] = tag2.data
+        #   Add the upper triangle
+        res['dist'] = res['dist'] + res['dist'].T
+    if res['dist'] is not None:
+        logger.info('    Distance information added...')
+
+    tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
+    if tag is not None:
+        res['subject_his_id'] = tag.data
+
+    return res
+
+
+@verbose
+def complete_source_space_info(this, verbose=None):
+    """Add more info on surface
+    """
+    #   Main triangulation
+    logger.info('    Completing triangulation info...')
+    this['tri_area'] = np.zeros(this['ntri'])
+    r1 = this['rr'][this['tris'][:, 0], :]
+    r2 = this['rr'][this['tris'][:, 1], :]
+    r3 = this['rr'][this['tris'][:, 2], :]
+    this['tri_cent'] = (r1 + r2 + r3) / 3.0
+    this['tri_nn'] = np.cross((r2 - r1), (r3 - r1))
+    size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
+    this['tri_area'] = size / 2.0
+    this['tri_nn'] /= size[:, None]
+    logger.info('[done]')
+
+    #   Selected triangles
+    logger.info('    Completing selection triangulation info...')
+    if this['nuse_tri'] > 0:
+        r1 = this['rr'][this['use_tris'][:, 0], :]
+        r2 = this['rr'][this['use_tris'][:, 1], :]
+        r3 = this['rr'][this['use_tris'][:, 2], :]
+        this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
+        this['use_tri_nn'] = np.cross((r2 - r1), (r3 - r1))
+        this['use_tri_area'] = np.sqrt(np.sum(this['use_tri_nn'] ** 2, axis=1)
+                                       ) / 2.0
+    logger.info('[done]')
+
+
+def find_source_space_hemi(src):
+    """Return the hemisphere id for a source space
+
+    Parameters
+    ----------
+    src : dict
+        The source space to investigate
+
+    Returns
+    -------
+    hemi : int
+        Deduced hemisphere id
+    """
+    xave = src['rr'][:, 0].sum()
+
+    if xave < 0:
+        hemi = int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
+    else:
+        hemi = int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
+
+    return hemi
+
+
+def label_src_vertno_sel(label, src):
+    """ Find vertex numbers and indices from label
+
+    Parameters
+    ----------
+    label : Label
+        Source space label
+    src : dict
+        Source space
+
+    Returns
+    -------
+    vertno : list of length 2
+        Vertex numbers for lh and rh
+    src_sel : array of int (len(src_sel) = len(vertno[0]) + len(vertno[1]))
+        Indices of the selected vertices in source space
+    """
+    if src[0]['type'] != 'surf':
+        raise Exception('Labels are only supported with surface source '
+                        'spaces')
+
+    vertno = [src[0]['vertno'], src[1]['vertno']]
+
+    if label.hemi == 'lh':
+        vertno_sel = np.intersect1d(vertno[0], label.vertices)
+        src_sel = np.searchsorted(vertno[0], vertno_sel)
+        vertno[0] = vertno_sel
+        vertno[1] = np.array([])
+    elif label.hemi == 'rh':
+        vertno_sel = np.intersect1d(vertno[1], label.vertices)
+        src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
+        vertno[0] = np.array([])
+        vertno[1] = vertno_sel
+    elif label.hemi == 'both':
+        vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
+        src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
+        vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
+        src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
+        src_sel = np.hstack((src_sel_lh, src_sel_rh))
+        vertno = [vertno_sel_lh, vertno_sel_rh]
+    else:
+        raise Exception("Unknown hemisphere type")
+
+    return vertno, src_sel
+
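+# Editor's note: a worked toy example (not upstream code). If the left
+# hemisphere uses vertices [2, 5, 9] and a 'lh' label covers vertices
+# [5, 9, 14], then vertno == [array([5, 9]), array([])] and
+# src_sel == array([1, 2]) (positions of 5 and 9 within [2, 5, 9]).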
+
+def _get_vertno(src):
+    return [s['vertno'] for s in src]
+
+
+###############################################################################
+# Write routines
+
+@verbose
+def write_source_spaces_to_fid(fid, src, verbose=None):
+    """Write the source spaces to a FIF file
+
+    Parameters
+    ----------
+    fid : file descriptor
+        An open file descriptor.
+    src : list
+        The list of source spaces.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    for s in src:
+        logger.info('    Write a source space...')
+        start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
+        _write_one_source_space(fid, s, verbose)
+        end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
+        logger.info('    [done]')
+    logger.info('    %d source spaces written' % len(src))
+
+
+@verbose
+def write_source_spaces(fname, src, verbose=None):
+    """Write source spaces to a file
+
+    Parameters
+    ----------
+    fname : str
+        File to write.
+    src : SourceSpaces
+        The source spaces (as returned by read_source_spaces).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MNE)
+
+    if src.info:
+        start_block(fid, FIFF.FIFFB_MNE_ENV)
+
+        write_id(fid, FIFF.FIFF_BLOCK_ID)
+
+        data = src.info.get('working_dir', None)
+        if data:
+            write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
+        data = src.info.get('command_line', None)
+        if data:
+            write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
+
+        end_block(fid, FIFF.FIFFB_MNE_ENV)
+
+    write_source_spaces_to_fid(fid, src, verbose)
+
+    end_block(fid, FIFF.FIFFB_MNE)
+    end_file(fid)
+
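+# Editor's note: a round-trip sketch with hypothetical file names (not
+# upstream code):
+#
+#     >>> src = read_source_spaces('sample-oct-6-src.fif')
+#     >>> write_source_spaces('sample-oct-6-copy-src.fif', src)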
+
+def _write_one_source_space(fid, this, verbose=None):
+    """Write one source space"""
+    if this['type'] == 'surf':
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, 1)
+    elif this['type'] == 'vol':
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, 2)
+    else:
+        raise ValueError('Unknown source space type (%s)' % this['type'])
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
+
+    data = this.get('subject_his_id', None)
+    if data:
+        write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
+    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
+
+    if this['type'] == 'vol':
+
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
+        write_coord_trans(fid, this['src_mri_t'])
+
+        start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        write_coord_trans(fid, this['vox_mri_t'])
+
+        write_coord_trans(fid, this['mri_ras_t'])
+
+        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
+                               this['interpolator'])
+
+        if 'mri_file' in this and this['mri_file'] is not None:
+            write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
+                         this['mri_file'])
+
+        write_int(fid, FIFF.FIFF_MRI_WIDTH, this['mri_width'])
+        write_int(fid, FIFF.FIFF_MRI_HEIGHT, this['mri_height'])
+        write_int(fid, FIFF.FIFF_MRI_DEPTH, this['mri_depth'])
+
+        end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
+    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
+    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
+
+    #   Which vertices are active
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
+
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
+    if this['ntri'] > 0:
+        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
+                         this['tris'] + 1)
+
+    if this['type'] != 'vol' and this['use_tris'] is not None:
+        #   Use triangulation
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
+        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
+                         this['use_tris'] + 1)
+
+    #   Patch-related information
+    if this['nearest'] is not None:
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
+        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
+                           this['nearest_dist'])
+
+    #   Distances
+    if this['dist'] is not None:
+        # Save only upper triangular portion of the matrix
+        dists = this['dist'].copy()
+        dists = sparse.triu(dists, format=dists.format)
+        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
+        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
+                           this['dist_limit'])
+
+
+@verbose
+def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
+                  verbose=None):
+    """Convert the array of vertices for a hemisphere to MNI coordinates
+
+    Parameters
+    ----------
+    vertices : int, or list of int
+        Vertex number(s) to convert
+    hemis : int, or list of int
+        Hemisphere(s) the vertices belong to
+    subject : string
+        Name of the subject to load surfaces from.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    mode : string | None
+        Either 'nibabel' or 'freesurfer' for the software to use to
+        obtain the transforms. If None, 'nibabel' is tried first, falling
+        back to 'freesurfer' if it fails. Results should be equivalent with
+        either option, but nibabel may be quicker (and more pythonic).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    coordinates : n_vertices x 3 array of float
+        The MNI coordinates (in mm) of the vertices
+
+    Notes
+    -----
+    This function requires either nibabel (in Python) or Freesurfer
+    (with utility "mri_info") to be correctly installed.
+    """
+    if not has_freesurfer() and not has_nibabel():
+        raise RuntimeError('NiBabel (Python) or Freesurfer (Unix) must be '
+                           'correctly installed and accessible from Python')
+
+    if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray):
+        vertices = [vertices]
+
+    if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray):
+        hemis = [hemis] * len(vertices)
+
+    if not len(hemis) == len(vertices):
+        raise ValueError('hemi and vertices must match in length')
+
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
+             for h in ['lh', 'rh']]
+    rr = [read_surface(s)[0] for s in surfs]
+
+    # take point locations in RAS space and convert to MNI coordinates
+    xfm = _read_talxfm(subject, subjects_dir, mode)
+    data = np.array([np.concatenate((rr[h][v, :], [1]))
+                     for h, v in zip(hemis, vertices)]).T
+    return np.dot(xfm, data)[:3, :].T.copy()
+
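+# Editor's note: a usage sketch (not upstream code), assuming the standard
+# 'sample' FreeSurfer subject is available; converts one left-hemisphere
+# vertex (hemi 0) to MNI coordinates in mm:
+#
+#     >>> coords = vertex_to_mni(6092, 0, 'sample')
+#     >>> coords.shape
+#     (1, 3)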
+
+@verbose
+def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
+    """Read MNI transform from FreeSurfer talairach.xfm file
+
+    Adapted from freesurfer m-files. Altered to deal with Norig
+    and Torig correctly.
+    """
+    if mode is not None and mode not in ['nibabel', 'freesurfer']:
+        raise ValueError('mode must be "nibabel" or "freesurfer"')
+    fname = op.join(subjects_dir, subject, 'mri', 'transforms',
+                    'talairach.xfm')
+    with open(fname, 'r') as fid:
+        logger.debug('Reading FreeSurfer talairach.xfm file:\n%s' % fname)
+
+        # read lines until we get the string 'Linear_Transform', which precedes
+        # the data transformation matrix
+        got_it = False
+        comp = 'Linear_Transform'
+        for line in fid:
+            if line[:len(comp)] == comp:
+                # we have the right line, so don't read any more
+                got_it = True
+                break
+
+        if got_it:
+            xfm = list()
+            # read the transformation matrix (3x4)
+            for ii, line in enumerate(fid):
+                digs = [float(s) for s in line.strip('\n;').split()]
+                xfm.append(digs)
+                if ii == 2:
+                    break
+            xfm.append([0., 0., 0., 1.])
+            xfm = np.array(xfm, dtype=float)
+        else:
+            raise ValueError('failed to find \'Linear_Transform\' string in '
+                             'xfm file:\n%s' % fname)
+
+    # now get Norig and Torig
+    path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
+
+    try:
+        import nibabel as nib
+        use_nibabel = True
+    except ImportError:
+        use_nibabel = False
+        if mode == 'nibabel':
+            raise ImportError('Tried to import nibabel but failed, try using '
+                              "mode=None or mode='freesurfer'")
+
+    # note that if mode == None, then we default to using nibabel
+    if use_nibabel is True and mode == 'freesurfer':
+        use_nibabel = False
+    if use_nibabel:
+        img = nib.load(path)
+        hdr = img.get_header()
+        n_orig = hdr.get_vox2ras()
+        ds = np.array(hdr.get_zooms())
+        ns = (np.array(hdr.get_data_shape()[:3]) * ds) / 2.0
+        t_orig = np.array([[-ds[0], 0, 0, ns[0]],
+                           [0, 0, ds[2], -ns[2]],
+                           [0, -ds[1], 0, ns[1]],
+                           [0, 0, 0, 1]], dtype=float)
+        nt_orig = [n_orig, t_orig]
+    else:
+        nt_orig = list()
+        for conv in ['--vox2ras', '--vox2ras-tkr']:
+            stdout, stderr = run_subprocess(['mri_info', conv, path])
+            stdout = np.fromstring(stdout, sep=' ').astype(float)
+            if not stdout.size == 16:
+                raise ValueError('Could not parse Freesurfer mri_info output')
+            nt_orig.append(stdout.reshape(4, 4))
+    xfm = np.dot(xfm, np.dot(nt_orig[0], linalg.inv(nt_orig[1])))
+    return xfm
diff --git a/mne/stats/__init__.py b/mne/stats/__init__.py
new file mode 100644
index 0000000..08f4323
--- /dev/null
+++ b/mne/stats/__init__.py
@@ -0,0 +1,12 @@
+"""Functions for statistical analysis"""
+
+from .parametric import f_threshold_twoway_rm, f_twoway_rm
+from .permutations import permutation_t_test
+from .cluster_level import permutation_cluster_test, \
+                           permutation_cluster_1samp_test, \
+                           spatio_temporal_cluster_1samp_test, \
+                           spatio_temporal_cluster_test, \
+                           _st_mask_from_s_inds, \
+                           ttest_1samp_no_p,\
+                           summarize_clusters_stc
+from .multi_comp import fdr_correction, bonferroni_correction
diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py
new file mode 100755
index 0000000..5ed2776
--- /dev/null
+++ b/mne/stats/cluster_level.py
@@ -0,0 +1,1414 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Authors: Thorsten Kranz <thorstenkranz at gmail.com>
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: Simplified BSD
+
+import numpy as np
+from scipy import stats, sparse, ndimage
+import warnings
+
+import logging
+logger = logging.getLogger('mne')
+
+from .parametric import f_oneway
+from ..parallel import parallel_func, check_n_jobs
+from ..utils import split_list
+from ..fixes import in1d, unravel_index
+from .. import SourceEstimate
+from .. import verbose
+
+
+def _get_clusters_spatial(s, neighbors):
+    """Helper function to form spatial clusters using neighbor lists
+
+    This is equivalent to _get_components with n_times = 1, with a properly
+    reconfigured connectivity matrix (formed as "neighbors" list)
+    """
+    # s is a vector of spatial indices that are significant, like:
+    #     s = np.where(x_in)[0]
+    # for x_in representing a single time-instant
+    r = np.ones(s.shape, dtype=bool)
+    clusters = list()
+    next_ind = 0 if s.size > 0 else None
+    while next_ind is not None:
+        # put first point in a cluster, adjust remaining
+        t_inds = [next_ind]
+        r[next_ind] = False
+        icount = 1  # count of nodes in the current cluster
+        while icount <= len(t_inds):
+            ind = t_inds[icount - 1]
+            # look across other vertices
+            buddies = np.where(r)[0]
+            buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
+                                   assume_unique=True)]
+            t_inds += buddies.tolist()
+            r[buddies] = False
+            icount += 1
+        # this is equivalent to np.where(r)[0] for these purposes, but it's
+        # a little bit faster. Unfortunately there's no way to tell numpy
+        # just to find the first instance (to save checking every one):
+        next_ind = np.argmax(r)
+        if next_ind == 0:
+            next_ind = None
+        clusters.append(s[t_inds])
+    return clusters
+
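+# Editor's note: a worked toy example (not upstream code). With a neighbor
+# list describing two disconnected pairs, 0-1 and 2-3, the significant
+# vertices [0, 1, 3] split into two clusters:
+#
+#     >>> import numpy as np
+#     >>> neighbors = [np.array([1]), np.array([0]),
+#     ...              np.array([3]), np.array([2])]
+#     >>> _get_clusters_spatial(np.array([0, 1, 3]), neighbors)
+#     [array([0, 1]), array([3])]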
+
+def _reassign(check, clusters, base, num):
+    """Helper function to reassign cluster numbers"""
+    # reconfigure check matrix
+    check[check == num] = base
+    # concatenate new values into clusters array
+    clusters[base - 1] = np.concatenate((clusters[base - 1],
+                                         clusters[num - 1]))
+    clusters[num - 1] = np.array([], dtype=int)
+
+
+def _get_clusters_st_1step(keepers, neighbors):
+    """Directly calculate connectivity based on knowledge that time points are
+    only connected to adjacent neighbors for data organized as time x space.
+
+    The runtime of this algorithm increases linearly with the number of time
+    points, compared to quadratically for the standard (graph) algorithm.
+
+    This algorithm creates clusters for each time point using a method more
+    efficient than the standard graph method (but otherwise equivalent), then
+    combines these clusters across time points in a reasonable way."""
+    n_src = len(neighbors)
+    n_times = len(keepers)
+    # start cluster numbering at 1 for diffing convenience
+    enum_offset = 1
+    check = np.zeros((n_times, n_src), dtype=int)
+    clusters = list()
+    for ii, k in enumerate(keepers):
+        c = _get_clusters_spatial(k, neighbors)
+        for ci, cl in enumerate(c):
+            check[ii, cl] = ci + enum_offset
+        enum_offset += len(c)
+        # give them the correct offsets
+        c = [cl + ii * n_src for cl in c]
+        clusters += c
+
+    # now that each cluster has been assigned a unique number, combine them
+    # by going through each time point
+    for check1, check2, k in zip(check[:-1], check[1:], keepers[:-1]):
+        # go through each one that needs reassignment
+        inds = k[check2[k] - check1[k] > 0]
+        check1_d = check1[inds]
+        n = check2[inds]
+        nexts = np.unique(n)
+        for num in nexts:
+            prevs = check1_d[n == num]
+            base = np.min(prevs)
+            for pr in np.unique(prevs[prevs != base]):
+                _reassign(check1, clusters, base, pr)
+            # reassign values
+            _reassign(check2, clusters, base, num)
+    # clean up clusters
+    clusters = [cl for cl in clusters if len(cl) > 0]
+    return clusters
+
+
+def _get_clusters_st_multistep(keepers, neighbors, max_step=1):
+    """Directly calculate connectivity based on knowledge that time points are
+    only connected to adjacent neighbors for data organized as time x space.
+
+    The runtime of this algorithm increases linearly with the number of time
+    points, compared to quadratically for the standard (graph) algorithm.
+    n_src = len(neighbors)
+    n_times = len(keepers)
+    t_border = [0]
+    for ki, k in enumerate(keepers):
+        keepers[ki] = k + ki * n_src
+        t_border += [t_border[ki] + len(k)]
+    t_border = np.array(t_border)[:, np.newaxis]
+    keepers = np.concatenate(keepers)
+    v = keepers
+    t, s = divmod(v, n_src)
+
+    r = np.ones(t.shape, dtype=bool)
+    clusters = list()
+    next_ind = 0
+    inds = np.arange(t_border[0], t_border[n_times])
+    if s.size > 0:
+        while next_ind is not None:
+            # put first point in a cluster, adjust remaining
+            t_inds = [next_ind]
+            r[next_ind] = False
+            icount = 1  # count of nodes in the current cluster
+            # look for significant values at the next time point,
+            # same sensor, not placed yet, and add those
+            while icount <= len(t_inds):
+                ind = t_inds[icount - 1]
+                selves = inds[t_border[max(t[ind] - max_step, 0)]:
+                              t_border[min(t[ind] + max_step + 1, n_times)]]
+                selves = selves[r[selves]]
+                selves = selves[s[ind] == s[selves]]
+
+                # look at current time point across other vertices
+                buddies = inds[t_border[t[ind]]:t_border[t[ind] + 1]]
+                buddies = buddies[r[buddies]]
+                buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
+                                       assume_unique=True)]
+                buddies = np.concatenate((selves, buddies))
+                t_inds += buddies.tolist()
+                r[buddies] = False
+                icount += 1
+            # this is equivalent to np.where(r)[0] for these purposes, but it's
+            # a little bit faster. Unfortunately there's no way to tell numpy
+            # just to find the first instance (to save checking every one):
+            next_ind = np.argmax(r)
+            if next_ind == 0:
+                next_ind = None
+            clusters.append(v[t_inds])
+
+    return clusters
+
+
+def _get_clusters_st(x_in, neighbors, max_step=1):
+    """Helper function to choose the most efficient version"""
+    n_src = len(neighbors)
+    n_times = x_in.size / n_src
+    cl_goods = np.where(x_in)[0]
+    if len(cl_goods) > 0:
+        keepers = [np.array([], dtype=int)] * n_times
+        row, col = unravel_index(cl_goods, (n_times, n_src))
+        if isinstance(row, int):
+            row  = [row]
+            col  = [col]
+            lims = [0]
+        else:
+            order = np.argsort(row)
+            row = row[order]
+            col = col[order]
+            lims = [0] + (np.where(np.diff(row) > 0)[0]
+                          + 1).tolist() + [len(row)]
+
+        for start, end in zip(lims[:-1], lims[1:]):
+            keepers[row[start]] = np.sort(col[start:end])
+        if max_step == 1:
+            return _get_clusters_st_1step(keepers, neighbors)
+        else:
+            return _get_clusters_st_multistep(keepers, neighbors,
+                                              max_step)
+    else:
+        return []
+
+
+def _get_components(x_in, connectivity, return_list=True):
+    """get connected components from a mask and a connectivity matrix"""
+    try:
+        from sklearn.utils._csgraph import cs_graph_components
+    except ImportError:
+        try:
+            from scikits.learn.utils._csgraph import cs_graph_components
+        except ImportError:
+            # in theory we might be able to shoehorn this into using
+            # _get_clusters_spatial if we transform connectivity into
+            # a neighbor list, and it might end up being faster anyway,
+            # but for now:
+            raise ValueError('scikit-learn must be installed')
+
+    mask = np.logical_and(x_in[connectivity.row], x_in[connectivity.col])
+    data = connectivity.data[mask]
+    row = connectivity.row[mask]
+    col = connectivity.col[mask]
+    shape = connectivity.shape
+    idx = np.where(x_in)[0]
+    row = np.concatenate((row, idx))
+    col = np.concatenate((col, idx))
+    data = np.concatenate((data, np.ones(len(idx), dtype=data.dtype)))
+    connectivity = sparse.coo_matrix((data, (row, col)), shape=shape)
+    _, components = cs_graph_components(connectivity)
+    if return_list:
+        labels = np.unique(components)
+        clusters = list()
+        for l in labels:
+            c = np.where(components == l)[0]
+            if np.any(x_in[c]):
+                clusters.append(c)
+        # logger.info("-- number of components : %d"
+        #             % np.unique(components).size)
+        return clusters
+    else:
+        return components
+
+
+def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
+                   include=None, partitions=None, t_power=1, show_info=False):
+    """For a given 1d-array (test statistic), find all clusters which
+    are above/below a certain threshold. Returns a list of 2-tuples.
+
+    When doing a two-tailed test (tail == 0), only points with the same
+    sign will be clustered together.
+
+    Parameters
+    ----------
+    x : 1D array
+        Data
+    threshold : float | dict
+        Where to threshold the statistic. Should be negative for tail == -1,
+        and positive for tail == 0 or 1. Can also be a dict for
+        threshold-free cluster enhancement.
+    tail : -1 | 0 | 1
+        Type of comparison
+    connectivity : sparse matrix in COO format, None, or list
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        If connectivity is a list, it is assumed that each entry stores the
+        indices of the spatial neighbors in a spatio-temporal dataset x.
+        Default is None, i.e., a regular lattice connectivity.
+    max_step : int
+        If connectivity is a list, this defines the maximal number of steps
+        between vertices along the second dimension (typically time) to be
+        considered connected.
+    include : 1D bool array or None
+        Mask to apply to the data of points to cluster. If None, all points
+        are used.
+    partitions : array of int or None
+        An array (same size as x) of integers indicating which points belong
+        to each partition.
+    t_power : float
+        Power to raise the statistical values (usually t-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    show_info : bool
+        If True, display information about thresholds used (for TFCE). Should
+        only be done for the standard permutation.
+
+    Returns
+    -------
+    clusters : list of slices or list of arrays (boolean masks)
+        We use slices for 1D signals and boolean masks for
+        multidimensional arrays.
+    sums : array
+        Sum of x values in clusters.
+    """
+    if tail not in [-1, 0, 1]:
+        raise ValueError('invalid tail parameter')
+
+    x = np.asanyarray(x)
+
+    if not np.isscalar(threshold):
+        if not isinstance(threshold, dict):
+            raise TypeError('threshold must be a number, or a dict for '
+                            'threshold-free cluster enhancement')
+        if not all([key in threshold for key in ['start', 'step']]):
+            raise KeyError('threshold, if dict, must have at least '
+                           '"start" and "step"')
+        tfce = True
+        if tail == -1:
+            if threshold['start'] > 0:
+                raise ValueError('threshold["start"] must be <= 0 for '
+                                 'tail == -1')
+            if threshold['step'] >= 0:
+                raise ValueError('threshold["step"] must be < 0 for '
+                                 'tail == -1')
+            stop = np.min(x)
+        elif tail == 1:
+            stop = np.max(x)
+        else:  # tail == 0
+            stop = np.max(np.abs(x))
+        thresholds = np.arange(threshold['start'], stop,
+                               threshold['step'], float)
+        h_power = threshold.get('h_power', 2)
+        e_power = threshold.get('e_power', 0.5)
+        if show_info is True:
+            if len(thresholds) == 0:
+                txt = ('threshold["start"] (%s) is more extreme than '
+                       'data statistics with most extreme value %s'
+                       % (threshold['start'], stop))
+                logger.warn(txt)
+                warnings.warn(txt)
+            else:
+                logger.info('Using %d thresholds from %0.2f to %0.2f for TFCE '
+                            'computation (h_power=%0.2f, e_power=%0.2f)'
+                            % (len(thresholds), thresholds[0], thresholds[-1],
+                               h_power, e_power))
+        scores = np.zeros(x.size)
+    else:
+        thresholds = [threshold]
+        tfce = False
+
+    # include all points by default
+    if include is None:
+        include = np.ones(x.shape, dtype=bool)
+
+    if not np.all(np.diff(thresholds) > 0):
+        raise RuntimeError('Threshold misconfiguration, must be monotonically'
+                           ' increasing')
+
+    # set these here just in case thresholds == []
+    clusters = list()
+    sums = np.empty(0)
+    for ti, thresh in enumerate(thresholds):
+        # these need to be reset on each run
+        clusters = list()
+        sums = np.empty(0)
+        if tail == 0:
+            x_ins = [np.logical_and(x > thresh, include),
+                     np.logical_and(x < -thresh, include)]
+        elif tail == -1:
+            x_ins = [np.logical_and(x < thresh, include)]
+        else:  # tail == 1
+            x_ins = [np.logical_and(x > thresh, include)]
+        # loop over tails
+        for x_in in x_ins:
+            if np.any(x_in):
+                out = _find_clusters_1dir_parts(x, x_in, connectivity,
+                                                max_step, partitions, t_power)
+                clusters += out[0]
+                sums = np.concatenate((sums, out[1]))
+        if tfce is True:
+            # the TFCE score of each point is the sum over thresholds of
+            # h ** h_power * e ** e_power, where h is the threshold height
+            # and e the extent of the cluster supporting that point
+            if ti == 0:
+                h = abs(thresh)
+            else:
+                h = abs(thresh - thresholds[ti - 1])
+            h = h ** h_power
+            for c in clusters:
+                # triage based on cluster storage type
+                if isinstance(c, slice):
+                    len_c = c.stop - c.start
+                elif c.dtype == bool:
+                    len_c = np.sum(c)
+                else:
+                    len_c = len(c)
+                scores[c] += h * (len_c ** e_power)
+    if tfce is True:
+        # each point gets treated independently
+        clusters = np.arange(x.size)
+        if connectivity is None:
+            if x.ndim == 1:
+                # slices
+                clusters = [slice(c, c + 1) for c in clusters]
+            else:
+                # boolean masks (raveled)
+                clusters = [(clusters == ii).ravel()
+                            for ii in range(len(clusters))]
+        else:
+            clusters = [np.array([c]) for c in clusters]
+        sums = scores
+    return clusters, sums
+
+
+def _find_clusters_1dir_parts(x, x_in, connectivity, max_step, partitions,
+                              t_power):
+    """Deal with partitions, and pass the work to _find_clusters_1dir
+    """
+    if partitions is None:
+        clusters, sums = _find_clusters_1dir(x, x_in, connectivity, max_step,
+                                             t_power)
+    else:
+        # cluster each partition separately
+        clusters = list()
+        sums = list()
+        for p in range(np.max(partitions) + 1):
+            x_i = np.logical_and(x_in, partitions == p)
+            out = _find_clusters_1dir(x, x_i, connectivity, max_step, t_power)
+            clusters += out[0]
+            sums.append(out[1])
+        sums = np.concatenate(sums)
+    return clusters, sums
+
+
+def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power):
+    """Actually call the clustering algorithm"""
+    if connectivity is None:
+        labels, n_labels = ndimage.label(x_in)
+
+        if x.ndim == 1:
+            # slices
+            clusters = ndimage.find_objects(labels, n_labels)
+            if len(clusters) == 0:
+                sums = []
+            else:
+                if t_power == 1:
+                    sums = ndimage.measurements.sum(x, labels,
+                                                  index=range(1, n_labels + 1))
+                else:
+                    sums = ndimage.measurements.sum(np.sign(x) *
+                                                  np.abs(x) ** t_power, labels,
+                                                  index=range(1, n_labels + 1))
+        else:
+            # boolean masks (raveled)
+            clusters = list()
+            sums = np.empty(n_labels)
+            for l in range(1, n_labels + 1):
+                c = labels == l
+                clusters.append(c.ravel())
+                if t_power == 1:
+                    sums[l - 1] = np.sum(x[c])
+                else:
+                    sums[l - 1] = np.sum(np.sign(x[c]) *
+                                         np.abs(x[c]) ** t_power)
+    else:
+        if x.ndim > 1:
+            raise Exception("Data should be 1D when using a connectivity "
+                            "to define clusters.")
+        if isinstance(connectivity, sparse.spmatrix):
+            clusters = _get_components(x_in, connectivity)
+        elif isinstance(connectivity, list):  # use temporal adjacency
+            clusters = _get_clusters_st(x_in, connectivity, max_step)
+        else:
+            raise ValueError('Connectivity must be a sparse matrix or list')
+        if t_power == 1:
+            sums = np.array([np.sum(x[c]) for c in clusters])
+        else:
+            sums = np.array([np.sum(np.sign(x[c]) * np.abs(x[c]) ** t_power)
+                            for c in clusters])
+
+    return clusters, np.atleast_1d(sums)
+
+
+def _cluster_indices_to_mask(components, n_tot):
+    """Convert to the old format of clusters, which were bool arrays"""
+    for ci, c in enumerate(components):
+        components[ci] = np.zeros((n_tot), dtype=bool)
+        components[ci][c] = True
+    return components
+
+
+def _cluster_mask_to_indices(components):
+    """Convert to the old format of clusters, which were bool arrays"""
+    for ci, c in enumerate(components):
+        if not isinstance(c, slice):
+            components[ci] = np.where(c)[0]
+    return components
+
+
+def _pval_from_histogram(T, H0, tail):
+    """Get p-values from stats values given an H0 distribution
+
+    For each observed statistic, compute a p-value as the proportion of
+    statistics in the surrogate (H0) distribution that are at least as
+    extreme.
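+
+    For illustration (a hypothetical toy case): with 99 surrogate values
+    H0 = [1, 2, ..., 99], an observed t = 95 and tail == 1, five surrogate
+    values are >= t, so pval = (5 + 1) / (99 + 1) = 0.06.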
+    """
+    if tail not in (-1, 0, 1):
+        raise ValueError('invalid tail parameter')
+
+    # count how many surrogate stats are at least as extreme
+    if tail == -1:  # lower tail
+        pval = np.array([np.sum(H0 <= t) for t in T])
+    elif tail == 1:  # upper tail
+        pval = np.array([np.sum(H0 >= t) for t in T])
+    else:  # both tails
+        pval = np.array([np.sum(abs(H0) >= abs(t)) for t in T])
+
+    # the observed data count as one resampling
+    pval = (pval + 1.0) / (H0.size + 1.0)
+    return pval
+
+
+def _setup_connectivity(connectivity, n_vertices, n_times):
+    if connectivity.shape[0] == n_vertices:  # use global algorithm
+        connectivity = connectivity.tocoo()
+        n_times = None
+    else:  # use temporal adjacency algorithm
+        if not round(n_vertices / float(connectivity.shape[0])) == n_times:
+            raise ValueError('connectivity must be of the correct size')
+        # we claim to only use upper triangular part... not true here
+        connectivity = (connectivity + connectivity.transpose()).tocsr()
+        connectivity = [connectivity.indices[connectivity.indptr[i]:
+                        connectivity.indptr[i + 1]] for i in
+                        range(len(connectivity.indptr) - 1)]
+    return connectivity
+
+
+def _do_permutations(X_full, slices, threshold, tail, connectivity, stat_fun,
+                     max_step, include, partitions, t_power, seeds,
+                     sample_shape):
+
+    n_samp = X_full.shape[0]
+
+    # allocate space for output
+    max_cluster_sums = np.empty(len(seeds), dtype=np.double)
+
+    for seed_idx, seed in enumerate(seeds):
+        # shuffle sample indices
+        rng = np.random.RandomState(seed)
+        idx_shuffled = np.arange(n_samp)
+        rng.shuffle(idx_shuffled)
+        idx_shuffle_list = [idx_shuffled[s] for s in slices]
+
+        # shuffle all data at once
+        X_shuffle_list = [X_full[idx, :] for idx in idx_shuffle_list]
+        T_obs_surr = stat_fun(*X_shuffle_list)
+
+        # The stat should have the same shape as the samples for no conn.
+        if connectivity is None:
+            T_obs_surr.shape = sample_shape
+
+        # Find cluster on randomized stats
+        out = _find_clusters(T_obs_surr, threshold=threshold, tail=tail,
+                             max_step=max_step, connectivity=connectivity,
+                             partitions=partitions, include=include,
+                             t_power=t_power)
+        perm_clusters_sums = out[1]
+
+        if len(perm_clusters_sums) > 0:
+            max_cluster_sums[seed_idx] = np.max(perm_clusters_sums)
+        else:
+            max_cluster_sums[seed_idx] = 0
+
+    return max_cluster_sums
+
+
+def _do_1samp_permutations(X, slices, threshold, tail, connectivity, stat_fun,
+                           max_step, include, partitions, t_power, seeds,
+                           sample_shape):
+    n_samp = X.shape[0]
+    assert slices is None  # should be None for the 1 sample case
+
+    # allocate space for output
+    max_cluster_sums = np.empty(len(seeds), dtype=np.double)
+
+    for seed_idx, seed in enumerate(seeds):
+        if isinstance(seed, np.ndarray):
+            # new surrogate data with specified sign flip
+            if not seed.size == n_samp:
+                raise ValueError('seed array must be n_samples long')
+            signs = 2 * seed[:, None].astype(int) - 1
+            if not np.all(np.equal(np.abs(signs), 1)):
+                raise ValueError('signs from seed must be +/- 1')
+        else:
+            rng = np.random.RandomState(seed)
+            # new surrogate data with random sign flip
+            signs = np.sign(0.5 - rng.rand(n_samp))
+            signs = signs[:, np.newaxis]
+
+        X *= signs
+
+        # Recompute statistic on randomized data
+        T_obs_surr = stat_fun(X)
+
+        # Set X back to previous state (trade memory efficiency for CPU use)
+        X *= signs
+
+        # The stat should have the same shape as the samples for no conn.
+        if connectivity is None:
+            T_obs_surr.shape = sample_shape
+
+        # Find cluster on randomized stats
+        out = _find_clusters(T_obs_surr, threshold=threshold, tail=tail,
+                             max_step=max_step, connectivity=connectivity,
+                             partitions=partitions, include=include,
+                             t_power=t_power)
+        perm_clusters_sums = out[1]
+        if len(perm_clusters_sums) > 0:
+            # get max with sign info
+            idx_max = np.argmax(np.abs(perm_clusters_sums))
+            max_cluster_sums[seed_idx] = perm_clusters_sums[idx_max]
+        else:
+            max_cluster_sums[seed_idx] = 0
+
+    return max_cluster_sums
+
+
+@verbose
+def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
+                              connectivity, verbose, n_jobs, seed, max_step,
+                              exclude, step_down_p, t_power, out_type,
+                              check_disjoint):
+    """ Aux Function
+
+    Note. X is required to be a list. Depending on the length of X
+    either a 1-sample t-test or an F-test / multi-sample permutation
+    scheme is used.
+    """
+    n_jobs = check_n_jobs(n_jobs)
+
+    if out_type not in ('mask', 'indices'):
+        raise ValueError('out_type must be either \'mask\' or \'indices\'')
+
+    # check dimensions for each group in X (a list at this stage).
+    X = [x[:, np.newaxis] if x.ndim == 1 else x for x in X]
+    n_samples = X[0].shape[0]
+    n_times = X[0].shape[1]
+
+    sample_shape = X[0].shape[1:]
+    for x in X:
+        if x.shape[1:] != sample_shape:
+            raise ValueError('All samples must have the same size')
+
+    # flatten the last dimensions in case the data is high dimensional
+    X = [np.reshape(x, (x.shape[0], -1)) for x in X]
+    n_tests = X[0].shape[1]
+
+    if connectivity is not None:
+        connectivity = _setup_connectivity(connectivity, n_tests, n_times)
+
+    if (exclude is not None) and not exclude.size == n_tests:
+        raise ValueError('exclude must be the same shape as X[0]')
+
+    # Step 1: Calculate T-stat for original data
+    # -------------------------------------------------------------
+    T_obs = stat_fun(*X)
+    logger.info('stat_fun(H1): min=%f max=%f' % (np.min(T_obs), np.max(T_obs)))
+
+    # The stat should have the same shape as the samples for no conn.
+    if connectivity is None:
+        T_obs.shape = sample_shape
+
+    if exclude is not None:
+        include = np.logical_not(exclude)
+    else:
+        include = None
+
+    # determine if connectivity itself can be separated into disjoint sets
+    if check_disjoint is True and connectivity is not None:
+        partitions = _get_partitions_from_connectivity(connectivity, n_times)
+    else:
+        partitions = None
+
+    out = _find_clusters(T_obs, threshold, tail, connectivity,
+                         max_step=max_step, include=include,
+                         partitions=partitions, t_power=t_power,
+                         show_info=True)
+    clusters, cluster_stats = out
+    # For TFCE, return the "adjusted" statistic instead of raw scores
+    if isinstance(threshold, dict):
+        T_obs = cluster_stats.copy()
+
+    logger.info('Found %d clusters' % len(clusters))
+
+    # convert clusters to old format
+    if connectivity is not None:
+        # our algorithms output lists of indices by default
+        if out_type == 'mask':
+            clusters = _cluster_indices_to_mask(clusters, n_tests)
+    else:
+        # ndimage outputs slices or boolean masks by default
+        if out_type == 'indices':
+            clusters = _cluster_mask_to_indices(clusters)
+
+    # The stat should have the same shape as the samples
+    T_obs.shape = sample_shape
+
+    if len(X) == 1:  # 1 sample test
+        do_perm_func = _do_1samp_permutations
+        X_full = X[0]
+        slices = None
+    else:
+        do_perm_func = _do_permutations
+        X_full = np.concatenate(X, axis=0)
+        n_samples_per_condition = [x.shape[0] for x in X]
+        splits_idx = np.append([0], np.cumsum(n_samples_per_condition))
+        slices = [slice(splits_idx[k], splits_idx[k + 1])
+                  for k in range(len(X))]
+
+    parallel, my_do_perm_func, _ = parallel_func(do_perm_func, n_jobs)
+
+    # Step 2: If we have some clusters, repeat process on permuted data
+    # -------------------------------------------------------------------
+    if len(clusters) > 0:
+        # check to see if we can do an exact test
+        # note for a two-tailed test, we can exploit symmetry to just do half
+        seeds = None
+        if len(X) == 1:
+            max_perms = 2 ** (n_samples - (tail == 0))
+            if max_perms <= n_permutations:
+                # omit first perm b/c accounted for in _pval_from_histogram,
+                # convert to binary array representation
+                seeds = [np.fromiter(np.binary_repr(s, n_samples), dtype=int)
+                         for s in range(1, max_perms)]
+
+        if seeds is None:
+            if seed is None:
+                seeds = [None] * n_permutations
+            else:
+                seeds = list(seed + np.arange(n_permutations))
+
+        # Step 3: repeat permutations for step-down-in-jumps procedure
+        smallest_p = -1
+        clusters_kept = 0
+        step_down_include = None  # start out including all points
+        step_down_iteration = 0
+        while smallest_p < step_down_p:
+            # actually do the clustering for each partition
+            if include is not None:
+                if step_down_include is not None:
+                    this_include = np.logical_and(include, step_down_include)
+                else:
+                    this_include = include
+            else:
+                this_include = step_down_include
+            H0 = parallel(my_do_perm_func(X_full, slices, threshold, tail,
+                          connectivity, stat_fun, max_step, this_include,
+                          partitions, t_power, s, sample_shape)
+                          for s in split_list(seeds, n_jobs))
+            H0 = np.concatenate(H0)
+            cluster_pv = _pval_from_histogram(cluster_stats, H0, tail)
+
+            # sort them by significance; for backward compat, don't sort the
+            # clusters themselves
+            inds = np.argsort(cluster_pv)
+            ord_pv = cluster_pv[inds]
+            smallest_p = ord_pv[clusters_kept]
+            step_down_include = np.ones(n_tests, dtype=bool)
+            under = np.where(cluster_pv < step_down_p)[0]
+            for ci in under:
+                step_down_include[clusters[ci]] = False
+            if connectivity is None:
+                step_down_include.shape = sample_shape
+            step_down_iteration += 1
+            if step_down_p > 0:
+                extra_text = 'additional ' if step_down_iteration > 1 else ''
+                new_count = under.size - clusters_kept
+                plural = '' if new_count == 1 else 's'
+                logger.info('Step-down-in-jumps iteration '
+                            '%i found %i %scluster%s'
+                            % (step_down_iteration, new_count,
+                               extra_text, plural))
+            clusters_kept += under.size
+
+        # The clusters should have the same shape as the samples
+        clusters = _reshape_clusters(clusters, sample_shape)
+        return T_obs, clusters, cluster_pv, H0
+    else:
+        return T_obs, np.array([]), np.array([]), np.array([])
+
+
+def ttest_1samp_no_p(X, sigma=0, method='relative'):
+    """t-test with variance adjustment and no p-value calculation
+
+    Parameters
+    ----------
+    X : array
+        Array to return t-values for.
+    sigma : float
+        The variance estimate will be given by "var + sigma * max(var)" or
+        "var + sigma", depending on "method". By default this is 0 (no
+        adjustment). See Notes for details.
+    method : str
+        If 'relative', the minimum variance estimate will be sigma * max(var),
+        if 'absolute' the minimum variance estimate will be sigma.
+
+    Returns
+    -------
+    t : array
+        t-values, potentially adjusted using the hat method.
+
+    Notes
+    -----
+    One can use the conversion:
+
+        threshold = -scipy.stats.distributions.t.ppf(p_thresh, n_samples - 1)
+
+    to convert a desired p-value threshold to t-value threshold. Don't forget
+    that for two-tailed tests, p_thresh in the above should be divided by 2.
+
+    To use the "hat" adjustment method, a value of sigma=1e-3 may be a
+    reasonable choice. See Ridgway et al. 2012 "The problem of low variance
+    voxels in statistical parametric mapping; a new hat avoids a 'haircut'",
+    NeuroImage. 2012 Feb 1;59(3):2131-41.
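+
+    A minimal usage sketch (synthetic data, illustrative only)::
+
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> X = np.random.randn(20, 10)  # 20 observations, 10 variables
+        >>> t_vals = ttest_1samp_no_p(X, sigma=1e-3)  # "hat" adjustment
+        >>> # two-tailed t threshold for p < 0.05, as described above
+        >>> thresh = -stats.distributions.t.ppf(0.05 / 2., len(X) - 1)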
+    """
+    if method not in ('absolute', 'relative'):
+        raise ValueError('method must be "absolute" or "relative", not %s'
+                         % method)
+    var = np.var(X, axis=0, ddof=1)
+    if sigma > 0:
+        limit = sigma * np.max(var) if method == 'relative' else sigma
+        var += limit
+    return np.mean(X, axis=0) / np.sqrt(var / X.shape[0])
+
+
+@verbose
+def permutation_cluster_test(X, threshold=None, n_permutations=1024,
+                             tail=0, stat_fun=f_oneway,
+                             connectivity=None, verbose=None, n_jobs=1,
+                             seed=None, max_step=1, exclude=None,
+                             step_down_p=0, t_power=1, out_type='mask',
+                             check_disjoint=False):
+    """Cluster-level statistical permutation test
+
+    For a list of 2d-arrays of data, e.g. power values, calculate some
+    statistics for each timepoint (dim 1) over groups.  Do a cluster
+    analysis with permutation test for calculating corrected p-values.
+    Randomized data are generated with random partitions of the data.
+
+    Parameters
+    ----------
+    X : list
+        List of 2d-arrays containing the data, dim 1: timepoints, dim 2:
+        elements of groups.
+    threshold : float | dict | None
+        If threshold is None, it will choose a t-threshold equivalent to
+        p < 0.05 for the given number of (within-subject) observations.
+        If a dict is used, then threshold-free cluster enhancement (TFCE)
+        will be used.
+    n_permutations : int
+        The number of permutations to compute.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the statistic is thresholded above threshold.
+        If tail is -1, the statistic is thresholded below threshold.
+        If tail is 0, the statistic is thresholded on both sides of
+        the distribution.
+    stat_fun : callable
+        function called to calculate statistics, must accept 1d-arrays as
+        arguments (default: scipy.stats.f_oneway).
+    connectivity : sparse matrix
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        Default is None, i.e., a regular lattice connectivity.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    exclude : boolean array or None
+        Mask to apply to the data to exclude certain points from clustering
+        (e.g., medial wall vertices). Should be the same shape as X. If None,
+        no points are excluded.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually f-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables.
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
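+
+    A minimal usage sketch (synthetic data, illustrative only)::
+
+        >>> import numpy as np
+        >>> cond1 = np.random.randn(20, 50)  # 20 observations x 50 times
+        >>> cond2 = np.random.randn(25, 50) + 0.5
+        >>> T_obs, clusters, cluster_pv, H0 = permutation_cluster_test(
+        ...     [cond1, cond2], n_permutations=100)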
+    """
+    if threshold is None:
+        p_thresh = 0.05 / (1 + (tail == 0))
+        n_samples_per_group = [len(x) for x in X]
+        threshold = stats.distributions.f.ppf(1. - p_thresh,
+                                              *n_samples_per_group)
+        if np.sign(tail) < 0:
+            threshold = -threshold
+
+    return _permutation_cluster_test(X=X, threshold=threshold,
+                        n_permutations=n_permutations,
+                        tail=tail, stat_fun=stat_fun,
+                        connectivity=connectivity, verbose=verbose,
+                        n_jobs=n_jobs, seed=seed, max_step=max_step,
+                        exclude=exclude, step_down_p=step_down_p,
+                        t_power=t_power, out_type=out_type,
+                        check_disjoint=check_disjoint)
+
+
+permutation_cluster_test.__test__ = False
+
+
+@verbose
+def permutation_cluster_1samp_test(X, threshold=None, n_permutations=1024,
+                                   tail=0, stat_fun=ttest_1samp_no_p,
+                                   connectivity=None, verbose=None, n_jobs=1,
+                                   seed=None, max_step=1, exclude=None,
+                                   step_down_p=0, t_power=1, out_type='mask',
+                                   check_disjoint=False):
+    """Non-parametric cluster-level 1 sample T-test
+
+    From an array of observations, e.g. signal amplitudes or power spectrum
+    estimates etc., calculate if the observed mean significantly deviates
+    from 0. The procedure uses a cluster analysis with permutation test
+    for calculating corrected p-values. Randomized data are generated with
+    random sign flips.
+
+    Parameters
+    ----------
+    X : array, shape=(n_samples, p, q) or (n_samples, p)
+        Array where the first dimension corresponds to the
+        samples (observations). X[k] can be a 1D or 2D array (time series
+        or TF image) associated to the kth observation.
+    threshold : float | dict | None
+        If threshold is None, it will choose a t-threshold equivalent to
+        p < 0.05 for the given number of (within-subject) observations.
+        If a dict is used, then threshold-free cluster enhancement (TFCE)
+        will be used.
+    n_permutations : int
+        The number of permutations to compute.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the statistic is thresholded above threshold.
+        If tail is -1, the statistic is thresholded below threshold.
+        If tail is 0, the statistic is thresholded on both sides of
+        the distribution.
+    stat_fun : function
+        Function used to compute the statistical map.
+    connectivity : sparse matrix or None
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        This matrix must be square with dimension (n_vertices * n_times) or
+        (n_vertices). Default is None, i.e., a regular lattice connectivity.
+        Use square n_vertices matrix for datasets with a large temporal
+        extent to save on memory and computation time.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+        Note that if n_permutations >= 2^(n_samples) [or (2^(n_samples-1)) for
+        two-tailed tests], this value will be ignored since an exact test
+        (full permutation test) will be performed.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    exclude : boolean array or None
+        Mask to apply to the data to exclude certain points from clustering
+        (e.g., medial wall vertices). Should be the same shape as X. If None,
+        no points are excluded.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually t-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
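+
+    A minimal usage sketch (synthetic data, illustrative only)::
+
+        >>> import numpy as np
+        >>> X = np.random.randn(15, 60) + 0.3  # 15 observations x 60 times
+        >>> T_obs, clusters, cluster_pv, H0 = permutation_cluster_1samp_test(
+        ...     X, n_permutations=100)
+        >>> # a TFCE variant would instead pass, e.g.:
+        >>> # threshold = dict(start=0.2, step=0.2)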
+    """
+    if threshold is None:
+        p_thresh = 0.05 / (1 + (tail == 0))
+        n_samples = len(X)
+        threshold = -stats.distributions.t.ppf(p_thresh, n_samples - 1)
+        if np.sign(tail) < 0:
+            threshold = -threshold
+
+    X = [X]  # for one sample only one data array
+    return _permutation_cluster_test(X=X, threshold=threshold,
+                        n_permutations=n_permutations,
+                        tail=tail, stat_fun=stat_fun,
+                        connectivity=connectivity, verbose=verbose,
+                        n_jobs=n_jobs, seed=seed, max_step=max_step,
+                        exclude=exclude, step_down_p=step_down_p,
+                        t_power=t_power, out_type=out_type,
+                        check_disjoint=check_disjoint)
+
+
+permutation_cluster_1samp_test.__test__ = False
+
+
+@verbose
+def spatio_temporal_cluster_1samp_test(X, threshold=None,
+        n_permutations=1024, tail=0, stat_fun=ttest_1samp_no_p,
+        connectivity=None, verbose=None, n_jobs=1, seed=None, max_step=1,
+        spatial_exclude=None, step_down_p=0, t_power=1, out_type='indices',
+        check_disjoint=False):
+    """Non-parametric cluster-level 1 sample T-test for spatio-temporal data
+
+    This function provides a convenient wrapper for data organized in the form
+    (observations x time x space) to use permutation_cluster_1samp_test.
+
+    Parameters
+    ----------
+    X : array
+        Array of shape observations x time x vertices.
+    threshold : float | dict | None
+        If threshold is None, it will choose a t-threshold equivalent to
+        p < 0.05 for the given number of (within-subject) observations.
+        If a dict is used, then threshold-free cluster enhancement (TFCE)
+        will be used.
+    n_permutations : int
+        The number of permutations to compute.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the statistic is thresholded above threshold.
+        If tail is -1, the statistic is thresholded below threshold.
+        If tail is 0, the statistic is thresholded on both sides of
+        the distribution.
+    stat_fun : function
+        Function used to compute the statistical map.
+    connectivity : sparse matrix or None
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        This matrix must be square with dimension (n_vertices * n_times) or
+        (n_vertices). Default is None, i.e., a regular lattice connectivity.
+        Use square n_vertices matrix for datasets with a large temporal
+        extent to save on memory and computation time.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+        Note that if n_permutations >= 2^(n_samples) [or (2^(n_samples-1)) for
+        two-tailed tests], this value will be ignored since an exact test
+        (full permutation test) will be performed.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    spatial_exclude : list of int or None
+        List of spatial indices to exclude from clustering.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually t-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables.
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
+
+    TFCE originally described in Smith/Nichols (2009),
+    "Threshold-free cluster enhancement: Addressing problems of
+    smoothing, threshold dependence, and localisation in cluster
+    inference", NeuroImage 44 (2009) 83-98.
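+
+    A minimal usage sketch (synthetic data, illustrative only; a real
+    connectivity would typically be derived from the source space)::
+
+        >>> import numpy as np
+        >>> X = np.random.randn(10, 20, 30)  # observations x times x vertices
+        >>> out = spatio_temporal_cluster_1samp_test(X, n_permutations=50)
+        >>> T_obs, clusters, cluster_pv, H0 = out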
+    """
+    n_samples, n_times, n_vertices = X.shape
+
+    # convert spatial_exclude before passing on if necessary
+    if spatial_exclude is not None:
+        exclude = _st_mask_from_s_inds(n_times, n_vertices,
+                                       spatial_exclude, True)
+    else:
+        exclude = None
+
+    # do the heavy lifting
+    out = permutation_cluster_1samp_test(X, threshold=threshold,
+              stat_fun=stat_fun, tail=tail, n_permutations=n_permutations,
+              connectivity=connectivity, n_jobs=n_jobs, seed=seed,
+              max_step=max_step, exclude=exclude, step_down_p=step_down_p,
+              t_power=t_power, out_type=out_type,
+              check_disjoint=check_disjoint)
+    return out
+
+
+spatio_temporal_cluster_1samp_test.__test__ = False
+
+
+@verbose
+def spatio_temporal_cluster_test(X, threshold=1.67,
+        n_permutations=1024, tail=0, stat_fun=f_oneway,
+        connectivity=None, verbose=None, n_jobs=1, seed=None, max_step=1,
+        spatial_exclude=None, step_down_p=0, t_power=1, out_type='indices',
+        check_disjoint=False):
+    """Non-parametric cluster-level test for spatio-temporal data
+
+    This function provides a convenient wrapper for data organized in the form
+    (observations x time x space) to use permutation_cluster_test.
+
+    Parameters
+    ----------
+    X : list of arrays
+        Array of shape (observations, time, vertices) in each group.
+    threshold : float
+        The threshold for the statistic.
+    n_permutations : int
+        See permutation_cluster_test.
+    tail : -1 or 0 or 1 (default = 0)
+        See permutation_cluster_test.
+    stat_fun : function
+        function called to calculate statistics, must accept 1d-arrays as
+        arguments (default: scipy.stats.f_oneway)
+    connectivity : sparse matrix or None
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        Default is None, i.e., a regular lattice connectivity.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    spatial_exclude : list of int or None
+        List of spatial indices to exclude from clustering.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually f-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
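+
+    A minimal usage sketch (synthetic data, illustrative only)::
+
+        >>> import numpy as np
+        >>> X = [np.random.randn(10, 20, 30),  # per group:
+        ...      np.random.randn(12, 20, 30)]  # obs x times x vertices
+        >>> out = spatio_temporal_cluster_test(X, n_permutations=50)
+        >>> T_obs, clusters, cluster_pv, H0 = out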
+    """
+    n_samples, n_times, n_vertices = X[0].shape
+
+    # convert spatial_exclude before passing on if necessary
+    if spatial_exclude is not None:
+        exclude = _st_mask_from_s_inds(n_times, n_vertices,
+                                       spatial_exclude, True)
+    else:
+        exclude = None
+
+    # do the heavy lifting
+    out = permutation_cluster_test(X, threshold=threshold,
+              stat_fun=stat_fun, tail=tail, n_permutations=n_permutations,
+              connectivity=connectivity, n_jobs=n_jobs, seed=seed,
+              max_step=max_step, exclude=exclude, step_down_p=step_down_p,
+              t_power=t_power, out_type=out_type,
+              check_disjoint=check_disjoint)
+    return out
+
+
+spatio_temporal_cluster_test.__test__ = False
+
+
+def _st_mask_from_s_inds(n_times, n_vertices, vertices, set_as=True):
+    """This function returns a boolean mask vector to apply to a spatio-
+    temporal connectivity matrix (n_times * n_vertices square) to include (or
+    exclude) certain spatial coordinates. This is useful for excluding certain
+    regions from analysis (e.g., medial wall vertices).
+
+    Parameters
+    ----------
+    n_times : int
+        Number of time points.
+    n_vertices : int
+        Number of spatial points.
+    vertices : list or array of int
+        Vertex numbers to set.
+    set_as : bool
+        If True, all points except "vertices" are set to False (inclusion).
+        If False, all points except "vertices" are set to True (exclusion).
+
+    Returns
+    -------
+    mask : array of bool
+        A (n_times * n_vertices) array of boolean values for masking
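+
+    For example (illustrative): with n_times=2, n_vertices=3, vertices=[1]
+    and set_as=True, the raveled mask is
+    [False, True, False, False, True, False].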
+    """
+    mask = np.zeros((n_times, n_vertices), dtype=bool)
+    mask[:, vertices] = True
+    mask = mask.ravel()
+    if set_as is False:
+        mask = np.logical_not(mask)
+    return mask
+
+
+@verbose
+def _get_partitions_from_connectivity(connectivity, n_times, verbose=None):
+    """Use indices to specify disjoint subsets (e.g., hemispheres) based on
+    connectivity"""
+    if isinstance(connectivity, list):
+        test = np.ones(len(connectivity))
+        test_conn = np.zeros((len(connectivity), len(connectivity)),
+                             dtype='bool')
+        for vi in range(len(connectivity)):
+            test_conn[connectivity[vi], vi] = True
+        test_conn = sparse.coo_matrix(test_conn, dtype='float')
+    else:
+        test = np.ones(connectivity.shape[0])
+        test_conn = connectivity
+
+    part_clusts = _find_clusters(test, 0, 1, test_conn)[0]
+    if len(part_clusts) > 1:
+        logger.info('%i disjoint connectivity sets found'
+                    % len(part_clusts))
+        partitions = np.zeros(len(test), dtype='int')
+        for ii, pc in enumerate(part_clusts):
+            partitions[pc] = ii
+        if isinstance(connectivity, list):
+            partitions = np.tile(partitions, n_times)
+    else:
+        logger.info('No disjoint connectivity sets found')
+        partitions = None
+
+    return partitions
+
+
+def _reshape_clusters(clusters, sample_shape):
+    """Reshape cluster masks or indices to be of the correct shape"""
+    # format of the bool mask and indices are ndarrays
+    if len(clusters) > 0 and isinstance(clusters[0], np.ndarray):
+        if clusters[0].dtype == bool:  # format of mask
+            clusters = [c.reshape(sample_shape) for c in clusters]
+        else:  # format of indices
+            clusters = [unravel_index(c, sample_shape) for c in clusters]
+    return clusters
+
+
+def summarize_clusters_stc(clu, p_thresh=0.05, tstep=1e-3, tmin=0,
+    subject='fsaverage', vertno=[np.arange(10242), np.arange(10242)]):
+    """ Assemble summary SourceEstimate from spatiotemporal cluster results
+
+    This helps visualizing results from spatio-temporal-clustering
+    permutation tests
+
+    Parameters
+    ----------
+    clu : tuple
+        The output from clustering permutation tests.
+    p_thresh : float
+        The significance threshold for inclusion of clusters.
+    tstep : float
+        The temporal difference between two time samples.
+    tmin : float | int
+        The time of the first sample.
+    subject : str
+        The name of the subject.
+    vertno : list of arrays
+        The vertex numbers associated with the source space locations.
+
+    Returns
+    -------
+    out : instance of SourceEstimate
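+
+    A minimal usage sketch (illustrative; assumes clu is the output of a
+    spatio-temporal clustering test run with out_type='indices')::
+
+        clu = spatio_temporal_cluster_1samp_test(X, n_permutations=50)
+        stc_summary = summarize_clusters_stc(clu, p_thresh=0.05)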
+    """
+    T_obs, clusters, clu_pvals, _ = clu
+    n_times, n_vertices = T_obs.shape
+    good_cluster_inds = np.where(clu_pvals < p_thresh)[0]
+    #  Build a convenient representation of each cluster, where each
+    #  cluster becomes a "time point" in the SourceEstimate
+    if len(good_cluster_inds) > 0:
+        data = np.zeros((n_vertices, n_times))
+        data_summary = np.zeros((n_vertices, len(good_cluster_inds) + 1))
+        for ii, cluster_ind in enumerate(good_cluster_inds):
+            data.fill(0)
+            v_inds = clusters[cluster_ind][1]
+            t_inds = clusters[cluster_ind][0]
+            data[v_inds, t_inds] = T_obs[t_inds, v_inds]
+            # Store a nice visualization of the cluster by summing across
+            # time (in ms)
+            data = np.sign(data) * np.logical_not(data == 0) * tstep
+            data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)
+        # Make the first "time point" a sum across all clusters for easy
+        # visualization
+        data_summary[:, 0] = np.sum(data_summary, axis=1)
+
+        return SourceEstimate(data_summary, vertno, tmin=tmin, tstep=tstep,
+                             subject=subject)
+    else:
+        raise RuntimeError('No significant clusters available. Please adjust '
+                           'your threshold or check your statistical analysis.')
diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py
new file mode 100644
index 0000000..6bb9170
--- /dev/null
+++ b/mne/stats/multi_comp.py
@@ -0,0 +1,102 @@
+# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis
+#          Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# Code borrowed from statsmodels
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+
+def _ecdf(x):
+    '''no frills empirical cdf used in fdrcorrection
+    '''
+    nobs = len(x)
+    return np.arange(1, nobs + 1) / float(nobs)
+
+
+def fdr_correction(pvals, alpha=0.05, method='indep'):
+    """P-value correction with False Discovery Rate (FDR)
+
+    Correction for multiple comparison using FDR.
+
+    This covers Benjamini/Hochberg for independent or positively correlated and
+    Benjamini/Yekutieli for general or negatively correlated tests.
+
+    Parameters
+    ----------
+    pvals : array_like
+        set of p-values of the individual tests.
+    alpha : float
+        error rate
+    method : 'indep' | 'negcorr'
+        If 'indep' it implements Benjamini/Hochberg for independent or if
+        'negcorr' it corresponds to Benjamini/Yekutieli.
+
+    Returns
+    -------
+    reject : array, bool
+        True if a hypothesis is rejected, False if not
+    pval_corrected : array
+        pvalues adjusted for multiple hypothesis testing to limit FDR
+
+    Notes
+    -----
+    Reference:
+    Genovese CR, Lazar NA, Nichols T.
+    Thresholding of statistical maps in functional neuroimaging using the false
+    discovery rate. Neuroimage. 2002 Apr;15(4):870-8.
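+
+    A minimal usage sketch (illustrative)::
+
+        >>> import numpy as np
+        >>> pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.27])
+        >>> reject, pvals_corrected = fdr_correction(pvals, alpha=0.05)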
+    """
+    pvals = np.asarray(pvals)
+    shape_init = pvals.shape
+    pvals = pvals.ravel()
+
+    pvals_sortind = np.argsort(pvals)
+    pvals_sorted = pvals[pvals_sortind]
+    sortrevind = pvals_sortind.argsort()
+
+    if method in ['i', 'indep', 'p', 'poscorr']:
+        ecdffactor = _ecdf(pvals_sorted)
+    elif method in ['n', 'negcorr']:
+        cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1))
+        ecdffactor = _ecdf(pvals_sorted) / cm
+    else:
+        raise ValueError("Method should be 'indep' and 'negcorr'")
+
+    reject = pvals_sorted < (ecdffactor * alpha)
+    if reject.any():
+        rejectmax = max(np.nonzero(reject)[0])
+    else:
+        rejectmax = 0
+    reject[:rejectmax] = True
+
+    pvals_corrected_raw = pvals_sorted / ecdffactor
+    pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
+    pvals_corrected[pvals_corrected > 1.0] = 1.0
+    pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)
+    reject = reject[sortrevind].reshape(shape_init)
+    return reject, pvals_corrected
+
+
+def bonferroni_correction(pval, alpha=0.05):
+    """P-value correction with Bonferroni method
+
+    Parameters
+    ----------
+    pval : array_like
+        set of p-values of the individual tests.
+    alpha : float
+        error rate
+
+    Returns
+    -------
+    reject : array, bool
+        True if a hypothesis is rejected, False if not
+    pval_corrected : array
+        pvalues adjusted for multiple hypothesis testing to limit the
+        family-wise error rate
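+
+    For example (illustrative): with pval = [0.01, 0.2], the corrected
+    p-values are [0.02, 0.4], so only the first test survives alpha = 0.05.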
+
+    """
+    pval = np.asarray(pval)
+    pval_corrected = pval * float(pval.size)
+    reject = pval < alpha
+    return reject, pval_corrected
diff --git a/mne/stats/parametric.py b/mne/stats/parametric.py
new file mode 100644
index 0000000..943ab35
--- /dev/null
+++ b/mne/stats/parametric.py
@@ -0,0 +1,252 @@
+import numpy as np
+from scipy import stats
+from scipy.signal import detrend
+from ..fixes import matrix_rank
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+defaults_twoway_rm = {
+    'parse': {
+        'A': [0],
+        'B': [1],
+        'A+B': [0, 1],
+        'A:B': [2],
+        'A*B': [0, 1, 2]
+        },
+    'iter_contrasts': np.array([(1, 0, 1), (0, 1, 1), (1, 1, 1)])
+}
+
+
+# The following function is a rewriting of scipy.stats.f_oneway
+# Contrary to the scipy.stats.f_oneway implementation it does not
+# copy the data while keeping the inputs unchanged.
+def _f_oneway(*args):
+    """
+    Performs a 1-way ANOVA.
+
+    The one-way ANOVA tests the null hypothesis that 2 or more groups have
+    the same population mean. The test is applied to samples from two or
+    more groups, possibly with differing sizes.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The sample measurements should be given as arguments.
+
+    Returns
+    -------
+    F-value : float
+        The computed F-value of the test
+    p-value : float
+        The associated p-value from the F-distribution
+
+    Notes
+    -----
+    The ANOVA test has important assumptions that must be satisfied in order
+    for the associated p-value to be valid.
+
+    1. The samples are independent
+    2. Each sample is from a normally distributed population
+    3. The population standard deviations of the groups are all equal.  This
+       property is known as homoscedasticity.
+
+    If these assumptions are not true for a given set of data, it may still be
+    possible to use the Kruskal-Wallis H-test (scipy.stats.kruskal), although
+    with some loss of power.
+
+    The algorithm is from Heiman[2], pp.394-7.
+
+    See scipy.stats.f_oneway, which should give the same results while
+    being less efficient.
+
+    References
+    ----------
+    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
+           Statistics". Chapter 14.
+           http://faculty.vassar.edu/lowry/ch14pt1.html
+
+    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
+
+    """
+    n_classes = len(args)
+    n_samples_per_class = np.array([len(a) for a in args])
+    n_samples = np.sum(n_samples_per_class)
+    ss_alldata = reduce(lambda x, y: x + y,
+                        [np.sum(a ** 2, axis=0) for a in args])
+    sums_args = [np.sum(a, axis=0) for a in args]
+    square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
+    square_of_sums_args = [s ** 2 for s in sums_args]
+    sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
+    ssbn = 0
+    for k, _ in enumerate(args):
+        ssbn += square_of_sums_args[k] / n_samples_per_class[k]
+    ssbn -= square_of_sums_alldata / float(n_samples)
+    sswn = sstot - ssbn
+    dfbn = n_classes - 1
+    dfwn = n_samples - n_classes
+    msb = ssbn / float(dfbn)
+    msw = sswn / float(dfwn)
+    f = msb / msw
+    prob = stats.fprob(dfbn, dfwn, f)
+    return f, prob
+
+
+def f_oneway(*args):
+    """Call scipy.stats.f_oneway, but return only f-value"""
+    return _f_oneway(*args)[0]
+
+
+def _check_effects(effects):
+    """ Aux Function """
+    if effects.upper() not in defaults_twoway_rm['parse']:
+        raise ValueError('The value passed for `effects` is not supported.'
+            ' Please consider the documentation.')
+
+    return defaults_twoway_rm['parse'][effects]
+
+
+def _iter_contrasts(n_subjects, factor_levels, effect_picks):
+    """ Aux Function """
+    sc, sy = [], []  # setup contrasts
+    for n_levels in factor_levels:
+        sc.append([np.ones([n_levels, 1]),
+            detrend(np.eye(n_levels), type='constant')])
+        sy.append([np.ones([n_levels, 1]) / n_levels, np.eye(n_levels)])
+
+    for (c1, c2, c3) in defaults_twoway_rm['iter_contrasts'][effect_picks]:
+        c_ = np.kron(sc[0][c1], sc[c3][c2])
+        df1 = matrix_rank(c_)
+        df2 = df1 * (n_subjects - 1)
+        yield c_, df1, df2
+
+
+def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
+                          pvalue=0.05):
+    """ Compute f-value thesholds for a two-way ANOVA
+
+    Parameters
+    ----------
+    n_subjects : int
+        The number of subjects to be analyzed.
+    factor_levels : list-like
+        The number of levels per factor.
+    effects : str
+        A string denoting the effect to be returned. The following
+        mapping is currently supported:
+            'A': main effect of A
+            'B': main effect of B
+            'A:B': interaction effect
+            'A+B': both main effects
+            'A*B': all three effects
+    pvalue : float
+        The p-value to be thresholded.
+
+    Returns
+    -------
+    f_threshold : list | float
+        list of f-values, one per effect, if more than one effect is
+        requested, else a single float.
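+
+    A minimal usage sketch (illustrative): the threshold for the interaction
+    effect in a 2 x 2 design with 10 subjects::
+
+        >>> f_thresh = f_threshold_twoway_rm(10, [2, 2], effects='A:B')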
+    """
+    effect_picks = _check_effects(effects)
+
+    f_threshold = []
+    for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
+                                        effect_picks):
+        f_threshold.append(stats.f(df1, df2).isf(pvalue))
+
+    return f_threshold if len(f_threshold) > 1 else f_threshold[0]
+
+
+# The following functions based on MATLAB code by Rik Henson
+# and Python code from the pvttble toolbox by Roger Lew.
+def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
+                correction=False, return_pvals=True):
+    """ 2 way repeated measures ANOVA for fully balanced designs
+
+    data : ndarray
+        3D array where the first two dimensions are compliant
+        with a subjects X conditions scheme:
+
+        first factor repeats slowest:
+
+                    A1B1 A1B2 A2B1 A2B2
+        subject 1   1.34 2.53 0.97 1.74
+        subject ... .... .... .... ....
+        subject k   2.45 7.90 3.09 4.76
+
+        The last dimension is thought to carry the observations
+        for mass univariate analysis.
+    factor_levels : list-like
+        The number of levels per factor.
+    effects : str
+        A string denoting the effect to be returned. The following
+        mapping is currently supported:
+            'A': main effect of A
+            'B': main effect of B
+            'A:B': interaction effect
+            'A+B': both main effects
+            'A*B': all three effects
+    alpha : float
+        The significance threshold.
+    correction : bool
+        The correction method to be employed if one factor has more than two
+        levels. If True, sphericity correction using the Greenhouse-Geisser
+        method will be applied.
+    return_pvals : bool
+        If True, return p values corresponding to f values.
+
+    Returns
+    -------
+    f_vals : ndarray
+        An array of f values with length corresponding to the number
+        of effects estimated. The shape depends on the number of effects
+        estimated.
+    p_vals : ndarray
+        If not requested via return_pvals, defaults to an empty array.
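+
+    A minimal usage sketch (synthetic data, illustrative only)::
+
+        >>> import numpy as np
+        >>> data = np.random.randn(10, 4, 50)  # 10 subjects, 2x2 cells
+        >>> fvals, pvals = f_twoway_rm(data, factor_levels=[2, 2])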
+    """
+    if data.ndim == 2:  # general purpose support, e.g. behavioural data
+        data = data[:, :, np.newaxis]
+    elif data.ndim > 3:  # let's allow for some magic here.
+        data = data.reshape(data.shape[0], data.shape[1],
+            np.prod(data.shape[2:]))
+
+    effect_picks = _check_effects(effects)
+    n_obs = data.shape[2]
+    n_replications = data.shape[0]
+
+    # put the last axis in front to 'iterate' over mass univariate instances.
+    data = np.rollaxis(data, 2)
+    fvalues, pvalues = [], []
+    for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
+            effect_picks):
+        y = np.dot(data, c_)
+        b = np.mean(y, axis=1)[:, np.newaxis, :]
+        ss = np.sum(np.sum(y * b, axis=2), axis=1)
+        mse = (np.sum(np.sum(y * y, axis=2), axis=1) - ss) / (df2 / df1)
+        fvals = ss / mse
+        fvalues.append(fvals)
+        if correction:
+            # sample covariances, leave off "/ (y.shape[1] - 1)" norm because
+            # it falls out. the below line is faster than the equivalent:
+            # v = np.array([np.dot(y_.T, y_) for y_ in y])
+            v = np.array(map(np.dot, y.swapaxes(2, 1), y))
+            v = (np.array(map(np.trace, v)) ** 2 /
+                  (df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
+            eps = v
+
+        df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
+        if correction:
+            df1, df2 = [d[None, :] * eps for d in df1, df2]
+
+        if return_pvals:
+            pvals = stats.f(df1, df2).sf(fvals)
+        else:
+            pvals = np.empty(0)
+        pvalues.append(pvals)
+
+    # handle single effect returns
+    return [np.squeeze(np.asarray(v)) for v in fvalues, pvalues]
diff --git a/mne/stats/permutations.py b/mne/stats/permutations.py
new file mode 100644
index 0000000..2608c5f
--- /dev/null
+++ b/mne/stats/permutations.py
@@ -0,0 +1,152 @@
+"""T-test with permutations
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Fernando Perez (bin_perm_rep function)
+#
+# License: Simplified BSD
+
+from math import sqrt
+import numpy as np
+
+from ..parallel import parallel_func
+from .. import verbose
+
+
+def bin_perm_rep(ndim, a=0, b=1):
+    """bin_perm_rep(ndim) -> ndim permutations with repetitions of (a,b).
+
+    Returns an array with all the possible permutations with repetitions of
+    (0,1) in ndim dimensions.  The array is shaped as (2**ndim,ndim), and is
+    ordered with the last index changing fastest.
+
+    Examples
+    --------
+    For ndim=3:
+
+    >>> bin_perm_rep(3)
+    array([[0, 0, 0],
+           [0, 0, 1],
+           [0, 1, 0],
+           [0, 1, 1],
+           [1, 0, 0],
+           [1, 0, 1],
+           [1, 1, 0],
+           [1, 1, 1]])
+    """
+
+    # Create the leftmost column as 0,0,...,1,1,...
+    nperms = 2 ** ndim
+    perms = np.empty((nperms, ndim), type(a))
+    perms.fill(a)
+    half_point = nperms / 2
+    perms[half_point:, 0] = b
+    # Fill the rest of the table by sampling the previous column every 2 items
+    for j in range(1, ndim):
+        half_col = perms[::2, j - 1]
+        perms[:half_point, j] = half_col
+        perms[half_point:, j] = half_col
+
+    return perms
+
+
+def _max_stat(X, X2, perms, dof_scaling):
+    """Aux function for permutation_t_test (for parallel comp)"""
+    n_samples = len(X)
+    mus = np.dot(perms, X) / float(n_samples)
+    stds = np.sqrt(X2[None, :] - mus ** 2) * dof_scaling  # std with splitting
+    max_abs = np.max(np.abs(mus) / (stds / sqrt(n_samples)), axis=1)  # t-max
+    return max_abs
+
+
+@verbose
+def permutation_t_test(X, n_permutations=10000, tail=0, n_jobs=1,
+                       verbose=None):
+    """One sample/paired sample permutation test based on a t-statistic.
+
+    This function can perform the test on one variable or
+    simultaneously on multiple variables. When applying the test to multiple
+    variables, the "tmax" method is used for adjusting the p-values of each
+    variable for multiple comparisons. Like Bonferroni correction, this method
+    adjusts p-values in a way that controls the family-wise error rate.
+    However, the permutation method will be more
+    powerful than Bonferroni correction when different variables in the test
+    are correlated.
+
+    Parameters
+    ----------
+    X : array of shape [n_samples x n_tests]
+        Data of size number of samples (aka number of observations) times
+        number of tests (aka number of variables).
+    n_permutations : int or 'all'
+        Number of permutations. If n_permutations is 'all' all possible
+        permutations are tested (2**n_samples). This is the exact test,
+        which can become intractable when the number of samples is large
+        (e.g. > 20). If n_permutations >= 2**n_samples then the exact
+        test is performed.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the alternative hypothesis is that the
+        mean of the data is greater than 0 (upper tailed test).  If tail is 0,
+        the alternative hypothesis is that the mean of the data is different
+        than 0 (two tailed test).  If tail is -1, the alternative hypothesis
+        is that the mean of the data is less than 0 (lower tailed test).
+    n_jobs : int
+        Number of CPUs to use for computation.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables
+
+    p_values : array of shape [n_tests]
+        P-values for all the tests (aka variables)
+
+    H0 : array of shape [n_permutations]
+        T-statistic obtained by permutations and t-max trick for multiple
+        comparisons.
+
+    Notes
+    -----
+    A reference (among many) in the field of neuroimaging:
+    Nichols, T. E. & Holmes, A. P. (2002). Nonparametric permutation tests
+    for functional neuroimaging: a primer with examples.
+    Human Brain Mapping, 15, 1-25.
+    Overview of standard nonparametric randomization and permutation
+    testing applied to neuroimaging data (e.g. fMRI)
+    DOI: http://dx.doi.org/10.1002/hbm.1058
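+
+    Examples
+    --------
+    A minimal sketch on simulated data (30 observations of 5 variables,
+    with an offset added to the first two):
+
+    >>> import numpy as np
+    >>> X = np.random.randn(30, 5)
+    >>> X[:, :2] += 1
+    >>> T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999,
+    ...                                          tail=0)  # doctest: +SKIP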
+    """
+    n_samples, n_tests = X.shape
+
+    do_exact = False
+    if (n_permutations == 'all') or (n_permutations >= 2 ** n_samples - 1):
+        do_exact = True
+        n_permutations = 2 ** n_samples - 1
+
+    X2 = np.mean(X ** 2, axis=0)  # precompute moments
+    mu0 = np.mean(X, axis=0)
+    dof_scaling = sqrt(n_samples / (n_samples - 1.0))
+    std0 = np.sqrt(X2 - mu0 ** 2) * dof_scaling  # get std with var splitting
+    T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples))
+
+    if do_exact:
+        perms = bin_perm_rep(n_samples, a=1, b=-1)[1:, :]
+    else:
+        perms = np.sign(0.5 - np.random.rand(n_permutations, n_samples))
+
+    parallel, my_max_stat, n_jobs = parallel_func(_max_stat, n_jobs)
+
+    max_abs = np.concatenate(parallel(my_max_stat(X, X2, p, dof_scaling)
+                                      for p in np.array_split(perms, n_jobs)))
+    H0 = np.sort(max_abs)
+
+    scaling = float(n_permutations + 1)
+
+    if tail == 0:
+        p_values = 1.0 - np.searchsorted(H0, np.abs(T_obs)) / scaling
+    elif tail == 1:
+        p_values = 1.0 - np.searchsorted(H0, T_obs) / scaling
+    elif tail == -1:
+        p_values = 1.0 - np.searchsorted(H0, -T_obs) / scaling
+
+    return T_obs, p_values, H0
+
+permutation_t_test.__test__ = False  # for nosetests
diff --git a/mne/stats/tests/__init__.py b/mne/stats/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/stats/tests/test_cluster_level.py b/mne/stats/tests/test_cluster_level.py
new file mode 100644
index 0000000..823b955
--- /dev/null
+++ b/mne/stats/tests/test_cluster_level.py
@@ -0,0 +1,359 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_array_equal,\
+                          assert_array_almost_equal
+from nose.tools import assert_true, assert_raises
+from scipy import sparse, linalg, stats
+from functools import partial
+import warnings
+
+from mne.stats.cluster_level import permutation_cluster_test, \
+                                    permutation_cluster_1samp_test, \
+                                    spatio_temporal_cluster_test, \
+                                    spatio_temporal_cluster_1samp_test, \
+                                    ttest_1samp_no_p, summarize_clusters_stc
+
+noise_level = 20
+
+normfactor = np.hanning(20).sum()
+
+rng = np.random.RandomState(42)
+condition1_1d = rng.randn(40, 350) * noise_level
+for c in condition1_1d:
+    c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
+
+condition2_1d = rng.randn(33, 350) * noise_level
+for c in condition2_1d:
+    c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
+
+pseudoekp = 5 * np.hanning(150)[None, :]
+condition1_1d[:, 100:250] += pseudoekp
+condition2_1d[:, 100:250] -= pseudoekp
+
+condition1_2d = condition1_1d[:, :, np.newaxis]
+condition2_2d = condition2_1d[:, :, np.newaxis]
+
+
+def test_cluster_permutation_test():
+    """Test cluster level permutations tests."""
+    for condition1, condition2 in zip((condition1_1d, condition1_2d),
+                                      (condition2_1d, condition2_2d)):
+        T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
+                                    [condition1, condition2],
+                                    n_permutations=100, tail=1, seed=1)
+        assert_equal(np.sum(cluster_p_values < 0.05), 1)
+
+        T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
+                                    [condition1, condition2],
+                                    n_permutations=100, tail=0, seed=1)
+        assert_equal(np.sum(cluster_p_values < 0.05), 1)
+
+        # test with 2 jobs
+        T_obs, clusters, cluster_p_values_buff, hist =\
+            permutation_cluster_test([condition1, condition2],
+                                    n_permutations=100, tail=0, seed=1,
+                                    n_jobs=2)
+        assert_array_equal(cluster_p_values, cluster_p_values_buff)
+
+
+def test_cluster_permutation_t_test():
+    """Test cluster level permutations T-test."""
+    for condition1 in (condition1_1d, condition1_2d):
+        # these are so significant we can get away with fewer perms
+        T_obs, clusters, cluster_p_values, hist =\
+            permutation_cluster_1samp_test(condition1, n_permutations=100,
+                                           tail=0, seed=1)
+        assert_equal(np.sum(cluster_p_values < 0.05), 1)
+
+        T_obs_pos, c_1, cluster_p_values_pos, _ =\
+            permutation_cluster_1samp_test(condition1, n_permutations=100,
+                                    tail=1, threshold=1.67, seed=1)
+
+        T_obs_neg, _, cluster_p_values_neg, _ =\
+            permutation_cluster_1samp_test(-condition1, n_permutations=100,
+                                    tail=-1, threshold=-1.67, seed=1)
+        assert_array_equal(T_obs_pos, -T_obs_neg)
+        assert_array_equal(cluster_p_values_pos < 0.05,
+                           cluster_p_values_neg < 0.05)
+
+        # test with 2 jobs
+        T_obs_neg, _, cluster_p_values_neg_buff, _ = \
+            permutation_cluster_1samp_test(-condition1, n_permutations=100,
+                                            tail=-1, threshold=-1.67, seed=1,
+                                            n_jobs=2)
+
+        assert_array_equal(cluster_p_values_neg, cluster_p_values_neg_buff)
+
+
+def test_cluster_permutation_with_connectivity():
+    """Test cluster level permutations with connectivity matrix."""
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph
+    except ImportError:
+        return
+
+    n_pts = condition1_1d.shape[1]
+    # we don't care about p-values in any of these, so do fewer permutations
+    args = dict(seed=None, max_step=1, exclude=None,
+                step_down_p=0, t_power=1, threshold=1.67,
+                check_disjoint=False, n_permutations=50)
+
+    did_warn = False
+    for X1d, X2d, func, spatio_temporal_func in \
+                [(condition1_1d, condition1_2d,
+                  permutation_cluster_1samp_test,
+                  spatio_temporal_cluster_1samp_test),
+                  ([condition1_1d, condition2_1d],
+                   [condition1_2d, condition2_2d],
+                    permutation_cluster_test,
+                    spatio_temporal_cluster_test)]:
+        out = func(X1d, **args)
+        connectivity = grid_to_graph(1, n_pts)
+        out_connectivity = func(X1d, connectivity=connectivity, **args)
+        assert_array_equal(out[0], out_connectivity[0])
+        for a, b in zip(out_connectivity[1], out[1]):
+            assert_array_equal(out[0][a], out[0][b])
+            assert_true(np.all(a[b]))
+
+        # test spatio-temporal w/o time connectivity (repeat spatial pattern)
+        connectivity_2 = sparse.coo_matrix(
+            linalg.block_diag(connectivity.asfptype().todense(),
+                              connectivity.asfptype().todense()))
+
+        if isinstance(X1d, list):
+            X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]
+        else:
+            X1d_2 = np.concatenate((X1d, X1d), axis=1)
+
+        out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args)
+        # make sure we were operating on the same values
+        split = len(out[0])
+        assert_array_equal(out[0], out_connectivity_2[0][:split])
+        assert_array_equal(out[0], out_connectivity_2[0][split:])
+
+        # make sure we really got 2x the number of original clusters
+        n_clust_orig = len(out[1])
+        assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig)
+
+        # Make sure that we got the old ones back
+        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
+        data_2 = set([np.sum(out_connectivity_2[0][a[:n_pts]]) for a in
+            out_connectivity_2[1][:]])
+        assert_true(len(data_1.intersection(data_2)) == len(data_1))
+
+        # now use the other algorithm
+        if isinstance(X1d, list):
+            X1d_3 = [np.reshape(x, (-1, 2, 350)) for x in X1d_2]
+        else:
+            X1d_3 = np.reshape(X1d_2, (-1, 2, 350))
+
+        out_connectivity_3 = spatio_temporal_func(
+                                 X1d_3, n_permutations=50,
+                                 connectivity=connectivity, max_step=0,
+                                 threshold=1.67, check_disjoint=True)
+        # make sure we were operating on the same values
+        split = len(out[0])
+        assert_array_equal(out[0], out_connectivity_3[0][0])
+        assert_array_equal(out[0], out_connectivity_3[0][1])
+
+        # make sure we really got 2x the number of original clusters
+        assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig)
+
+        # Make sure that we got the old ones back
+        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
+        data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in
+            out_connectivity_3[1]])
+        assert_true(len(data_1.intersection(data_2)) == len(data_1))
+
+        # test new versus old method
+        out_connectivity_4 = spatio_temporal_func(
+                                 X1d_3, n_permutations=50,
+                                 connectivity=connectivity, max_step=2,
+                                 threshold=1.67)
+        out_connectivity_5 = spatio_temporal_func(
+                                 X1d_3, n_permutations=50,
+                                 connectivity=connectivity, max_step=1,
+                                 threshold=1.67)
+
+        # clusters could be in a different order
+        sums_4 = [np.sum(out_connectivity_4[0][a])
+                  for a in out_connectivity_4[1]]
+        sums_5 = [np.sum(out_connectivity_4[0][a])
+                  for a in out_connectivity_5[1]]
+        sums_4 = np.sort(sums_4)
+        sums_5 = np.sort(sums_5)
+        assert_array_almost_equal(sums_4, sums_5)
+
+        assert_raises(ValueError, spatio_temporal_func,
+                                 X1d_3, n_permutations=1,
+                                 connectivity=connectivity, max_step=1,
+                                 threshold=1.67, n_jobs=-1000)
+
+        # not enough TFCE params
+        assert_raises(KeyError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, threshold=dict(me='hello'))
+
+        # too extreme a start threshold
+        with warnings.catch_warnings(True) as w:
+            spatio_temporal_func(X1d_3, connectivity=connectivity,
+                                 threshold=dict(start=10, step=1))
+        if not did_warn:
+            assert_true(len(w) == 1)
+            did_warn = True
+
+        # too extreme a start threshold
+        assert_raises(ValueError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, tail=-1,
+                      threshold=dict(start=1, step=-1))
+        assert_raises(ValueError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, tail=-1,
+                      threshold=dict(start=-1, step=1))
+
+        # wrong type for threshold
+        assert_raises(TypeError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, threshold=[])
+
+        # wrong value for tail
+        assert_raises(ValueError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, tail=2)
+
+        # make sure it actually found a significant point
+        out_connectivity_6 = spatio_temporal_func(
+                                 X1d_3, n_permutations=50,
+                                 connectivity=connectivity, max_step=1,
+                                 threshold=dict(start=1, step=1))
+        assert_true(np.min(out_connectivity_6[2]) < 0.05)
+
+
+def test_permutation_connectivity_equiv():
+    """Test cluster level permutations with and without connectivity
+    """
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph
+    except ImportError:
+        return
+    rng = np.random.RandomState(0)
+    # subjects, time points, spatial points
+    X = rng.randn(7, 2, 10)
+    # add some significant points
+    X[:, 0:2, 0:2] += 10  # span two time points and two spatial points
+    X[:, 1, 5:9] += 10  # span four time points
+    max_steps = [1, 1, 1, 2]
+    # This will run full algorithm in two ways, then the ST-algorithm in 2 ways
+    # All of these should give the same results
+    conns = [None, grid_to_graph(2, 10),
+             grid_to_graph(1, 10), grid_to_graph(1, 10)]
+    stat_map = None
+    thresholds = [2, dict(start=0.5, step=0.5)]
+    sig_counts = [2, 8]
+    sdps = [0, 0.05, 0.05]
+    ots = ['mask', 'mask', 'indices']
+    for thresh, count in zip(thresholds, sig_counts):
+        cs = None
+        ps = None
+        for max_step, conn in zip(max_steps, conns):
+            for stat_fun in [ttest_1samp_no_p,
+                             partial(ttest_1samp_no_p, sigma=1e-3)]:
+                for sdp, ot in zip(sdps, ots):
+                    t, clusters, p, H0 = \
+                            permutation_cluster_1samp_test(X,
+                                                           threshold=thresh,
+                                                           connectivity=conn,
+                                                           n_jobs=2,
+                                                           max_step=max_step,
+                                                           stat_fun=stat_fun,
+                                                           step_down_p=sdp,
+                                                           out_type=ot)
+                    # make sure our output datatype is correct
+                    if ot == 'mask':
+                        assert_true(isinstance(clusters[0], np.ndarray))
+                        assert_true(clusters[0].dtype == bool)
+                        assert_array_equal(clusters[0].shape, X.shape[1:])
+                    else:  # ot == 'indices'
+                        assert_true(isinstance(clusters[0], tuple))
+
+                    # make sure all comparisons were done; for TFCE, no perm
+                    # should come up empty
+                    if count == 8:
+                        assert_true(not np.any(H0 == 0))
+                    inds = np.where(p < 0.05)[0]
+                    assert_true(len(inds) == count)
+                    this_cs = [clusters[ii] for ii in inds]
+                    this_ps = p[inds]
+                    this_stat_map = np.zeros((2, 10), dtype=bool)
+                    for ci, c in enumerate(this_cs):
+                        if isinstance(c, tuple):
+                            this_c = np.zeros((2, 10), bool)
+                            for x, y in zip(c[0], c[1]):
+                                this_stat_map[x, y] = True
+                                this_c[x, y] = True
+                            this_cs[ci] = this_c
+                            c = this_c
+                        this_stat_map[c] = True
+                    if cs is None:
+                        ps = this_ps
+                        cs = this_cs
+                    if stat_map is None:
+                        stat_map = this_stat_map
+                    assert_array_equal(ps, this_ps)
+                    assert_true(len(cs) == len(this_cs))
+                    for c1, c2 in zip(cs, this_cs):
+                        assert_array_equal(c1, c2)
+                    assert_array_equal(stat_map, this_stat_map)
+
+
+def test_spatio_temporal_cluster_connectivity():
+    """Test spatio-temporal clustering with and without connectivity"""
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph
+    except ImportError:
+        return
+
+    rng = np.random.RandomState(0)
+    noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10)
+    data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1])
+
+    noise2_2d = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10)
+    data2_2d = np.transpose(np.dstack((condition2_2d, noise2_2d)), [0, 2, 1])
+
+    conn = grid_to_graph(data1_2d.shape[-1], 1)
+
+    threshold = dict(start=4.0, step=2)
+    T_obs, clusters, p_values_conn, hist = \
+        spatio_temporal_cluster_test([data1_2d, data2_2d], connectivity=conn,
+                                     n_permutations=50, tail=1, seed=1,
+                                     threshold=threshold)
+
+    T_obs, clusters, p_values_no_conn, hist = \
+        spatio_temporal_cluster_test([data1_2d, data2_2d],
+                                     n_permutations=50, tail=1, seed=1,
+                                     threshold=threshold)
+
+    assert_equal(np.sum(p_values_conn < 0.05), np.sum(p_values_no_conn < 0.05))
+
+
+def ttest_1samp(X):
+    """Returns T-values
+    """
+    return stats.ttest_1samp(X, 0)[0]
+
+
+def test_summarize_clusters():
+    """ test summary stcs
+    """
+    clu = (np.random.random([1, 20484]),
+           [(np.array([0]), np.array([0, 2, 4]))],
+            np.array([0.02, 0.1]),
+            np.array([12, -14, 30]))
+    stc_sum = summarize_clusters_stc(clu)
+    assert_true(stc_sum.data.shape[1] == 2)
+    clu[2][0] = 0.3
+    assert_raises(RuntimeError, summarize_clusters_stc, clu)
diff --git a/mne/stats/tests/test_multi_comp.py b/mne/stats/tests/test_multi_comp.py
new file mode 100644
index 0000000..4cba141
--- /dev/null
+++ b/mne/stats/tests/test_multi_comp.py
@@ -0,0 +1,44 @@
+import numpy as np
+from numpy.testing import assert_almost_equal, assert_allclose, assert_raises
+from nose.tools import assert_true
+from scipy import stats
+
+from mne.stats import fdr_correction, bonferroni_correction
+
+
+def test_multi_pval_correction():
+    """Test pval correction for multi comparison (FDR and Bonferroni)
+    """
+    rng = np.random.RandomState(0)
+    X = rng.randn(10, 1000, 10)
+    X[:, :50, 0] += 4.0  # 50 significant tests
+    alpha = 0.05
+
+    T, pval = stats.ttest_1samp(X, 0)
+
+    n_samples = X.shape[0]
+    n_tests = X.size / n_samples
+    thresh_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
+
+    reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha)
+    thresh_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
+    assert_true(pval_bonferroni.ndim == 2)
+    assert_true(reject_bonferroni.ndim == 2)
+    assert_allclose(pval_bonferroni / 10000, pval)
+
+    fwer = np.mean(reject_bonferroni)
+    assert_almost_equal(fwer, alpha, 1)
+
+    reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
+    assert_true(pval_fdr.ndim == 2)
+    assert_true(reject_fdr.ndim == 2)
+    thresh_fdr = np.min(np.abs(T)[reject_fdr])
+    assert_true(0 <= (reject_fdr.sum() - 50) <= 50 * 1.05)
+    assert_true(thresh_uncorrected <= thresh_fdr <= thresh_bonferroni)
+    assert_raises(ValueError, fdr_correction, pval, alpha, method='blah')
+    assert_true(np.all(fdr_correction(pval, alpha=0)[0] == 0))
+
+    reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='negcorr')
+    thresh_fdr = np.min(np.abs(T)[reject_fdr])
+    assert_true(0 <= (reject_fdr.sum() - 50) <= 50 * 1.05)
+    assert_true(thresh_uncorrected <= thresh_fdr <= thresh_bonferroni)
diff --git a/mne/stats/tests/test_parametric.py b/mne/stats/tests/test_parametric.py
new file mode 100644
index 0000000..1f7f0b0
--- /dev/null
+++ b/mne/stats/tests/test_parametric.py
@@ -0,0 +1,91 @@
+from itertools import product
+from ..parametric import f_twoway_rm, f_threshold_twoway_rm, \
+    defaults_twoway_rm
+from nose.tools import assert_raises, assert_true
+from numpy.testing import assert_array_almost_equal
+
+import numpy as np
+
+# hardcoded external test results, manually transferred
+test_external = {
+    # SPSS, manually conducted analysis
+    'spss_fvals': np.array([2.568, 0.240, 1.756]),
+    'spss_pvals_uncorrected': np.array([0.126, 0.788, 0.186]),
+    'spss_pvals_corrected': np.array([0.126, 0.784, 0.192]),
+    # R 2.15.2
+    # data generated using this code http://goo.gl/7UcKb
+    'r_fvals': np.array([2.567619, 0.24006, 1.756380]),
+    'r_pvals_uncorrected': np.array([0.12557, 0.78776, 0.1864])
+}
+
+#  generated using this expression: `np.random.RandomState(42).randn(20, 6)`
+test_data = np.array(
+[[0.49671415, -0.1382643, 0.64768854, 1.52302986, -0.23415337, -0.23413696],
+ [1.57921282, 0.76743473, -0.46947439, 0.54256004, -0.46341769, -0.46572975],
+ [0.24196227, -1.91328024, -1.72491783, -0.56228753, -1.01283112, 0.31424733],
+ [-0.90802408, -1.4123037, 1.46564877, -0.2257763, 0.0675282, -1.42474819],
+ [-0.54438272, 0.11092259, -1.15099358, 0.37569802, -0.60063869, -0.29169375],
+ [-0.60170661, 1.85227818, -0.01349722, -1.05771093, 0.82254491, -1.22084365],
+ [0.2088636, -1.95967012, -1.32818605, 0.19686124, 0.73846658, 0.17136828],
+ [-0.11564828, -0.3011037, -1.47852199, -0.71984421, -0.46063877, 1.05712223],
+ [0.34361829, -1.76304016, 0.32408397, -0.38508228, -0.676922, 0.61167629],
+ [1.03099952, 0.93128012, -0.83921752, -0.30921238, 0.33126343, 0.97554513],
+ [-0.47917424, -0.18565898, -1.10633497, -1.19620662, 0.81252582, 1.35624003],
+ [-0.07201012, 1.0035329, 0.36163603, -0.64511975, 0.36139561, 1.53803657],
+ [-0.03582604, 1.56464366, -2.6197451, 0.8219025, 0.08704707, -0.29900735],
+ [0.09176078, -1.98756891, -0.21967189, 0.35711257, 1.47789404, -0.51827022],
+ [-0.8084936, -0.50175704, 0.91540212, 0.32875111, -0.5297602, 0.51326743],
+ [0.09707755, 0.96864499, -0.70205309, -0.32766215, -0.39210815, -1.46351495],
+ [0.29612028, 0.26105527, 0.00511346, -0.23458713, -1.41537074, -0.42064532],
+ [-0.34271452, -0.80227727, -0.16128571, 0.40405086, 1.8861859, 0.17457781],
+ [0.25755039, -0.07444592, -1.91877122, -0.02651388, 0.06023021, 2.46324211],
+ [-0.19236096, 0.30154734, -0.03471177, -1.16867804, 1.14282281, 0.75193303]])
+
+
+def test_f_twoway_rm():
+    """ Test 2-way anova """
+    iter_params = product([4, 10], [2, 15], [4, 6, 8], ['A', 'B', 'A:B'],
+        [False, True])
+    for params in iter_params:
+        n_subj, n_obs, n_levels, picks, correction = params
+        data = np.random.random([n_subj, n_levels, n_obs])
+        effects = {
+            4: [2, 2],
+            6: [2, 3],
+            8: [2, 4]
+        }
+        fvals, pvals = f_twoway_rm(data, effects[n_levels], picks,
+                                      correction=correction)
+        assert_true((fvals >= 0).all())
+        if pvals.any():
+            assert_true(((0 <= pvals) & (1 >= pvals)).all())
+        n_effects = len(defaults_twoway_rm['parse'][picks])
+        assert_true(fvals.size == n_obs * n_effects)
+        if n_effects == 1:  # test for principle of least surprise ...
+            assert_true(fvals.ndim == 1)
+
+        fvals_ = f_threshold_twoway_rm(n_subj, effects[n_levels], picks)
+        assert_true((fvals_ >= 0).all())
+        assert_true(fvals_.size == n_effects)
+
+    data = np.random.random([n_subj, n_levels, 1])
+    assert_raises(ValueError, f_twoway_rm, data, effects[n_levels],
+                  effects='C', correction=correction)
+    data = np.random.random([n_subj, n_levels, n_obs, 3])
+    # check for dimension handling
+    f_twoway_rm(data, effects[n_levels], picks, correction=correction)
+
+    # now check against external software results
+    fvals, pvals = f_twoway_rm(test_data, [2, 3])
+
+    assert_array_almost_equal(fvals,
+        test_external['spss_fvals'], 3)
+    assert_array_almost_equal(pvals,
+        test_external['spss_pvals_uncorrected'], 3)
+    assert_array_almost_equal(fvals,
+        test_external['r_fvals'], 4)
+    assert_array_almost_equal(pvals,
+        test_external['r_pvals_uncorrected'], 3)
+
+    _, pvals = f_twoway_rm(test_data, [2, 3], correction=True)
+    assert_array_almost_equal(pvals, test_external['spss_pvals_corrected'], 3)
diff --git a/mne/stats/tests/test_permutations.py b/mne/stats/tests/test_permutations.py
new file mode 100644
index 0000000..8ac0bac
--- /dev/null
+++ b/mne/stats/tests/test_permutations.py
@@ -0,0 +1,33 @@
+import numpy as np
+from numpy.testing import assert_array_equal, assert_almost_equal
+from scipy import stats
+
+from mne.stats.permutations import permutation_t_test
+
+
+def test_permutation_t_test():
+    """Test T-test based on permutations
+    """
+    # 1 sample t-test
+    np.random.seed(10)
+    n_samples, n_tests = 30, 5
+    X = np.random.randn(n_samples, n_tests)
+    X[:, :2] += 1
+
+    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999, tail=0)
+    is_significant = p_values < 0.05
+    assert_array_equal(is_significant, [True, True, False, False, False])
+
+    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999, tail=1)
+    is_significant = p_values < 0.05
+    assert_array_equal(is_significant, [True, True, False, False, False])
+
+    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999, tail=-1)
+    is_significant = p_values < 0.05
+    assert_array_equal(is_significant, [False, False, False, False, False])
+
+    X = np.random.randn(18, 1)
+    T_obs, p_values, H0 = permutation_t_test(X[:, [0]], n_permutations='all')
+    T_obs_scipy, p_values_scipy = stats.ttest_1samp(X[:, 0], 0)
+    assert_almost_equal(T_obs[0], T_obs_scipy, 8)
+    assert_almost_equal(p_values[0], p_values_scipy, 2)
diff --git a/mne/surface.py b/mne/surface.py
new file mode 100644
index 0000000..eeacd37
--- /dev/null
+++ b/mne/surface.py
@@ -0,0 +1,394 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from struct import pack
+
+import logging
+logger = logging.getLogger('mne')
+
+from .fiff.constants import FIFF
+from .fiff.open import fiff_open
+from .fiff.tree import dir_tree_find
+from .fiff.tag import find_tag
+from .fiff.write import write_int, write_float, write_float_matrix, \
+                        write_int_matrix, start_file, end_block, \
+                        start_block, end_file
+from . import verbose
+
+#
+#   These fiff definitions are not needed elsewhere
+#
+FIFFB_BEM = 310  # BEM data
+FIFFB_BEM_SURF = 311  # One of the surfaces
+FIFF_BEM_SURF_ID = 3101  # int    surface number
+FIFF_BEM_SURF_NAME = 3102  # string surface name
+FIFF_BEM_SURF_NNODE = 3103  # int    number of nodes on a surface
+FIFF_BEM_SURF_NTRI = 3104  # int     number of triangles on a surface
+FIFF_BEM_SURF_NODES = 3105  # float  surface nodes (nnode,3)
+FIFF_BEM_SURF_TRIANGLES = 3106  # int    surface triangles (ntri,3)
+FIFF_BEM_SURF_NORMALS = 3107  # float  surface node normal unit vectors
+FIFF_BEM_COORD_FRAME = 3112  # The coordinate frame of the mode
+FIFF_BEM_SIGMA = 3113  # Conductivity of a compartment
+
+
+@verbose
+def read_bem_surfaces(fname, add_geom=False, s_id=None, verbose=None):
+    """Read the BEM surfaces from a FIF file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the file containing the surfaces.
+    add_geom : bool, optional (default False)
+        If True add geometry information to the surfaces.
+    s_id : int | None
+        If int, only read and return the surface with the given s_id.
+        An error will be raised if it doesn't exist. If None, all
+        surfaces are read and returned.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surf : list | dict
+        A list of dictionaries that each contain a surface. If s_id
+        is not None, only the requested surface will be returned.
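+
+    Examples
+    --------
+    A minimal sketch; the file name is hypothetical:
+
+    >>> surfaces = read_bem_surfaces('sample-bem.fif',
+    ...                              add_geom=True)  # doctest: +SKIP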
+    """
+    #
+    #   Default coordinate frame
+    #
+    coord_frame = FIFF.FIFFV_COORD_MRI
+    #
+    #   Open the file, create directory
+    #
+    fid, tree, _ = fiff_open(fname)
+    #
+    #   Find BEM
+    #
+    bem = dir_tree_find(tree, FIFFB_BEM)
+    if bem is None:
+        fid.close()
+        raise ValueError('BEM data not found')
+
+    bem = bem[0]
+    #
+    #   Locate all surfaces
+    #
+    bemsurf = dir_tree_find(bem, FIFFB_BEM_SURF)
+    if bemsurf is None:
+        fid.close()
+        raise ValueError('BEM surface data not found')
+
+    logger.info('    %d BEM surfaces found' % len(bemsurf))
+    #
+    #   Coordinate frame possibly at the top level
+    #
+    tag = find_tag(fid, bem, FIFF_BEM_COORD_FRAME)
+    if tag is not None:
+        coord_frame = tag.data
+    #
+    #   Read all surfaces
+    #
+    if s_id is not None:
+        surfs = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
+                 for bsurf in bemsurf]
+        surfs = [s for s in surfs if s is not None]
+        if not len(surfs) == 1:
+            raise ValueError('surface with id %d not found' % s_id)
+        fid.close()
+        return surfs[0]
+
+    surf = []
+    for bsurf in bemsurf:
+        logger.info('    Reading a surface...')
+        this = _read_bem_surface(fid, bsurf, coord_frame)
+        logger.info('[done]')
+        if add_geom:
+            _complete_surface_info(this)
+        surf.append(this)
+
+    logger.info('    %d BEM surfaces read' % len(surf))
+
+    fid.close()
+
+    return surf
+
+
+def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
+    """Read one bem surface
+    """
+    res = dict()
+    #
+    #   Read all the interesting stuff
+    #
+    tag = find_tag(fid, this, FIFF_BEM_SURF_ID)
+
+    if tag is None:
+        res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
+    else:
+        res['id'] = int(tag.data)
+
+    if s_id is not None:
+        if res['id'] != s_id:
+            return None
+
+    tag = find_tag(fid, this, FIFF_BEM_SIGMA)
+    if tag is None:
+        res['sigma'] = 1.0
+    else:
+        res['sigma'] = float(tag.data)
+
+    tag = find_tag(fid, this, FIFF_BEM_SURF_NNODE)
+    if tag is None:
+        fid.close()
+        raise ValueError('Number of vertices not found')
+
+    res['np'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF_BEM_SURF_NTRI)
+    if tag is None:
+        fid.close()
+        raise ValueError('Number of triangles not found')
+    else:
+        res['ntri'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        tag = find_tag(fid, this, FIFF_BEM_COORD_FRAME)
+        if tag is None:
+            res['coord_frame'] = def_coord_frame
+        else:
+            res['coord_frame'] = tag.data
+    else:
+        res['coord_frame'] = tag.data
+    #
+    #   Vertices, normals, and triangles
+    #
+    tag = find_tag(fid, this, FIFF_BEM_SURF_NODES)
+    if tag is None:
+        fid.close()
+        raise ValueError('Vertex data not found')
+
+    res['rr'] = tag.data.astype(np.float)  # XXX : double because of mayavi bug
+    if res['rr'].shape[0] != res['np']:
+        fid.close()
+        raise ValueError('Vertex information is incorrect')
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
+    if tag is None:
+        res['nn'] = []
+    else:
+        res['nn'] = tag.data
+        if res['nn'].shape[0] != res['np']:
+            fid.close()
+            raise ValueError('Vertex normal information is incorrect')
+
+    tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
+    if tag is None:
+        fid.close()
+        raise ValueError('Triangulation not found')
+
+    res['tris'] = tag.data - 1  # indices start at 0 in Python
+    if res['tris'].shape[0] != res['ntri']:
+        fid.close()
+        raise ValueError('Triangulation information is incorrect')
+
+    return res
+
+
+@verbose
+def _complete_surface_info(this, verbose=None):
+    """Complete surface info"""
+    #
+    #   Main triangulation
+    #
+    logger.info('    Completing triangulation info...')
+    logger.info('triangle normals...')
+    this['tri_area'] = np.zeros(this['ntri'])
+    r1 = this['rr'][this['tris'][:, 0], :]
+    r2 = this['rr'][this['tris'][:, 1], :]
+    r3 = this['rr'][this['tris'][:, 2], :]
+    this['tri_cent'] = (r1 + r2 + r3) / 3.0
+    this['tri_nn'] = np.cross((r2 - r1), (r3 - r1))
+    #
+    #   Triangle normals and areas
+    #
+    size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
+    this['tri_area'] = size / 2.0
+    this['tri_nn'] /= size[:, None]
+    #
+    #   Accumulate the vertex normals
+    #
+    logger.info('vertex normals...')
+    this['nn'] = np.zeros((this['np'], 3))
+    for p in range(this['ntri']):
+        this['nn'][this['tris'][p, :], :] += this['tri_nn'][p, :]
+    #
+    #   Compute the lengths of the vertex normals and scale
+    #
+    logger.info('normalize...')
+    this['nn'] /= np.sqrt(np.sum(this['nn'] ** 2, axis=1))[:, None]
+
+    logger.info('[done]')
+    return this
+
+
+###############################################################################
+# Handle freesurfer
+
+def _fread3(fobj):
+    """Read a 3-byte integer from an open binary file object."""
+    b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
+    return (b1 << 16) + (b2 << 8) + b3
+
+
+def _fread3_many(fobj, n):
+    """Read 3-byte ints from an open binary file object."""
+    b1, b2, b3 = np.fromfile(fobj, ">u1",
+                             3 * n).reshape(-1, 3).astype(np.int).T
+    return (b1 << 16) + (b2 << 8) + b3
+
+
+def read_curvature(filepath):
+    """Load in curavature values from the ?h.curv file."""
+    with open(filepath, "rb") as fobj:
+        magic = _fread3(fobj)
+        if magic == 16777215:
+            vnum = np.fromfile(fobj, ">i4", 3)[0]
+            curv = np.fromfile(fobj, ">f4", vnum)
+        else:
+            vnum = magic
+            _fread3(fobj)
+            curv = np.fromfile(fobj, ">i2", vnum) / 100
+        bin_curv = 1 - np.array(curv != 0, np.int)
+    return bin_curv
+
+
+def read_surface(fname):
+    """Load a Freesurfer surface mesh in triangular format
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file containing the surface.
+
+    Returns
+    -------
+    coords : array, shape=(n_vertices, 3)
+        Coordinate points.
+    faces : int array, shape=(n_faces, 3)
+        Triangulation (each line contains indexes for three points which
+        together form a face).
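+
+    Examples
+    --------
+    A minimal sketch; the file name is hypothetical:
+
+    >>> coords, faces = read_surface('lh.white')  # doctest: +SKIP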
+    """
+    with open(fname, "rb") as fobj:
+        magic = _fread3(fobj)
+        if (magic == 16777215) or (magic == 16777213):  # Quad file or new quad
+            nvert = _fread3(fobj)
+            nquad = _fread3(fobj)
+            coords = np.fromfile(fobj, ">i2", nvert * 3).astype(np.float)
+            coords = coords.reshape(-1, 3) / 100.0
+            quads = _fread3_many(fobj, nquad * 4)
+            quads = quads.reshape(nquad, 4)
+            #
+            #   Face splitting follows
+            #
+            faces = np.zeros((2 * nquad, 3), dtype=np.int)
+            nface = 0
+            for quad in quads:
+                if (quad[0] % 2) == 0:
+                    faces[nface] = quad[0], quad[1], quad[3]
+                    nface += 1
+                    faces[nface] = quad[2], quad[3], quad[1]
+                    nface += 1
+                else:
+                    faces[nface] = quad[0], quad[1], quad[2]
+                    nface += 1
+                    faces[nface] = quad[0], quad[2], quad[3]
+                    nface += 1
+
+        elif magic == 16777214:  # Triangle file
+            create_stamp = fobj.readline()
+            _ = fobj.readline()
+            vnum = np.fromfile(fobj, ">i4", 1)[0]
+            fnum = np.fromfile(fobj, ">i4", 1)[0]
+            coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
+            faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
+        else:
+            raise ValueError("%s does not appear to be a Freesurfer surface"
+                             % fname)
+
+    coords = coords.astype(np.float)  # XXX: due to mayavi bug on mac 32bits
+    return coords, faces
+
+
+def write_surface(fname, coords, faces, create_stamp=''):
+    """Write a triangular Freesurfer surface mesh
+
+    Accepts the same data format as is returned by read_surface().
+
+    Parameters
+    ----------
+    fname : str
+        File to write.
+    coords : array, shape=(n_vertices, 3)
+        Coordinate points.
+    faces : int array, shape=(n_faces, 3)
+        Triangulation (each line contains indexes for three points which
+        together form a face).
+    create_stamp : str
+        Comment that is written to the beginning of the file. Cannot
+        contain line breaks.
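+
+    Examples
+    --------
+    A round-trip sketch; the file names are hypothetical:
+
+    >>> coords, faces = read_surface('lh.white')  # doctest: +SKIP
+    >>> write_surface('lh-copy.white', coords, faces,
+    ...               create_stamp='copied surface')  # doctest: +SKIP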
+    """
+    if len(create_stamp.splitlines()) > 1:
+        raise ValueError("create_stamp can only contain one line")
+
+    with open(fname, 'w') as fid:
+        fid.write(pack('>3B', 255, 255, 254))
+        fid.writelines(('%s\n' % create_stamp, '\n'))
+        vnum = len(coords)
+        fnum = len(faces)
+        fid.write(pack('>2i', vnum, fnum))
+        fid.write(np.array(coords, dtype='>f4').tostring())
+        fid.write(np.array(faces, dtype='>i4').tostring())
+
+
+###############################################################################
+# Write
+
+def write_bem_surface(fname, surf):
+    """Write one bem surface
+
+    Parameters
+    ----------
+    fname : string
+        File to write
+    surf : dict
+        A surface structured as obtained with read_bem_surfaces
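+
+    Examples
+    --------
+    A minimal sketch; the file names and the surface id are hypothetical:
+
+    >>> surf = read_bem_surfaces('sample-bem.fif', s_id=4)  # doctest: +SKIP
+    >>> write_bem_surface('sample-bem-out.fif', surf)  # doctest: +SKIP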
+    """
+
+    # Create the file and save the essentials
+    fid = start_file(fname)
+
+    start_block(fid, FIFFB_BEM)
+    start_block(fid, FIFFB_BEM_SURF)
+
+    write_int(fid, FIFF_BEM_SURF_ID, surf['id'])
+    write_float(fid, FIFF_BEM_SIGMA, surf['sigma'])
+    write_int(fid, FIFF_BEM_SURF_NNODE, surf['np'])
+    write_int(fid, FIFF_BEM_SURF_NTRI, surf['ntri'])
+    write_int(fid, FIFF_BEM_COORD_FRAME, surf['coord_frame'])
+    write_float_matrix(fid, FIFF_BEM_SURF_NODES, surf['rr'])
+
+    if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
+        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, surf['nn'])
+
+    # indices start at 0 in Python
+    write_int_matrix(fid, FIFF_BEM_SURF_TRIANGLES, surf['tris'] + 1)
+
+    end_block(fid, FIFFB_BEM_SURF)
+    end_block(fid, FIFFB_BEM)
+
+    end_file(fid)
diff --git a/mne/tests/__init__.py b/mne/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py
new file mode 100644
index 0000000..0c3e8a3
--- /dev/null
+++ b/mne/tests/test_cov.py
@@ -0,0 +1,201 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from nose.tools import assert_true
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_raises
+import numpy as np
+from scipy import linalg
+import warnings
+
+from mne.cov import regularize, whiten_evoked
+from mne import read_cov, Epochs, merge_events, \
+               find_events, compute_raw_data_covariance, \
+               compute_covariance
+from mne.fiff import Raw, pick_channels_cov, pick_channels, Evoked, pick_types
+from mne.utils import _TempDir
+
+base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
+cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+ave_fname = op.join(base_dir, 'test-ave.fif')
+erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
+
+raw = Raw(raw_fname, preload=True)
+
+tempdir = _TempDir()
+
+
+def test_io_cov():
+    """Test IO for noise covariance matrices
+    """
+    cov = read_cov(cov_fname)
+    cov.save(op.join(tempdir, 'cov.fif'))
+    cov2 = read_cov(op.join(tempdir, 'cov.fif'))
+    assert_array_almost_equal(cov.data, cov2.data)
+
+    cov2 = read_cov(cov_gz_fname)
+    assert_array_almost_equal(cov.data, cov2.data)
+    cov2.save(op.join(tempdir, 'cov.fif.gz'))
+    cov2 = read_cov(op.join(tempdir, 'cov.fif.gz'))
+    assert_array_almost_equal(cov.data, cov2.data)
+
+    cov['bads'] = ['EEG 039']
+    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
+    assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])))
+    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
+    cov_sel.save(op.join(tempdir, 'cov.fif'))
+
+    cov2 = read_cov(cov_gz_fname)
+    assert_array_almost_equal(cov.data, cov2.data)
+    cov2.save(op.join(tempdir, 'cov.fif.gz'))
+    cov2 = read_cov(op.join(tempdir, 'cov.fif.gz'))
+    assert_array_almost_equal(cov.data, cov2.data)
+
+
+def test_cov_estimation_on_raw_segment():
+    """Test estimation from raw on continuous recordings (typically empty room)
+    """
+    cov = compute_raw_data_covariance(raw)
+    cov_mne = read_cov(erm_cov_fname)
+    assert_true(cov_mne.ch_names == cov.ch_names)
+    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro')
+                / linalg.norm(cov.data, ord='fro') < 1e-4)
+
+    # test IO when computation done in Python
+    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
+    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
+    assert_true(cov_read.ch_names == cov.ch_names)
+    assert_true(cov_read.nfree == cov.nfree)
+    assert_array_almost_equal(cov.data, cov_read.data)
+
+    # test with a subset of channels
+    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
+    cov = compute_raw_data_covariance(raw, picks=picks)
+    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
+    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
+                ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)
+    # make sure we get a warning with too short a segment
+    raw_2 = raw.crop(0, 1)
+    with warnings.catch_warnings(record=True) as w:
+        cov = compute_raw_data_covariance(raw_2)
+        assert_true(len(w) == 1)
+
+
+def test_cov_estimation_with_triggers():
+    """Test estimation from raw with triggers
+    """
+    events = find_events(raw, stim_channel='STI 014')
+    event_ids = [1, 2, 3, 4]
+    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
+
+    # cov with merged events and keep_sample_mean=True
+    events_merged = merge_events(events, event_ids, 1234)
+    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
+                    baseline=(-0.2, -0.1), proj=True,
+                    reject=reject, preload=True)
+
+    cov = compute_covariance(epochs, keep_sample_mean=True)
+    cov_mne = read_cov(cov_km_fname)
+    assert_true(cov_mne.ch_names == cov.ch_names)
+    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
+                 / linalg.norm(cov.data, ord='fro')) < 0.005)
+
+    # Test with tmin and tmax (different but not too much)
+    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
+    assert_true(np.all(cov.data != cov_tmin_tmax.data))
+    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro')
+                 / linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)
+
+    # cov using a list of epochs and keep_sample_mean=True
+    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
+              baseline=(-0.2, -0.1), proj=True, reject=reject)
+              for ev_id in event_ids]
+
+    cov2 = compute_covariance(epochs, keep_sample_mean=True)
+    assert_array_almost_equal(cov.data, cov2.data)
+    assert_true(cov.ch_names == cov2.ch_names)
+
+    # cov with keep_sample_mean=False using a list of epochs
+    cov = compute_covariance(epochs, keep_sample_mean=False)
+    cov_mne = read_cov(cov_fname)
+    assert_true(cov_mne.ch_names == cov.ch_names)
+    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
+                 / linalg.norm(cov.data, ord='fro')) < 0.005)
+
+    # test IO when computation done in Python
+    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
+    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
+    assert_true(cov_read.ch_names == cov.ch_names)
+    assert_true(cov_read.nfree == cov.nfree)
+    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro')
+                 / linalg.norm(cov.data, ord='fro')) < 1e-5)
+
+    # cov with list of epochs with different projectors
+    epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
+              baseline=(-0.2, -0.1), proj=True, reject=reject),
+              Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
+              baseline=(-0.2, -0.1), proj=False, reject=reject)]
+    # these should fail
+    assert_raises(ValueError, compute_covariance, epochs)
+    assert_raises(ValueError, compute_covariance, epochs, projs=None)
+    # these should work, but won't be equal to above
+    with warnings.catch_warnings(True) as w:
+        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
+        cov = compute_covariance(epochs, projs=[])
+        assert_true(len(w) == 1)
+
+    # test new dict support
+    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.2, tmax=0,
+                    baseline=(-0.2, -0.1), proj=True, reject=reject)
+    compute_covariance(epochs)
+
+
+def test_arithmetic_cov():
+    """Test arithmetic with noise covariance matrices
+    """
+    cov = read_cov(cov_fname)
+    cov_sum = cov + cov
+    assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
+    assert_array_almost_equal(2 * cov.data, cov_sum.data)
+    assert_true(cov.ch_names == cov_sum.ch_names)
+
+    cov += cov
+    assert_array_almost_equal(cov_sum.nfree, cov.nfree)
+    assert_array_almost_equal(cov_sum.data, cov.data)
+    assert_true(cov_sum.ch_names == cov.ch_names)
+
+
+def test_regularize_cov():
+    """Test cov regularization
+    """
+    noise_cov = read_cov(cov_fname)
+    # Regularize noise cov
+    reg_noise_cov = regularize(noise_cov, raw.info,
+                               mag=0.1, grad=0.1, eeg=0.1, proj=True)
+    assert_true(noise_cov['dim'] == reg_noise_cov['dim'])
+    assert_true(noise_cov['data'].shape == reg_noise_cov['data'].shape)
+    assert_true(np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08)
+
+
+def test_evoked_whiten():
+    """Test whitening of evoked data"""
+    evoked = Evoked(ave_fname, setno=0, baseline=(None, 0), proj=True)
+    cov = read_cov(cov_fname)
+
+    # Whiten the evoked data and check the baseline amplitude
+    picks = pick_types(evoked.info, meg=True, eeg=True, exclude='bads')
+
+    noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1)
+
+    evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
+    whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
+    mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
+    assert_true(np.all(mean_baseline < 1.))
+    assert_true(np.all(mean_baseline > 0.2))
diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py
new file mode 100644
index 0000000..5df53a1
--- /dev/null
+++ b/mne/tests/test_dipole.py
@@ -0,0 +1,20 @@
+import os.path as op
+from nose.tools import assert_true
+
+from mne import read_dip
+from mne.datasets import sample
+
+data_path = sample.data_path()
+dip_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_set1.dip')
+
+def test_io_dip():
+    """Test IO for .dip files
+    """
+    time, pos, amplitude, ori, gof = read_dip(dip_fname)
+
+    assert_true(pos.shape[1] == 3)
+    assert_true(ori.shape[1] == 3)
+    assert_true(len(time) == len(pos))
+    assert_true(len(time) == gof.size)
+    assert_true(len(time) == amplitude.size)
diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py
new file mode 100644
index 0000000..222831f
--- /dev/null
+++ b/mne/tests/test_epochs.py
@@ -0,0 +1,756 @@
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#         Denis Engemann <d.engemann at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from copy import deepcopy
+
+from nose.tools import assert_true, assert_equal, assert_raises
+from numpy.testing import assert_array_equal, assert_array_almost_equal, \
+                          assert_allclose
+import numpy as np
+import copy as cp
+import warnings
+
+from mne import fiff, Epochs, read_events, pick_events, read_epochs
+from mne.epochs import bootstrap, equalize_epoch_counts, combine_event_ids
+from mne.utils import _TempDir, requires_pandas, requires_nitime
+from mne.fiff import read_evoked
+from mne.fiff.proj import _has_eeg_average_ref_proj
+from mne.event import merge_events
+
+base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+event_id_2 = 2
+raw = fiff.Raw(raw_fname)
+events = read_events(event_name)
+picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=True,
+                        ecg=True, eog=True, include=['STI 014'],
+                        exclude='bads')
+
+reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+flat = dict(grad=1e-15, mag=1e-15)
+
+tempdir = _TempDir()
+
+
+def test_epoch_combine_ids():
+    """Test combining event ids in epochs compared to events
+    """
+    for preload in [False]:
+        epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
+                                      'd': 4, 'e': 5, 'f': 32},
+                        tmin, tmax, picks=picks, preload=preload)
+        events_new = merge_events(events, [1, 2], 12)
+        epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
+        assert_array_equal(events_new, epochs_new.events)
+        # should probably add test + functionality for non-replacement XXX
+
+
+def test_read_epochs_bad_events():
+    """Test epochs when events are at the beginning or the end of the file
+    """
+    # Event at the beginning
+    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
+                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
+    evoked = epochs.average()
+
+    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
+                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
+    epochs.drop_bad_epochs()
+    evoked = epochs.average()
+
+    # Event at the end
+    epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
+                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
+    evoked = epochs.average()
+
+
+def test_read_write_epochs():
+    """Test epochs from raw files with IO as fif file
+    """
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    evoked = epochs.average()
+    data = epochs.get_data()
+
+    epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
+                          None, tmin, tmax, picks=picks,
+                          baseline=(None, 0))
+    assert_array_equal(data, epochs_no_id.get_data())
+
+    eog_picks = fiff.pick_types(raw.info, meg=False, eeg=False, stim=False,
+                                eog=True, exclude='bads')
+    epochs.drop_picks(eog_picks)
+    assert_true(len(epochs.info['chs']) == len(epochs.ch_names)
+                == epochs.get_data().shape[1])
+    data_no_eog = epochs.get_data()
+    assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
+
+    # test decim kwarg
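+    # decim=4 keeps every 4th sample; a warning is expected because
+    # decimating without low-pass filtering first can alias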
+    with warnings.catch_warnings(record=True) as w:
+        epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            baseline=(None, 0), decim=4)
+        assert_equal(len(w), 1)
+
+    data_dec = epochs_dec.get_data()
+    assert_array_equal(data[:, :, epochs_dec._decim_idx], data_dec)
+
+    evoked_dec = epochs_dec.average()
+    assert_array_equal(evoked.data[:, epochs_dec._decim_idx], evoked_dec.data)
+
+    n = evoked.data.shape[1]
+    n_dec = evoked_dec.data.shape[1]
+    n_dec_min = n // 4
+    assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
+    assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
+
+    # test IO
+    epochs.save(op.join(tempdir, 'test-epo.fif'))
+    epochs_read = read_epochs(op.join(tempdir, 'test-epo.fif'))
+
+    assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
+    assert_array_equal(epochs_read.times, epochs.times)
+    assert_array_almost_equal(epochs_read.average().data, evoked.data)
+    assert_equal(epochs_read.proj, epochs.proj)
+    bmin, bmax = epochs.baseline
+    if bmin is None:
+        bmin = epochs.times[0]
+    if bmax is None:
+        bmax = epochs.times[-1]
+    baseline = (bmin, bmax)
+    assert_array_almost_equal(epochs_read.baseline, baseline)
+    assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
+    assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
+    assert_equal(epochs_read.event_id, epochs.event_id)
+
+    epochs.event_id.pop('1')
+    epochs.event_id.update({'a': 1})
+    epochs.save(op.join(tempdir, 'foo-epo.fif'))
+    epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
+    assert_equal(epochs_read2.event_id, epochs.event_id)
+
+    # add reject here so some of the epochs get dropped
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject)
+    epochs.save(op.join(tempdir, 'test-epo.fif'))
+    # ensure bad events are not saved
+    epochs_read3 = read_epochs(op.join(tempdir, 'test-epo.fif'))
+    assert_array_equal(epochs_read3.events, epochs.events)
+    data = epochs.get_data()
+    assert_true(epochs_read3.events.shape[0] == data.shape[0])
+
+    # test copying loaded one (raw property)
+    epochs_read4 = epochs_read3.copy()
+    assert_array_almost_equal(epochs_read4.get_data(), data)
+
+
+def test_epochs_proj():
+    """Test handling projection (apply proj in Raw or in Epochs)
+    """
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+    this_picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True,
+                                 eog=True, exclude=exclude)
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True)
+    assert_true(all(p['active'] == True for p in epochs.info['projs']))
+    evoked = epochs.average()
+    assert_true(all(p['active'] == True for p in evoked.info['projs']))
+    data = epochs.get_data()
+
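+    # applying the projectors while reading the raw data should give the
+    # same result as applying them at the Epochs stage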
+    raw_proj = fiff.Raw(raw_fname, proj=True)
+    epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
+                            picks=this_picks, baseline=(None, 0), proj=False)
+
+    data_no_proj = epochs_no_proj.get_data()
+    assert_true(all(p['active'] == True for p in epochs_no_proj.info['projs']))
+    evoked_no_proj = epochs_no_proj.average()
+    assert_true(all(p['active'] == True for p in evoked_no_proj.info['projs']))
+    assert_true(epochs_no_proj.proj == True)  # as projs are active from Raw
+
+    assert_array_almost_equal(data, data_no_proj, decimal=8)
+
+    # make sure we can exclude avg ref
+    this_picks = fiff.pick_types(raw.info, meg=True, eeg=True, stim=True,
+                                 eog=True, exclude=exclude)
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True, add_eeg_ref=True)
+    assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True, add_eeg_ref=False)
+    assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
+
+
+def test_evoked_arithmetic():
+    """Test arithmetic of evoked data
+    """
+    epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0))
+    evoked1 = epochs1.average()
+    epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0))
+    evoked2 = epochs2.average()
+    epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    evoked = epochs.average()
+    evoked_sum = evoked1 + evoked2
+    assert_array_equal(evoked.data, evoked_sum.data)
+    assert_array_equal(evoked.times, evoked_sum.times)
+    assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
+    evoked_diff = evoked1 - evoked1
+    assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
+
+
+def test_evoked_io_from_epochs():
+    """Test IO of evoked data made from epochs
+    """
+    # offset our tmin so we don't get exactly a zero value when decimating
+    with warnings.catch_warnings(record=True) as w:
+        epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
+                        picks=picks, baseline=(None, 0), decim=5)
+    assert_true(len(w) == 1)
+    evoked = epochs.average()
+    evoked.save(op.join(tempdir, 'evoked.fif'))
+    evoked2 = read_evoked(op.join(tempdir, 'evoked.fif'))
+    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
+    assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
+                    atol=1 / evoked.info['sfreq'])
+
+    # now let's do one with a positive tmin, so there are no negative times
+    with warnings.catch_warnings(record=True) as w:
+        epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
+                        picks=picks, baseline=(0.1, 0.2), decim=5)
+    evoked = epochs.average()
+    evoked.save(op.join(tempdir, 'evoked.fif'))
+    evoked2 = read_evoked(op.join(tempdir, 'evoked.fif'))
+    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
+    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
+
+    # should be equivalent to a cropped original
+    with warnings.catch_warnings(record=True) as w:
+        epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
+                        picks=picks, baseline=(0.1, 0.2), decim=5)
+    evoked = epochs.average()
+    evoked.crop(0.099, None)
+    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
+    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
+
+
+def test_evoked_standard_error():
+    """Test calculation and read/write of standard error
+    """
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    evoked = [epochs.average(), epochs.standard_error()]
+    fiff.write_evoked(op.join(tempdir, 'evoked.fif'), evoked)
+    evoked2 = read_evoked(op.join(tempdir, 'evoked.fif'), [0, 1])
+    evoked3 = [read_evoked(op.join(tempdir, 'evoked.fif'), 'Unknown'),
+               read_evoked(op.join(tempdir, 'evoked.fif'), 'Unknown',
+                           kind='standard_error')]
+    for evoked_new in [evoked2, evoked3]:
+        assert_true(evoked_new[0]._aspect_kind ==
+                    fiff.FIFF.FIFFV_ASPECT_AVERAGE)
+        assert_true(evoked_new[0].kind == 'average')
+        assert_true(evoked_new[1]._aspect_kind ==
+                    fiff.FIFF.FIFFV_ASPECT_STD_ERR)
+        assert_true(evoked_new[1].kind == 'standard_error')
+        for ave, ave2 in zip(evoked, evoked_new):
+            assert_array_almost_equal(ave.data, ave2.data)
+            assert_array_almost_equal(ave.times, ave2.times)
+            assert_equal(ave.nave, ave2.nave)
+            assert_equal(ave._aspect_kind, ave2._aspect_kind)
+            assert_equal(ave.kind, ave2.kind)
+            assert_equal(ave.last, ave2.last)
+            assert_equal(ave.first, ave2.first)
+
+
+def test_reject_epochs():
+    """Test of epochs rejection
+    """
+    epochs = Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
+                    reject=reject, flat=flat)
+    assert_raises(RuntimeError, len, epochs)
+    n_events = len(epochs.events)
+    data = epochs.get_data()
+    n_clean_epochs = len(data)
+    # Should match
+    # mne_process_raw --raw test_raw.fif --projoff \
+    #   --saveavetag -ave --ave test.ave --filteroff
+    assert_true(n_events > n_clean_epochs)
+    assert_true(n_clean_epochs == 3)
+    assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'],
+                                    ['MEG 2443'], ['MEG 2443'], ['MEG 2443']])
+
+    # Ensure epochs are not dropped based on a bad channel
+    raw_2 = raw.copy()
+    raw_2.info['bads'] = ['MEG 2443']
+    reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
+    epochs = Epochs(raw_2, events, event_id, tmin, tmax, baseline=(None, 0),
+                    reject=reject_crazy, flat=flat)
+    epochs.drop_bad_epochs()
+    assert_true(all(['MEG 2442' in e for e in epochs.drop_log]))
+    assert_true(all(['MEG 2443' not in e for e in epochs.drop_log]))
+
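+    # reject_tmin/reject_tmax restrict artifact rejection to a sub-window
+    # of each epoch (here 0 to 100 ms), so fewer epochs get dropped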
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject, flat=flat,
+                    reject_tmin=0., reject_tmax=.1)
+    data = epochs.get_data()
+    n_clean_epochs = len(data)
+    assert_true(n_clean_epochs == 7)
+    assert_true(len(epochs) == 7)
+    assert_true(epochs.times[epochs._reject_time][0] >= 0.)
+    assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
+
+
+def test_preload_epochs():
+    """Test preload of epochs
+    """
+    epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
+                            picks=picks, baseline=(None, 0), preload=True,
+                            reject=reject, flat=flat)
+    data_preload = epochs_preload.get_data()
+
+    epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+    data = epochs.get_data()
+    assert_array_equal(data_preload, data)
+    assert_array_almost_equal(epochs_preload.average().data,
+                              epochs.average().data, 18)
+
+
+def test_indexing_slicing():
+    """Test of indexing and slicing operations
+    """
+    epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+
+    data_normal = epochs.get_data()
+
+    n_good_events = data_normal.shape[0]
+
+    # indices for slicing
+    start_index = 1
+    end_index = n_good_events - 1
+
+    assert((end_index - start_index) > 0)
+
+    for preload in [True, False]:
+        epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
+                         picks=picks, baseline=(None, 0), preload=preload,
+                         reject=reject, flat=flat)
+
+        if not preload:
+            epochs2.drop_bad_epochs()
+
+        # using slicing
+        epochs2_sliced = epochs2[start_index:end_index]
+
+        data_epochs2_sliced = epochs2_sliced.get_data()
+        assert_array_equal(data_epochs2_sliced,
+                           data_normal[start_index:end_index])
+
+        # using indexing
+        pos = 0
+        for idx in range(start_index, end_index):
+            data = epochs2_sliced[pos].get_data()
+            assert_array_equal(data[0], data_normal[idx])
+            pos += 1
+
+        # using indexing with an int
+        data = epochs2[data_epochs2_sliced.shape[0]].get_data()
+        assert_array_equal(data, data_normal[[idx]])
+
+        # using indexing with an array
+        idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
+        data = epochs2[idx].get_data()
+        assert_array_equal(data, data_normal[idx])
+
+        # using indexing with a list of indices
+        idx = [0]
+        data = epochs2[idx].get_data()
+        assert_array_equal(data, data_normal[idx])
+        idx = [0, 1]
+        data = epochs2[idx].get_data()
+        assert_array_equal(data, data_normal[idx])
+
+
+def test_comparison_with_c():
+    """Test average obtained in Python against the C code result
+    """
+    c_evoked = fiff.Evoked(evoked_nf_name, setno=0)
+    epochs = Epochs(raw, events, event_id, tmin, tmax,
+                    baseline=None, preload=True,
+                    reject=None, flat=None)
+    evoked = epochs.average()
+    sel = fiff.pick_channels(c_evoked.ch_names, evoked.ch_names)
+    evoked_data = evoked.data
+    c_evoked_data = c_evoked.data[sel]
+
+    assert_true(evoked.nave == c_evoked.nave)
+    assert_array_almost_equal(evoked_data, c_evoked_data, 10)
+    assert_array_almost_equal(evoked.times, c_evoked.times, 12)
+
+
+def test_crop():
+    """Test of crop of epochs
+    """
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+    data_normal = epochs.get_data()
+
+    epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
+                     picks=picks, baseline=(None, 0), preload=True,
+                     reject=reject, flat=flat)
+
+    # indices for slicing
+    tmin_window = tmin + 0.1
+    tmax_window = tmax - 0.1
+    tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
+    assert_true(tmin_window > tmin)
+    assert_true(tmax_window < tmax)
+    epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
+    data3 = epochs3.get_data()
+    epochs2.crop(tmin_window, tmax_window)
+    data2 = epochs2.get_data()
+    assert_array_equal(data2, data_normal[:, :, tmask])
+    assert_array_equal(data3, data_normal[:, :, tmask])
+
+
+def test_resample():
+    """Test of resample of epochs
+    """
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True,
+                    reject=reject, flat=flat)
+    data_normal = cp.deepcopy(epochs.get_data())
+    times_normal = cp.deepcopy(epochs.times)
+    sfreq_normal = epochs.info['sfreq']
+    # upsample by 2
+    epochs.resample(sfreq_normal * 2, npad=0)
+    data_up = cp.deepcopy(epochs.get_data())
+    times_up = cp.deepcopy(epochs.times)
+    sfreq_up = epochs.info['sfreq']
+    # downsample by 2, which should match the original
+    epochs.resample(sfreq_normal, npad=0)
+    data_new = cp.deepcopy(epochs.get_data())
+    times_new = cp.deepcopy(epochs.times)
+    sfreq_new = epochs.info['sfreq']
+    assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
+    assert_true(sfreq_up == 2 * sfreq_normal)
+    assert_true(sfreq_new == sfreq_normal)
+    assert_true(len(times_up) == 2 * len(times_normal))
+    assert_array_almost_equal(times_new, times_normal, 10)
+    assert_array_almost_equal(data_new, data_normal, 5)
+
+    # use parallel
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True,
+                    reject=reject, flat=flat)
+    epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
+    assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
+
+
+def test_detrend():
+    """Test detrending of epochs
+    """
+    # test first-order
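+    # detrending is linear, so detrending each epoch before averaging
+    # should match detrending the average itself (up to roundoff)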
+    epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                      baseline=None, detrend=1)
+    epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                      baseline=None, detrend=None)
+    data_picks = fiff.pick_types(epochs_1.info, meg=True, eeg=True,
+                                 exclude='bads')
+    evoked_1 = epochs_1.average()
+    evoked_2 = epochs_2.average()
+    evoked_2.detrend(1)
+    # Due to roundoff these won't be exactly equal, but they should be close
+    assert_true(np.allclose(evoked_1.data, evoked_2.data,
+                            rtol=1e-8, atol=1e-20))
+
+    # test zeroth-order case
+    for preload in [True, False]:
+        epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                          baseline=(None, None), preload=preload)
+        epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                          baseline=None, preload=preload, detrend=0)
+        a = epochs_1.get_data()
+        b = epochs_2.get_data()
+        # All data channels should be almost equal
+        assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
+                                rtol=1e-16, atol=1e-20))
+        # There are non-M/EEG channels that should not be equal:
+        assert_true(not np.allclose(a, b))
+
+
+def test_bootstrap():
+    """Test of bootstrapping of epochs
+    """
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True,
+                    reject=reject, flat=flat)
+    epochs2 = bootstrap(epochs, random_state=0)
+    assert_true(len(epochs2.events) == len(epochs.events))
+    assert_true(epochs._data.shape == epochs2._data.shape)
+
+
+def test_epochs_copy():
+    """Test copy epochs
+    """
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True,
+                    reject=reject, flat=flat)
+    copied = epochs.copy()
+    assert_array_equal(epochs._data, copied._data)
+
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+    copied = epochs.copy()
+    data = epochs.get_data()
+    copied_data = copied.get_data()
+    assert_array_equal(data, copied_data)
+
+
+@requires_nitime
+def test_epochs_to_nitime():
+    """Test test_to_nitime
+    """
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True,
+                    reject=reject, flat=flat)
+
+    picks2 = [0, 3]
+
+    epochs_ts = epochs.to_nitime(picks=None, epochs_idx=[0],
+                                 collapse=True, copy=True)
+    assert_true(epochs_ts.ch_names == epochs.ch_names)
+
+    epochs_ts = epochs.to_nitime(picks=picks2, epochs_idx=None,
+                                 collapse=True, copy=True)
+    assert_true(epochs_ts.ch_names == [epochs.ch_names[k] for k in picks2])
+
+    epochs_ts = epochs.to_nitime(picks=None, epochs_idx=[0],
+                                 collapse=False, copy=False)
+    assert_true(epochs_ts.ch_names == epochs.ch_names)
+
+    epochs_ts = epochs.to_nitime(picks=picks2, epochs_idx=None,
+                                 collapse=False, copy=False)
+    assert_true(epochs_ts.ch_names == [epochs.ch_names[k] for k in picks2])
+
+
+def test_epoch_eq():
+    """Test epoch count equalization and condition combining
+    """
+    # equalizing epochs objects
+    epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
+    epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
+    epochs_1.drop_bad_epochs()  # make sure drops are logged
+    assert_true(len([l for l in epochs_1.drop_log if not l]) ==
+                len(epochs_1.events))
+    drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
+    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
+                 epochs_1.drop_log]
+    assert_true(drop_log1 == drop_log2)
+    assert_true(len([l for l in epochs_1.drop_log if not l]) ==
+                len(epochs_1.events))
+    assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
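+    # 'mintime' picks the epochs to drop so that the remaining events are
+    # as close in time as possible across conditions; 'truncate' simply
+    # truncates the longer set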
+    equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
+    assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
+    epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
+    epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
+    equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
+    assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
+    assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
+
+    # equalizing conditions
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
+                    tmin, tmax, picks=picks, reject=reject)
+    epochs.drop_bad_epochs()  # make sure drops are logged
+    assert_true(len([l for l in epochs.drop_log if not l]) ==
+                len(epochs.events))
+    drop_log1 = [l for l in epochs.drop_log]  # now copy the log
+    old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    epochs.equalize_event_counts(['a', 'b'], copy=False)
+    # undo the eq logging
+    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
+                 epochs.drop_log]
+    assert_true(drop_log1 == drop_log2)
+    assert_true(len([l for l in epochs.drop_log if not l]) ==
+                len(epochs.events))
+    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    assert_true(new_shapes[0] == new_shapes[1])
+    assert_true(new_shapes[2] == old_shapes[2])
+    assert_true(new_shapes[3] == old_shapes[3])
+    # now with two conditions collapsed
+    old_shapes = new_shapes
+    epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
+    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
+    assert_true(new_shapes[3] == old_shapes[3])
+    assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
+
+    # now let's combine conditions
+    old_shapes = new_shapes
+    epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
+    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
+    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
+    assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
+                  {'ab': 1})
+
+    combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
+    caught = 0
+    for key in ['a', 'b']:
+        try:
+            epochs[key]
+        except KeyError:
+            caught += 1
+    assert_true(caught == 2)
+    assert_true(not np.any(epochs.events[:, 2] == 1))
+    assert_true(not np.any(epochs.events[:, 2] == 2))
+    epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
+    assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
+                                     epochs.events[:, 2] == 34)))
+    assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
+    assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
+
+
+def test_access_by_name():
+    """Test accessing epochs by event name
+    """
+    assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
+                  tmax, picks=picks)
+    assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
+                  tmin, tmax, picks=picks)
+    assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
+                  picks=picks)
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
+    assert_raises(KeyError, epochs.__getitem__, 'bar')
+
+    data = epochs['a'].get_data()
+    event_a = events[events[:, 2] == 1]
+    assert_true(len(data) == len(event_a))
+
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
+                    preload=True)
+    assert_raises(KeyError, epochs.__getitem__, 'bar')
+    epochs.save(op.join(tempdir, 'test-epo.fif'))
+    epochs2 = read_epochs(op.join(tempdir, 'test-epo.fif'))
+
+    for ep in [epochs, epochs2]:
+        data = ep['a'].get_data()
+        event_a = events[events[:, 2] == 1]
+        assert_true(len(data) == len(event_a))
+
+    assert_array_equal(epochs2['a'].events, epochs['a'].events)
+
+    epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
+                     tmin, tmax, picks=picks, preload=True)
+    epochs4 = epochs['a']
+    epochs5 = epochs3['a']
+    assert_array_equal(epochs4.events, epochs5.events)
+    # 20 is our tolerance because epochs are written out as floats
+    assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
+    epochs6 = epochs3[['a', 'b']]
+    assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
+                                  epochs6.events[:, 2] == 2)))
+    assert_array_equal(epochs.events, epochs6.events)
+    assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
+
+
+@requires_pandas
+def test_as_data_frame():
+    """Test epochs Pandas exporter"""
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
+    assert_raises(ValueError, epochs.as_data_frame, index=['foo', 'bar'])
+    assert_raises(ValueError, epochs.as_data_frame, index='qux')
+    assert_raises(ValueError, epochs.as_data_frame, np.arange(400))
+    df = epochs.as_data_frame()
+    data = np.hstack(epochs.get_data())
+    assert_true((df.columns == epochs.ch_names).all())
+    assert_array_equal(df.values[:, 0], data[0] * 1e13)
+    assert_array_equal(df.values[:, 2], data[2] * 1e15)
+    for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
+        df = epochs.as_data_frame(index=ind)
+        assert_true(df.index.names ==
+                    (ind if isinstance(ind, list) else [ind]))
+        # test that non-indexed data are still present as categorical columns
+        assert_true(all(c in df.reset_index().columns
+                        for c in ['condition', 'epoch', 'time']))
+
+
+def test_epochs_proj_mixin():
+    """Test SSP proj methods from ProjMixin class
+    """
+    for proj in [True, False]:
+        epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), proj=proj)
+
+        assert_true(all(p['active'] == proj for p in epochs.info['projs']))
+
+        # test adding / deleting proj
+        if proj:
+            epochs.get_data()
+            assert_true(all(p['active'] == proj for p in epochs.info['projs']))
+            assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
+                          {'remove_existing': True})
+            assert_raises(ValueError, epochs.add_proj, 'spam')
+            assert_raises(ValueError, epochs.del_proj, 0)
+        else:
+            projs = deepcopy(epochs.info['projs'])
+            n_proj = len(epochs.info['projs'])
+            epochs.del_proj(0)
+            assert_true(len(epochs.info['projs']) == n_proj - 1)
+            epochs.add_proj(projs, remove_existing=False)
+            assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
+            epochs.add_proj(projs, remove_existing=True)
+            assert_true(len(epochs.info['projs']) == n_proj)
+
+    # catch no-gos.
+    # wrong proj argument
+    assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
+                  picks=picks, baseline=(None, 0), proj='crazy')
+    # delayed without reject params
+    assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
+                  picks=picks, baseline=(None, 0), proj='delayed', reject=None)
+
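+    # with proj='delayed' the data are stored unprojected; after an
+    # explicit apply_proj() the result should match proj=True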
+    for preload in [True, False]:
+        epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), proj='delayed', preload=preload,
+                        add_eeg_ref=True, verbose=True, reject=reject)
+        epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                         baseline=(None, 0), proj=True, preload=preload,
+                         add_eeg_ref=True, reject=reject)
+        assert_allclose(epochs.copy().apply_proj().get_data()[0],
+                        epochs2.get_data()[0])
+
+        # make sure data output is constant across repeated calls
+        # e.g. drop bads
+        assert_array_equal(epochs.get_data(), epochs.get_data())
+        assert_array_equal(epochs2.get_data(), epochs2.get_data())
+
+    # test epochs.next calls
+    data = epochs.get_data().copy()
+    data2 = np.array([e for e in epochs])
+    assert_array_equal(data, data2)
+
+    # cross application from processing stream 1 to 2
+    epochs.apply_proj()
+    assert_array_equal(epochs._projector, epochs2._projector)
+    assert_allclose(epochs._data, epochs2.get_data())
+
+    # test mixin against manual application
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                    baseline=None, proj=False, add_eeg_ref=True)
+    data = epochs.get_data().copy()
+    epochs.apply_proj()
+    assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py
new file mode 100644
index 0000000..59396f7
--- /dev/null
+++ b/mne/tests/test_event.py
@@ -0,0 +1,234 @@
+import os.path as op
+import os
+
+from nose.tools import assert_true
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+
+from mne import (read_events, write_events, make_fixed_length_events,
+                 find_events, find_stim_steps, fiff)
+from mne.utils import _TempDir
+from mne.event import define_target_events, merge_events
+
+base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+fname = op.join(base_dir, 'test-eve.fif')
+fname_gz = op.join(base_dir, 'test-eve.fif.gz')
+fname_1 = op.join(base_dir, 'test-eve-1.fif')
+fname_txt = op.join(base_dir, 'test-eve.eve')
+fname_txt_1 = op.join(base_dir, 'test-eve-1.eve')
+
+# using mne_process_raw --raw test_raw.fif --eventsout test-mpr-eve.eve:
+fname_txt_mpr = op.join(base_dir, 'test-mpr-eve.eve')
+fname_old_txt = op.join(base_dir, 'test-eve-old-style.eve')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+tempdir = _TempDir()
+
+
+def test_merge_events():
+    """Test event merging
+    """
+    events = read_events(fname)  # Use as the gold standard
+    merges = [1, 2, 3, 4]
+    events_out = merge_events(events, merges, 1234)
+    events_out2 = events.copy()
+    for m in merges:
+        assert_true(not np.any(events_out[:, 2] == m))
+        events_out2[events[:, 2] == m, 2] = 1234
+    assert_array_equal(events_out, events_out2)
+    # test non-replacement functionality, should be sorted union of orig & new
+    events_out2 = merge_events(events, merges, 1234, False)
+    events_out = np.concatenate((events_out, events))
+    events_out = events_out[np.argsort(events_out[:, 0])]
+    assert_array_equal(events_out, events_out2)
+
+
+def test_io_events():
+    """Test IO for events
+    """
+    # Test binary fif IO
+    events = read_events(fname)  # Use as the gold standard
+    write_events(op.join(tempdir, 'events.fif'), events)
+    events2 = read_events(op.join(tempdir, 'events.fif'))
+    assert_array_almost_equal(events, events2)
+
+    # Test binary fif.gz IO
+    events2 = read_events(fname_gz)  # Use as the gold standard
+    assert_array_almost_equal(events, events2)
+    write_events(op.join(tempdir, 'events.fif.gz'), events2)
+    events2 = read_events(op.join(tempdir, 'events.fif.gz'))
+    assert_array_almost_equal(events, events2)
+
+    # Test new format text file IO
+    write_events(op.join(tempdir, 'events.eve'), events)
+    events2 = read_events(op.join(tempdir, 'events.eve'))
+    assert_array_almost_equal(events, events2)
+    events2 = read_events(fname_txt_mpr)
+    assert_array_almost_equal(events, events2)
+
+    # Test old format text file IO
+    events2 = read_events(fname_old_txt)
+    assert_array_almost_equal(events, events2)
+    write_events(op.join(tempdir, 'events.eve'), events)
+    events2 = read_events(op.join(tempdir, 'events.eve'))
+    assert_array_almost_equal(events, events2)
+
+    # Test event selection
+    a = read_events(op.join(tempdir, 'events.fif'), include=1)
+    b = read_events(op.join(tempdir, 'events.fif'), include=[1])
+    c = read_events(op.join(tempdir, 'events.fif'), exclude=[2, 3, 4, 5, 32])
+    d = read_events(op.join(tempdir, 'events.fif'), include=1, exclude=[2, 3])
+    assert_array_equal(a, b)
+    assert_array_equal(a, c)
+    assert_array_equal(a, d)
+
+    # Test binary file IO for 1 event
+    events = read_events(fname_1)  # Use as the new gold standard
+    write_events(op.join(tempdir, 'events.fif'), events)
+    events2 = read_events(op.join(tempdir, 'events.fif'))
+    assert_array_almost_equal(events, events2)
+
+    # Test text file IO for 1 event
+    write_events(op.join(tempdir, 'events.eve'), events)
+    events2 = read_events(op.join(tempdir, 'events.eve'))
+    assert_array_almost_equal(events, events2)
+
+
+def test_find_events():
+    """Test find events in raw file
+    """
+    events = read_events(fname)
+    raw = fiff.Raw(raw_fname, preload=True)
+    # let's test the defaulting behavior while we're at it
+    extra_ends = ['', '_1']
+    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
+    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
+    if 'MNE_STIM_CHANNEL_1' in os.environ:
+        del os.environ['MNE_STIM_CHANNEL_1']
+    events2 = find_events(raw)
+    assert_array_almost_equal(events, events2)
+
+    # Reset some data for ease of comparison
+    raw.first_samp = 0
+    raw.info['sfreq'] = 1000
+    stim_channel = fiff.pick_channels(raw.info['ch_names'], include='STI 014')
+
+    # test empty events channel
+    raw._data[stim_channel, :] = 0
+    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
+
+    raw._data[stim_channel, :4] = 1
+    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
+
+    raw._data[stim_channel, -1:] = 9
+    assert_array_equal(find_events(raw), [[14399, 0, 9]])
+
+    # Test that we can handle consecutive events with no gap
+    raw._data[stim_channel, 10:20] = 5
+    raw._data[stim_channel, 20:30] = 6
+    raw._data[stim_channel, 30:32] = 5
+    raw._data[stim_channel, 40] = 6
+
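+    # each event row is (sample index, value before transition, new value)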
+    assert_array_equal(find_events(raw, consecutive=False),
+                       [[10, 0, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, consecutive=True),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [30, 6, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=False),
+                       [[31, 0, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=True),
+                       [[19, 6, 5],
+                        [29, 5, 6],
+                        [31, 0, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, output='step', consecutive=True),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [30, 6, 5],
+                        [32, 5, 0],
+                        [40, 0, 6],
+                        [41, 6, 0],
+                        [14399, 0, 9],
+                        [14400, 9, 0]])
+    assert_array_equal(find_events(raw, output='offset'),
+                       [[19, 6, 5],
+                        [31, 0, 6],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
+                       [[10, 0, 5]])
+    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [30, 6, 5]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=False,
+                                   min_duration=0.002),
+                       [[31, 0, 5]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=True,
+                                   min_duration=0.002),
+                       [[19, 6, 5],
+                        [29, 5, 6],
+                        [31, 0, 5]])
+    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
+                       [[10, 0, 5],
+                        [20, 5, 6]])
+
+    # test find_stim_steps merge parameter
+    raw._data[stim_channel, :] = 0
+    raw._data[stim_channel, 0] = 1
+    raw._data[stim_channel, 10] = 4
+    raw._data[stim_channel, 11:20] = 5
+    assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0),
+                       [[ 0, 0, 1],
+                        [ 1, 1, 0],
+                        [10, 0, 4],
+                        [11, 4, 5],
+                        [20, 5, 0]])
+    assert_array_equal(find_stim_steps(raw, merge=-1),
+                       [[ 1, 1, 0],
+                        [10, 0, 5],
+                        [20, 5, 0]])
+    assert_array_equal(find_stim_steps(raw, merge=1),
+                       [[ 1, 1, 0],
+                        [11, 0, 5],
+                        [20, 5, 0]])
+
+    # put back the env vars we trampled on
+    for s, o in zip(extra_ends, orig_envs):
+        if o is not None:
+            os.environ['MNE_STIM_CHANNEL%s' % s] = o
+        elif 'MNE_STIM_CHANNEL%s' % s in os.environ:
+            # remove variables that did not exist before the test
+            del os.environ['MNE_STIM_CHANNEL%s' % s]
+
+
+def test_make_fixed_length_events():
+    """Test making events of a fixed length
+    """
+    raw = fiff.Raw(raw_fname)
+    events = make_fixed_length_events(raw, id=1)
+    assert_true(events.shape[1] == 3)
+
+
+def test_define_events():
+    """Test defining response events
+    """
+    events = read_events(fname)
+    raw = fiff.Raw(raw_fname)
+    events_, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
+                                      .2, 0.7, 42, 99)
+    n_target = events[events[:, 2] == 5].shape[0]
+    n_miss = events_[events_[:, 2] == 99].shape[0]
+    n_target_ = events_[events_[:, 2] == 42].shape[0]
+
+    assert_true(n_target_ == (n_target - n_miss))
diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py
new file mode 100644
index 0000000..cafb690
--- /dev/null
+++ b/mne/tests/test_filter.py
@@ -0,0 +1,201 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_almost_equal
+from nose.tools import assert_true, assert_raises
+import os.path as op
+import warnings
+from scipy.signal import resample as sp_resample
+
+from mne.filter import band_pass_filter, high_pass_filter, low_pass_filter, \
+                       band_stop_filter, resample, construct_iir_filter, \
+                       notch_filter, detrend
+
+from mne import set_log_file
+from mne.utils import _TempDir
+from mne.cuda import requires_cuda
+
+
+tempdir = _TempDir()
+log_file = op.join(tempdir, 'temp_log.txt')
+
+
+def test_notch_filters():
+    """Test notch filters
+    """
+    # let's use an ugly, prime Fs for fun
+    Fs = 487.0
+    sig_len_secs = 20
+    t = np.arange(0, sig_len_secs * Fs) / Fs
+    freqs = np.arange(60, 241, 60)
+
+    # make a "signal"
+    rng = np.random.RandomState(0)
+    a = rng.randn(int(sig_len_secs * Fs))  # int cast needed: Fs is a float
+    orig_power = np.sqrt(np.mean(a ** 2))
+    # make line noise
+    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)
+
+    # only allow None line_freqs with 'spectrum_fit' mode
+    assert_raises(ValueError, notch_filter, a, Fs, None, 'fft')
+    assert_raises(ValueError, notch_filter, a, Fs, None, 'iir')
+    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
+    filter_lengths = [None, None, None, 8192, None]
+    line_freqs = [None, freqs, freqs, freqs, freqs]
+    tols = [2, 1, 1, 1, 1]
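+    # each tol is the number of decimal places to which the RMS of the
+    # filtered signal must match the RMS of the noise-free original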
+    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
+        if lf is None:
+            set_log_file(log_file, overwrite=True)
+
+        b = notch_filter(a, Fs, lf, filter_length=fl, method=meth,
+                         verbose='INFO')
+
+        if lf is None:
+            set_log_file()
+            out = open(log_file).readlines()
+            if len(out) != 2:
+                raise ValueError('Detected frequencies not logged properly')
+            out = np.fromstring(out[1], sep=', ')
+            assert_array_almost_equal(out, freqs)
+        new_power = np.sqrt(np.mean(b ** 2))
+        assert_almost_equal(new_power, orig_power, tol)
+
+
+def test_filters():
+    """Test low-, band-, high-pass, and band-stop filters plus resampling
+    """
+    Fs = 500
+    sig_len_secs = 30
+
+    a = np.random.randn(2, sig_len_secs * Fs)
+
+    # let's test our catchers
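+    # filter_length must be an int or a time string with units, e.g.
+    # '10s' or '5000ms', so all of these should raise ValueError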
+    for fl in ['blah', [0, 1], 1000.5, '10ss', '10']:
+        assert_raises(ValueError, band_pass_filter, a, Fs, 4, 8,
+                      filter_length=fl)
+    for nj in ['blah', 0.5, 0]:
+        assert_raises(ValueError, band_pass_filter, a, Fs, 4, 8, n_jobs=nj)
+    # check our short-filter warning:
+    with warnings.catch_warnings(record=True) as w:
+        # Warning for low attenuation
+        band_pass_filter(a, Fs, 1, 8, filter_length=1024)
+        # Warning for too short a filter
+        band_pass_filter(a, Fs, 1, 8, filter_length='0.5s')
+    assert_true(len(w) >= 2)
+
+    # try new default and old default
+    for fl in ['10s', '5000ms', None]:
+        bp = band_pass_filter(a, Fs, 4, 8, filter_length=fl)
+        bs = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, filter_length=fl)
+        lp = low_pass_filter(a, Fs, 8, filter_length=fl, n_jobs=2)
+        hp = high_pass_filter(lp, Fs, 4, filter_length=fl)
+        assert_array_almost_equal(hp, bp, 2)
+        assert_array_almost_equal(bp + bs, a, 1)
+
+    # Overlap-add filtering with a fixed filter length
+    filter_length = 8192
+    bp_oa = band_pass_filter(a, Fs, 4, 8, filter_length)
+    bs_oa = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, filter_length)
+    lp_oa = low_pass_filter(a, Fs, 8, filter_length)
+    hp_oa = high_pass_filter(lp_oa, Fs, 4, filter_length)
+    assert_array_almost_equal(hp_oa, bp_oa, 2)
+    assert_array_almost_equal(bp_oa + bs_oa, a, 2)
+
+    # The two methods should give the same result
+    # As filtering for short signals uses a circular convolution (FFT) and
+    # the overlap-add filter implements a linear convolution, the signal
+    # boundary will be slightly different and we ignore it
+    n_edge_ignore = 0
+    assert_array_almost_equal(hp[n_edge_ignore:-n_edge_ignore],
+                              hp_oa[n_edge_ignore:-n_edge_ignore], 2)
+
+    # and since these are low-passed, downsampling/upsampling should be close
+    n_resamp_ignore = 10
+    bp_up_dn = resample(resample(bp_oa, 2, 1, n_jobs=2), 1, 2, n_jobs=2)
+    assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
+                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
+    # note that on systems without CUDA, this line serves as a test for a
+    # graceful fallback to n_jobs=1
+    bp_up_dn = resample(resample(bp_oa, 2, 1, n_jobs='cuda'), 1, 2,
+                        n_jobs='cuda')
+    assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
+                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
+    # test to make sure our resampling matches scipy's
+    bp_up_dn = sp_resample(sp_resample(bp_oa, 2 * len(bp_oa), window='boxcar'),
+                           len(bp_oa), window='boxcar')
+    assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
+                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
+
+    # make sure we don't alias
+    t = np.array(range(Fs * sig_len_secs)) / float(Fs)
+    # make sinusoid close to the Nyquist frequency
+    sig = np.sin(2 * np.pi * Fs / 2.2 * t)
+    # signal should disappear with 2x downsampling
+    sig_gone = resample(sig, 1, 2)[n_resamp_ignore:-n_resamp_ignore]
+    assert_array_almost_equal(np.zeros_like(sig_gone), sig_gone, 2)
+
+    # let's construct some filters
+    iir_params = dict(ftype='cheby1', gpass=1, gstop=20)
+    iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
+    # this should be a third order filter
+    assert_true(iir_params['a'].size - 1 == 3)
+    assert_true(iir_params['b'].size - 1 == 3)
+    iir_params = dict(ftype='butter', order=4)
+    iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
+    assert_true(iir_params['a'].size - 1 == 4)
+    assert_true(iir_params['b'].size - 1 == 4)
+
+
+@requires_cuda
+def test_cuda():
+    """Test CUDA-based filtering
+    """
+    # NOTE: don't make test_cuda() the last test, or pycuda might spew
+    # some warnings about clean-up failing
+    Fs = 500
+    sig_len_secs = 20
+    a = np.random.randn(sig_len_secs * Fs)
+
+    set_log_file(log_file, overwrite=True)
+    for fl in ['10s', None, 2048]:
+        bp = band_pass_filter(a, Fs, 4, 8, n_jobs=1, filter_length=fl)
+        bs = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs=1,
+                              filter_length=fl)
+        lp = low_pass_filter(a, Fs, 8, n_jobs=1, filter_length=fl)
+        hp = high_pass_filter(lp, Fs, 4, n_jobs=1, filter_length=fl)
+
+        bp_c = band_pass_filter(a, Fs, 4, 8, n_jobs='cuda', filter_length=fl,
+                                verbose='INFO')
+        bs_c = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
+                                filter_length=fl, verbose='INFO')
+        lp_c = low_pass_filter(a, Fs, 8, n_jobs='cuda', filter_length=fl,
+                               verbose='INFO')
+        hp_c = high_pass_filter(lp, Fs, 4, n_jobs='cuda', filter_length=fl,
+                                verbose='INFO')
+
+        assert_array_almost_equal(bp, bp_c, 12)
+        assert_array_almost_equal(bs, bs_c, 12)
+        assert_array_almost_equal(lp, lp_c, 12)
+        assert_array_almost_equal(hp, hp_c, 12)
+
+    # check to make sure we actually used CUDA
+    set_log_file()
+    out = open(log_file).readlines()
+    assert_true(sum(['Using CUDA for FFT FIR filtering' in o
+                     for o in out]) == 12)
+
+    # check resampling
+    a = np.random.RandomState(0).randn(3, sig_len_secs * Fs)
+    a1 = resample(a, 1, 2, n_jobs=2, npad=0)
+    a2 = resample(a, 1, 2, n_jobs='cuda', npad=0)
+    a3 = resample(a, 2, 1, n_jobs=2, npad=0)
+    a4 = resample(a, 2, 1, n_jobs='cuda', npad=0)
+    assert_array_almost_equal(a3, a4, 14)
+    assert_array_almost_equal(a1, a2, 14)
+
+
+def test_detrend():
+    """Test zeroth and first order detrending
+    """
+    x = np.arange(10)
+    assert_array_almost_equal(detrend(x, 1), np.zeros_like(x))
+    x = np.ones(10)
+    assert_array_almost_equal(detrend(x, 0), np.zeros_like(x))
diff --git a/mne/tests/test_fixes.py b/mne/tests/test_fixes.py
new file mode 100644
index 0000000..8ef53ce
--- /dev/null
+++ b/mne/tests/test_fixes.py
@@ -0,0 +1,73 @@
+# Authors: Emmanuelle Gouillart <emmanuelle.gouillart at normalesup.org>
+#          Gael Varoquaux <gael.varoquaux at normalesup.org>
+#          Alex Gramfort <gramfort at nmr.mgh.harvard.edu>
+# License: BSD
+
+import numpy as np
+
+from nose.tools import assert_equal
+from numpy.testing import assert_array_equal
+from scipy import signal
+
+from ..fixes import _in1d, _tril_indices, _copysign, _unravel_index
+from ..fixes import _firwin2 as mne_firwin2
+from ..fixes import _filtfilt as mne_filtfilt
+
+
+def test_in1d():
+    """Test numpy.in1d() replacement"""
+    a = np.arange(10)
+    b = a[a % 2 == 0]
+    assert_equal(_in1d(a, b).sum(), 5)
+
+
+def test_tril_indices():
+    """Test numpy.tril_indices() replacement"""
+    il1 = _tril_indices(4)
+    il2 = _tril_indices(4, -1)
+
+    a = np.array([[1, 2, 3, 4],
+                  [5, 6, 7, 8],
+                  [9, 10, 11, 12],
+                  [13, 14, 15, 16]])
+
+    assert_array_equal(a[il1],
+                       np.array([1,  5,  6,  9, 10, 11, 13, 14, 15, 16]))
+
+    assert_array_equal(a[il2], np.array([5, 9, 10, 13, 14, 15]))
+
+
+def test_unravel_index():
+    """Test numpy.unravel_index() replacement"""
+    assert_equal(_unravel_index(2, (2, 3)), (0, 2))
+    assert_equal(_unravel_index(2, (2, 2)), (1, 0))
+    assert_equal(_unravel_index(254, (17, 94)), (2, 66))
+    assert_equal(_unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), (2, 1, 4))
+    assert_array_equal(_unravel_index(np.array([22, 41, 37]), (7, 6)),
+                       [[3, 6, 6], [4, 5, 1]])
+    assert_array_equal(_unravel_index(1621, (6, 7, 8, 9)), (3, 1, 4, 1))
+
+
+def test_copysign():
+    """Test numpy.copysign() replacement"""
+    a = np.array([-1, 1, -1])
+    b = np.array([1, -1, 1])
+
+    assert_array_equal(_copysign(a, b), b)
+    assert_array_equal(_copysign(b, a), a)
+
+
+def test_firwin2():
+    """Test firwin2 backport
+    """
+    taps1 = mne_firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+    taps2 = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+    assert_array_equal(taps1, taps2)
+
+
+def test_filtfilt():
+    """Test IIR filtfilt replacement
+    """
+    x = np.r_[1, np.zeros(100)]
+    # Filter with an impulse
+    y = mne_filtfilt([1, 0], [1, 0], x, padlen=0)
+    assert_array_equal(x, y)
diff --git a/mne/tests/test_forward.py b/mne/tests/test_forward.py
new file mode 100644
index 0000000..8dab51a
--- /dev/null
+++ b/mne/tests/test_forward.py
@@ -0,0 +1,329 @@
+import os
+import os.path as op
+from subprocess import CalledProcessError
+import warnings
+
+from nose.tools import assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_equal, \
+                          assert_array_equal, assert_allclose
+
+from mne.datasets import sample
+from mne.fiff import Raw, Evoked, pick_types_forward
+from mne import read_forward_solution, apply_forward, apply_forward_raw, \
+                do_forward_solution, average_forward_solutions, \
+                write_forward_solution
+from mne import SourceEstimate, read_trans
+from mne.label import read_label
+from mne.utils import requires_mne, run_subprocess, _TempDir
+from mne.forward import restrict_forward_to_stc, restrict_forward_to_label
+
+data_path = sample.data_path()
+fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-oct-6-fwd.fif')
+fname_meeg = op.join(data_path, 'MEG', 'sample',
+                     'sample_audvis-meg-eeg-oct-6-fwd.fif')
+
+fname_raw = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data',
+                    'test_raw.fif')
+
+fname_evoked = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data',
+                       'test-ave.fif')
+fname_mri = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
+temp_dir = _TempDir()
+# make a file that exists with some data in it
+existing_file = op.join(temp_dir, 'test.fif')
+with open(existing_file, 'wb') as fid:
+    fid.write(b'aoeu')  # bytes literal, so this also works under Python 3
+
+
+def test_io_forward():
+    """Test IO for forward solutions
+    """
+    # test M/EEG
+    fwd_meeg = read_forward_solution(fname_meeg)
+    leadfield = fwd_meeg['sol']['data']
+    assert_equal(leadfield.shape, (366, 22494))
+    assert_equal(len(fwd_meeg['sol']['row_names']), 366)
+    fname_temp = op.join(temp_dir, 'fwd.fif')
+    write_forward_solution(fname_temp, fwd_meeg, overwrite=True)
+
+    fwd_meeg = read_forward_solution(fname_temp)
+    assert_allclose(leadfield, fwd_meeg['sol']['data'])
+    assert_equal(len(fwd_meeg['sol']['row_names']), 366)
+
+    # now do extensive tests with MEG
+    fwd = read_forward_solution(fname)
+    fwd = read_forward_solution(fname, surf_ori=True)
+    leadfield = fwd['sol']['data']
+    assert_equal(leadfield.shape, (306, 22494))
+    assert_equal(len(fwd['sol']['row_names']), 306)
+    fname_temp = op.join(temp_dir, 'fwd.fif')
+    write_forward_solution(fname_temp, fwd, overwrite=True)
+
+    fwd = read_forward_solution(fname, surf_ori=True)
+    fwd_read = read_forward_solution(fname_temp, surf_ori=True)
+    leadfield = fwd_read['sol']['data']
+    assert_equal(leadfield.shape, (306, 22494))
+    assert_equal(len(fwd_read['sol']['row_names']), 306)
+    assert_equal(len(fwd_read['info']['chs']), 306)
+    assert_true('dev_head_t' in fwd_read['info'])
+    assert_true('mri_head_t' in fwd_read)
+    assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
+
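+    # force_fixed=True keeps only the source-normal component, collapsing
+    # the three free orientations per source into one leadfield column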
+    fwd = read_forward_solution(fname, force_fixed=True)
+    leadfield = fwd['sol']['data']
+    assert_equal(leadfield.shape, (306, 22494 / 3))
+    assert_equal(len(fwd['sol']['row_names']), 306)
+    assert_equal(len(fwd['info']['chs']), 306)
+    assert_true('dev_head_t' in fwd['info'])
+    assert_true('mri_head_t' in fwd)
+
+
+def test_apply_forward():
+    """Test projection of source space data to sensor space
+    """
+    start = 0
+    stop = 5
+    n_times = stop - start - 1
+    sfreq = 10.0
+    t_start = 0.123
+
+    fwd = read_forward_solution(fname, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
+    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
+    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
+
+    gain_sum = np.sum(fwd['sol']['data'], axis=1)
+
+    # Evoked
+    with warnings.catch_warnings(record=True) as w:
+        evoked = Evoked(fname_evoked, setno=0)
+        evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)
+        assert_equal(len(w), 2)
+        data = evoked.data
+        times = evoked.times
+
+        # do some tests
+        assert_array_almost_equal(evoked.info['sfreq'], sfreq)
+        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
+        assert_array_almost_equal(times[0], t_start)
+        assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
+
+        # Raw
+        raw = Raw(fname_raw)
+        raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)
+        data, times = raw_proj[:, :]
+
+        # do some tests
+        assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
+        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
+        assert_array_almost_equal(times[0], t_start)
+        assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
+
+
+def test_restrict_forward_to_stc():
+    """Test restriction of source space to source SourceEstimate
+    """
+    start = 0
+    stop = 5
+    n_times = stop - start - 1
+    sfreq = 10.0
+    t_start = 0.123
+
+    fwd = read_forward_solution(fname, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
+    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
+    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
+
+    fwd_out = restrict_forward_to_stc(fwd, stc)
+
+    assert_equal(fwd_out['sol']['ncol'], 20)
+    assert_equal(fwd_out['src'][0]['nuse'], 15)
+    assert_equal(fwd_out['src'][1]['nuse'], 5)
+    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
+    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
+
+    fwd = read_forward_solution(fname, force_fixed=False)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
+    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
+    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
+
+    fwd_out = restrict_forward_to_stc(fwd, stc)
+
+    assert_equal(fwd_out['sol']['ncol'], 60)
+    assert_equal(fwd_out['src'][0]['nuse'], 15)
+    assert_equal(fwd_out['src'][1]['nuse'], 5)
+    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
+    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
+
+
+def test_restrict_forward_to_label():
+    """Test restriction of source space to label
+    """
+    fwd = read_forward_solution(fname, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
+    labels = ['Aud-lh', 'Vis-rh']
+    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
+    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
+
+    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
+
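+    # expected selection: vertices present in both the source space and the
+    # label, converted to indices into the vertno arrays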
+    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
+    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
+
+    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
+    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)\
+                 + len(fwd['src'][0]['vertno'])
+
+    assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
+    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
+    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
+    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
+    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
+
+    fwd = read_forward_solution(fname, force_fixed=False)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
+    labels = ['Aud-lh', 'Vis-rh']
+    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
+    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
+
+    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
+
+    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
+    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
+
+    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
+    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)\
+                 + len(fwd['src'][0]['vertno'])
+
+    assert_equal(fwd_out['sol']['ncol'],
+                 3 * (len(src_sel_lh) + len(src_sel_rh)))
+    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
+    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
+    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
+    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
+
+
+@requires_mne
+def test_average_forward_solution():
+    """Test averaging forward solutions
+    """
+    fwd = read_forward_solution(fname)
+    # input not a list
+    assert_raises(TypeError, average_forward_solutions, 1)
+    # list is too short
+    assert_raises(ValueError, average_forward_solutions, [])
+    # negative weights
+    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
+    # all zero weights
+    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
+    # weights not same length
+    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
+    # list contains entries that are not forward solutions (dicts)
+    assert_raises(TypeError, average_forward_solutions, [1, fwd])
+
+    # try an easy case
+    fwd_copy = average_forward_solutions([fwd])
+    assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
+
+    # modify a fwd solution, save it, use MNE to average with old one
+    fwd_copy['sol']['data'] *= 0.5
+    fname_copy = op.join(temp_dir, 'fwd.fif')
+    write_forward_solution(fname_copy, fwd_copy, overwrite=True)
+    cmd = ('mne_average_forward_solutions', '--fwd', fname, '--fwd',
+           fname_copy, '--out', fname_copy)
+    run_subprocess(cmd)
+
+    # now average the original with its halved copy: mean of (1, 0.5) = 0.75
+    fwd_ave = average_forward_solutions([fwd, fwd_copy])
+    assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
+    # fwd_ave_mne = read_forward_solution(fname_copy)
+    # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
+
+
+@requires_mne
+def test_do_forward_solution():
+    """Test making forward solution from python
+    """
+    subjects_dir = os.path.join(data_path, 'subjects')
+
+    raw = Raw(fname_raw)
+    mri = read_trans(fname_mri)
+    fname_fake = op.join(temp_dir, 'no_have.fif')
+
+    # ## Error checks
+    # bad subject
+    assert_raises(ValueError, do_forward_solution, 1, fname_raw,
+                  subjects_dir=subjects_dir)
+    # bad meas
+    assert_raises(ValueError, do_forward_solution, 'sample', 1,
+                  subjects_dir=subjects_dir)
+    # meas doesn't exist
+    assert_raises(IOError, do_forward_solution, 'sample', fname_fake,
+                  subjects_dir=subjects_dir)
+    # don't specify trans and meas
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  subjects_dir=subjects_dir)
+    # specify both trans and meas
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  trans='me', mri='you', subjects_dir=subjects_dir)
+    # specify non-existent trans
+    assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
+                  trans=fname_fake, subjects_dir=subjects_dir)
+    # specify non-existent mri
+    assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_fake, subjects_dir=subjects_dir)
+    # specify non-string mri
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=1, subjects_dir=subjects_dir)
+    # specify non-string trans
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  trans=1, subjects_dir=subjects_dir)
+    # test specifying an actual trans in python space -- this should work but
+    # the transform I/O reduces our accuracy -- so we'll just hack a test here
+    # by making it bomb with eeg=False and meg=False
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=mri, eeg=False, meg=False, subjects_dir=subjects_dir)
+    # mindist as non-integer
+    assert_raises(TypeError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_mri, mindist=dict(), subjects_dir=subjects_dir)
+    # mindist as string but not 'all'
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_mri, eeg=False, mindist='yall',
+                  subjects_dir=subjects_dir)
+    # src, spacing, and bem as non-str
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_mri, src=1, subjects_dir=subjects_dir)
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_mri, spacing=1, subjects_dir=subjects_dir)
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_mri, bem=1, subjects_dir=subjects_dir)
+    # no overwrite flag
+    assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
+                  existing_file, mri=fname_mri, subjects_dir=subjects_dir)
+    # let's catch an MNE error, this time about trans being wrong
+    assert_raises(CalledProcessError, do_forward_solution, 'sample', fname_raw,
+                  existing_file, trans=fname_mri, overwrite=True,
+                  spacing='oct-6', subjects_dir=subjects_dir)
+
+    # ## Actually calculate one and check
+    # make a meas from raw (tests all steps in creating evoked);
+    # skip EEG and the 5120-5120-5120 BEM because they are ~3x slower
+    fwd_py = do_forward_solution('sample', raw, mindist=5, spacing='oct-6',
+                                 bem='sample-5120', mri=fname_mri, eeg=False,
+                                 subjects_dir=subjects_dir)
+    fwd = read_forward_solution(fname)
+    assert_allclose(fwd['sol']['data'], fwd_py['sol']['data'],
+                    rtol=1e-5, atol=1e-8)
+    assert_equal(fwd_py['sol']['data'].shape, (306, 22494))
+    assert_equal(len(fwd['sol']['row_names']), 306)
diff --git a/mne/tests/test_label.py b/mne/tests/test_label.py
new file mode 100644
index 0000000..df217e8
--- /dev/null
+++ b/mne/tests/test_label.py
@@ -0,0 +1,287 @@
+import os
+import os.path as op
+import cPickle as pickle
+import glob
+
+import numpy as np
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+from nose.tools import assert_true, assert_raises
+
+from mne.datasets import sample
+from mne import label_time_courses, read_label, stc_to_label, \
+               read_source_estimate, read_source_spaces, grow_labels, \
+               labels_from_parc
+from mne.label import Label
+from mne.utils import requires_mne, run_subprocess, _TempDir
+from mne.fixes import in1d
+
+
+data_path = sample.data_path()
+subjects_dir = op.join(data_path, 'subjects')
+stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
+label = 'Aud-lh'
+label_fname = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
+label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-rh.label')
+src_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-eeg-oct-6p-fwd.fif')
+
+tempdir = _TempDir()
+
+
+def assert_labels_equal(l0, l1, decimal=5):
+    for attr in ['comment', 'hemi', 'subject']:
+        assert_true(getattr(l0, attr) == getattr(l1, attr))
+    for attr in ['vertices', 'pos', 'values']:
+        a0 = getattr(l0, attr)
+        a1 = getattr(l1, attr)
+        assert_array_almost_equal(a0, a1, decimal)
+
+
+def test_label_subject():
+    """Test label subject name extraction
+    """
+    label = read_label(label_fname)
+    assert_true(label.subject is None)
+    assert_true('unknown' in repr(label))
+    label = read_label(label_fname, subject='fsaverage')
+    assert_true(label.subject == 'fsaverage')
+    assert_true('fsaverage' in repr(label))
+
+
+def test_label_addition():
+    """Test label addition
+    """
+    pos = np.random.rand(10, 3)
+    values = np.arange(10.) / 10
+    idx0 = range(7)
+    idx1 = range(7, 10)  # non-overlapping
+    idx2 = range(5, 10)  # overlapping
+    l0 = Label(idx0, pos[idx0], values[idx0], 'lh')
+    l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
+    l2 = Label(idx2, pos[idx2], values[idx2], 'lh')
+
+    assert len(l0) == len(idx0)
+
+    # adding non-overlapping labels
+    l01 = l0 + l1
+    assert len(l01) == len(l0) + len(l1)
+    assert_array_equal(l01.values[:len(l0)], l0.values)
+
+    # adding overlapping labels
+    l = l0 + l2
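+    # the value at the shared vertex 6 should be the sum of both labels'
+    # values; non-overlapping vertices keep their original values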
+    i0 = np.where(l0.vertices == 6)[0][0]
+    i2 = np.where(l2.vertices == 6)[0][0]
+    i = np.where(l.vertices == 6)[0][0]
+    assert l.values[i] == l0.values[i0] + l2.values[i2]
+    assert l.values[0] == l0.values[0]
+    assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
+
+    # adding lh and rh
+    l2.hemi = 'rh'
+    # combining labels across hemispheres relies on deprecated behavior
+    bhl = l0 + l2
+    assert bhl.hemi == 'both'
+    assert len(bhl) == len(l0) + len(l2)
+
+    bhl = l1 + bhl
+    assert_labels_equal(bhl.lh, l01)
+
+
+def test_label_io_and_time_course_estimates():
+    """Test IO for label + stc files
+    """
+    values, times, vertices = label_time_courses(label_fname, stc_fname)
+    assert_true(len(times) == values.shape[1])
+    assert_true(len(vertices) == values.shape[0])
+
+
+def test_label_io():
+    """Test IO of label files
+    """
+    label = read_label(label_fname)
+    label.save(op.join(tempdir, 'foo'))
+    label2 = read_label(op.join(tempdir, 'foo-lh.label'))
+    assert_labels_equal(label, label2)
+
+    # pickling
+    dest = op.join(tempdir, 'foo.pickled')
+    with open(dest, 'w') as fid:
+        pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
+    with open(dest) as fid:
+        label2 = pickle.load(fid)
+    assert_labels_equal(label, label2)
+
+
+def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
+    """Make sure two sets of labels are equal"""
+    for label_a, label_b in zip(labels_a, labels_b):
+        assert_array_equal(label_a.vertices, label_b.vertices)
+        assert_true(label_a.name == label_b.name)
+        assert_true(label_a.hemi == label_b.hemi)
+        if not ignore_pos:
+            assert_array_equal(label_a.pos, label_b.pos)
+
+
+def test_labels_from_parc():
+    """Test reading labels from parcellation
+    """
+    # test some invalid inputs
+    assert_raises(ValueError, labels_from_parc, 'sample', hemi='bla',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, labels_from_parc, 'sample',
+                  annot_fname='bla.annot', subjects_dir=subjects_dir)
+
+    # read labels using hemi specification
+    labels_lh, colors_lh = labels_from_parc('sample', hemi='lh',
+                                            subjects_dir=subjects_dir)
+    for label in labels_lh:
+        assert_true(label.name.endswith('-lh'))
+        assert_true(label.hemi == 'lh')
+
+    assert_true(len(labels_lh) == len(colors_lh))
+
+    # read labels using annot_fname
+    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
+    labels_rh, colors_rh = labels_from_parc('sample', annot_fname=annot_fname,
+                                            subjects_dir=subjects_dir)
+
+    assert_true(len(labels_rh) == len(colors_rh))
+
+    for label in labels_rh:
+        assert_true(label.name.endswith('-rh'))
+        assert_true(label.hemi == 'rh')
+
+    # combine the lh, rh, labels and sort them
+    labels_lhrh = list()
+    labels_lhrh.extend(labels_lh)
+    labels_lhrh.extend(labels_rh)
+
+    names = [label.name for label in labels_lhrh]
+    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
+
+    # read all labels at once
+    labels_both, colors = labels_from_parc('sample', subjects_dir=subjects_dir)
+
+    assert_true(len(labels_both) == len(colors))
+
+    # we have the same result
+    _assert_labels_equal(labels_lhrh, labels_both)
+
+    # aparc has 68 cortical labels
+    assert_true(len(labels_both) == 68)
+
+    # test regexp
+    label = labels_from_parc('sample', parc='aparc.a2009s', regexp='Angu',
+                subjects_dir=subjects_dir)[0][0]
+    assert_true(label.name == 'G_pariet_inf-Angular-lh')
+    label = labels_from_parc('sample', parc='aparc.a2009s',
+                regexp='.*-.{4,}_.{3,3}-L',  # silly, but real regexp
+                subjects_dir=subjects_dir)[0][0]
+    assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
+    assert_raises(RuntimeError, labels_from_parc, 'sample', parc='aparc',
+                annot_fname=annot_fname, regexp='JackTheRipper',
+                subjects_dir=subjects_dir)
+
+
+@requires_mne
+def test_labels_from_parc_annot2labels():
+    """Test reading labels from parc. by comparing with mne_annot2labels
+    """
+
+    def _mne_annot2labels(subject, subjects_dir, parc):
+        """Get labels using mne_annot2lables"""
+        label_dir = _TempDir()
+        cwd = os.getcwd()
+        try:
+            os.chdir(label_dir)
+            env = os.environ.copy()
+            env['SUBJECTS_DIR'] = subjects_dir
+            cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
+            run_subprocess(cmd, env=env)
+            label_fnames = glob.glob(label_dir + '/*.label')
+            label_fnames.sort()
+            labels = [read_label(fname) for fname in label_fnames]
+        finally:
+            # restore cwd before the temp dir reference is dropped
+            os.chdir(cwd)
+            del label_dir
+
+        return labels
+
+    labels, _ = labels_from_parc('sample', subjects_dir=subjects_dir)
+    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')
+
+    # we have the same result, mne does not fill pos, so ignore it
+    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
+
+
+def test_stc_to_label():
+    """Test stc_to_label
+    """
+    src = read_source_spaces(src_fname)
+    stc = read_source_estimate(stc_fname, 'sample')
+    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
+    labels1 = stc_to_label(stc, src='sample', smooth=3)
+    labels2 = stc_to_label(stc, src=src, smooth=3)
+    assert_true(len(labels1) == len(labels2))
+    for l1, l2 in zip(labels1, labels2):
+        assert_labels_equal(l1, l2, decimal=4)
+
+
+def test_morph():
+    """Test inter-subject label morphing
+    """
+    label_orig = read_label(label_fname)
+    label_orig.subject = 'sample'
+    # should work for specifying vertices for both hemis, or just the
+    # hemi of the given label
+    vals = list()
+    for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
+        label = label_orig.copy()
+        # this should throw an error because the label has all zero values
+        assert_raises(ValueError, label.morph, 'sample', 'fsaverage')
+        label.values.fill(1)
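+        # round-trip morph sample -> fsaverage -> sample; the positional
+        # arguments appear to be (subject_from, subject_to, smooth, grade,
+        # subjects_dir, n_jobs)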
+        label.morph(None, 'fsaverage', 5, grade, subjects_dir, 2,
+                    copy=False)
+        label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2,
+                    copy=False)
+        assert_true(np.mean(in1d(label_orig.vertices, label.vertices)) == 1.0)
+        assert_true(len(label.vertices) < 3 * len(label_orig.vertices))
+        vals.append(label.vertices)
+    assert_array_equal(vals[0], vals[1])
+    # make sure label smoothing can run
+    label.morph(label.subject, 'fsaverage', 5,
+                [np.arange(10242), np.arange(10242)], subjects_dir, 2,
+                copy=False)
+    # subject name should be inferred now
+    label.smooth()
+
+
+def test_grow_labels():
+    """Test generation of circular source labels"""
+    seeds = [0, 50000]
+    hemis = [0, 1]
+    labels = grow_labels('sample', seeds, 3, hemis)
+
+    for label, seed, hemi in zip(labels, seeds, hemis):
+        assert(np.any(label.vertices == seed))
+        if hemi == 0:
+            assert(label.hemi == 'lh')
+        else:
+            assert(label.hemi == 'rh')
+
+
+def test_label_time_course():
+    """Test extracting label data from SourceEstimate"""
+    values, times, vertices = label_time_courses(label_fname, stc_fname)
+    stc = read_source_estimate(stc_fname)
+    label_lh = read_label(label_fname)
+    stc_lh = stc.in_label(label_lh)
+    assert_array_almost_equal(stc_lh.data, values)
+    assert_array_almost_equal(stc_lh.times, times)
+    assert_array_almost_equal(stc_lh.vertno[0], vertices)
+
+    label_rh = read_label(label_rh_fname)
+    stc_rh = stc.in_label(label_rh)
+    label_bh = label_rh + label_lh
+    stc_bh = stc.in_label(label_bh)
+    assert_array_equal(stc_bh.data, np.vstack((stc_lh.data, stc_rh.data)))
diff --git a/mne/tests/test_misc.py b/mne/tests/test_misc.py
new file mode 100644
index 0000000..0c1008b
--- /dev/null
+++ b/mne/tests/test_misc.py
@@ -0,0 +1,14 @@
+import os.path as op
+from nose.tools import assert_true
+
+from mne.misc import parse_config
+
+ave_fname = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data',
+                'test.ave')
+
+
+def test_parse_ave():
+    """Test parsing of .ave file
+    """
+    conditions = parse_config(ave_fname)
+    assert_true(len(conditions) == 4)
diff --git a/mne/tests/test_proj.py b/mne/tests/test_proj.py
new file mode 100644
index 0000000..6e7303a
--- /dev/null
+++ b/mne/tests/test_proj.py
@@ -0,0 +1,190 @@
+import os.path as op
+from nose.tools import assert_true
+import warnings
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_allclose
+
+import copy as cp
+
+import mne
+from mne.datasets import sample
+from mne.fiff import Raw, pick_types
+from mne import compute_proj_epochs, compute_proj_evoked, compute_proj_raw
+from mne.fiff.proj import make_projector, activate_proj
+from mne.proj import read_proj, write_proj
+from mne import read_events, Epochs, sensitivity_map, read_source_estimate
+from mne.utils import _TempDir
+
+base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_fname = op.join(base_dir, 'test-eve.fif')
+proj_fname = op.join(base_dir, 'test_proj.fif')
+proj_gz_fname = op.join(base_dir, 'test_proj.fif.gz')
+bads_fname = op.join(base_dir, 'test_bads.txt')
+
+data_path = sample.data_path()
+sample_path = op.join(data_path, 'MEG', 'sample')
+fwd_fname = op.join(sample_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
+sensmap_fname = op.join(sample_path, 'sample_audvis-%s-oct-6-fwd-sensmap-%s.w')
+eog_fname = op.join(sample_path, 'sample_audvis_eog_proj.fif')
+
+tempdir = _TempDir()
+
+
+def test_sensitivity_maps():
+    """Test sensitivity map computation"""
+    fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)
+    proj_eog = read_proj(eog_fname)
+    decim = 6
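+    # decimal precision for the almost-equal comparisons below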
+    for ch_type in ['eeg', 'grad', 'mag']:
+        w = read_source_estimate(sensmap_fname % (ch_type, 'lh')).data
+        stc = sensitivity_map(fwd, projs=None, ch_type=ch_type,
+                              mode='free', exclude='bads')
+        assert_array_almost_equal(stc.data, w, decim)
+        assert_true(stc.subject == 'sample')
+        # let's just make sure the others run
+        if ch_type == 'grad':
+            # fixed (2)
+            w = read_source_estimate(sensmap_fname % (ch_type, '2-lh')).data
+            stc = sensitivity_map(fwd, projs=None, mode='fixed',
+                                  ch_type=ch_type, exclude='bads')
+            assert_array_almost_equal(stc.data, w, decim)
+        if ch_type == 'mag':
+            # ratio (3)
+            w = read_source_estimate(sensmap_fname % (ch_type, '3-lh')).data
+            stc = sensitivity_map(fwd, projs=None, mode='ratio',
+                                  ch_type=ch_type, exclude='bads')
+            assert_array_almost_equal(stc.data, w, decim)
+        if ch_type == 'eeg':
+            # radiality (4), angle (5), remaining (6), and dampening (7)
+            modes = ['radiality', 'angle', 'remaining', 'dampening']
+            ends = ['4-lh', '5-lh', '6-lh', '7-lh']
+            for mode, end in zip(modes, ends):
+                w = read_source_estimate(sensmap_fname % (ch_type, end)).data
+                stc = sensitivity_map(fwd, projs=proj_eog, mode=mode,
+                                      ch_type=ch_type, exclude='bads')
+                assert_array_almost_equal(stc.data, w, decim)
+
+
+def test_compute_proj_epochs():
+    """Test SSP computation on epochs"""
+    event_id, tmin, tmax = 1, -0.2, 0.3
+
+    raw = Raw(raw_fname, preload=True)
+    events = read_events(event_fname)
+    bad_ch = 'MEG 2443'
+    picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
+                       exclude=[])
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=None, proj=False)
+
+    evoked = epochs.average()
+    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
+    write_proj(op.join(tempdir, 'proj.fif.gz'), projs)
+    for p_fname in [proj_fname, proj_gz_fname,
+                    op.join(tempdir, 'proj.fif.gz')]:
+        projs2 = read_proj(p_fname)
+
+        assert_true(len(projs) == len(projs2))
+
+        for p1, p2 in zip(projs, projs2):
+            assert_true(p1['desc'] == p2['desc'])
+            assert_true(p1['data']['col_names'] == p2['data']['col_names'])
+            assert_true(p1['active'] == p2['active'])
+            # compare with sign invariance
+            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
+            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
+            if bad_ch in p1['data']['col_names']:
+                bad = p1['data']['col_names'].index('MEG 2443')
+                mask = np.ones(p1_data.size, dtype=np.bool)
+                mask[bad] = False
+                p1_data = p1_data[:, mask]
+                p2_data = p2_data[:, mask]
+            corr = np.corrcoef(p1_data, p2_data)[0, 1]
+            assert_array_almost_equal(corr, 1.0, 5)
+
+    # test that you can compute the projection matrix
+    projs = activate_proj(projs)
+    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])
+
+    assert_true(nproj == 2)
+    assert_true(U.shape[1] == 2)
+
+    # test that you can save them
+    epochs.info['projs'] += projs
+    evoked = epochs.average()
+    evoked.save(op.join(tempdir, 'foo.fif'))
+
+    projs = read_proj(proj_fname)
+
+    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
+    assert_true(len(projs_evoked) == 2)
+    # XXX : test something
+
+    # test parallelization
+    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2)
+    projs = activate_proj(projs)
+    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
+    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)
+
+
+def test_compute_proj_raw():
+    """Test SSP computation on raw"""
+    # Test that the raw projectors work
+    raw_time = 2.5  # Do shorter amount for speed
+    raw = Raw(raw_fname, preload=True).crop(0, raw_time, False)
+    for ii in (0.25, 0.5, 1, 2):
+        with warnings.catch_warnings(True) as w:
+            projs = compute_proj_raw(raw, duration=ii - 0.1, stop=raw_time,
+                                     n_grad=1, n_mag=1, n_eeg=0)
+            assert_true(len(w) == 1)
+
+        # test that you can compute the projection matrix
+        projs = activate_proj(projs)
+        proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])
+
+        assert_true(nproj == 2)
+        assert_true(U.shape[1] == 2)
+
+        # test that you can save them
+        raw.info['projs'] += projs
+        raw.save(op.join(tempdir, 'foo_%d_raw.fif' % ii), overwrite=True)
+
+    # Test that purely continuous (no duration) raw projection works
+    with warnings.catch_warnings(True) as w:
+        projs = compute_proj_raw(raw, duration=None, stop=raw_time,
+                                 n_grad=1, n_mag=1, n_eeg=0)
+        assert_true(len(w) == 1)
+
+    # test that you can compute the projection matrix
+    projs = activate_proj(projs)
+    proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])
+
+    assert_true(nproj == 2)
+    assert_true(U.shape[1] == 2)
+
+    # test that you can save them
+    raw.info['projs'] += projs
+    raw.save(op.join(tempdir, 'foo_rawproj_continuous_raw.fif'))
+
+    # test resampled-data projector, upsampling instead of downsampling
+    # here to save an extra filtering (raw would have to be LP'ed to be equiv)
+    raw_resamp = cp.deepcopy(raw)
+    raw_resamp.resample(raw.info['sfreq'] * 2, n_jobs=2)
+    with warnings.catch_warnings(True) as w:
+        projs = compute_proj_raw(raw_resamp, duration=None, stop=raw_time,
+                                 n_grad=1, n_mag=1, n_eeg=0)
+    projs = activate_proj(projs)
+    proj_new, _, _ = make_projector(projs, raw.ch_names, bads=[])
+    assert_array_almost_equal(proj_new, proj, 4)
+
+    # test with bads
+    raw.load_bad_channels(bads_fname)  # adds 2 bad mag channels
+    with warnings.catch_warnings(True) as w:
+        projs = compute_proj_raw(raw, n_grad=0, n_mag=0, n_eeg=1)
+
+    # test that bad channels can be excluded
+    proj, nproj, U = make_projector(projs, raw.ch_names,
+                                    bads=raw.ch_names)
+    assert_array_almost_equal(proj, np.eye(len(raw.ch_names)))
diff --git a/mne/tests/test_selection.py b/mne/tests/test_selection.py
new file mode 100644
index 0000000..4272ed0
--- /dev/null
+++ b/mne/tests/test_selection.py
@@ -0,0 +1,27 @@
+from mne import read_selection
+
+
+def test_read_selection():
+    """Test reading of selections"""
+    # test one channel for each selection
+    ch_names = ['MEG 2211', 'MEG 0223', 'MEG 1312', 'MEG 0412', 'MEG 1043',
+                'MEG 2042', 'MEG 2032', 'MEG 0522', 'MEG 1031']
+    sel_names = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal',
+                 'Right-parietal', 'Left-occipital', 'Right-occipital',
+                 'Left-frontal', 'Right-frontal']
+
+    for i, name in enumerate(sel_names):
+        sel = read_selection(name)
+        assert(ch_names[i] in sel)
+
+    # test some combinations
+    all_ch = read_selection(['L', 'R'])
+    left = read_selection('L')
+    right = read_selection('R')
+
+    assert(len(all_ch) == len(left) + len(right))
+    assert(len(set(left).intersection(set(right))) == 0)
+
+    frontal = read_selection('frontal')
+    occipital = read_selection('Right-occipital')
+    assert(len(set(frontal).intersection(set(occipital))) == 0)
diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py
new file mode 100644
index 0000000..cc98102
--- /dev/null
+++ b/mne/tests/test_source_estimate.py
@@ -0,0 +1,443 @@
+import os.path as op
+from nose.tools import assert_true, assert_raises
+import warnings
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal, \
+                          assert_allclose
+
+from scipy.fftpack import fft
+
+from mne.datasets import sample
+from mne import stats, SourceEstimate, Label
+from mne import read_source_estimate, morph_data, extract_label_time_course
+from mne.source_estimate import spatio_temporal_tris_connectivity, \
+                                spatio_temporal_src_connectivity, \
+                                compute_morph_matrix, grade_to_vertices, \
+                                _compute_nearest
+
+from mne.minimum_norm import read_inverse_operator
+from mne.label import labels_from_parc, label_sign_flip
+from mne.utils import _TempDir, requires_pandas
+
+data_path = sample.data_path()
+subjects_dir = op.join(data_path, 'subjects')
+fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
+fname_inv = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-meg-oct-6-meg-inv.fif')
+fname_vol = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis-grad-vol-7-fwd-sensmap-vol.w')
+tempdir = _TempDir()
+
+
+def test_volume_stc():
+    """Test reading and writing volume STCs
+    """
+    N = 100
+    data = np.arange(N)[:, np.newaxis]
+    datas = [data, data, np.arange(2)[:, np.newaxis]]
+    vertno = np.arange(N)
+    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
+    vertno_reads = [vertno, vertno, np.arange(2)]
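+    # vertno may be passed as a 1D array or a column vector, but should
+    # always be read back as a 1D array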
+    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
+        stc = SourceEstimate(data, vertno, 0, 1)
+        assert_true(stc.is_surface() is False)
+        fname_temp = op.join(tempdir, 'temp-vl.stc')
+        stc_new = stc
+        for _ in xrange(2):
+            stc_new.save(fname_temp)
+            stc_new = read_source_estimate(fname_temp)
+            assert_true(stc_new.is_surface() is False)
+            assert_array_equal(vertno_read, stc_new.vertno)
+            assert_array_almost_equal(stc.data, stc_new.data)
+    # now let's actually read a MNE-C processed file
+    stc = read_source_estimate(fname_vol, 'sample')
+    assert_true('sample' in repr(stc))
+    stc_new = stc
+    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
+    for _ in xrange(2):
+        fname_temp = op.join(tempdir, 'temp-vol.w')
+        stc_new.save(fname_temp, ftype='w')
+        stc_new = read_source_estimate(fname_temp)
+        assert_true(stc_new.is_surface() is False)
+        assert_array_equal(stc.vertno, stc_new.vertno)
+        assert_array_almost_equal(stc.data, stc_new.data)
+
+
+def test_expand():
+    """Test stc expansion
+    """
+    stc = read_source_estimate(fname, 'sample')
+    assert_true('sample' in repr(stc))
+    labels_lh, _ = labels_from_parc('sample', hemi='lh',
+                                    subjects_dir=subjects_dir)
+    stc_limited = stc.in_label(labels_lh[0] + labels_lh[1])
+    stc_new = stc_limited.copy()
+    stc_new.data.fill(0)
+    for label in labels_lh[:2]:
+        stc_new += stc.in_label(label).expand(stc_limited.vertno)
+    # make sure we can't add unless vertno agree
+    assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
+
+
+def test_io_stc():
+    """Test IO for STC files
+    """
+    stc = read_source_estimate(fname)
+    stc.save(op.join(tempdir, "tmp.stc"))
+    stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
+
+    assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.tmin, stc2.tmin)
+    assert_true(len(stc.vertno) == len(stc2.vertno))
+    for v1, v2 in zip(stc.vertno, stc2.vertno):
+        assert_array_almost_equal(v1, v2)
+    assert_array_almost_equal(stc.tstep, stc2.tstep)
+
+
+def test_io_w():
+    """Test IO for w files
+    """
+    w_fname = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis-meg-oct-6-fwd-sensmap')
+
+    src = read_source_estimate(w_fname)
+
+    src.save(op.join(tempdir, 'tmp'), ftype='w')
+
+    src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
+
+    assert_array_almost_equal(src.data, src2.data)
+    assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
+    assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
+
+
+def test_stc_arithmetic():
+    """Test arithmetic for STC files
+    """
+    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
+    stc = read_source_estimate(fname)
+    data = stc.data.copy()
+
+    out = list()
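+    # apply the same chain of arithmetic to the plain array and to the STC;
+    # the resulting data should be identical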
+    for a in [data, stc]:
+        a = a + a * 3 + 3 * a - a ** 2 / 2
+
+        a += a
+        a -= a
+        a /= 2 * a
+        a *= -a
+
+        a += 2
+        a -= 1
+        a *= -1
+        a /= 2
+        a **= 3
+        out.append(a)
+
+    assert_array_equal(out[0], out[1].data)
+    assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
+
+
+def test_stc_methods():
+    """Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
+    """
+    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
+    stc = read_source_estimate(fname)
+
+    # lh_data / rh_data
+    assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
+    assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
+
+    # bin
+    bin = stc.bin(.12)
+    a = np.array((1,), dtype=stc.data.dtype)
+    a[0] = np.mean(stc.data[0, stc.times < .12])
+    assert a[0] == bin.data[0, 0]
+
+    assert_raises(ValueError, stc.center_of_mass, 'sample')
+    stc.lh_data[:] = 0
+    vertex, hemi, t = stc.center_of_mass('sample')
+    assert_true(hemi == 1)
+    # XXX Should design a fool-proof test case, but here were the results:
+    assert_true(vertex == 90186)
+    assert_true(np.round(t, 3) == 0.123)
+
+    stc = read_source_estimate(fname)
+    stc_new = deepcopy(stc)
+    o_sfreq = 1.0 / stc.tstep
+    # note that using no padding for this STC reduces edge ringing...
+    stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
+    assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
+    assert_true(stc_new.tstep == stc.tstep / 2)
+    stc_new.resample(o_sfreq, npad=0)
+    assert_true(stc_new.data.shape[1] == stc.data.shape[1])
+    assert_true(stc_new.tstep == stc.tstep)
+    assert_array_almost_equal(stc_new.data, stc.data, 5)
+
+
+def test_extract_label_time_course():
+    """Test extraction of label time courses from stc
+    """
+    n_stcs = 3
+    n_times = 50
+
+    src = read_inverse_operator(fname_inv)['src']
+    vertices = [src[0]['vertno'], src[1]['vertno']]
+    n_verts = len(vertices[0]) + len(vertices[1])
+
+    # get some labels
+    labels_lh, _ = labels_from_parc('sample', hemi='lh',
+                                    subjects_dir=subjects_dir)
+    labels_rh, _ = labels_from_parc('sample', hemi='rh',
+                                    subjects_dir=subjects_dir)
+    labels = list()
+    labels.extend(labels_lh[:5])
+    labels.extend(labels_rh[:4])
+
+    n_labels = len(labels)
+
+    label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
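+    # label i gets the constant value i at every time point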
+
+    # compute the mean with sign flip
+    label_means_flipped = np.zeros_like(label_means)
+    for i, label in enumerate(labels):
+        label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
+
+    # generate some stc's with known data
+    stcs = list()
+    for i in range(n_stcs):
+        data = np.zeros((n_verts, n_times))
+        # set the value of the stc within each label
+        for j, label in enumerate(labels):
+            if label.hemi == 'lh':
+                idx = np.intersect1d(vertices[0], label.vertices)
+                idx = np.searchsorted(vertices[0], idx)
+            elif label.hemi == 'rh':
+                idx = np.intersect1d(vertices[1], label.vertices)
+                idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
+            data[idx] = label_means[j]
+
+        this_stc = SourceEstimate(data, vertices, 0, 1)
+        stcs.append(this_stc)
+
+    # test some invalid inputs
+    assert_raises(ValueError, extract_label_time_course, stcs, labels,
+                  src, mode='notamode')
+
+    # have an empty label
+    empty_label = labels[0].copy()
+    empty_label.vertices += 1000000
+    assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
+                  src, mode='mean')
+
+    # but this works:
+    tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
+                                   allow_empty=True)
+    for arr in tc:
+        assert_true(arr.shape == (1, n_times))
+        assert_array_equal(arr, np.zeros((1, n_times)))
+
+    # test the different modes
+    modes = ['mean', 'mean_flip', 'pca_flip']
+
+    for mode in modes:
+        label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
+        label_tc_method = [stc.extract_label_time_course(labels, src,
+                           mode=mode) for stc in stcs]
+        assert_true(len(label_tc) == n_stcs)
+        assert_true(len(label_tc_method) == n_stcs)
+        for tc1, tc2 in zip(label_tc, label_tc_method):
+            assert_true(tc1.shape == (n_labels, n_times))
+            assert_true(tc2.shape == (n_labels, n_times))
+            assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
+            if mode == 'mean':
+                assert_array_almost_equal(tc1, label_means)
+            if mode == 'mean_flip':
+                assert_array_almost_equal(tc1, label_means_flipped)
+
+    # test label with very few vertices (check SVD conditionals)
+    label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
+    x = label_sign_flip(label, src)
+    assert_true(len(x) == 2)
+    label = Label(vertices=[], hemi='lh')
+    x = label_sign_flip(label, src)
+    assert_true(x.size == 0)
+
+
+def test_compute_nearest():
+    """Test nearest neighbor searches"""
+    x = np.random.randn(500, 3)
+    x /= np.sqrt(np.sum(x ** 2, axis=1))[:, None]
+    nn_true = np.random.permutation(np.arange(500, dtype=np.int))[:20]
+    y = x[nn_true]
+
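+    # both the brute-force and BallTree searches should recover the
+    # ground-truth indices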
+    nn1 = _compute_nearest(x, y, use_balltree=False)
+    nn2 = _compute_nearest(x, y, use_balltree=True)
+
+    assert_array_equal(nn_true, nn1)
+    assert_array_equal(nn_true, nn2)
+
+
+def test_morph_data():
+    """Test morphing of data
+    """
+    subject_from = 'sample'
+    subject_to = 'fsaverage'
+    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
+    stc_from = read_source_estimate(fname, subject='sample')
+    fname = op.join(data_path, 'MEG', 'sample', 'fsaverage_audvis-meg')
+    stc_to = read_source_estimate(fname)
+    # make sure we can specify grade
+    stc_from.crop(0.09, 0.1)  # for faster computation
+    stc_to.crop(0.09, 0.1)  # for faster computation
+    stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000)
+    stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
+    # make sure we can specify vertices
+    vertices_to = grade_to_vertices(subject_to, grade=3)
+    stc_to2 = morph_data(subject_from, subject_to, stc_from,
+                         grade=vertices_to, smooth=12, buffer_size=1000)
+    # make sure we can use different buffer_size
+    stc_to3 = morph_data(subject_from, subject_to, stc_from,
+                         grade=vertices_to, smooth=12, buffer_size=3)
+    # indexing silliness here due to mne_make_movie's indexing oddities
+    assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
+    assert_array_almost_equal(stc_to1.data, stc_to2.data)
+    assert_array_almost_equal(stc_to1.data, stc_to3.data)
+    # make sure precomputed morph matrices work
+    morph_mat = compute_morph_matrix(subject_from, subject_to,
+                                     stc_from.vertno, vertices_to,
+                                     smooth=12)
+    stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
+    assert_array_almost_equal(stc_to1.data, stc_to3.data)
+
+    mean_from = stc_from.data.mean(axis=0)
+    mean_to = stc_to1.data.mean(axis=0)
+    assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
+
+    # make sure we can fill by morphing
+    stc_to5 = morph_data(subject_from, subject_to, stc_from,
+                         grade=None, smooth=12, buffer_size=3)
+    assert_true(stc_to5.data.shape[0] == 163842 + 163842)
+
+
+def _my_trans(data):
+    """FFT that adds an additional dimension by repeating result"""
+    data_t = fft(data)
+    data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
+    return data_t, None
+
+
+def test_transform_data():
+    """Test applying linear (time) transform to data"""
+    # make up some data
+    n_sensors, n_vertices, n_times = 10, 20, 4
+    kernel = np.random.randn(n_vertices, n_sensors)
+    sens_data = np.random.randn(n_sensors, n_times)
+
+    vertices = np.arange(n_vertices)
+    data = np.dot(kernel, sens_data)
+
+    for idx, tmin_idx, tmax_idx in\
+            zip([None, np.arange(n_vertices / 2, n_vertices)],
+                [None, 1], [None, 3]):
+
+        if idx is None:
+            idx_use = slice(None, None)
+        else:
+            idx_use = idx
+
+        data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
+
+        for stc_data in (data, (kernel, sens_data)):
+            stc = SourceEstimate(stc_data, vertices=vertices,
+                                 tmin=0., tstep=1.)
+            stc_data_t = stc.transform_data(_my_trans, idx=idx,
+                                            tmin_idx=tmin_idx,
+                                            tmax_idx=tmax_idx)
+            assert_allclose(data_f, stc_data_t)
+
+
+def test_notify_array_source_estimate():
+    """Test that modifying the stc data removes the kernel and sensor data"""
+    # make up some data
+    n_sensors, n_vertices, n_times = 10, 20, 4
+    kernel = np.random.randn(n_vertices, n_sensors)
+    sens_data = np.random.randn(n_sensors, n_times)
+    vertices = np.arange(n_vertices)
+
+    stc = SourceEstimate((kernel, sens_data), vertices=vertices,
+                         tmin=0., tstep=1.)
+
+    assert_true(stc._data is None)
+    assert_true(stc._kernel is not None)
+    assert_true(stc._sens_data is not None)
+
+    # now modify the data in some way
+    data_half = stc.data[:, n_times / 2:]
+    data_half[0] = 1.0
+    data_half.fill(1.0)
+
+    # the kernel and sensor data can no longer be used: they have been removed
+    assert_true(stc._kernel is None)
+    assert_true(stc._sens_data is None)
+
+
+def test_spatio_temporal_tris_connectivity():
+    """Test spatio-temporal connectivity from triangles"""
+    tris = np.array([[0, 1, 2], [3, 4, 5]])
+    connectivity = spatio_temporal_tris_connectivity(tris, 2)
+    x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
+    components = stats.cluster_level._get_components(np.array(x), connectivity)
+    # _get_components works differently now...
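+    # convert the old format (-2 means unassigned, otherwise the component
+    # id) into one array of member indices per component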
+    old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
+    new_fmt = np.array(old_fmt)
+    new_fmt = [np.nonzero(new_fmt == v)[0]
+               for v in np.unique(new_fmt[new_fmt >= 0])]
+    assert_true(len(new_fmt) == len(components))
+    for c, n in zip(components, new_fmt):
+        assert_array_equal(c, n)
+
+
+def test_spatio_temporal_src_connectivity():
+    """Test spatio-temporal connectivity from source spaces"""
+    tris = np.array([[0, 1, 2], [3, 4, 5]])
+    src = [dict(), dict()]
+    connectivity = spatio_temporal_tris_connectivity(tris, 2)
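+    # a minimal fake two-hemisphere source space: one triangle per hemisphere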
+    src[0]['use_tris'] = np.array([[0, 1, 2]])
+    src[1]['use_tris'] = np.array([[0, 1, 2]])
+    src[0]['vertno'] = np.array([0, 1, 2])
+    src[1]['vertno'] = np.array([0, 1, 2])
+    connectivity2 = spatio_temporal_src_connectivity(src, 2)
+    assert_array_equal(connectivity.todense(), connectivity2.todense())
+    # add test for dist connectivity
+    src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
+    src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
+    src[0]['vertno'] = [0, 1, 2]
+    src[1]['vertno'] = [0, 1, 2]
+    connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
+    assert_array_equal(connectivity.todense(), connectivity3.todense())
+    # add test for source space connectivity with omitted vertices
+    inverse_operator = read_inverse_operator(fname_inv)
+    with warnings.catch_warnings(record=True) as w:
+        connectivity = spatio_temporal_src_connectivity(
+                                            inverse_operator['src'], n_times=2)
+        assert len(w) == 1
+    a = connectivity.shape[0] / 2
+    b = sum([s['nuse'] for s in inverse_operator['src']])
+    assert_true(a == b)
+
+
+@requires_pandas
+def test_as_data_frame():
+    """Test stc Pandas exporter"""
+    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
+    stc = read_source_estimate(fname, subject='sample')
+    assert_raises(ValueError, stc.as_data_frame, index=['foo', 'bar'])
+    for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
+        df = stc.as_data_frame(index=ind)
+        assert_true(df.index.names == (ind if isinstance(ind, list)
+                                       else [ind]))
+        assert_array_equal(df.values.T[ncat:], stc.data)
+        # test that non-indexed data are present as categorical variables
+        assert_true(all(c in df.reset_index().columns[:3]
+                        for c in ['subject', 'time']))
diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py
new file mode 100644
index 0000000..4e291fa
--- /dev/null
+++ b/mne/tests/test_source_space.py
@@ -0,0 +1,102 @@
+import os.path as op
+from nose.tools import assert_true
+import numpy as np
+from numpy.testing import assert_array_equal, assert_allclose
+
+from mne.datasets import sample
+from mne import read_source_spaces, vertex_to_mni, write_source_spaces
+from mne.utils import _TempDir, requires_fs_or_nibabel, requires_nibabel, \
+                      requires_freesurfer
+
+data_path = sample.data_path()
+fname = op.join(data_path, 'subjects', 'sample', 'bem', 'sample-oct-6-src.fif')
+fname_nodist = op.join(data_path, 'subjects', 'sample', 'bem',
+                       'sample-oct-6-orig-src.fif')
+
+tempdir = _TempDir()
+
+
+def test_read_source_spaces():
+    """Test reading of source space meshes
+    """
+    src = read_source_spaces(fname, add_geom=True)
+    print src
+
+    # 3D source space
+    lh_points = src[0]['rr']
+    lh_faces = src[0]['tris']
+    lh_use_faces = src[0]['use_tris']
+    rh_points = src[1]['rr']
+    rh_faces = src[1]['tris']
+    rh_use_faces = src[1]['use_tris']
+    assert_true(lh_faces.min() == 0)
+    assert_true(lh_faces.max() == lh_points.shape[0] - 1)
+    assert_true(lh_use_faces.min() >= 0)
+    assert_true(lh_use_faces.max() <= lh_points.shape[0] - 1)
+    assert_true(rh_faces.min() == 0)
+    assert_true(rh_faces.max() == rh_points.shape[0] - 1)
+    assert_true(rh_use_faces.min() >= 0)
+    assert_true(rh_use_faces.max() <= rh_points.shape[0] - 1)
+
+
+def test_write_source_space():
+    """Test writing and reading of source spaces
+    """
+    src0 = read_source_spaces(fname, add_geom=False)
+    src0_old = read_source_spaces(fname, add_geom=False)
+    write_source_spaces(op.join(tempdir, 'tmp.fif'), src0)
+    src1 = read_source_spaces(op.join(tempdir, 'tmp.fif'), add_geom=False)
+    for orig in [src0, src0_old]:
+        for s0, s1 in zip(src0, src1):
+            for name in ['nuse', 'dist_limit', 'ntri', 'np', 'type', 'id',
+                         'subject_his_id']:
+                assert_true(s0[name] == s1[name])
+            for name in ['nn', 'rr', 'inuse', 'vertno', 'nuse_tri',
+                         'coord_frame', 'use_tris', 'tris', 'nearest',
+                         'nearest_dist']:
+                assert_array_equal(s0[name], s1[name])
+            for name in ['dist']:
+                if s0[name] is not None:
+                    assert_true(s1[name].shape == s0[name].shape)
+                    assert_true(len((s0['dist'] - s1['dist']).data) == 0)
+            for name in ['pinfo']:
+                if s0[name] is not None:
+                    assert_true(len(s0[name]) == len(s1[name]))
+                    for p1, p2 in zip(s0[name], s1[name]):
+                        assert_true(all(p1 == p2))
+        # The above "if s0[name] is not None" can be removed once the sample
+        # dataset is updated to have a source space with distance info
+    for name in ['working_dir', 'command_line']:
+        assert_true(src0.info[name] == src1.info[name])
+
+
+@requires_fs_or_nibabel
+def test_vertex_to_mni():
+    """Test conversion of vertices to MNI coordinates
+    """
+    # obtained using "tksurfer (sample/fsaverage) (l/r)h white"
+    vertices = [100960, 7620, 150549, 96761]
+    coords_s = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
+                         [-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
+    coords_f = np.array([[-41.28, -40.04, 18.20], [-6.05, 49.74, -18.15],
+                         [-61.71, -14.55, 20.52], [21.70, -60.84, 25.02]])
+    hemis = [0, 0, 0, 1]
+    for coords, subj in zip([coords_s, coords_f], ['sample', 'fsaverage']):
+        coords_2 = vertex_to_mni(vertices, hemis, subj)
+        # less than 1mm error
+        assert_allclose(coords, coords_2, atol=1.0)
+
+
+@requires_freesurfer
+@requires_nibabel
+def test_vertex_to_mni_fs_nibabel():
+    """Test equivalence of vert_to_mni for nibabel and freesurfer
+    """
+    n_check = 1000
+    for subject in ['sample', 'fsaverage']:
+        vertices = np.random.randint(0, 100000, n_check)
+        hemis = np.random.randint(0, 2, n_check)
+        coords = vertex_to_mni(vertices, hemis, subject, mode='nibabel')
+        coords_2 = vertex_to_mni(vertices, hemis, subject, mode='freesurfer')
+        # less than 0.1 mm error
+        assert_allclose(coords, coords_2, atol=0.1)
diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py
new file mode 100644
index 0000000..2f96bd3
--- /dev/null
+++ b/mne/tests/test_surface.py
@@ -0,0 +1,40 @@
+import os.path as op
+
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+
+from mne.datasets import sample
+from mne import read_bem_surfaces, write_bem_surface, read_surface, \
+                write_surface
+from mne.utils import _TempDir
+
+data_path = sample.data_path()
+fname = op.join(data_path, 'subjects', 'sample', 'bem',
+                'sample-5120-5120-5120-bem-sol.fif')
+
+tempdir = _TempDir()
+
+
+def test_io_bem_surfaces():
+    """Test reading of bem surfaces
+    """
+    surf = read_bem_surfaces(fname, add_geom=True)
+    surf = read_bem_surfaces(fname, add_geom=False)
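+    # reading should work with and without precomputed geometry; the
+    # write/read round-trip below uses the plain (no geometry) result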
+    print "Number of surfaces : %d" % len(surf)
+
+    write_bem_surface(op.join(tempdir, 'bem_surf.fif'), surf[0])
+    surf_read = read_bem_surfaces(op.join(tempdir, 'bem_surf.fif'),
+                                  add_geom=False)
+
+    for key in surf[0].keys():
+        assert_array_almost_equal(surf[0][key], surf_read[0][key])
+
+
+def test_io_surface():
+    """Test reading and writing of Freesurfer surface mesh files
+    """
+    fname = op.join(data_path, 'subjects', 'fsaverage', 'surf', 'lh.inflated')
+    pts, tri = read_surface(fname)
+    write_surface(op.join(tempdir, 'tmp'), pts, tri)
+    c_pts, c_tri = read_surface(op.join(tempdir, 'tmp'))
+    assert_array_equal(pts, c_pts)
+    assert_array_equal(tri, c_tri)
diff --git a/mne/tests/test_utils.py b/mne/tests/test_utils.py
new file mode 100644
index 0000000..f340b4d
--- /dev/null
+++ b/mne/tests/test_utils.py
@@ -0,0 +1,152 @@
+from numpy.testing import assert_equal
+from nose.tools import assert_true, assert_raises
+import os.path as op
+import os
+import warnings
+import urllib2
+
+from ..utils import set_log_level, set_log_file, _TempDir, \
+                    get_config, set_config, deprecated, _fetch_file
+from ..fiff import Evoked, show_fiff
+
+base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+fname_evoked = op.join(base_dir, 'test-ave.fif')
+fname_raw = op.join(base_dir, 'test_raw.fif')
+fname_log = op.join(base_dir, 'test-ave.log')
+fname_log_2 = op.join(base_dir, 'test-ave-2.log')
+tempdir = _TempDir()
+test_name = op.join(tempdir, 'test.log')
+
+
+def clean_lines(lines):
+    # Function to scrub filenames for checking logging output (in test_logging)
+    return [l if 'Reading ' not in l else 'Reading test file' for l in lines]
+
+
+def test_logging():
+    """Test logging (to file)
+    """
+    old_log_file = open(fname_log, 'r')
+    old_lines = clean_lines(old_log_file.readlines())
+    old_log_file.close()
+    old_log_file_2 = open(fname_log_2, 'r')
+    old_lines_2 = clean_lines(old_log_file_2.readlines())
+    old_log_file_2.close()
+
+    if op.isfile(test_name):
+        os.remove(test_name)
+    # test it one way (printing default off)
+    set_log_file(test_name)
+    set_log_level('WARNING')
+    # should NOT print
+    evoked = Evoked(fname_evoked, setno=1)
+    assert_true(open(test_name).readlines() == [])
+    # should NOT print
+    evoked = Evoked(fname_evoked, setno=1, verbose=False)
+    assert_true(open(test_name).readlines() == [])
+    # should NOT print
+    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
+    assert_true(open(test_name).readlines() == [])
+    # SHOULD print
+    evoked = Evoked(fname_evoked, setno=1, verbose=True)
+    new_log_file = open(test_name, 'r')
+    new_lines = clean_lines(new_log_file.readlines())
+    assert_equal(new_lines, old_lines)
+    new_log_file.close()
+    set_log_file(None)  # Need to do this to close the old file
+    os.remove(test_name)
+
+    # now go the other way (printing default on)
+    set_log_file(test_name)
+    set_log_level('INFO')
+    # should NOT print
+    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
+    assert_true(open(test_name).readlines() == [])
+    # should NOT print
+    evoked = Evoked(fname_evoked, setno=1, verbose=False)
+    assert_true(open(test_name).readlines() == [])
+    # SHOULD print
+    evoked = Evoked(fname_evoked, setno=1)
+    new_log_file = open(test_name, 'r')
+    new_lines = clean_lines(new_log_file.readlines())
+    assert_equal(new_lines, old_lines)
+    # check to make sure appending works (and as default, raises a warning)
+    with warnings.catch_warnings(True) as w:
+        set_log_file(test_name, overwrite=False)
+        assert len(w) == 0
+        set_log_file(test_name)
+        assert len(w) == 1
+    evoked = Evoked(fname_evoked, setno=1)
+    new_log_file = open(test_name, 'r')
+    new_lines = clean_lines(new_log_file.readlines())
+    assert_equal(new_lines, old_lines_2)
+
+    # make sure overwriting works
+    set_log_file(test_name, overwrite=True)
+    # this line needs to be called to actually do some logging
+    evoked = Evoked(fname_evoked, setno=1)
+    del evoked
+    new_log_file = open(test_name, 'r')
+    new_lines = clean_lines(new_log_file.readlines())
+    assert_equal(new_lines, old_lines)
+
+
+def test_config():
+    """Test mne-python config file support"""
+    key = '_MNE_PYTHON_CONFIG_TESTING'
+    value = '123456'
+    old_val = os.getenv(key, None)
+    os.environ[key] = value
+    assert_true(get_config(key) == value)
+    del os.environ[key]
+    # catch the warning about it being a non-standard config key
+    with warnings.catch_warnings(True) as w:
+        set_config(key, None)
+        assert_true(len(w) == 1)
+    assert_true(get_config(key) is None)
+    assert_raises(KeyError, get_config, key, raise_error=True)
+    set_config(key, value)
+    assert_true(get_config(key) == value)
+    set_config(key, None)
+    if old_val is not None:
+        os.environ[key] = old_val
+
+
+def test_show_fiff():
+    """Test show_fiff
+    """
+    # this is not exhaustive, but hopefully bugs will be found in use
+    info = show_fiff(fname_evoked)
+    keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM',
+            'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE']
+    assert_true(all([key in info for key in keys]))
+    info = show_fiff(fname_raw, read_limit=1024)
+
+
+@deprecated('message')
+def deprecated_func():
+    pass
+
+
+def test_deprecated():
+    """Test deprecated function
+    """
+    with warnings.catch_warnings(record=True) as w:
+        deprecated_func()
+    assert_true(len(w) == 1)
+
+
+def test_fetch_file():
+    """Test file downloading
+    """
+    # Skipping test if no internet connection available
+    try:
+        urllib2.urlopen("http://github.com", timeout=1)
+    except urllib2.URLError:
+        from nose.plugins.skip import SkipTest
+        raise SkipTest('No internet connection, skipping download test.')
+    url = "http://github.com/mne-tools/mne-python/blob/master/README.rst"
+    archive_name = op.join(tempdir, "download_test")
+    _fetch_file(url, archive_name, print_destination=False)
diff --git a/mne/tests/test_viz.py b/mne/tests/test_viz.py
new file mode 100644
index 0000000..6aab242
--- /dev/null
+++ b/mne/tests/test_viz.py
@@ -0,0 +1,307 @@
+import os.path as op
+import numpy as np
+from numpy.testing import assert_raises
+
+from mne import fiff, read_events, Epochs, SourceEstimate, read_cov, read_proj
+from mne.layouts import read_layout
+from mne.fiff.pick import pick_channels_evoked
+from mne.viz import plot_topo, plot_topo_tfr, plot_topo_power, \
+                    plot_topo_phase_lock, plot_topo_image_epochs, \
+                    plot_evoked_topomap, plot_projs_topomap, \
+                    plot_sparse_source_estimates, plot_source_estimates, \
+                    plot_cov, mne_analyze_colormap, plot_image_epochs, \
+                    plot_connectivity_circle, circular_layout, plot_drop_log, \
+                    compare_fiff
+from mne.datasets.sample import data_path
+from mne.source_space import read_source_spaces
+from mne.preprocessing import ICA
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+lacks_mayavi = False
+try:
+    from mayavi import mlab
+except ImportError:
+    try:
+        from enthought.mayavi import mlab
+    except ImportError:
+        lacks_mayavi = True
+requires_mayavi = np.testing.dec.skipif(lacks_mayavi, 'Requires mayavi')
+
+if not lacks_mayavi:
+    mlab.options.backend = 'test'
+
+data_dir = data_path()
+subjects_dir = op.join(data_dir, 'subjects')
+sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
+                                        'bem', 'sample-oct-6-src.fif'))
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+evoked_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis-ave.fif')
+base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
+fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.2, 0.5
+n_chan = 15
+
+raw = fiff.Raw(raw_fname, preload=False)
+events = read_events(event_name)
+picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False,
+                        ecg=False, eog=False, exclude='bads')
+# Use a subset of channels for plotting speed
+picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
+epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                baseline=(None, 0))
+evoked = epochs.average()
+reject = dict(mag=4e-12)
+epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
+                            picks=picks, baseline=(None, 0), proj='delayed',
+                            reject=reject)
+evoked_delayed_ssp = epochs_delayed_ssp.average()
+layout = read_layout('Vectorview-all')
+
+
+def test_plot_topo():
+    """Test plotting of ERP topography
+    """
+    # Show topography
+    plot_topo(evoked, layout)
+    picked_evoked = pick_channels_evoked(evoked, evoked.ch_names[:3])
+
+    # test scaling
+    for ylim in [dict(mag=[-600, 600]), None]:
+        plot_topo([picked_evoked] * 2, layout, ylim=ylim)
+
+    for evo in [evoked, [evoked, picked_evoked]]:
+        assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
+
+    plot_topo(evoked_delayed_ssp, layout, proj='interactive')
+
+
+def test_plot_topo_tfr():
+    """Test plotting of TFR
+    """
+    # Make a fake dataset to plot
+    n_freqs = 11
+    con = np.random.randn(n_chan, n_freqs, len(epochs.times))
+    freqs = np.arange(n_freqs)
+    # Show topography of connectivity from seed
+    plot_topo_tfr(epochs, con, freqs, layout)
+
+
+def test_plot_topo_power():
+    """Test plotting of power
+    """
+    decim = 3
+    frequencies = np.arange(7, 30, 3)  # define frequencies of interest
+    power = np.abs(np.random.randn(n_chan, 7, 141))
+    phase_lock = np.random.randn(n_chan, 7, 141)
+    baseline = (None, 0)  # set the baseline for induced power
+    title = 'Induced power - MNE sample data'
+    plot_topo_power(epochs, power, frequencies, layout, baseline=baseline,
+                    mode='ratio', decim=decim, vmin=0., vmax=14, title=title)
+    title = 'Phase locking value - MNE sample data'
+    plot_topo_phase_lock(epochs, phase_lock, frequencies, layout,
+                         baseline=baseline, mode='mean', decim=decim,
+                         title=title)
+
+
+def test_plot_topo_image_epochs():
+    """Test plotting of epochs image topography
+    """
+    title = 'ERF images - MNE sample data'
+    plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
+                           colorbar=True, title=title)
+
+
+def test_plot_evoked():
+    """Test plotting of evoked
+    """
+    evoked.plot(proj=True, hline=[1])
+
+    # plot with bad channels excluded
+    evoked.plot(exclude='bads')
+    evoked.plot(exclude=evoked.info['bads'])  # does the same thing
+
+    # test that selective updating of dict keys works
+    evoked.plot(hline=[1], units=dict(mag='femto foo'))
+    evoked_delayed_ssp.plot(proj='interactive')
+    evoked_delayed_ssp.apply_proj()
+    assert_raises(RuntimeError, evoked_delayed_ssp.plot, proj='interactive')
+    evoked_delayed_ssp.info['projs'] = []
+    assert_raises(RuntimeError, evoked_delayed_ssp.plot, proj='interactive')
+    assert_raises(RuntimeError, evoked_delayed_ssp.plot, proj='interactive',
+                  axes='foo')
+
+
+@requires_mayavi
+def test_plot_sparse_source_estimates():
+    """Test plotting of (sparse) source estimates
+    """
+    # dense version
+    vertices = [s['vertno'] for s in sample_src]
+    n_time = 5
+    n_verts = sum(len(v) for v in vertices)
+    stc_data = np.zeros((n_verts * n_time))
+    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
+    stc_data.shape = (n_verts, n_time)
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    colormap = mne_analyze_colormap(format='matplotlib')
+    # don't really need to test matplotlib method since it's not used now...
+    colormap = mne_analyze_colormap()
+    plot_source_estimates(stc, 'sample', colormap=colormap,
+                          config_opts={'background': (1, 1, 0)},
+                          subjects_dir=subjects_dir)
+    assert_raises(RuntimeError, plot_source_estimates, stc, 'sample',
+                  figure='foo', hemi='both')
+
+    # now do sparse version
+    vertices = sample_src[0]['vertno']
+    n_verts = len(vertices)
+    stc_data = np.zeros((n_verts * n_time))
+    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
+    stc_data.shape = (n_verts, n_time)
+    inds = np.where(np.any(stc_data, axis=1))[0]
+    stc_data = stc_data[inds]
+    vertices = vertices[inds]
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
+                                 opacity=0.5, high_resolution=True)
+
+
+def test_plot_cov():
+    """Test plotting of covariances
+    """
+    cov = read_cov(cov_fname)
+    plot_cov(cov, raw.info, proj=True)
+
+
+def test_plot_ica_panel():
+    """Test plotting of ICA panel
+    """
+    ica_picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False,
+                                ecg=False, eog=False, exclude='bads')
+    cov = read_cov(cov_fname)
+    ica = ICA(noise_cov=cov, n_components=2, max_pca_components=3,
+              n_pca_components=3)
+    ica.decompose_raw(raw, picks=ica_picks)
+    ica.plot_sources_raw(raw)
+
+
+def test_plot_image_epochs():
+    """Test plotting of epochs image
+    """
+    plot_image_epochs(epochs, picks=[1, 2])
+
+
+def test_plot_connectivity_circle():
+    """Test plotting connectivity circle
+    """
+    node_order = ['frontalpole-lh', 'parsorbitalis-lh',
+                  'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
+                  'medialorbitofrontal-lh', 'parstriangularis-lh',
+                  'rostralanteriorcingulate-lh', 'temporalpole-lh',
+                  'parsopercularis-lh', 'caudalanteriorcingulate-lh',
+                  'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh',
+                  'caudalmiddlefrontal-lh', 'superiortemporal-lh',
+                  'parahippocampal-lh', 'middletemporal-lh',
+                  'inferiortemporal-lh', 'precentral-lh',
+                  'transversetemporal-lh', 'posteriorcingulate-lh',
+                  'fusiform-lh', 'postcentral-lh', 'bankssts-lh',
+                  'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh',
+                  'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh',
+                  'superiorparietal-lh', 'pericalcarine-lh',
+                  'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh',
+                  'lateraloccipital-rh', 'pericalcarine-rh',
+                  'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh',
+                  'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh',
+                  'supramarginal-rh', 'bankssts-rh', 'postcentral-rh',
+                  'fusiform-rh', 'posteriorcingulate-rh',
+                  'transversetemporal-rh', 'precentral-rh',
+                  'inferiortemporal-rh', 'middletemporal-rh',
+                  'parahippocampal-rh', 'superiortemporal-rh',
+                  'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh',
+                  'entorhinal-rh', 'caudalanteriorcingulate-rh',
+                  'parsopercularis-rh', 'temporalpole-rh',
+                  'rostralanteriorcingulate-rh', 'parstriangularis-rh',
+                  'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh',
+                  'lateralorbitofrontal-rh', 'parsorbitalis-rh',
+                  'frontalpole-rh']
+    label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh',
+                   'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh',
+                   'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh',
+                   'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh',
+                   'frontalpole-rh', 'fusiform-lh', 'fusiform-rh',
+                   'inferiorparietal-lh', 'inferiorparietal-rh',
+                   'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh',
+                   'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh',
+                   'lateraloccipital-lh', 'lateraloccipital-rh',
+                   'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh',
+                   'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh',
+                   'medialorbitofrontal-rh', 'middletemporal-lh',
+                   'middletemporal-rh', 'paracentral-lh', 'paracentral-rh',
+                   'parahippocampal-lh', 'parahippocampal-rh',
+                   'parsopercularis-lh', 'parsopercularis-rh',
+                   'parsorbitalis-lh', 'parsorbitalis-rh',
+                   'parstriangularis-lh', 'parstriangularis-rh',
+                   'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh',
+                   'postcentral-rh', 'posteriorcingulate-lh',
+                   'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh',
+                   'precuneus-lh', 'precuneus-rh',
+                   'rostralanteriorcingulate-lh',
+                   'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh',
+                   'rostralmiddlefrontal-rh', 'superiorfrontal-lh',
+                   'superiorfrontal-rh', 'superiorparietal-lh',
+                   'superiorparietal-rh', 'superiortemporal-lh',
+                   'superiortemporal-rh', 'supramarginal-lh',
+                   'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh',
+                   'transversetemporal-lh', 'transversetemporal-rh']
+    node_angles = circular_layout(label_names, node_order, start_pos=90)
+    con = np.random.randn(68, 68)
+    plot_connectivity_circle(con, label_names, n_lines=300,
+                             node_angles=node_angles, title='test')
+
+
+def test_plot_drop_log():
+    """Test plotting a drop log
+    """
+    plot_drop_log(epochs.drop_log)
+    plot_drop_log([['One'], [], []])
+    plot_drop_log([['One'], ['Two'], []])
+    plot_drop_log([['One'], ['One', 'Two'], []])
+
+
+def test_plot_raw():
+    """Test plotting of raw data
+    """
+    raw.plot(events=events, show_options=True)
+
+
+def test_plot_topomap():
+    """Testing topomap plotting
+    """
+    # evoked
+    evoked = fiff.read_evoked(evoked_fname, 'Left Auditory',
+                              baseline=(None, 0))
+    evoked.plot_topomap(0.1, 'mag', layout=layout)
+    plot_evoked_topomap(evoked, None, ch_type='mag')
+    times = [0.1, 0.2]
+    plot_evoked_topomap(evoked, times, ch_type='grad')
+    plot_evoked_topomap(evoked, times, ch_type='planar1')
+    plot_evoked_topomap(evoked, times, ch_type='mag', layout='auto')
+    plot_evoked_topomap(evoked, 0.1, 'mag', proj='interactive')
+    assert_raises(RuntimeError, plot_evoked_topomap, evoked, np.repeat(.1, 50))
+    assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
+
+    # projs
+    projs = read_proj(ecg_fname)[:7]
+    plot_projs_topomap(projs)
+
+
+def test_compare_fiff():
+    """Test comparing fiff files
+    """
+    compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
diff --git a/mne/time_frequency/__init__.py b/mne/time_frequency/__init__.py
new file mode 100644
index 0000000..5a22be0
--- /dev/null
+++ b/mne/time_frequency/__init__.py
@@ -0,0 +1,8 @@
+"""Time frequency analysis tools
+"""
+
+from .tfr import induced_power, single_trial_power, morlet
+from .psd import compute_raw_psd
+from .ar import yule_walker, ar_raw, iir_filter_raw
+from .multitaper import dpss_windows, multitaper_psd
+from .stft import stft, istft, stftfreq
diff --git a/mne/time_frequency/ar.py b/mne/time_frequency/ar.py
new file mode 100644
index 0000000..62eacd7
--- /dev/null
+++ b/mne/time_frequency/ar.py
@@ -0,0 +1,152 @@
+# Authors: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
+#          The statsmodels folks for AR yule_walker
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.linalg import toeplitz
+
+
+# XXX : Back ported from statsmodels
+
+def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
+    """
+    Estimate AR(p) parameters from a sequence X using the Yule-Walker
+    equations.
+
+    Unbiased or maximum-likelihood estimator (mle)
+
+    See, for example:
+
+    http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
+
+    Parameters
+    ----------
+    X : array-like
+        1d array
+    order : integer, optional
+        The order of the autoregressive process.  Default is 1.
+    method : string, optional
+       Method can be "unbiased" or "mle" and this determines denominator in
+       estimate of autocorrelation function (ACF) at lag k. If "mle", the
+       denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
+       The default is unbiased.
+    df : integer, optional
+       Specifies the degrees of freedom. If `df` is supplied, it is assumed
+       that X has `df` degrees of freedom rather than `n`.  Default is None.
+    inv : bool
+        If inv is True the inverse of R is also returned.  Default is False.
+    demean : bool
+        If True, the mean is subtracted from `X` before estimation.
+
+    Returns
+    -------
+    rho : array
+        The estimated autoregressive coefficients
+    sigma : float
+        The estimated standard deviation of the innovation (residual) process
+
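+    Examples
+    --------
+    A minimal sketch (illustrative; the simulated AR(2) coefficients are
+    arbitrary):
+
+    >>> import numpy as np
+    >>> rng = np.random.RandomState(0)
+    >>> x = np.zeros(1000)
+    >>> for t in range(2, 1000):  # simulate an AR(2) process
+    ...     x[t] = 0.9 * x[t - 1] - 0.2 * x[t - 2] + rng.randn()
+    >>> rho, sigma = yule_walker(x, order=2)  # rho approximates [0.9, -0.2]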
+    """
+#TODO: define R better, look back at notes and technical notes on YW.
+#First link here is useful
+#http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
+    method = str(method).lower()
+    if method not in ["unbiased", "mle"]:
+        raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
+    X = np.array(X)
+    if demean:
+        X -= X.mean()  # demean X in place
+    n = df or X.shape[0]
+
+    if method == "unbiased":        # this is df_resid ie., n - p
+        denom = lambda k: n - k
+    else:
+        denom = lambda k: n
+    if X.ndim > 1 and X.shape[1] != 1:
+        raise ValueError("expecting a vector to estimate AR parameters")
+    r = np.zeros(order + 1, np.float64)
+    r[0] = (X ** 2).sum() / denom(0)
+    for k in range(1, order + 1):
+        r[k] = (X[0:-k] * X[k:]).sum() / denom(k)
+    R = toeplitz(r[:-1])
+
+    rho = np.linalg.solve(R, r[1:])
+    sigmasq = r[0] - (r[1:] * rho).sum()
+    if inv:
+        return rho, np.sqrt(sigmasq), np.linalg.inv(R)
+    else:
+        return rho, np.sqrt(sigmasq)
+
+
+def ar_raw(raw, order, picks, tmin=None, tmax=None):
+    """Fit AR model on raw data
+
+    Fit an AR model for each channel and return the model
+    coefficients for each of them.
+
+    Parameters
+    ----------
+    raw : Raw instance
+        The raw data
+    order : int
+        The AR model order
+    picks : array of int
+        The channel indices to include
+    tmin : float
+        The beginning of the time interval in seconds.
+    tmax : float
+        The end of the time interval in seconds.
+
+    Returns
+    -------
+    coefs : array
+        Sets of coefficients for each channel
+    """
+    start, stop = None, None
+    if tmin is not None:
+        start = raw.time_as_index(tmin)[0]
+    if tmax is not None:
+        stop = raw.time_as_index(tmax)[0] + 1
+    data, times = raw[picks, start:stop]
+
+    coefs = np.empty((len(data), order))
+    for k, d in enumerate(data):
+        this_coefs, _ = yule_walker(d, order=order)
+        coefs[k, :] = this_coefs
+    return coefs
+
+
+def iir_filter_raw(raw, order, picks, tmin=None, tmax=None):
+    """Fits an AR model to raw data and creates the corresponding IIR filter
+
+    The computed filter is the average filter for all the picked channels.
+    The returned filter coefficients are the denominator of the filter
+    (the numerator is 1). The frequency response is given by:
+
+        H(e^jw) = 1 / (a[0] + a[1] * e^(-jw) + ... + a[n] * e^(-jnw))
+
+    Parameters
+    ----------
+    raw : Raw object
+        An instance of Raw
+    order : int
+        The order of the AR model (and of the resulting IIR filter)
+    picks : array of int
+        The indices of selected channels
+    tmin : float
+        The beginning of the time interval in seconds.
+    tmax : float
+        The end of the time interval in seconds.
+
+    Returns
+    -------
+    a : array
+        The denominator coefficients of the IIR filter (the numerator is 1)
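+
+    Examples
+    --------
+    A hedged sketch (assumes `raw` is an open Raw instance and `picks` an
+    array of channel indices, neither defined here):
+
+    >>> a = iir_filter_raw(raw, order=5, picks=picks, tmin=60, tmax=180)
+    >>> from scipy.signal import freqz
+    >>> w, h = freqz([1.], a)  # inspect the fitted frequency response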
+    """
+    picks = picks[:5]  # only the first five picks are used (keeps fit fast)
+    coefs = ar_raw(raw, order=order, picks=picks, tmin=tmin, tmax=tmax)
+    mean_coefs = np.mean(coefs, axis=0)  # mean model across channels
+    a = np.r_[1, -mean_coefs]  # filter coefficients
+    return a
diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py
new file mode 100644
index 0000000..1c39765
--- /dev/null
+++ b/mne/time_frequency/multitaper.py
@@ -0,0 +1,529 @@
+# Author : Martin Luessi <mluessi@nmr.mgh.harvard.edu> (2012)
+# License : BSD 3-clause
+
+# Parts of this code were copied from NiTime http://nipy.sourceforge.net/nitime
+from warnings import warn
+
+import numpy as np
+from scipy import fftpack, linalg, interpolate
+
+from ..parallel import parallel_func
+from .. import verbose
+
+
+def tridisolve(d, e, b, overwrite_b=True):
+    """
+    Symmetric tridiagonal system solver, from Golub and Van Loan pg 157
+
+    Note: Copied from NiTime
+
+    Parameters
+    ----------
+
+    d : ndarray
+      main diagonal stored in d[:]
+    e : ndarray
+      superdiagonal stored in e[:-1]
+    b : ndarray
+      RHS vector
+
+    Returns
+    -------
+
+    x : ndarray
+      Solution to Ax = b (if overwrite_b is False). Otherwise solution is
+      stored in previous RHS vector b
+
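+    Examples
+    --------
+    A sanity-check sketch against a dense solve (values are arbitrary):
+
+    >>> import numpy as np
+    >>> d = np.array([4., 4., 4.])
+    >>> e = np.array([1., 1., 0.])  # superdiagonal stored in e[:-1]
+    >>> b = np.array([1., 2., 3.])
+    >>> x = tridisolve(d, e, b, overwrite_b=False)
+    >>> A = np.diag(d) + np.diag(e[:-1], 1) + np.diag(e[:-1], -1)
+    >>> bool(np.allclose(np.dot(A, x), b))
+    True
+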
+    """
+    N = len(b)
+    # work vectors
+    dw = d.copy()
+    ew = e.copy()
+    if overwrite_b:
+        x = b
+    else:
+        x = b.copy()
+    for k in xrange(1, N):
+        # e^(k-1) = e(k-1) / d(k-1)
+        # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)
+        t = ew[k - 1]
+        ew[k - 1] = t / dw[k - 1]
+        dw[k] = dw[k] - t * ew[k - 1]
+    for k in xrange(1, N):
+        x[k] = x[k] - ew[k - 1] * x[k - 1]
+    x[N - 1] = x[N - 1] / dw[N - 1]
+    for k in xrange(N - 2, -1, -1):
+        x[k] = x[k] / dw[k] - ew[k] * x[k + 1]
+
+    if not overwrite_b:
+        return x
+
+
+def tridi_inverse_iteration(d, e, w, x0=None, rtol=1e-8):
+    """Perform an inverse iteration to find the eigenvector corresponding
+    to the given eigenvalue in a symmetric tridiagonal system.
+
+    Note: Copied from NiTime
+
+    Parameters
+    ----------
+
+    d : ndarray
+      main diagonal of the tridiagonal system
+    e : ndarray
+      offdiagonal stored in e[:-1]
+    w : float
+      eigenvalue of the eigenvector
+    x0 : ndarray
+      initial point to start the iteration
+    rtol : float
+      tolerance for the norm of the difference of iterates
+
+    Returns
+    -------
+
+    x0 : ndarray
+      The converged eigenvector
+
+    """
+    eig_diag = d - w
+    if x0 is None:
+        x0 = np.random.randn(len(d))
+    x_prev = np.zeros_like(x0)
+    norm_x = np.linalg.norm(x0)
+    # the eigenvector is unique up to sign change, so iterate
+    # until || |x^(n)| - |x^(n-1)| ||^2 < rtol
+    x0 /= norm_x
+    while np.linalg.norm(np.abs(x0) - np.abs(x_prev)) > rtol:
+        x_prev = x0.copy()
+        tridisolve(eig_diag, e, x0)
+        norm_x = np.linalg.norm(x0)
+        x0 /= norm_x
+    return x0
+
+
+def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
+                 interp_kind='linear'):
+    """
+    Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
+    for a given frequency-spacing multiple NW and sequence length N.
+
+    Note: Copied from NiTime
+
+    Parameters
+    ----------
+    N : int
+        Sequence length
+    half_nbw : float, unitless
+        Standardized half bandwidth corresponding to 2 * half_nbw = BW / f0
+        = BW * N * dt, but with dt taken as 1
+    Kmax : int
+        Number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
+    low_bias : bool
+        Keep only tapers with eigenvalues > 0.9
+    interp_from : int (optional)
+        The dpss can be calculated using interpolation from a set of dpss
+        with the same NW and Kmax, but shorter N. This is the length of this
+        shorter set of dpss windows.
+    interp_kind : str (optional)
+        This input variable is passed to scipy.interpolate.interp1d and
+        specifies the kind of interpolation as a string ('linear', 'nearest',
+        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying
+        the order of the spline interpolator to use.
+
+
+    Returns
+    -------
+    v, e : tuple,
+        v is an array of DPSS windows shaped (Kmax, N)
+        e are the eigenvalues
+
+    Notes
+    -----
+    Tridiagonal form of DPSS calculation from:
+
+    Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
+    uncertainty V: The discrete case. Bell System Technical Journal,
+    Volume 57 (1978), 1371-1430
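+
+    Examples
+    --------
+    A minimal sketch (parameter values are arbitrary):
+
+    >>> dpss, eigvals = dpss_windows(1000, half_nbw=4, Kmax=8)
+    >>> dpss.shape[1]  # one taper per row, each of length N
+    1000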
+    """
+    Kmax = int(Kmax)
+    W = float(half_nbw) / N
+    nidx = np.arange(N, dtype='d')
+
+    # In this case, we create the dpss windows of the smaller size
+    # (interp_from) and then interpolate to the larger size (N)
+    if interp_from is not None:
+        if interp_from > N:
+            e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
+            e_s += 'and N is: %s. ' % N
+            e_s += 'Please enter interp_from smaller than N.'
+            raise ValueError(e_s)
+        dpss = []
+        d, e = dpss_windows(interp_from, half_nbw, Kmax, low_bias=False)
+        for this_d in d:
+            x = np.arange(this_d.shape[-1])
+            I = interpolate.interp1d(x, this_d, kind=interp_kind)
+            d_temp = I(np.arange(0, this_d.shape[-1] - 1,
+                                 float(this_d.shape[-1] - 1) / N))
+
+            # Rescale:
+            d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
+
+            dpss.append(d_temp)
+
+        dpss = np.array(dpss)
+
+    else:
+        # here we want to set up an optimization problem to find a sequence
+        # whose energy is maximally concentrated within band [-W,W].
+        # Thus, the measure lambda(T,W) is the ratio between the energy within
+        # that band, and the total energy. This leads to the eigen-system
+        # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
+        # eigenvalue is the sequence with maximally concentrated energy. The
+        # collection of eigenvectors of this system are called Slepian
+        # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
+        # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
+        # concentration
+        # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
+
+        # Here I set up an alternative symmetric tri-diagonal eigenvalue
+        # problem such that
+        # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
+        # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
+        # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
+        # [see Percival and Walden, 1993]
+        diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
+        off_diag = np.zeros_like(nidx)
+        off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
+        # put the diagonals in LAPACK "packed" storage
+        ab = np.zeros((2, N), 'd')
+        ab[1] = diagonal
+        ab[0, 1:] = off_diag[:-1]
+        # only calculate the highest Kmax eigenvalues
+        w = linalg.eigvals_banded(ab, select='i',
+                                  select_range=(N - Kmax, N - 1))
+        w = w[::-1]
+
+        # find the corresponding eigenvectors via inverse iteration
+        t = np.linspace(0, np.pi, N)
+        dpss = np.zeros((Kmax, N), 'd')
+        for k in xrange(Kmax):
+            dpss[k] = tridi_inverse_iteration(diagonal, off_diag, w[k],
+                                              x0=np.sin((k + 1) * t))
+
+    # By convention (Percival and Walden, 1993 pg 379)
+    # * symmetric tapers (k=0,2,4,...) should have a positive average.
+    # * antisymmetric tapers should begin with a positive lobe
+    fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
+    for i, f in enumerate(fix_symmetric):
+        if f:
+            dpss[2 * i] *= -1
+    fix_skew = (dpss[1::2, 1] < 0)
+    for i, f in enumerate(fix_skew):
+        if f:
+            dpss[2 * i + 1] *= -1
+
+    # Now find the eigenvalues of the original spectral concentration problem
+    # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
+
+    # compute autocorr using FFT (same as nitime.utils.autocorr(dpss) * N)
+    rxx_size = 2 * N - 1
+    NFFT = 2 ** int(np.ceil(np.log2(rxx_size)))
+    dpss_fft = fftpack.fft(dpss, NFFT)
+    dpss_rxx = np.real(fftpack.ifft(dpss_fft * dpss_fft.conj()))
+    dpss_rxx = dpss_rxx[:, :N]
+
+    r = 4 * W * np.sinc(2 * W * nidx)
+    r[0] = 2 * W
+    eigvals = np.dot(dpss_rxx, r)
+
+    if low_bias:
+        idx = (eigvals > 0.9)
+        dpss, eigvals = dpss[idx], eigvals[idx]
+
+    return dpss, eigvals
+
+
+def _psd_from_mt_adaptive(x_mt, eigvals, freq_mask, max_iter=150,
+                          return_weights=False):
+    """
+    Perform an iterative procedure to compute the PSD from tapered spectra
+    using the optimal weights.
+
+    Note: Modified from NiTime
+
+    Parameters
+    ----------
+
+    x_mt : array, shape=(n_signals, n_tapers, n_freqs)
+       The DFTs of the tapered sequences (only positive frequencies)
+    eigvals : array, length n_tapers
+       The eigenvalues of the DPSS tapers
+    freq_mask : array
+        Frequency indices to keep
+    max_iter : int
+       Maximum number of iterations for weight computation
+    return_weights : bool
+       Also return the weights
+
+    Returns
+    -------
+    psd : array, shape=(n_signals, np.sum(freq_mask))
+        The computed PSDs
+    weights : array shape=(n_signals, n_tapers, np.sum(freq_mask))
+        The weights used to combine the tapered spectra
+
+    Notes
+    -----
+
+    The weights to use for making the multitaper estimate, such that
+    :math:`S_{mt} = \sum_{k} |w_k|^2S_k^{mt} / \sum_{k} |w_k|^2`
+    """
+    n_signals, n_tapers, n_freqs = x_mt.shape
+
+    if len(eigvals) != n_tapers:
+        raise ValueError('Need one eigenvalue for each taper')
+
+    if n_tapers < 3:
+        raise ValueError('Not enough tapers to compute adaptive weights.')
+
+    rt_eig = np.sqrt(eigvals)
+
+    # estimate the variance from an estimate with fixed weights
+    psd_est = _psd_from_mt(x_mt, rt_eig[np.newaxis, :, np.newaxis])
+    x_var = np.trapz(psd_est, dx=np.pi / n_freqs) / (2 * np.pi)
+    del psd_est
+
+    # allocate space for output
+    psd = np.empty((n_signals, np.sum(freq_mask)))
+
+    # only keep the frequencies of interest
+    x_mt = x_mt[:, :, freq_mask]
+
+    if return_weights:
+        weights = np.empty((n_signals, n_tapers, psd.shape[1]))
+
+    for i, (xk, var) in enumerate(zip(x_mt, x_var)):
+        # combine the SDFs in the traditional way in order to estimate
+        # the variance of the timeseries
+
+        # The process is to iteratively switch solving for the following
+        # two expressions:
+        # (1) Adaptive Multitaper SDF:
+        # S^{mt}(f) = [ sum |d_k(f)|^2 S_k(f) ]/ sum |d_k(f)|^2
+        #
+        # (2) Weights
+        # d_k(f) = [sqrt(lam_k) S^{mt}(f)] / [lam_k S^{mt}(f) + E{B_k(f)}]
+        #
+        # Where lam_k are the eigenvalues corresponding to the DPSS tapers,
+        # and the expected value of the broadband bias function
+        # E{B_k(f)} is replaced by its full-band integration
+        # (1/2pi) int_{-pi}^{pi} E{B_k(f)} = sig^2(1-lam_k)
+
+        # start with an estimate from incomplete data--the first 2 tapers
+        psd_iter = _psd_from_mt(xk[:2, :], rt_eig[:2, np.newaxis])
+
+        err = np.zeros_like(xk)
+        for n in range(max_iter):
+            d_k = psd_iter / (eigvals[:, np.newaxis] * psd_iter + \
+                  (1 - eigvals[:, np.newaxis]) * var)
+            d_k *= rt_eig[:, np.newaxis]
+            # Test for convergence -- this is overly conservative, since
+            # iteration only stops when all frequencies have converged.
+            # A better approach is to iterate separately for each freq, but
+            # that is a nonvectorized algorithm.
+            # Take the RMS difference in weights from the previous iterate
+            # across frequencies. If the maximum RMS error across freqs is
+            # less than 1e-10, then we're converged
+            err -= d_k
+            if np.max(np.mean(err ** 2, axis=0)) < 1e-10:
+                break
+
+            # update the iterative estimate with this d_k
+            psd_iter = _psd_from_mt(xk, d_k)
+            err = d_k
+
+        if n == max_iter - 1:
+            warn('Iterative multi-taper PSD computation did not converge.',
+                  RuntimeWarning)
+
+        psd[i, :] = psd_iter
+
+        if return_weights:
+            weights[i, :, :] = d_k
+
+    if return_weights:
+        return psd, weights
+    else:
+        return psd
+
+
+def _psd_from_mt(x_mt, weights):
+    """ compute PSD from tapered spectra
+
+    Parameters
+    ----------
+    x_mt : array
+        Tapered spectra
+    weights : array
+        Weights used to combine the tapered spectra
+
+    Returns
+    -------
+    psd : array
+        The computed PSD
+    """
+
+    psd = np.sum(np.abs(weights * x_mt) ** 2, axis=-2)
+    psd *= 2 / np.sum(np.abs(weights) ** 2, axis=-2)
+
+    return psd
+
+
+def _csd_from_mt(x_mt, y_mt, weights_x, weights_y):
+    """ Compute CSD from tapered spectra
+
+    Parameters
+    ----------
+    x_mt : array
+        Tapered spectra for x
+    y_mt : array
+        Tapered spectra for y
+    weights_x : array
+        Weights used to combine the tapered spectra of x_mt
+    weights_y : array
+        Weights used to combine the tapered spectra of y_mt
+
+    Returns
+    -------
+    csd : array
+        The computed CSD
+    """
+
+    csd = np.sum(weights_x * x_mt * (weights_y * y_mt).conj(), axis=-2)
+
+    denom = np.sqrt(np.sum(np.abs(weights_x) ** 2, axis=-2))\
+             * np.sqrt(np.sum(np.abs(weights_y) ** 2, axis=-2))
+
+    csd *= 2 / denom
+
+    return csd
+
+
+def _mt_spectra(x, dpss, sfreq):
+    """ Compute tapered spectra
+
+    Parameters
+    ----------
+    x : array, shape=(n_signals, n_times)
+        Input signal
+    dpss : array, shape=(n_tapers, n_times)
+        The tapers
+    sfreq : float
+        The sampling frequency
+
+    Returns
+    -------
+    x_mt : array, shape=(n_signals, n_tapers, n_freqs)
+        The tapered spectra (only positive frequencies are kept)
+    freqs : array
+        The frequency points in Hz of the spectra
+    """
+
+    # remove mean (do not use in-place subtraction as it may modify input x)
+    x = x - np.mean(x, axis=-1)[:, np.newaxis]
+    x_mt = fftpack.fft(x[:, np.newaxis, :] * dpss)
+
+    # only keep positive frequencies
+    freqs = fftpack.fftfreq(x.shape[1], 1. / sfreq)
+    freq_mask = (freqs >= 0)
+
+    x_mt = x_mt[:, :, freq_mask]
+    freqs = freqs[freq_mask]
+
+    return x_mt, freqs
+
+
+ at verbose
+def multitaper_psd(x, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
+                   adaptive=False, low_bias=True, n_jobs=1, verbose=None):
+    """Compute power spectrum density (PSD) using a multi-taper method
+
+    Parameters
+    ----------
+    x : array, shape=(n_signals, n_times) or (n_times,)
+        The data to compute PSD from.
+    sfreq : float
+        The sampling frequency.
+    fmin : float
+        The lower frequency of interest.
+    fmax : float
+        The upper frequency of interest.
+    bandwidth : float
+        The bandwidth of the multi taper windowing function in Hz.
+    adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD
+        (slow, use n_jobs >> 1 to speed up computation).
+    low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth.
+    n_jobs : int
+        Number of parallel jobs to use (only used if adaptive=True).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    psd : array, shape=(n_signals, len(freqs)) or (len(freqs),)
+        The computed PSD.
+    freqs : array
+        The frequency points in Hz of the PSD.
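+
+    Examples
+    --------
+    A minimal sketch on white noise (values are arbitrary):
+
+    >>> import numpy as np
+    >>> x = np.random.randn(2, 1000)
+    >>> psd, freqs = multitaper_psd(x, sfreq=250., fmin=1., fmax=40.)
+    >>> psd.shape == (2, len(freqs))
+    True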
+    """
+    if x.ndim > 2:
+        raise ValueError('x can only be 1d or 2d')
+
+    x_in = np.atleast_2d(x)
+
+    n_times = x_in.shape[1]
+
+    # compute standardized half-bandwidth
+    if bandwidth is not None:
+        half_nbw = float(bandwidth) * n_times / (2 * sfreq)
+    else:
+        half_nbw = 4
+
+    n_tapers_max = int(2 * half_nbw)
+
+    dpss, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+                                 low_bias=low_bias)
+
+    # compute the tapered spectra
+    x_mt, freqs = _mt_spectra(x_in, dpss, sfreq)
+
+    # decide which frequencies to keep
+    freq_mask = (freqs >= fmin) & (freqs <= fmax)
+
+    # combine the tapered spectra
+    if adaptive and len(eigvals) < 3:
+        warn('Not adaptively combining the spectral estimators '
+             'due to a low number of tapers.')
+        adaptive = False
+
+    if not adaptive:
+        x_mt = x_mt[:, :, freq_mask]
+        weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
+        psd = _psd_from_mt(x_mt, weights)
+    else:
+        parallel, my_psd_from_mt_adaptive, n_jobs = \
+            parallel_func(_psd_from_mt_adaptive, n_jobs)
+        out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
+                       for x in np.array_split(x_mt, n_jobs))
+        psd = np.concatenate(out)
+
+    if x.ndim == 1:
+        # return a 1d array if input was 1d
+        psd = psd[0, :]
+
+    freqs = freqs[freq_mask]
+
+    return psd, freqs
diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py
new file mode 100644
index 0000000..d4bd5ec
--- /dev/null
+++ b/mne/time_frequency/psd.py
@@ -0,0 +1,98 @@
+# Author : Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu> (2011)
+# License : BSD 3-clause
+
+import numpy as np
+
+import logging
+logger = logging.getLogger('mne')
+
+# XXX : don't import pylab here or you will break the doc
+
+from ..parallel import parallel_func
+from ..fiff.proj import make_projector_info
+from .. import verbose
+
+
+ at verbose
+def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
+                    fmin=0, fmax=np.inf, NFFT=2048, n_jobs=1,
+                    plot=False, proj=False, verbose=None):
+    """Compute power spectral density with multi-taper
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+
+    tmin : float
+        Min time instant to consider
+
+    tmax : float
+        Max time instant to consider
+
+    picks : None or array of integers
+        The selection of channels to include in the computation.
+        If None, take all channels.
+
+    fmin : float
+        Min frequency of interest
+
+    fmax : float
+        Max frequency of interest
+
+    NFFT : int
+        The length of the FFT window in samples, i.e. the segment length.
+        The smaller it is, the smoother the PSDs.
+
+    n_jobs : int
+        Number of CPUs to use in the computation.
+
+    plot : bool
+        Plot the PSD estimates
+
+    proj : bool
+        Apply SSP projection vectors
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    psd : array of float
+        The PSD for all channels
+
+    freqs : array of float
+        The frequencies
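+
+    Examples
+    --------
+    A hedged sketch (assumes `raw` is an open Raw instance, not defined
+    here):
+
+    >>> psd, freqs = compute_raw_psd(raw, tmin=0, tmax=60, fmin=2, fmax=70,
+    ...                              NFFT=2048)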
+    """
+    start, stop = raw.time_as_index([tmin, tmax])
+    if picks is not None:
+        data, times = raw[picks, start:(stop + 1)]
+    else:
+        data, times = raw[:, start:(stop + 1)]
+
+    if proj:
+        proj, _ = make_projector_info(raw.info)
+        if picks is not None:
+            data = np.dot(proj[picks][:, picks], data)
+        else:
+            data = np.dot(proj, data)
+
+    NFFT = int(NFFT)
+    Fs = raw.info['sfreq']
+
+    logger.info("Effective window size : %0.3f (s)" % (NFFT / float(Fs)))
+
+    import pylab as pl
+    parallel, my_psd, n_jobs = parallel_func(pl.psd, n_jobs)
+    fig = pl.figure()
+    out = parallel(my_psd(d, Fs=Fs, NFFT=NFFT) for d in data)
+    if not plot:
+        pl.close(fig)
+    freqs = out[0][1]
+    psd = np.array(zip(*out)[0])
+
+    mask = (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[mask]
+    psd = psd[:, mask]
+
+    return psd, freqs
diff --git a/mne/time_frequency/stft.py b/mne/time_frequency/stft.py
new file mode 100644
index 0000000..e9fe61f
--- /dev/null
+++ b/mne/time_frequency/stft.py
@@ -0,0 +1,239 @@
+from math import ceil
+import numpy as np
+from scipy.fftpack import fft, ifft, fftfreq
+
+import logging
+logger = logging.getLogger('mne')
+
+from .. import verbose
+
+
+ at verbose
+def stft(x, wsize, tstep=None, verbose=None):
+    """STFT Short-Term Fourier Transform using a sine window.
+
+    The transformation is designed to be a tight frame that can be
+    perfectly inverted. It only returns the positive frequencies.
+
+    Parameters
+    ----------
+    x : 2d array of size n_signals x T
+        containing the multi-channel signal
+    wsize : int
+        length of the STFT window in samples (must be a multiple of 4)
+    tstep : int
+        step between successive windows in samples (must be a multiple of 2,
+        a divider of wsize and smaller than wsize/2) (default: wsize/2)
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : 3d array of shape [n_signals, wsize / 2 + 1, n_step]
+        STFT coefficients for positive frequencies with
+        n_step = ceil(T / tstep)
+
+    Usage
+    -----
+    X = stft(x, wsize)
+    X = stft(x, wsize, tstep)
+
+    See also
+    --------
+    istft
+    stftfreq
+    """
+    if not np.isrealobj(x):
+        raise ValueError("x is not a real valued array")
+
+    if x.ndim == 1:
+        x = x[None, :]
+
+    n_signals, T = x.shape
+    wsize = int(wsize)
+
+    ### Errors and warnings ###
+    if wsize % 4:
+        raise ValueError('The window length must be a multiple of 4.')
+
+    if tstep is None:
+        tstep = wsize / 2
+
+    tstep = int(tstep)
+
+    if (wsize % tstep) or (tstep % 2):
+        raise ValueError('The step size must be a multiple of 2 and a '
+                         'divider of the window length.')
+
+    if tstep > wsize / 2:
+        raise ValueError('The step size must be smaller than half the '
+                         'window length.')
+
+    n_step = int(ceil(T / float(tstep)))
+    n_freq = wsize / 2 + 1
+    logger.info("Number of frequencies: %d" % n_freq)
+    logger.info("Number of time steps: %d" % n_step)
+
+    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex)
+
+    if n_signals == 0:
+        return X
+
+    # Defining sine window
+    win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
+    win2 = win ** 2
+
+    swin = np.zeros((n_step - 1) * tstep + wsize)
+    for t in range(n_step):
+        swin[t * tstep:t * tstep + wsize] += win2
+    swin = np.sqrt(wsize * swin)
+
+    # Zero-padding and Pre-processing for edges
+    xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep),
+                  dtype=x.dtype)
+    xp[:, (wsize - tstep) / 2: (wsize - tstep) / 2 + T] = x
+    x = xp
+
+    for t in range(n_step):
+        # Framing
+        wwin = win / swin[t * tstep: t * tstep + wsize]
+        frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :]
+        # FFT
+        fframe = fft(frame)
+        X[:, :, t] = fframe[:, :n_freq]
+
+    return X
+
+
+def istft(X, tstep=None, Tx=None):
+    """ISTFT Inverse Short-Term Fourier Transform using a sine window
+
+    Parameters
+    ----------
+    X : 3d array of shape [n_signals, wsize / 2 + 1,  n_step]
+        The STFT coefficients for positive frequencies
+    tstep : int
+        step between successive windows in samples (must be a multiple of 2,
+        a divider of wsize and smaller than wsize/2) (default: wsize/2)
+    Tx : int
+        Length of returned signal. If None Tx = n_step * tstep
+
+    Returns
+    -------
+    x : 1d array of length Tx
+        vector containing the inverse STFT signal
+
+    Usage
+    -----
+    x = istft(X)
+    x = istft(X, tstep)
+
+    See also
+    --------
+    stft
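+
+    Examples
+    --------
+    A round-trip sketch (the tight frame allows near-perfect
+    reconstruction):
+
+    >>> import numpy as np
+    >>> x = np.random.randn(1, 1000)
+    >>> X = stft(x, wsize=128, tstep=64)
+    >>> x_rec = istft(X, tstep=64, Tx=1000)
+    >>> bool(np.allclose(x, x_rec, atol=1e-6))
+    True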
+    """
+    ### Errors and warnings ###
+    n_signals, n_win, n_step = X.shape
+    if n_win % 2 == 0:
+        raise ValueError('The number of rows of the STFT matrix must be odd.')
+
+    wsize = 2 * (n_win - 1)
+    if tstep is None:
+        tstep = wsize / 2
+
+    if wsize % tstep:
+        raise ValueError('The step size must be a divider of two times the '
+                         'number of rows of the STFT matrix minus two.')
+
+    if tstep % 2:
+        raise ValueError('The step size must be a multiple of 2.')
+
+    if tstep > wsize / 2:
+        raise ValueError('The step size must be smaller than the number of '
+                         'rows of the STFT matrix minus one.')
+
+    if Tx is None:
+        Tx = n_step * tstep
+
+    T = n_step * tstep
+
+    x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float)
+
+    if n_signals == 0:
+        return x[:, :Tx]
+
+    ### Computing inverse STFT signal ###
+    # Defining sine window
+    win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
+    # win = win / norm(win);
+    # Pre-processing for edges
+    swin = np.zeros(T + wsize - tstep, dtype=np.float)
+    for t in range(n_step):
+        swin[t * tstep:t * tstep + wsize] += win ** 2
+    swin = np.sqrt(swin / wsize)
+
+    fframe = np.empty((n_signals, n_win + wsize / 2 - 1), dtype=X.dtype)
+    for t in range(n_step):
+        # IFFT
+        fframe[:, :n_win] = X[:, :, t]
+        fframe[:, n_win:] = np.conj(X[:, wsize / 2 - 1: 0: -1, t])
+        frame = ifft(fframe)
+        wwin = win / swin[t * tstep:t * tstep + wsize]
+        # Overlap-add
+        x[:, t * tstep: t * tstep + wsize] += np.real(np.conj(frame) * wwin)
+
+    # Truncation
+    x = x[:, (wsize - tstep) / 2: (wsize - tstep) / 2 + T + 1][:, :Tx].copy()
+    return x
+
+
+def stftfreq(wsize, sfreq=None):
+    """Frequencies of stft transformation
+
+    Parameters
+    ----------
+    wsize : int
+        Size of stft window
+    sfreq : float
+        Sampling frequency. If None, the frequencies are given between 0 and
+        pi; otherwise they are given in Hz.
+
+    Returns
+    -------
+    freqs : array
+        The positive frequencies returned by stft
+
+
+    See also
+    --------
+    stft
+    istft
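+
+    Examples
+    --------
+    A one-line sketch: 65 frequencies from 0 to 500 Hz for a 128-sample
+    window sampled at 1 kHz:
+
+    >>> freqs = stftfreq(128, sfreq=1000.)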
+    """
+    n_freq = wsize / 2 + 1
+    freqs = fftfreq(wsize)
+    freqs = np.abs(freqs[:n_freq])
+    if sfreq is not None:
+        freqs *= float(sfreq)
+    return freqs
+
+
+def stft_norm2(X):
+    """Compute L2 norm of STFT transform
+
+    It takes into account that stft only returns positive frequencies.
+    Since we use a tight frame, this quantity is conserved by the STFT.
+
+    Parameters
+    ----------
+    X : 3D complex array
+        The STFT transforms
+
+    Returns
+    -------
+    norms2 : array
+        The squared L2 norm of every row of X.
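+
+    Examples
+    --------
+    A sketch of the (approximate) norm conservation:
+
+    >>> import numpy as np
+    >>> x = np.random.randn(1, 256)
+    >>> X = stft(x, wsize=128, tstep=64)
+    >>> norms2 = stft_norm2(X)  # approximately np.sum(x ** 2, axis=1)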
+    """
+    X2 = np.abs(X) ** 2
+    # compute all L2 coefs and remove freq zero once.
+    norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1))
+    return norms2
diff --git a/mne/time_frequency/tests/__init__.py b/mne/time_frequency/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/time_frequency/tests/test_ar.py b/mne/time_frequency/tests/test_ar.py
new file mode 100644
index 0000000..b8003fe
--- /dev/null
+++ b/mne/time_frequency/tests/test_ar.py
@@ -0,0 +1,44 @@
+import os.path as op
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+import nose
+
+from mne import fiff
+from mne.time_frequency import yule_walker, ar_raw
+
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data',
+                    'test_raw.fif')
+
+
+def test_yule_walker():
+    """Test Yule-Walker against statsmodels
+    """
+    try:
+        from statsmodels.regression.linear_model import yule_walker as sm_yw
+        d = np.random.randn(100)
+        sm_rho, sm_sigma = sm_yw(d, order=2)
+        rho, sigma = yule_walker(d, order=2)
+        assert_array_almost_equal(sm_sigma, sigma)
+        assert_array_almost_equal(sm_rho, rho)
+    except ImportError:
+        raise nose.SkipTest('statsmodels not installed; skipping test')
+
+
+def test_ar_raw():
+    """Test fitting AR model on raw data
+    """
+    raw = fiff.Raw(raw_fname)
+
+    # picks MEG gradiometers
+    picks = fiff.pick_types(raw.info, meg='grad', exclude='bads')
+
+    picks = picks[:2]
+
+    tmin, tmax = 0, 10  # use the first 10 s of data
+    order = 2
+    coefs = ar_raw(raw, picks=picks, order=order, tmin=tmin, tmax=tmax)
+    mean_coefs = np.mean(coefs, axis=0)
+
+    assert_true(coefs.shape == (len(picks), order))
+    assert_true(0.9 < mean_coefs[0] < 1.1)
diff --git a/mne/time_frequency/tests/test_multitaper.py b/mne/time_frequency/tests/test_multitaper.py
new file mode 100644
index 0000000..a6196c1
--- /dev/null
+++ b/mne/time_frequency/tests/test_multitaper.py
@@ -0,0 +1,47 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+
+from mne.time_frequency import dpss_windows, multitaper_psd
+from mne.utils import requires_nitime
+
+
+ at requires_nitime
+def test_dpss_windows():
+    """ Test computation of DPSS windows """
+
+    import nitime as ni
+    N = 1000
+    half_nbw = 4
+    Kmax = int(2 * half_nbw)
+
+    dpss, eigs = dpss_windows(N, half_nbw, Kmax, low_bias=False)
+    dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax)
+
+    assert_array_almost_equal(dpss, dpss_ni)
+    assert_array_almost_equal(eigs, eigs_ni)
+
+    dpss, eigs = dpss_windows(N, half_nbw, Kmax, interp_from=200,
+                              low_bias=False)
+    dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax,
+                                                  interp_from=200)
+
+    assert_array_almost_equal(dpss, dpss_ni)
+    assert_array_almost_equal(eigs, eigs_ni)
+
+
+ at requires_nitime
+def test_multitaper_psd():
+    """ Test multi-taper PSD computation """
+
+    import nitime as ni
+    n_times = 1000
+    x = np.random.randn(5, n_times)
+    sfreq = 500
+
+    for adaptive, n_jobs in zip((False, True, True), (1, 1, 2)):
+        psd, freqs = multitaper_psd(x, sfreq, adaptive=adaptive, n_jobs=n_jobs)
+        freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(x, sfreq,
+                              adaptive=adaptive, jackknife=False)
+
+        # for some reason nitime returns n_times + 1 frequency points
+        # causing the value at 0 to be different
+        assert_array_almost_equal(psd[:, 1:], psd_ni[:, 1:-1], decimal=3)
+        assert_array_almost_equal(freqs, freqs_ni[:-1])
diff --git a/mne/time_frequency/tests/test_psd.py b/mne/time_frequency/tests/test_psd.py
new file mode 100644
index 0000000..2769c10
--- /dev/null
+++ b/mne/time_frequency/tests/test_psd.py
@@ -0,0 +1,40 @@
+import numpy as np
+import os.path as op
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne import fiff
+from mne.time_frequency import compute_raw_psd
+
+
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data',
+                    'test_raw.fif')
+
+
+def test_psd():
+    """Test PSD estimation
+    """
+    raw = fiff.Raw(raw_fname)
+
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG gradiometers
+    picks = fiff.pick_types(raw.info, meg='mag', eeg=False, stim=False,
+                            exclude=exclude)
+
+    picks = picks[:2]
+
+    tmin, tmax = 0, 10  # use the first 10 s of data
+    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
+    NFFT = 124  # the FFT size (NFFT). Ideally a power of 2
+    psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                  fmin=fmin, fmax=fmax, NFFT=NFFT, n_jobs=1,
+                                  proj=False)
+    psds_proj, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                       fmin=fmin, fmax=fmax, NFFT=NFFT,
+                                       n_jobs=1, proj=True)
+
+    assert_array_almost_equal(psds, psds_proj)
+    assert_true(psds.shape == (len(picks), len(freqs)))
+    assert_true(np.sum(freqs < 0) == 0)
+    assert_true(np.sum(psds < 0) == 0)
diff --git a/mne/time_frequency/tests/test_stft.py b/mne/time_frequency/tests/test_stft.py
new file mode 100644
index 0000000..59b3789
--- /dev/null
+++ b/mne/time_frequency/tests/test_stft.py
@@ -0,0 +1,40 @@
+import numpy as np
+from scipy import linalg
+from numpy.testing import assert_almost_equal, assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne.time_frequency.stft import stft, istft, stftfreq, stft_norm2
+
+
+def test_stft():
+    "Test stft and istft tight frame property"
+    sfreq = 1000.  # Hz
+    f = 7.  # Hz
+    for T in [253, 256]:  # try with even and odd numbers
+        t = np.arange(T).astype(np.float)
+        x = np.sin(2 * np.pi * f * t / sfreq)
+        x = np.array([x, x + 1.])
+        wsize = 128
+        tstep = 4
+        X = stft(x, wsize, tstep)
+        xp = istft(X, tstep, Tx=T)
+
+        freqs = stftfreq(wsize, sfreq=1000)
+
+        max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
+
+        assert_true(X.shape[1] == len(freqs))
+        assert_true(np.all(freqs >= 0.))
+        assert_true(np.abs(max_freq - f) < 1.)
+
+        assert_array_almost_equal(x, xp, decimal=6)
+
+        # norm conservation thanks to tight frame property
+        assert_almost_equal(np.sqrt(stft_norm2(X)),
+                            map(linalg.norm, x), decimal=2)
+
+        # Try with empty array
+        x = np.zeros((0, T))
+        X = stft(x, wsize, tstep)
+        xp = istft(X, tstep, T)
+        assert_true(xp.shape == x.shape)
diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py
new file mode 100644
index 0000000..ccce51a
--- /dev/null
+++ b/mne/time_frequency/tests/test_tfr.py
@@ -0,0 +1,75 @@
+import numpy as np
+import os.path as op
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne import fiff, Epochs, read_events
+from mne.time_frequency import induced_power, single_trial_power
+from mne.time_frequency.tfr import cwt_morlet, morlet
+
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests', 'data',
+                    'test_raw.fif')
+event_fname = op.join(op.dirname(__file__), '..', '..', 'fiff', 'tests',
+                      'data', 'test-eve.fif')
+
+
+def test_morlet():
+    """Test morlet with and without zero mean"""
+    Wz = morlet(1000, [10], 2., zero_mean=True)
+    W = morlet(1000, [10], 2., zero_mean=False)
+
+    assert_true(np.abs(np.mean(np.real(Wz[0]))) < 1e-5)
+    assert_true(np.abs(np.mean(np.real(W[0]))) > 1e-3)
+
+
+def test_time_frequency():
+    """Test time frequency transform (PSD and phase lock)
+    """
+    # Set parameters
+    event_id = 1
+    tmin = -0.2
+    tmax = 0.5
+
+    # Setup for reading the raw data
+    raw = fiff.Raw(raw_fname)
+    events = read_events(event_fname)
+
+    include = []
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG gradiometers
+    picks = fiff.pick_types(raw.info, meg='grad', eeg=False,
+                            stim=False, include=include, exclude=exclude)
+
+    picks = picks[:2]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    data = epochs.get_data()
+    times = epochs.times
+
+    frequencies = np.arange(6, 20, 5)  # define frequencies of interest
+    Fs = raw.info['sfreq']  # sampling in Hz
+    n_cycles = frequencies / float(4)
+    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
+                                      n_cycles=n_cycles, use_fft=True)
+
+    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
+    assert_true(power.shape == phase_lock.shape)
+    assert_true(np.sum(phase_lock >= 1) == 0)
+    assert_true(np.sum(phase_lock <= 0) == 0)
+
+    power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
+                                      n_cycles=2, use_fft=False)
+
+    assert_true(power.shape == (len(picks), len(frequencies), len(times)))
+    assert_true(power.shape == phase_lock.shape)
+    assert_true(np.sum(phase_lock >= 1) == 0)
+    assert_true(np.sum(phase_lock <= 0) == 0)
+
+    tfr = cwt_morlet(data[0], Fs, frequencies, use_fft=True, n_cycles=2)
+    assert_true(tfr.shape == (len(picks), len(frequencies), len(times)))
+
+    single_power = single_trial_power(data, Fs, frequencies, use_fft=False,
+                                      n_cycles=2)
+
+    assert_array_almost_equal(np.mean(single_power, axis=0), power)
diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py
new file mode 100644
index 0000000..8613ceb
--- /dev/null
+++ b/mne/time_frequency/tfr.py
@@ -0,0 +1,407 @@
+"""A module which implements the continuous wavelet transform
+with complex Morlet wavelets.
+
+Author : Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu> (2011)
+License : BSD 3-clause
+
+inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
+"""
+
+from math import sqrt
+import numpy as np
+from scipy import linalg
+from scipy.fftpack import fftn, ifftn
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..baseline import rescale
+from ..parallel import parallel_func
+from .. import verbose
+
+
+def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
+    """Compute Wavelets for the given frequency range
+
+    Parameters
+    ----------
+    Fs : float
+        Sampling frequency.
+
+    freqs : array
+        frequency range of interest (1 x Frequencies)
+
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+
+    sigma : float, (optional)
+        Controls the width of the wavelet, i.e. its temporal resolution.
+        If sigma is None, the temporal resolution adapts with the
+        frequency, as in any wavelet transform: the higher the frequency,
+        the shorter the wavelet. If sigma is fixed, the temporal
+        resolution is fixed, as in the short-time Fourier transform, and
+        the number of oscillations increases with the frequency.
+
+    zero_mean : bool
+        Make sure the wavelet is zero mean
+
+    Returns
+    -------
+    Ws : list of array
+        Wavelets time series
+    """
+    Ws = list()
+    n_cycles = np.atleast_1d(n_cycles)
+    if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
+        raise ValueError("n_cycles should be fixed or defined for "
+                         "each frequency.")
+    for k, f in enumerate(freqs):
+        if len(n_cycles) != 1:
+            this_n_cycles = n_cycles[k]
+        else:
+            this_n_cycles = n_cycles[0]
+        # fixed or scale-dependent window
+        if sigma is None:
+            sigma_t = this_n_cycles / (2.0 * np.pi * f)
+        else:
+            sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
+        # this scaling factor is proportional to (Tallon-Baudry 98):
+        # (sigma_t*sqrt(pi))^(-1/2);
+        t = np.arange(0, 5 * sigma_t, 1.0 / Fs)
+        t = np.r_[-t[::-1], t[1:]]
+        oscillation = np.exp(2.0 * 1j * np.pi * f * t)
+        gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
+        if zero_mean:  # to make it zero mean
+            real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
+            oscillation -= real_offset
+        W = oscillation * gaussian_envelope
+        W /= sqrt(0.5) * linalg.norm(W.ravel())
+        Ws.append(W)
+    return Ws
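+
+# Usage sketch (illustrative values, not from the upstream tests): at a
+# 1 kHz sampling rate one complex wavelet is returned per frequency:
+#   Ws = morlet(1000., [10., 20.], n_cycles=7)
+#   len(Ws)  # -> 2, and each Ws[k] is a complex 1D array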
+
+
+def _centered(arr, newsize):
+    # Return the center newsize portion of the array.
+    newsize = np.asarray(newsize)
+    currsize = np.array(arr.shape)
+    startind = (currsize - newsize) / 2
+    endind = startind + newsize
+    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
+    return arr[tuple(myslice)]
+
+
+def _cwt_fft(X, Ws, mode="same"):
+    """Compute cwt with fft based convolutions
+    Return a generator over signals.
+    """
+    X = np.asarray(X)
+
+    # Precompute wavelets for given frequency range to save time
+    n_signals, n_times = X.shape
+    n_freqs = len(Ws)
+
+    Ws_max_size = max(W.size for W in Ws)
+    size = n_times + Ws_max_size - 1
+    # Always use 2**n-sized FFT
+    fsize = int(2 ** np.ceil(np.log2(size)))  # int shape for fftn/np.empty
+
+    # precompute FFTs of Ws
+    fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
+    for i, W in enumerate(Ws):
+        if len(W) > n_times:
+            raise ValueError('Wavelet is too long for such a short signal. '
+                             'Reduce the number of cycles.')
+        fft_Ws[i] = fftn(W, [fsize])
+
+    for k, x in enumerate(X):
+        if mode == "full":
+            tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
+        elif mode == "same" or mode == "valid":
+            tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
+
+        fft_x = fftn(x, [fsize])
+        for i, W in enumerate(Ws):
+            ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
+            if mode == "valid":
+                sz = abs(W.size - n_times) + 1
+                offset = (n_times - sz) / 2
+                tfr[i, offset:(offset + sz)] = _centered(ret, sz)
+            else:
+                tfr[i, :] = _centered(ret, n_times)
+        yield tfr
+
+
+def _cwt_convolve(X, Ws, mode='same'):
+    """Compute time freq decomposition with temporal convolutions
+    Return a generator over signals.
+    """
+    X = np.asarray(X)
+
+    n_signals, n_times = X.shape
+    n_freqs = len(Ws)
+
+    # Compute convolutions
+    for x in X:
+        tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
+        for i, W in enumerate(Ws):
+            if len(W) > len(x):
+                raise ValueError('Wavelet is too long for such a short '
+                                 'signal. Reduce the number of cycles.')
+            ret = np.convolve(x, W, mode=mode)
+            if mode == "valid":
+                sz = abs(W.size - n_times) + 1
+                offset = (n_times - sz) / 2
+                tfr[i, offset:(offset + sz)] = ret
+            else:
+                tfr[i] = ret
+        yield tfr
+
+
+def cwt_morlet(X, Fs, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
+    """Compute time freq decomposition with Morlet wavelets
+
+    Parameters
+    ----------
+    X : array of shape [n_signals, n_times]
+        Signals (one per row).
+    Fs : float
+        Sampling frequency.
+    freqs : array
+        Array of frequencies of interest.
+    use_fft : bool
+        Compute the convolutions with FFT or by temporal convolution.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+
+    Returns
+    -------
+    tfr : 3D array
+        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+    """
+    mode = 'same'
+    # mode = "valid"
+    n_signals, n_times = X.shape
+    n_frequencies = len(freqs)
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = morlet(Fs, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    if use_fft:
+        coefs = _cwt_fft(X, Ws, mode)
+    else:
+        coefs = _cwt_convolve(X, Ws, mode)
+
+    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
+    for k, tfr in enumerate(coefs):
+        tfrs[k] = tfr
+
+    return tfrs
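+
+# Usage sketch (illustrative shapes): the output is complex with shape
+# (n_signals, n_frequencies, n_times); n_cycles=2 keeps the wavelets
+# shorter than the signals:
+#   X = np.random.randn(5, 1000)
+#   tfr = cwt_morlet(X, 1000., np.array([8., 10., 12.]), n_cycles=2)
+#   tfr.shape  # -> (5, 3, 1000)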
+
+
+def cwt(X, Ws, use_fft=True, mode='same', decim=1):
+    """Compute time freq decomposition with continuous wavelet transform
+
+    Parameters
+    ----------
+    X : array of shape [n_signals, n_times]
+        Signals (one per row).
+    Ws : list of array
+        Wavelets time series.
+    use_fft : bool
+        Use FFT for convolutions.
+    mode : 'same' | 'valid' | 'full'
+        Convolution mode.
+    decim : int
+        Temporal decimation factor.
+
+    Returns
+    -------
+    tfr : 3D array
+        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+    """
+    n_signals, n_times = X[:, ::decim].shape
+    n_frequencies = len(Ws)
+
+    if use_fft:
+        coefs = _cwt_fft(X, Ws, mode)
+    else:
+        coefs = _cwt_convolve(X, Ws, mode)
+
+    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
+    for k, tfr in enumerate(coefs):
+        tfrs[k] = tfr[..., ::decim]
+
+    return tfrs
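+
+# Sketch of the two-step pattern cwt() enables (illustrative values):
+# precompute the wavelets once, reuse them, and decimate in time by 4:
+#   Ws = morlet(1000., [10., 20.], n_cycles=2)
+#   tfr = cwt(np.random.randn(2, 1000), Ws, use_fft=True, decim=4)
+#   tfr.shape  # -> (2, 2, 250)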
+
+
+def _time_frequency(X, Ws, use_fft):
+    """Aux of time_frequency for parallel computing over channels
+    """
+    n_epochs, n_times = X.shape
+    n_frequencies = len(Ws)
+    psd = np.zeros((n_frequencies, n_times))  # PSD
+    plf = np.zeros((n_frequencies, n_times), dtype=np.complex)  # phase lock
+
+    mode = 'same'
+    if use_fft:
+        tfrs = _cwt_fft(X, Ws, mode)
+    else:
+        tfrs = _cwt_convolve(X, Ws, mode)
+
+    for tfr in tfrs:
+        tfr_abs = np.abs(tfr)
+        psd += tfr_abs ** 2
+        plf += tfr / tfr_abs
+
+    return psd, plf
+
+
+@verbose
+def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
+                       baseline=None, baseline_mode='ratio', times=None,
+                       decim=1, n_jobs=1, zero_mean=False, verbose=None):
+    """Compute time-frequency power on single epochs
+
+    Parameters
+    ----------
+    data : array of shape [n_epochs, n_channels, n_times]
+        The epochs
+    Fs : float
+        Sampling rate
+    frequencies : array-like
+        The frequencies
+    use_fft : bool
+        Use the FFT for convolutions or not.
+    n_cycles : float | array of float
+        Number of cycles in the Morlet wavelet. Fixed number
+        or one per frequency.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None, do not apply it. If baseline is (a, b),
+        the interval is between "a (s)" and "b (s)".
+        If a is None, the beginning of the data is used,
+        and if b is None, b is set to the end of the interval.
+        If baseline is equal to (None, None), the entire time
+        interval is used.
+    baseline_mode : None | 'ratio' | 'zscore'
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or zscore (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline))
+    times : array
+        The time points; required to apply baseline correction.
+    decim : int
+        Temporal decimation factor.
+    n_jobs : int
+        The number of epochs to process in parallel.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : 4D array
+        Power estimate (Epochs x Channels x Frequencies x Timepoints).
+    """
+    mode = 'same'
+    n_frequencies = len(frequencies)
+    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
+
+    logger.info("Computing time-frequency power on single epochs...")
+
+    power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
+                     dtype=np.float)
+
+    # Package the `cwt` arguments once so that both call sites below stay
+    # in sync when new function arguments are added.
+    cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
+    if n_jobs == 1:
+        for k, e in enumerate(data):
+            power[k] = np.abs(cwt(e, **cwt_kw)) ** 2
+    else:
+        # Precompute tf decompositions in parallel
+        tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
+        for k, tfr in enumerate(tfrs):
+            power[k] = np.abs(tfr) ** 2
+
+    # Run baseline correction.  Be sure to decimate the times array as well if
+    # needed.
+    if times is not None:
+        times = times[::decim]
+    power = rescale(power, times, baseline, baseline_mode, copy=False)
+    return power
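+
+# Usage sketch (illustrative shapes): baseline-correct single-trial power
+# with the default 'ratio' mode over a prestimulus interval:
+#   data = np.random.randn(10, 4, 600)    # epochs x channels x times
+#   times = np.arange(600) / 600. - 0.2   # seconds, 0.2 s of prestimulus
+#   power = single_trial_power(data, Fs=600., frequencies=[10., 20.],
+#                              n_cycles=2, baseline=(None, 0), times=times)
+#   power.shape  # -> (10, 4, 2, 600)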
+
+
+def induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
+                  decim=1, n_jobs=1, zero_mean=False):
+    """Compute time induced power and inter-trial phase-locking factor
+
+    The time frequency decomposition is done with Morlet wavelets
+
+    Parameters
+    ----------
+    data : array
+        3D array of shape [n_epochs, n_channels, n_times]
+    Fs : float
+        Sampling frequency.
+    frequencies : array
+        Array of frequencies of interest
+    use_fft : bool
+        Compute transform with fft based convolutions or temporal
+        convolutions.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    decim : int
+        Temporal decimation factor.
+    n_jobs : int
+        The number of CPUs used in parallel; all CPUs are used if -1.
+        Requires the joblib package.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+
+    Returns
+    -------
+    power : 3D array
+        Induced power (Channels x Frequencies x Timepoints).
+        Squared amplitude of time-frequency coefficients.
+    phase_lock : 3D array
+        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints).
+    """
+    n_frequencies = len(frequencies)
+    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    if n_jobs == 1:
+        psd = np.empty((n_channels, n_frequencies, n_times))
+        plf = np.empty((n_channels, n_frequencies, n_times), dtype=np.complex)
+
+        for c in range(n_channels):
+            X = np.squeeze(data[:, c, :])
+            this_psd, this_plf = _time_frequency(X, Ws, use_fft)
+            psd[c], plf[c] = this_psd[:, ::decim], this_plf[:, ::decim]
+    else:
+        parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
+
+        psd_plf = parallel(my_time_frequency(np.squeeze(data[:, c, :]),
+                                             Ws, use_fft)
+                           for c in range(n_channels))
+
+        psd = np.zeros((n_channels, n_frequencies, n_times))
+        plf = np.zeros((n_channels, n_frequencies, n_times), dtype=np.complex)
+        for c, (psd_c, plf_c) in enumerate(psd_plf):
+            psd[c, :, :], plf[c, :, :] = psd_c[:, ::decim], plf_c[:, ::decim]
+
+    psd /= n_epochs
+    plf = np.abs(plf) / n_epochs
+    return psd, plf
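+
+# Usage sketch (illustrative shapes): power and phase locking averaged
+# across epochs, one value per channel/frequency/time point:
+#   data = np.random.randn(10, 4, 600)  # epochs x channels x times
+#   psd, plf = induced_power(data, Fs=600., frequencies=[8., 12.],
+#                            n_cycles=2)
+#   psd.shape  # -> (4, 2, 600), and plf lies in [0, 1]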
diff --git a/mne/transforms/__init__.py b/mne/transforms/__init__.py
new file mode 100644
index 0000000..990ca43
--- /dev/null
+++ b/mne/transforms/__init__.py
@@ -0,0 +1,2 @@
+from .transforms import read_trans, write_trans, invert_transform, \
+                        transform_source_space_to, transform_coordinates
diff --git a/mne/transforms/coreg.py b/mne/transforms/coreg.py
new file mode 100644
index 0000000..8002831
--- /dev/null
+++ b/mne/transforms/coreg.py
@@ -0,0 +1,67 @@
+"""Coregistration between different coordinate frames"""
+
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from numpy import dot
+from scipy.optimize import leastsq
+
+from .transforms import apply_trans, rotation, translation
+
+
+def fit_matched_pts(src_pts, tgt_pts, tol=None, params=False):
+    """Find a transform that minimizes the squared distance between two
+    matching sets of points.
+
+    Uses :func:`scipy.optimize.leastsq` to find a transformation involving
+    rotation and translation.
+
+    Parameters
+    ----------
+    src_pts : array, shape = (n, 3)
+        Points to which the transform should be applied.
+    tgt_pts : array, shape = (n, 3)
+        Points to which src_pts should be fitted. Each point in tgt_pts should
+        correspond to the point in src_pts with the same index.
+    tol : scalar | None
+        The error tolerance. If the distance between any of the matched points
+        exceeds this value in the solution, a RuntimeError is raised. With
+        None, no error check is performed.
+    params : bool
+        Also return the estimated rotation and translation parameters.
+
+    Returns
+    -------
+    trans : array, shape = (4, 4)
+        Transformation that, if applied to src_pts, minimizes the squared
+        distance to tgt_pts.
+    [rotation : array, len = 3, optional]
+        The rotation parameters around the x, y, and z axes (in radians).
+    [translation : array, len = 3, optional]
+        The translation parameters in x, y, and z direction.
+    """
+    def error(params):
+        trans = dot(translation(*params[:3]), rotation(*params[3:]))
+        est = apply_trans(trans, src_pts)
+        return (tgt_pts - est).ravel()
+
+    x0 = (0, 0, 0, 0, 0, 0)
+    x, _, _, _, _ = leastsq(error, x0, full_output=True)
+
+    transl = x[:3]
+    rot = x[3:]
+    trans = dot(translation(*transl), rotation(*rot))
+
+    # assess the error of the solution
+    if tol is not None:
+        est_pts = apply_trans(trans, src_pts)
+        err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
+        if np.any(err > tol):
+            raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
+
+    if params:
+        return trans, rot, transl
+    else:
+        return trans
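+
+# Usage sketch (random points, mirroring the unit test): recover a known
+# rigid transform from two matched point sets:
+#   src = np.random.normal(size=(6, 3))
+#   tgt = apply_trans(dot(translation(.1, 0, 0), rotation(0, 0, .2)), src)
+#   trans = fit_matched_pts(src, tgt)
+#   np.allclose(apply_trans(trans, src), tgt)  # -> True (approximately)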
diff --git a/mne/transforms/tests/__init__.py b/mne/transforms/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/transforms/tests/test_coreg.py b/mne/transforms/tests/test_coreg.py
new file mode 100644
index 0000000..780c3b6
--- /dev/null
+++ b/mne/transforms/tests/test_coreg.py
@@ -0,0 +1,20 @@
+from nose.tools import assert_raises
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+
+from mne.transforms.coreg import fit_matched_pts
+from mne.transforms.transforms import apply_trans, rotation, translation
+
+
+def test_fit_matched_pts():
+    """Test fitting two matching sets of points"""
+    src_pts = np.random.normal(size=(5, 3))
+    trans0 = np.dot(translation(2, 65, 3), rotation(2, 6, 3))
+    tgt_pts = apply_trans(trans0, src_pts)
+    trans = fit_matched_pts(tgt_pts, src_pts)
+    est_pts = apply_trans(trans, tgt_pts)
+    assert_array_almost_equal(src_pts, est_pts)
+
+    # test exceeding tolerance
+    src_pts[0, :] += 20
+    assert_raises(RuntimeError, fit_matched_pts, src_pts, tgt_pts, tol=10)
diff --git a/mne/transforms/tests/test_transforms.py b/mne/transforms/tests/test_transforms.py
new file mode 100644
index 0000000..9995d74
--- /dev/null
+++ b/mne/transforms/tests/test_transforms.py
@@ -0,0 +1,31 @@
+import os.path as op
+
+from nose.tools import assert_true
+from numpy.testing import assert_array_equal
+
+from mne.datasets import sample
+from mne import read_trans, write_trans
+from mne.utils import _TempDir
+
+data_path = sample.data_path()
+fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
+
+tempdir = _TempDir()
+
+
+def test_io_trans():
+    """Test reading and writing of trans files
+    """
+    info0 = read_trans(fname)
+    fname1 = op.join(tempdir, 'test-trans.fif')
+    write_trans(fname1, info0)
+    info1 = read_trans(fname1)
+
+    # check all properties
+    assert_true(info0['from'] == info1['from'])
+    assert_true(info0['to'] == info1['to'])
+    assert_array_equal(info0['trans'], info1['trans'])
+    for d0, d1 in zip(info0['dig'], info1['dig']):
+        assert_array_equal(d0['r'], d1['r'])
+        for name in ['kind', 'ident', 'coord_frame']:
+            assert_true(d0[name] == d1[name])
diff --git a/mne/transforms/transforms.py b/mne/transforms/transforms.py
new file mode 100644
index 0000000..7d3db77
--- /dev/null
+++ b/mne/transforms/transforms.py
@@ -0,0 +1,415 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from numpy import sin, cos
+from scipy import linalg
+
+import logging
+logger = logging.getLogger('mne')
+
+from ..fiff import FIFF
+from ..fiff.open import fiff_open
+from ..fiff.tag import read_tag, find_tag
+from ..fiff.tree import dir_tree_find
+from ..fiff.write import start_file, end_file, start_block, end_block, \
+                   write_coord_trans, write_dig_point, write_int
+
+
+def apply_trans(trans, pts):
+    """Apply a transform matrix to an array of points
+
+    Parameters
+    ----------
+    trans : array, shape = (4, 4)
+        Transform matrix.
+    pts : array, shape = (3,) | (n, 3)
+        Array with coordinates for one or n points.
+
+    Returns
+    -------
+    transformed_pts : shape = (3,) | (n, 3)
+        Transformed point(s).
+    """
+    trans = np.asarray(trans)
+    pts = np.asarray(pts)
+    if pts.ndim == 1:
+        pts = np.vstack((pts[:, None], [1]))
+        pts = np.dot(trans, pts)
+        pts = pts[:3, 0]
+    else:
+        pts = np.vstack((pts.T, np.ones(len(pts))))
+        pts = np.dot(trans, pts)
+        pts = pts[:3].T
+    return pts
+
+
+def rotation(x=0, y=0, z=0):
+    """Create an array with a rotation matrix
+
+    Parameters
+    ----------
+    x, y, z : scalar
+        Rotation around the origin (in rad).
+
+    Returns
+    -------
+    r : array, shape = (4, 4)
+        The rotation matrix.
+    """
+    cos_x = cos(x)
+    cos_y = cos(y)
+    cos_z = cos(z)
+    sin_x = sin(x)
+    sin_y = sin(y)
+    sin_z = sin(z)
+    r = np.array([[cos_y * cos_z, -cos_x * sin_z + sin_x * sin_y * cos_z,
+                   sin_x * sin_z + cos_x * sin_y * cos_z, 0],
+                  [cos_y * sin_z, cos_x * cos_z + sin_x * sin_y * sin_z,
+                   - sin_x * cos_z + cos_x * sin_y * sin_z, 0],
+                  [-sin_y, sin_x * cos_y, cos_x * cos_y, 0],
+                  [0, 0, 0, 1]], dtype=float)
+    return r
+
+
+def scaling(x=1, y=1, z=1):
+    """Create an array with a scaling matrix
+
+    Parameters
+    ----------
+    x, y, z : scalar
+        Scaling factors.
+
+    Returns
+    -------
+    s : array, shape = (4, 4)
+        The scaling matrix.
+    """
+    s = np.array([[x, 0, 0, 0],
+                  [0, y, 0, 0],
+                  [0, 0, z, 0],
+                  [0, 0, 0, 1]], dtype=float)
+    return s
+
+
+def translation(x=0, y=0, z=0):
+    """Create an array with a translation matrix
+
+    Parameters
+    ----------
+    x, y, z : scalar
+        Translation parameters.
+
+    Returns
+    -------
+    m : array, shape = (4, 4)
+        The translation matrix.
+    """
+    m = np.array([[1, 0, 0, x],
+                  [0, 1, 0, y],
+                  [0, 0, 1, z],
+                  [0, 0, 0, 1]], dtype=float)
+    return m
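+
+# Sketch: rotation(), scaling() and translation() compose by matrix
+# multiplication into a single 4x4 affine, applied right-to-left:
+#   trans = np.dot(translation(x=.01), rotation(z=np.pi / 4))
+#   apply_trans(trans, [1., 0., 0.])  # rotate about z, then shift along x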
+
+
+def read_trans(fname):
+    """Read a -trans.fif file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file.
+
+    Returns
+    -------
+    info : dict
+        The contents of the trans file.
+    """
+    info = {}
+    fid, tree, _ = fiff_open(fname)
+    block = dir_tree_find(tree, FIFF.FIFFB_MNE)[0]
+
+    tag = find_tag(fid, block, FIFF.FIFF_COORD_TRANS)
+    info.update(tag.data)
+
+    isotrak = dir_tree_find(block, FIFF.FIFFB_ISOTRAK)
+    isotrak = isotrak[0]
+
+    tag = find_tag(fid, isotrak, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        coord_frame = 0
+    else:
+        coord_frame = int(tag.data)
+
+    info['dig'] = dig = []
+    for k in range(isotrak['nent']):
+        kind = isotrak['directory'][k].kind
+        pos = isotrak['directory'][k].pos
+        if kind == FIFF.FIFF_DIG_POINT:
+            tag = read_tag(fid, pos)
+            tag.data['coord_frame'] = coord_frame
+            dig.append(tag.data)
+
+    fid.close()
+    return info
+
+
+def write_trans(fname, info):
+    """Write a -trans.fif file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file.
+    info : dict
+        Trans file data, as returned by read_trans.
+    """
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MNE)
+
+    write_coord_trans(fid, info)
+
+    dig = info['dig']
+    if dig:
+        start_block(fid, FIFF.FIFFB_ISOTRAK)
+
+        coord_frames = set(d['coord_frame'] for d in dig)
+        if len(coord_frames) > 1:
+            raise ValueError("dig points in different coord_frames")
+        coord_frame = coord_frames.pop()
+        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
+
+        for d in dig:
+            write_dig_point(fid, d)
+        end_block(fid, FIFF.FIFFB_ISOTRAK)
+
+    end_block(fid, FIFF.FIFFB_MNE)
+    end_file(fid)
+
+
+def invert_transform(trans):
+    """Invert a transformation between coordinate systems
+    """
+    itrans = {'to': trans['from'], 'from': trans['to'],
+              'trans': linalg.inv(trans['trans'])}
+    return itrans
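+
+# Sketch (illustrative coordinate-frame ids): the inverse swaps 'from'
+# and 'to' and inverts the matrix, so the two compose to the identity:
+#   t = {'from': 4, 'to': 5, 'trans': np.dot(translation(.1), rotation(.1))}
+#   it = invert_transform(t)
+#   np.allclose(np.dot(it['trans'], t['trans']), np.eye(4))  # -> True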
+
+
+def transform_source_space_to(src, dest, trans):
+    """Transform source space data to the desired coordinate system
+
+    Parameters
+    ----------
+    src : dict
+        Source space.
+    dest : int
+        Destination coordinate system (one of mne.fiff.FIFF.FIFFV_COORD_...).
+    trans : dict
+        Transformation.
+
+    Returns
+    -------
+    res : dict
+        Transformed source space. Data are modified in-place.
+    """
+
+    if src['coord_frame'] == dest:
+        res = src
+        return res
+
+    if trans['to'] == src['coord_frame'] and trans['from'] == dest:
+        trans = invert_transform(trans)
+    elif trans['from'] != src['coord_frame'] or trans['to'] != dest:
+        raise ValueError('Cannot transform the source space using this '
+                         'coordinate transformation')
+
+    t = trans['trans'][:3, :]
+    res = src
+    res['coord_frame'] = dest
+
+    res['rr'] = np.dot(np.c_[res['rr'], np.ones((res['np'], 1))], t.T)
+    res['nn'] = np.dot(np.c_[res['nn'], np.zeros((res['np'], 1))], t.T)
+    return res
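+
+# Sketch (minimal illustrative dict with only the fields used here):
+#   src = dict(coord_frame=5, np=2, rr=np.zeros((2, 3)), nn=np.zeros((2, 3)))
+#   t = {'from': 5, 'to': 4, 'trans': translation(.1)}
+#   transform_source_space_to(src, 4, t)['coord_frame']  # -> 4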
+
+
+def transform_coordinates(filename, pos, orig, dest):
+    """Transform coordinates between various MRI-related coordinate frames
+
+    Parameters
+    ----------
+    filename : string
+        Name of a fif file containing the coordinate transformations.
+        This file can be conveniently created with mne_collect_transforms.
+    pos : array of shape N x 3
+        Array of locations to transform (in meters).
+    orig : 'meg' | 'mri'
+        Coordinate frame of the above locations.
+        'meg' is MEG head coordinates.
+        'mri' is surface RAS coordinates.
+    dest : 'meg' | 'mri' | 'fs_tal' | 'mni_tal'
+        Coordinate frame of the result.
+        'mni_tal' is MNI Talairach.
+        'fs_tal' is FreeSurfer Talairach.
+
+    Returns
+    -------
+    trans_pos : array of shape N x 3
+        The transformed locations.
+
+    Example
+    -------
+    transform_coordinates('all-trans.fif', np.eye(3), 'meg', 'fs_tal')
+    transform_coordinates('all-trans.fif', np.eye(3), 'mri', 'mni_tal')
+    """
+    #   Read the fif file containing all necessary transformations
+    fid, tree, directory = fiff_open(filename)
+
+    coord_names = dict(mri=FIFF.FIFFV_COORD_MRI,
+                       meg=FIFF.FIFFV_COORD_HEAD,
+                       mni_tal=FIFF.FIFFV_MNE_COORD_MNI_TAL,
+                       fs_tal=FIFF.FIFFV_MNE_COORD_FS_TAL)
+
+    orig = coord_names[orig]
+    dest = coord_names[dest]
+
+    T0 = T1 = T2 = T3plus = T3minus = None
+    for d in directory:
+        if d.kind == FIFF.FIFF_COORD_TRANS:
+            tag = read_tag(fid, d.pos)
+            trans = tag.data
+            if (trans['from'] == FIFF.FIFFV_COORD_MRI and
+                trans['to'] == FIFF.FIFFV_COORD_HEAD):
+                T0 = invert_transform(trans)
+            elif (trans['from'] == FIFF.FIFFV_COORD_MRI and
+                  trans['to'] == FIFF.FIFFV_MNE_COORD_RAS):
+                T1 = trans
+            elif (trans['from'] == FIFF.FIFFV_MNE_COORD_RAS and
+                  trans['to'] == FIFF.FIFFV_MNE_COORD_MNI_TAL):
+                T2 = trans
+            elif trans['from'] == FIFF.FIFFV_MNE_COORD_MNI_TAL:
+                if trans['to'] == FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ:
+                    T3plus = trans
+                elif trans['to'] == FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ:
+                    T3minus = trans
+    fid.close()
+    #
+    #   Check we have everything we need
+    #
+    if ((orig == FIFF.FIFFV_COORD_HEAD and T0 is None) or (T1 is None)
+            or (T2 is None) or (dest == FIFF.FIFFV_MNE_COORD_FS_TAL and
+                                ((T3plus is None) or (T3minus is None)))):
+        raise ValueError('All required coordinate transforms not found')
+
+    #
+    #   Go ahead and transform the data
+    #
+    if pos.shape[1] != 3:
+        raise ValueError('Coordinates must be given in a N x 3 array')
+
+    if dest == orig:
+        trans_pos = pos.copy()
+    else:
+        n_points = pos.shape[0]
+        pos = np.c_[pos, np.ones(n_points)].T
+        if orig == FIFF.FIFFV_COORD_HEAD:
+            pos = np.dot(T0['trans'], pos)
+        elif orig != FIFF.FIFFV_COORD_MRI:
+            raise ValueError('Input data must be in MEG head or surface RAS '
+                             'coordinates')
+
+        if dest == FIFF.FIFFV_COORD_HEAD:
+            pos = np.dot(linalg.inv(T0['trans']), pos)
+        elif dest != FIFF.FIFFV_COORD_MRI:
+            pos = np.dot(np.dot(T2['trans'], T1['trans']), pos)
+            if dest != FIFF.FIFFV_MNE_COORD_MNI_TAL:
+                if dest == FIFF.FIFFV_MNE_COORD_FS_TAL:
+                    for k in xrange(n_points):
+                        if pos[2, k] > 0:
+                            pos[:, k] = np.dot(T3plus['trans'], pos[:, k])
+                        else:
+                            pos[:, k] = np.dot(T3minus['trans'], pos[:, k])
+                else:
+                    raise ValueError('Illegal choice for the output '
+                                     'coordinates')
+
+        trans_pos = pos[:3, :].T
+
+    return trans_pos
+
+
+# @verbose
+# def transform_meg_chs(chs, trans, verbose=None):
+#     """
+#     %
+#     % [res, count] = fiff_transform_meg_chs(chs,trans)
+#     %
+#     % Move to another coordinate system in MEG channel channel info
+#     % Count gives the number of channels transformed
+#     %
+#     % NOTE: Only the coil_trans field is modified by this routine, not
+#     % loc which remains to reflect the original data read from the fif file
+#     %
+#     %
+#
+#     XXX
+#     """
+#
+#     res = copy.deepcopy(chs)
+#
+#     count = 0
+#     t = trans['trans']
+#     for ch in res:
+#         if (ch['kind'] == FIFF.FIFFV_MEG_CH
+#                                     or ch['kind'] == FIFF.FIFFV_REF_MEG_CH):
+#             if (ch['coord_frame'] == trans['from']
+#                                             and ch['coil_trans'] is not None):
+#                 ch['coil_trans'] = np.dot(t, ch['coil_trans'])
+#                 ch['coord_frame'] = trans['to']
+#                 count += 1
+#
+#     if count > 0:
+#         logger.info('    %d MEG channel locations transformed' % count)
+#
+#     return res, count
+
+# @verbose
+# def transform_eeg_chs(chs, trans, verbose=None):
+#     """
+#     %
+#     % [res, count] = fiff_transform_eeg_chs(chs,trans)
+#     %
+#     % Move to another coordinate system in EEG channel channel info
+#     % Count gives the number of channels transformed
+#     %
+#     % NOTE: Only the eeg_loc field is modified by this routine, not
+#     % loc which remains to reflect the original data read from the fif file
+#     %
+#
+#     XXX
+#     """
+#     res = copy.deepcopy(chs)
+#
+#     count = 0
+#     #
+#     #   Output unaugmented vectors from the transformation
+#     #
+#     t = trans['trans'][:3,:]
+#     for ch in res:
+#         if ch['kind'] == FIFF.FIFFV_EEG_CH:
+#             if (ch['coord_frame'] == trans['from']
+#                                             and ch['eeg_loc'] is not None):
+#                 #
+#                 # Transform the augmented EEG location vectors
+#                 #
+#                 for p in range(ch['eeg_loc'].shape[1]):
+#                     ch['eeg_loc'][:, p] = np.dot(t,
+#                                                 np.r_[ch['eeg_loc'][:,p], 1])
+#                 count += 1
+#                 ch['coord_frame'] = trans['to']
+#
+#     if count > 0:
+#         logger.info('    %d EEG electrode locations transformed\n' % count)
+#
+#     return res, count
diff --git a/mne/utils.py b/mne/utils.py
new file mode 100644
index 0000000..9a7cf43
--- /dev/null
+++ b/mne/utils.py
@@ -0,0 +1,996 @@
+"""Some utility functions"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import warnings
+import numpy as np
+import logging
+from distutils.version import LooseVersion
+import os
+import os.path as op
+from functools import wraps
+import inspect
+import subprocess
+import sys
+from sys import stdout
+import tempfile
+import shutil
+from shutil import rmtree
+import atexit
+from math import log
+import json
+import urllib
+import urllib2
+import ftplib
+import urlparse
+from scipy import linalg
+
+logger = logging.getLogger('mne')
+
+
+###############################################################################
+# RANDOM UTILITIES
+
+def check_random_state(seed):
+    """Turn seed into a np.random.RandomState instance
+
+    If seed is None, return the RandomState singleton used by np.random.
+    If seed is an int, return a new RandomState instance seeded with seed.
+    If seed is already a RandomState instance, return it.
+    Otherwise raise ValueError.
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (int, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, np.random.RandomState):
+        return seed
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
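+
+# Usage sketch: the three accepted seed types all yield a RandomState:
+#   check_random_state(None)  # the global numpy RandomState
+#   check_random_state(42)    # a fresh RandomState seeded with 42
+#   check_random_state(np.random.RandomState(0))  # passed through as-is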
+
+
+def split_list(l, n):
+    """split list in n (approx) equal pieces"""
+    n = int(n)
+    sz = len(l) / n
+    for i in range(n - 1):
+        yield l[i * sz:(i + 1) * sz]
+    yield l[(n - 1) * sz:]
+
+
+class WrapStdOut(object):
+    """Ridiculous class to work around how doctest captures stdout"""
+    def __getattr__(self, name):
+        # Even more ridiculous than this class, this must be sys.stdout (not
+        # just stdout) in order for this to work (tested on OSX and Linux)
+        return getattr(sys.stdout, name)
+
+
+class _TempDir(str):
+    """Class for creating and auto-destroying temp dir
+
+    This is designed to be used with testing modules.
+
+    We cannot simply use __del__() method for cleanup here because the rmtree
+    function may be cleaned up before this object, so we use the atexit module
+    instead. Passing del_after and print_del kwargs to the constructor are
+    helpful primarily for debugging purposes.
+    """
+    def __new__(self, del_after=True, print_del=False):
+        new = str.__new__(self, tempfile.mkdtemp())
+        self._del_after = del_after
+        self._print_del = print_del
+        return new
+
+    def __init__(self):
+        self._path = self.__str__()
+        atexit.register(self.cleanup)
+
+    def cleanup(self):
+        if self._del_after is True:
+            if self._print_del is True:
+                print 'Deleting %s ...' % self._path
+            rmtree(self._path, ignore_errors=True)
+
+
+def estimate_rank(data, tol=1e-4, return_singular=False,
+                  copy=True):
+    """Helper to estimate the rank of data
+
+    This function will normalize the rows of the data (typically
+    channels or vertices) such that non-zero singular values
+    should be close to one.
+
+    Parameters
+    ----------
+    data : array
+        Data to estimate the rank of (should be 2-dimensional).
+    tol : float
+        Tolerance for singular values to consider non-zero in
+        calculating the rank. The singular values are calculated
+        in this method such that independent data are expected to
+        have singular value around one.
+    return_singular : bool
+        If True, also return the singular values that were used
+        to determine the rank.
+    copy : bool
+        If False, values in data will be modified in-place during
+        rank estimation (saves memory).
+
+    Returns
+    -------
+    rank : int
+        Estimated rank of the data.
+    s : array
+        If return_singular is True, the singular values that were
+        thresholded to determine the rank are also returned.
+    """
+    if copy is True:
+        data = data.copy()
+    norms = np.sqrt(np.sum(data ** 2, axis=1))
+    norms[norms == 0] = 1.0
+    data /= norms[:, np.newaxis]
+    s = linalg.svd(data, compute_uv=False, overwrite_a=True)
+    rank = np.sum(s >= tol)
+    if return_singular is True:
+        return rank, s
+    else:
+        return rank
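+
+# Usage sketch (illustrative): a linearly dependent row lowers the rank,
+# which the normalized singular values reveal:
+#   a = np.random.randn(5, 20)
+#   a[4] = a[0] + a[1]  # make one row linearly dependent
+#   estimate_rank(a)  # -> 4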
+
+
+def run_subprocess(command, *args, **kwargs):
+    """Run command using subprocess.Popen
+
+    Run command and wait for command to complete. If the return code was zero
+    then return, otherwise raise CalledProcessError.
+    By default, this will also add stdout= and stderr=subprocess.PIPE
+    to the call to Popen to suppress printing to the terminal.
+
+    Parameters
+    ----------
+    command : list of str
+        Command to run as subprocess (see subprocess.Popen documentation).
+    *args, **kwargs : arguments
+        Arguments to pass to subprocess.Popen.
+
+    Returns
+    -------
+    stdout : str
+        Stdout returned by the process.
+    stderr : str
+        Stderr returned by the process.
+    """
+    if 'stderr' not in kwargs:
+        kwargs['stderr'] = subprocess.PIPE
+    if 'stdout' not in kwargs:
+        kwargs['stdout'] = subprocess.PIPE
+
+    # Check the PATH environment variable. If run_subprocess() is to be called
+    # frequently this should be refactored so as to only check the path once.
+    env = kwargs.get('env', os.environ)
+    if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
+        msg = ("Your PATH environment variable contains at least one path "
+               "starting with a tilde ('~') character. Such paths are not "
+               "interpreted correctly from within Python. It is recommended "
+               "that you use '$HOME' instead of '~'.")
+        warnings.warn(msg)
+
+    logger.info("Running subprocess: %s" % str(command))
+    p = subprocess.Popen(command, *args, **kwargs)
+    stdout, stderr = p.communicate()
+
+    if stdout.strip():
+        logger.info("stdout:\n%s" % stdout)
+    if stderr.strip():
+        logger.info("stderr:\n%s" % stderr)
+
+    output = (stdout, stderr)
+    if p.returncode:
+        raise subprocess.CalledProcessError(p.returncode, command, output)
+
+    return output
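+
+# Usage sketch (assumes a POSIX `echo` on the PATH):
+#   stdout, stderr = run_subprocess(['echo', 'hello'])
+#   stdout.strip()  # -> 'hello'
+# A nonzero exit status raises subprocess.CalledProcessError instead.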
+
+
+###############################################################################
+# DECORATORS
+
+# Following deprecated class copied from scikit-learn
+
+# force show of DeprecationWarning even on python 2.7
+warnings.simplefilter('default')
+
+
+class deprecated(object):
+    """Decorator to mark a function or class as deprecated.
+
+    Issue a warning when the function is called/the class is instantiated and
+    adds a warning to the docstring.
+
+    The optional extra argument will be appended to the deprecation message
+    and the docstring. Note: to use this with the default value for extra,
+    put in an empty set of parentheses:
+
+    >>> from mne.utils import deprecated
+    >>> deprecated() # doctest: +ELLIPSIS
+    <mne.utils.deprecated object at ...>
+
+    >>> @deprecated()
+    ... def some_function(): pass
+    """
+    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
+    # but with many changes.
+
+    # scikit-learn will not import on all platforms b/c it can be
+    # sklearn or scikits.learn, so a self-contained example is used above
+
+    def __init__(self, extra=''):
+        """
+        Parameters
+        ----------
+        extra : string
+            To be added to the deprecation messages.
+
+        """
+        self.extra = extra
+
+    def __call__(self, obj):
+        if isinstance(obj, type):
+            return self._decorate_class(obj)
+        else:
+            return self._decorate_fun(obj)
+
+    def _decorate_class(self, cls):
+        msg = "Class %s is deprecated" % cls.__name__
+        if self.extra:
+            msg += "; %s" % self.extra
+
+        # FIXME: we should probably reset __new__ for full generality
+        init = cls.__init__
+
+        def wrapped(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning)
+            return init(*args, **kwargs)
+        cls.__init__ = wrapped
+
+        wrapped.__name__ = '__init__'
+        wrapped.__doc__ = self._update_doc(init.__doc__)
+        wrapped.deprecated_original = init
+
+        return cls
+
+    def _decorate_fun(self, fun):
+        """Decorate function fun"""
+
+        msg = "Function %s is deprecated" % fun.__name__
+        if self.extra:
+            msg += "; %s" % self.extra
+
+        def wrapped(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning)
+            return fun(*args, **kwargs)
+
+        wrapped.__name__ = fun.__name__
+        wrapped.__dict__ = fun.__dict__
+        wrapped.__doc__ = self._update_doc(fun.__doc__)
+
+        return wrapped
+
+    def _update_doc(self, olddoc):
+        newdoc = "DEPRECATED"
+        if self.extra:
+            newdoc = "%s: %s" % (newdoc, self.extra)
+        if olddoc:
+            newdoc = "%s\n\n%s" % (newdoc, olddoc)
+        return newdoc
+
+
+def verbose(function):
+    """Decorator to allow functions to override default log level
+
+    Do not call this function directly to set the global verbosity level,
+    instead use set_log_level().
+
+    Parameters (to decorated function)
+    ----------------------------------
+    verbose : bool, str, int, or None
+        The level of messages to print. If a str, it can be either DEBUG,
+        INFO, WARNING, ERROR, or CRITICAL. Note that these are for
+        convenience and are equivalent to passing in logging.DEBUG, etc.
+        For bool, True is the same as 'INFO', False is the same as 'WARNING'.
+        None defaults to using the current log level [e.g., set using
+        mne.set_log_level()].
+    """
+    arg_names = inspect.getargspec(function).args
+    # this wrap allows decorated functions to be pickled (e.g., for parallel)
+
+    @wraps(function)
+    def dec(*args, **kwargs):
+        # Check if the first arg is "self", if it has verbose, make it default
+        if len(arg_names) > 0 and arg_names[0] == 'self':
+            default_level = getattr(args[0], 'verbose', None)
+        else:
+            default_level = None
+        verbose_level = kwargs.get('verbose', default_level)
+        if verbose_level is not None:
+            old_level = set_log_level(verbose_level, True)
+            # set it back if we get an exception
+            try:
+                ret = function(*args, **kwargs)
+            except:
+                set_log_level(old_level)
+                raise
+            set_log_level(old_level)
+            return ret
+        else:
+            return function(*args, **kwargs)
+
+    # set __wrapped__ attribute so ?? in IPython gets the right source
+    dec.__wrapped__ = function
+
+    return dec
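+
+# Usage sketch: a decorated function gains a `verbose` keyword that
+# temporarily overrides the global mne log level for that one call:
+#   @verbose
+#   def report(msg, verbose=None):
+#       logger.info(msg)
+#   report('quiet', verbose='WARNING')  # suppressed
+#   report('loud', verbose='INFO')      # printed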
+
+
+def has_command_line_tools():
+    return 'MNE_ROOT' in os.environ
+
+
+requires_mne = np.testing.dec.skipif(not has_command_line_tools(),
+                                     'Requires MNE command line tools')
+
+
+def has_nibabel():
+    try:
+        import nibabel
+        return True
+    except ImportError:
+        return False
+
+
+def has_freesurfer():
+    return 'FREESURFER_HOME' in os.environ
+
+
+requires_fs_or_nibabel = np.testing.dec.skipif(not has_nibabel() and
+                                               not has_freesurfer(),
+                                               'Requires nibabel or '
+                                               'Freesurfer')
+requires_nibabel = np.testing.dec.skipif(not has_nibabel(),
+                                         'Requires nibabel')
+requires_freesurfer = np.testing.dec.skipif(not has_freesurfer(),
+                                            'Requires Freesurfer')
+
+
+def requires_pandas(function):
+    """Decorator to skip test if pandas is not available"""
+    @wraps(function)
+    def dec(*args, **kwargs):
+        skip = False
+        try:
+            import pandas
+            version = LooseVersion(pandas.__version__)
+            if version < '0.8.0':
+                skip = True
+        except ImportError:
+            skip = True
+
+        if skip is True:
+            from nose.plugins.skip import SkipTest
+            raise SkipTest('Test %s skipped, requires pandas'
+                           % function.__name__)
+        ret = function(*args, **kwargs)
+
+        return ret
+
+    return dec
+
+
+def make_skipper_dec(module, skip_str):
+    """Helper to make skipping decorators"""
+    skip = False
+    try:
+        __import__(module)
+    except ImportError:
+        skip = True
+    return np.testing.dec.skipif(skip, skip_str)
+
+
+requires_sklearn = make_skipper_dec('sklearn', 'scikit-learn not installed')
+requires_nitime = make_skipper_dec('nitime', 'nitime not installed')
+
+
+###############################################################################
+# LOGGING
+
+def set_log_level(verbose=None, return_old_level=False):
+    """Convenience function for setting the logging level
+
+    Parameters
+    ----------
+    verbose : bool, str, int, or None
+        The verbosity of messages to print. If a str, it can be either DEBUG,
+        INFO, WARNING, ERROR, or CRITICAL. Note that these are for
+        convenience and are equivalent to passing in logging.DEBUG, etc.
+        For bool, True is the same as 'INFO', False is the same as 'WARNING'.
+        If None, the MNE_LOGGING_LEVEL configuration value is read (from
+        the environment or the mne-python config file), defaulting to INFO.
+    return_old_level : bool
+        If True, return the old verbosity level.
+    """
+    if verbose is None:
+        verbose = get_config('MNE_LOGGING_LEVEL', 'INFO')
+    elif isinstance(verbose, bool):
+        if verbose is True:
+            verbose = 'INFO'
+        else:
+            verbose = 'WARNING'
+    if isinstance(verbose, basestring):
+        verbose = verbose.upper()
+        logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
+                             WARNING=logging.WARNING, ERROR=logging.ERROR,
+                             CRITICAL=logging.CRITICAL)
+        if verbose not in logging_types:
+            raise ValueError('verbose must be of a valid type')
+        verbose = logging_types[verbose]
+    logger = logging.getLogger('mne')
+    old_verbose = logger.level
+    logger.setLevel(verbose)
+    return (old_verbose if return_old_level else None)
+
+
+def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
+    """Convenience function for setting the log to print to a file
+
+    Parameters
+    ----------
+    fname : str, or None
+        Filename of the log to print to. If None, stdout is used.
+        To suppress log outputs, use set_log_level('WARNING').
+    output_format : str
+        Format of the output messages. See the following for examples:
+            http://docs.python.org/dev/howto/logging.html
+        e.g., "%(asctime)s - %(levelname)s - %(message)s".
+    overwrite : bool, or None
+        Overwrite the log file (if it exists). Otherwise, statements
+        will be appended to the log (default). None is the same as False,
+        but additionally raises a warning to notify the user that log
+        entries will be appended.
+    """
+    logger = logging.getLogger('mne')
+    handlers = logger.handlers
+    for h in handlers:
+        if isinstance(h, logging.FileHandler):
+            h.close()
+        logger.removeHandler(h)
+    if fname is not None:
+        if op.isfile(fname) and overwrite is None:
+            warnings.warn('Log entries will be appended to the file. Use '
+                          'overwrite=False to avoid this message in the '
+                          'future.')
+        mode = 'w' if overwrite is True else 'a'
+        lh = logging.FileHandler(fname, mode=mode)
+    else:
+        """ we should just be able to do:
+                lh = logging.StreamHandler(sys.stdout)
+            but because doctests uses some magic on stdout, we have to do this:
+        """
+        lh = logging.StreamHandler(WrapStdOut())
+
+    lh.setFormatter(logging.Formatter(output_format))
+    # actually add the stream handler
+    logger.addHandler(lh)
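+
+# Usage sketch: route mne log output to a file with timestamps, then
+# switch back to printing on stdout:
+#   set_log_file('mne.log', '%(asctime)s %(message)s', overwrite=True)
+#   set_log_file(None)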
+
+
+###############################################################################
+# CONFIG / PREFS
+
+def get_subjects_dir(subjects_dir=None, raise_error=False):
+    """Safely use subjects_dir input to return SUBJECTS_DIR
+
+    Parameters
+    ----------
+    subjects_dir : str | None
+        If a value is provided, return subjects_dir. Otherwise, look for
+        SUBJECTS_DIR config and return the result.
+    raise_error : bool
+        If True, raise a KeyError if no value for SUBJECTS_DIR can be found
+        (instead of returning None).
+
+    Returns
+    -------
+    value : str | None
+        The SUBJECTS_DIR value.
+    """
+    if subjects_dir is None:
+        subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
+    return subjects_dir
+
+
+def get_config_path():
+    """Get path to standard mne-python config file
+
+    Returns
+    -------
+    config_path : str
+        The path to the mne-python configuration file. On windows, this
+        will be '%APPDATA%\.mne\mne-python.json'. On every other
+        system, this will be $HOME/.mne/mne-python.json.
+    """
+
+    # this has been checked on OSX64, Linux64, and Win32
+    val = os.getenv('APPDATA' if 'nt' == os.name.lower() else 'HOME', None)
+    if val is None:
+        raise ValueError('mne-python config file path could '
+                         'not be determined, please report this '
+                         'error to mne-python developers')
+
+    val = op.join(val, '.mne', 'mne-python.json')
+    return val
+
+
+# List the known configuration values
+known_config_types = [
+    'MNE_BROWSE_RAW_SIZE',
+    'MNE_CUDA_IGNORE_PRECISION',
+    'MNE_DATASETS_MEGSIM_PATH',
+    'MNE_DATASETS_SAMPLE_PATH',
+    'MNE_LOGGING_LEVEL',
+    'MNE_USE_CUDA',
+    'SUBJECTS_DIR',
+    ]
+# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is an okay key
+known_config_wildcards = [
+    'MNE_STIM_CHANNEL',
+    ]
+
+
+def get_config(key, default=None, raise_error=False):
+    """Read mne(-python) preference from env, then mne-python config
+
+    Parameters
+    ----------
+    key : str
+        The preference key to look for. The os environment is searched
+        first, then the mne-python config file is parsed.
+    default : str | None
+        Value to return if the key is not found.
+    raise_error : bool
+        If True, raise an error if the key is not found (instead of returning
+        default).
+
+    Returns
+    -------
+    value : str | None
+        The preference key value.
+    """
+
+    if not isinstance(key, basestring):
+        raise ValueError('key must be a string')
+
+    # first, check to see if key is in env
+    if key in os.environ:
+        return os.environ[key]
+
+    # second, look for it in mne-python config file
+    config_path = get_config_path()
+    if not op.isfile(config_path):
+        key_found = False
+        val = default
+    else:
+        with open(config_path, 'r') as fid:
+            config = json.load(fid)
+        key_found = key in config
+        val = config.get(key, default)
+
+    if not key_found and raise_error is True:
+        meth_1 = 'os.environ["%s"] = VALUE' % key
+        meth_2 = 'mne.utils.set_config("%s", VALUE)' % key
+        raise KeyError('Key "%s" not found in environment or in the '
+                       'mne-python config file:\n%s\nTry either:\n'
+                       '    %s\nfor a temporary solution, or:\n'
+                       '    %s\nfor a permanent one. You can also '
+                       'set the environment variable before '
+                       'running python.'
+                       % (key, config_path, meth_1, meth_2))
+    return val
+
+
+def set_config(key, value):
+    """Set mne-python preference in config
+
+    Parameters
+    ----------
+    key : str
+        The preference key to set.
+    value : str |  None
+        The value to assign to the preference key. If None, the key is
+        deleted.
+    """
+
+    if not isinstance(key, basestring):
+        raise ValueError('key must be a string')
+    # While JSON allows non-string types, we allow users to override config
+    # settings using env vars, which are strings, so we enforce that here
+    if not isinstance(value, basestring) and value is not None:
+        raise ValueError('value must be a string or None')
+    if key not in known_config_types and not \
+            any(k in key for k in known_config_wildcards):
+        warnings.warn('Setting non-standard config type: "%s"' % key)
+
+    # Read all previous values
+    config_path = get_config_path()
+    if op.isfile(config_path):
+        with open(config_path, 'r') as fid:
+            config = json.load(fid)
+    else:
+        config = dict()
+        logger.info('Attempting to create new mne-python configuration '
+                    'file:\n%s' % config_path)
+    if value is None:
+        config.pop(key, None)
+    else:
+        config[key] = value
+
+    # Write all values
+    directory = op.split(config_path)[0]
+    if not op.isdir(directory):
+        os.mkdir(directory)
+    with open(config_path, 'w') as fid:
+        json.dump(config, fid, sort_keys=True, indent=0)
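+
+# Usage sketch: persist a preference and read it back; note get_config()
+# checks the os environment before the config file:
+#   set_config('MNE_LOGGING_LEVEL', 'INFO')
+#   get_config('MNE_LOGGING_LEVEL')  # -> 'INFO'
+#   get_config('MISSING_KEY', default='fallback')  # -> 'fallback'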
+
+
+class ProgressBar(object):
+    """Class for generating a command-line progressbar
+
+    Parameters
+    ----------
+    max_value : int
+        Maximum value of process (e.g. number of samples to process, bytes to
+        download, etc.).
+    initial_value : int
+        Initial value of process, useful when resuming process from a specific
+        value, defaults to 0.
+    mesg : str
+        Message to include at end of progress bar.
+    max_chars : int
+        Number of characters to use for progress bar (be sure to save some room
+        for the message and % complete as well).
+    progress_character : char
+        Character in the progress bar that indicates the portion completed.
+    spinner : bool
+        Show a spinner.  Useful for long-running processes that may not
+        increment the progress bar very often.  This provides the user with
+        feedback that the progress has not stalled.
+
+    Example
+    -------
+    >>> progress = ProgressBar(13000)
+    >>> progress.update(3000) # doctest: +SKIP
+    [.........                               ] 23.07692 |
+    >>> progress.update(6000) # doctest: +SKIP
+    [..................                      ] 46.15385 |
+
+    >>> progress = ProgressBar(13000, spinner=True)
+    >>> progress.update(3000) # doctest: +SKIP
+    [.........                               ] 23.07692 |
+    >>> progress.update(6000) # doctest: +SKIP
+    [..................                      ] 46.15385 /
+    """
+
+    spinner_symbols = ['|', '/', '-', '\\']
+    template = '\r[{}{}] {:.05f} {} {}   '
+
+    def __init__(self, max_value, initial_value=0, mesg='', max_chars=40,
+                 progress_character='.', spinner=False):
+        self.cur_value = initial_value
+        self.max_value = float(max_value)
+        self.mesg = mesg
+        self.max_chars = max_chars
+        self.progress_character = progress_character
+        self.spinner = spinner
+        self.spinner_index = 0
+        self.n_spinner = len(self.spinner_symbols)
+
+    def update(self, cur_value, mesg=None):
+        """Update progressbar with current value of process
+
+        Parameters
+        ----------
+        cur_value : number
+            Current value of process.  Should be <= max_value (but this is not
+            enforced).  The percent of the progressbar will be computed as
+            (cur_value / max_value) * 100
+        mesg : str
+            Message to display to the right of the progressbar.  If None, the
+            last message provided will be used.  To clear the current message,
+            pass a null string, ''.
+        """
+        # Ensure floating-point division so we can get fractions of a percent
+        # for the progressbar.
+        self.cur_value = cur_value
+        progress = float(self.cur_value) / self.max_value
+        num_chars = int(progress * self.max_chars)
+        num_left = self.max_chars - num_chars
+
+        # Update the message
+        if mesg is not None:
+            self.mesg = mesg
+
+        # The \r tells the cursor to return to the beginning of the line rather
+        # than starting a new line.  This allows us to have a progressbar-style
+        # display in the console window.
+        bar = self.template.format(self.progress_character * num_chars,
+                                   ' ' * num_left,
+                                   progress * 100,
+                                   self.spinner_symbols[self.spinner_index],
+                                   self.mesg)
+        sys.stdout.write(bar)
+        # Increment the spinner
+        if self.spinner:
+            self.spinner_index = (self.spinner_index + 1) % self.n_spinner
+
+        # Force a flush because sometimes when using bash scripts and pipes,
+        # the output is not printed until after the program exits.
+        sys.stdout.flush()
+
+    def update_with_increment_value(self, increment_value, mesg=None):
+        """Update progressbar with the value of the increment instead of the
+        current value of process as in update()
+
+        Parameters
+        ----------
+        increment_value : int
+            Value of the increment of process.  The percent of the progressbar
+            will be computed as
+            ((self.cur_value + increment_value) / max_value) * 100
+        mesg : str
+            Message to display to the right of the progressbar.  If None, the
+            last message provided will be used.  To clear the current message,
+            pass a null string, ''.
+        """
+        self.cur_value += increment_value
+        self.update(self.cur_value, mesg)
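+
+# A usage sketch for incremental updates (illustrative doctest, kept as a
+# comment so nothing runs on import):
+#
+#     >>> progress = ProgressBar(1000, spinner=True)       # doctest: +SKIP
+#     >>> for _ in range(10):                              # doctest: +SKIP
+#     ...     progress.update_with_increment_value(100)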
+
+
+class _HTTPResumeURLOpener(urllib.FancyURLopener):
+    """Create sub-class in order to overide error 206.
+
+    This error means a partial file is being sent, which is ok in this case.
+    Do nothing with this error.
+    """
+    # Adapted from:
+    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
+    # http://code.activestate.com/recipes/83208-resuming-download-of-a-file/
+
+    def http_error_206(self, url, fp, errcode, errmsg, headers, data=None):
+        pass
+
+
+def _chunk_read(response, local_file, chunk_size=65536, initial_size=0):
+    """Download a file chunk by chunk and show advancement
+
+    Can also be used when resuming downloads over http.
+
+    Parameters
+    ----------
+    response: urllib.addinfourl
+        Response to the download request in order to get file size.
+    local_file: file
+        Hard disk file where data should be written.
+    chunk_size: integer, optional
+        Size of downloaded chunks. Default: 65536
+    initial_size: int, optional
+        If resuming, indicate the initial size of the file.
+    """
+    # Adapted from NISL:
+    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
+
+    bytes_so_far = initial_size
+    # Returns only amount left to download when resuming, not the size of the
+    # entire file
+    total_size = int(response.info().getheader('Content-Length').strip())
+    total_size += initial_size
+
+    progress = ProgressBar(total_size, initial_value=bytes_so_far,
+                           max_chars=40, spinner=True, mesg='downloading')
+    while True:
+        chunk = response.read(chunk_size)
+        bytes_so_far += len(chunk)
+        if not chunk:
+            sys.stdout.write('\n')
+            break
+        _chunk_write(chunk, local_file, progress)
+
+
+def _chunk_read_ftp_resume(url, temp_file_name, local_file):
+    """Resume downloading of a file from an FTP server"""
+    # Adapted from: https://pypi.python.org/pypi/fileDownloader.py
+    # but with changes
+
+    parsed_url = urlparse.urlparse(url)
+    file_name = os.path.basename(parsed_url.path)
+    server_path = parsed_url.path.replace(file_name, "")
+    unquoted_server_path = urllib.unquote(server_path)
+    local_file_size = os.path.getsize(temp_file_name)
+
+    data = ftplib.FTP()
+    data.connect(parsed_url.hostname, parsed_url.port)
+    data.login()
+    if len(server_path) > 1:
+        data.cwd(unquoted_server_path)
+    data.sendcmd("TYPE I")
+    data.sendcmd("REST " + str(local_file_size))
+    down_cmd = "RETR " + file_name
+    file_size = data.size(file_name)
+    progress = ProgressBar(file_size, initial_value=local_file_size,
+                           max_chars=40, spinner=True, mesg='downloading')
+    # Callback lambda function that will be passed the downloaded data
+    # chunk and will write it to file and update the progress bar
+    chunk_write = lambda chunk: _chunk_write(chunk, local_file, progress)
+    data.retrbinary(down_cmd, chunk_write)
+
+
+def _chunk_write(chunk, local_file, progress):
+    """Write a chunk to file and update the progress bar"""
+    local_file.write(chunk)
+    progress.update_with_increment_value(len(chunk))
+
+
+def _fetch_file(url, file_name, print_destination=True, resume=True):
+    """Load requested file, downloading it if needed or requested
+
+    Parameters
+    ----------
+    url: string
+        The url of file to be downloaded.
+    file_name: string
+        Name, along with the path, of where downloaded file will be saved.
+    print_destination: bool, optional
+        If True, the destination where the file was saved is printed after
+        the download finishes.
+    resume: bool, optional
+        If True, try to resume partially downloaded files.
+    """
+    # Adapted from NISL:
+    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
+
+    temp_file_name = file_name + ".part"
+    local_file = None
+    initial_size = 0
+    try:
+        # Checking file size and displaying it alongside the download url
+        u = urllib2.urlopen(url)
+        file_size = int(u.info().getheaders("Content-Length")[0])
+        print 'Downloading data from %s (%s)' % (url, sizeof_fmt(file_size))
+        # Downloading data
+        if resume and os.path.exists(temp_file_name):
+            local_file = open(temp_file_name, "ab")
+            # Resuming HTTP and FTP downloads requires different procedures
+            scheme = urlparse.urlparse(url).scheme
+            if scheme == 'http':
+                url_opener = _HTTPResumeURLOpener()
+                local_file_size = os.path.getsize(temp_file_name)
+                # If the file exists, then only download the remainder
+                url_opener.addheader("Range", "bytes=%s-" % (local_file_size))
+                try:
+                    data = url_opener.open(url)
+                except urllib2.HTTPError:
+                    # There is a problem that may be due to resuming: some
+                    # servers may not support the "Range" header. Switch back
+                    # to the complete download method
+                    print 'Resuming download failed. Attempting to restart '\
+                          'downloading the entire file.'
+                    _fetch_file(url, file_name, resume=False)
+                    return
+                _chunk_read(data, local_file, initial_size=local_file_size)
+            else:
+                _chunk_read_ftp_resume(url, temp_file_name, local_file)
+        else:
+            local_file = open(temp_file_name, "wb")
+            data = urllib2.urlopen(url)
+            _chunk_read(data, local_file, initial_size=initial_size)
+        # temp file must be closed prior to the move
+        if not local_file.closed:
+            local_file.close()
+        shutil.move(temp_file_name, file_name)
+        if print_destination is True:
+            stdout.write('File saved as %s.\n' % file_name)
+    except urllib2.HTTPError, e:
+        print 'Error while fetching file %s.' \
+            ' Dataset fetching aborted.' % url
+        print "HTTP Error:", e, url
+        raise
+    except urllib2.URLError, e:
+        print 'Error while fetching file %s.' \
+            ' Dataset fetching aborted.' % url
+        print "URL Error:", e, url
+        raise
+    finally:
+        if local_file is not None:
+            if not local_file.closed:
+                local_file.close()
+
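+# A usage sketch for _fetch_file (illustrative; the URL and the local path
+# below are hypothetical):
+#
+#     >>> _fetch_file('http://example.com/sample.fif',   # doctest: +SKIP
+#     ...             '/tmp/sample.fif', resume=True)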
+
+def sizeof_fmt(num):
+    """Turn number of bytes into human-readable str"""
+    unit_list = zip(['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'],
+                    [0, 0, 1, 2, 2, 2])
+    if num > 1:
+        exponent = min(int(log(num, 1024)), len(unit_list) - 1)
+        quotient = float(num) / 1024 ** exponent
+        unit, num_decimals = unit_list[exponent]
+        format_string = '{:.%sf} {}' % (num_decimals)
+        return format_string.format(quotient, unit)
+    if num == 0:
+        return '0 bytes'
+    if num == 1:
+        return '1 byte'
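+
+# Expected behaviour of sizeof_fmt, shown as an illustrative doctest (the
+# values follow from the unit table above):
+#
+#     >>> sizeof_fmt(2048)       # doctest: +SKIP
+#     '2 kB'
+#     >>> sizeof_fmt(10000000)   # doctest: +SKIP
+#     '9.5 MB'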
+
+
+def _url_to_local_path(url, path):
+    """Mirror a url path in a local destination (keeping folder structure)"""
+    destination = urlparse.urlparse(url).path
+    # First char should be '/', and it needs to be discarded
+    if len(destination) < 2 or destination[0] != '/':
+        raise ValueError('Invalid URL')
+    destination = os.path.join(path, urllib2.url2pathname(destination)[1:])
+    return destination
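+
+# An illustrative example of the URL -> local path mapping above (POSIX
+# paths assumed; the URL is hypothetical):
+#
+#     >>> _url_to_local_path('http://host.org/a/b/c.fif', '/tmp')  # doctest: +SKIP
+#     '/tmp/a/b/c.fif'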
+
+
+def _check_fname(fname, overwrite):
+    """Helper to check for file existence"""
+    if not isinstance(fname, basestring):
+        raise TypeError('file name is not a string')
+    if op.isfile(fname):
+        if not overwrite:
+            raise IOError('Destination file exists. Please use option '
+                          '"overwrite=True" to force overwriting.')
+        else:
+            logger.info('Overwriting existing file.')
+
+
+def _check_subject(class_subject, input_subject, raise_error=True):
+    """Helper to get subject name from class"""
+    if input_subject is not None:
+        if not isinstance(input_subject, basestring):
+            raise ValueError('subject input must be a string')
+        else:
+            return input_subject
+    elif class_subject is not None:
+        if not isinstance(class_subject, basestring):
+            raise ValueError('Class subject attribute must be a string')
+        else:
+            return class_subject
+    else:
+        if raise_error is True:
+            raise ValueError('Neither subject input nor class subject '
+                             'attribute was provided')
+        return None
+        return None
+
+
+def _check_pandas_installed():
+    try:
+        import pandas as pd
+        return pd
+    except ImportError:
+        raise RuntimeError('For this method to work the Pandas library is'
+                           ' required.')
+
+
+def _check_pandas_index_arguments(index, defaults):
+    """ Helper function to check pandas index arguments """
+    if not isinstance(index, (list, tuple)):
+        index = [index]
+    invalid_choices = [e for e in index if e not in defaults]
+    if invalid_choices:
+        options = [', '.join(e) for e in [invalid_choices, defaults]]
+        raise ValueError('[%s] is not a valid option. Valid index '
+                         'values are \'None\' or %s' % tuple(options))
diff --git a/mne/viz.py b/mne/viz.py
new file mode 100644
index 0000000..d7769b5
--- /dev/null
+++ b/mne/viz.py
@@ -0,0 +1,2960 @@
+"""Functions to plot M/EEG data e.g. topographies
+"""
+
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#          Denis Engemann <d.engemann at fz-juelich.de>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+import os
+import warnings
+from itertools import cycle
+from functools import partial
+from copy import deepcopy
+import math
+
+import difflib
+import tempfile
+import webbrowser
+
+import copy
+import inspect
+import numpy as np
+from scipy import linalg
+from scipy import ndimage
+from matplotlib import delaunay
+
+import logging
+logger = logging.getLogger('mne')
+from warnings import warn
+
+
+# XXX : don't import pylab here or you will break the doc
+from .fixes import tril_indices, Counter
+from .baseline import rescale
+from .utils import deprecated, get_subjects_dir, get_config, set_config, \
+                   _check_subject
+from .fiff import show_fiff, FIFF
+from .fiff.pick import channel_type, pick_types
+from .fiff.proj import make_projector, setup_proj
+from . import verbose
+
+COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
+          '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
+
+
+DEFAULTS = dict(color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
+                    emg='k', ref_meg='steelblue', misc='k', stim='k',
+                    resp='k', chpi='k'),
+                units=dict(eeg='uV', grad='fT/cm', mag='fT', misc='AU'),
+                scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0),
+                scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
+                    eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc=1e-3,
+                    stim=1, resp=1, chpi=1e-4),
+                ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.),
+                          misc=(-5., 5.)),
+                titles=dict(eeg='EEG', grad='Gradiometers',
+                    mag='Magnetometers', misc='misc'))
+
+
+def _mutable_defaults(*mappings):
+    """ To avoid dicts as default keyword arguments
+
+    Use this function instead to resolve default dict values.
+    Example usage:
+    scalings, units = _mutable_defaults(('scalings', scalings),
+                                        ('units', units))
+    """
+    out = []
+    for k, v in mappings:
+        this_mapping = DEFAULTS[k]
+        if v is not None:
+            this_mapping = deepcopy(DEFAULTS[k])
+            this_mapping.update(v)
+        out += [this_mapping]
+    return out
+
+
+def _clean_names(names):
+    """ Remove white-space on topo matching
+
+    Over the years, Neuromag systems employed inconsistent handling of
+    white-space in layout names. This function handles different naming
+    conventions and hence should be used in each topography-plot to
+    warrant compatibility across systems.
+
+    Usage
+    -----
+    Wrap this function around channel and layout names:
+    ch_names = _clean_names(epochs.ch_names)
+
+    for n in _clean_names(layout.names):
+        if n in ch_names:
+            # prepare plot
+
+    """
+    return [n.replace(' ', '') if ' ' in n else n for n in names]
+
+
+def _check_delayed_ssp(container):
+    """ Aux function to be used for interactive SSP selection
+    """
+    if container.proj is True:
+        raise RuntimeError('Projs are already applied. Please initialize'
+                ' the data with proj set to False.')
+    elif len(container.info['projs']) < 1:
+        raise RuntimeError('No projs found in evoked.')
+
+
+def tight_layout(pad=1.2, h_pad=None, w_pad=None):
+    """ Adjust subplot parameters to give specified padding.
+
+    Note. For plotting please use this function instead of pl.tight_layout
+
+    Parameters
+    ----------
+    pad : float
+        padding between the figure edge and the edges of subplots, as a
+        fraction of the font-size.
+    h_pad, w_pad : float
+        padding (height/width) between edges of adjacent subplots.
+        Defaults to `pad_inches`.
+    """
+    import pylab as pl
+    try:
+        pl.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
+    except Exception:
+        msg = ('Matplotlib function \'tight_layout\'%s.'
+               ' Skipping subplot adjustment.')
+        if not hasattr(pl, 'tight_layout'):
+            case = ' is not available'
+        else:
+            case = ' seems corrupted'
+        warn(msg % case)
+
+
+def _plot_topo(info=None, times=None, show_func=None, layout=None,
+               decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
+               border='none', cmap=None, layout_scale=None, title=None,
+               x_label=None, y_label=None, vline=None):
+    """Helper function to plot on sensor layout"""
+    import pylab as pl
+    orig_facecolor = pl.rcParams['axes.facecolor']
+    orig_edgecolor = pl.rcParams['axes.edgecolor']
+    try:
+        if cmap is None:
+            cmap = pl.cm.jet
+        ch_names = _clean_names(info['ch_names'])
+        pl.rcParams['axes.facecolor'] = 'k'
+        fig = pl.figure(facecolor='k')
+        pos = layout.pos.copy()
+        tmin, tmax = times[0], times[-1]
+        if colorbar:
+            pos[:, :2] *= layout_scale
+            pl.rcParams['axes.edgecolor'] = 'k'
+            sm = pl.cm.ScalarMappable(cmap=cmap,
+                                      norm=pl.normalize(vmin=vmin, vmax=vmax))
+            sm.set_array(np.linspace(vmin, vmax))
+            ax = pl.axes([0.015, 0.025, 1.05, .8], axisbg='k')
+            cb = fig.colorbar(sm, ax=ax)
+            cb_yticks = pl.getp(cb.ax.axes, 'yticklabels')
+            pl.setp(cb_yticks, color='w')
+        pl.rcParams['axes.edgecolor'] = border
+        for idx, name in enumerate(_clean_names(layout.names)):
+            if name in ch_names:
+                ax = pl.axes(pos[idx], axisbg='k')
+                ch_idx = ch_names.index(name)
+                # hack to include channel idx and name, to use in callback
+                ax.__dict__['_mne_ch_name'] = name
+                ax.__dict__['_mne_ch_idx'] = ch_idx
+
+                if layout.kind == 'Vectorview-all' and ylim is not None:
+                    this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
+                    ylim_ = [v[this_type] if _check_vlim(v) else v
+                             for v in ylim]
+                else:
+                    ylim_ = ylim
+
+                show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
+                          vmax=vmax, ylim=ylim_)
+
+                if ylim_ and not any(v is None for v in ylim_):
+                    pl.ylim(*ylim_)
+                pl.xticks([], ())
+                pl.yticks([], ())
+
+        # register callback
+        callback = partial(_plot_topo_onpick, show_func=show_func, tmin=tmin,
+                           tmax=tmax, vmin=vmin, vmax=vmax, ylim=ylim,
+                           colorbar=colorbar, title=title, x_label=x_label,
+                           y_label=y_label,
+                           vline=vline)
+
+        fig.canvas.mpl_connect('pick_event', callback)
+        if title is not None:
+            pl.figtext(0.03, 0.9, title, color='w', fontsize=19)
+
+    finally:
+        # Revert global pylab config
+        pl.rcParams['axes.facecolor'] = orig_facecolor
+        pl.rcParams['axes.edgecolor'] = orig_edgecolor
+
+    return fig
+
+
+def _plot_topo_onpick(event, show_func=None, tmin=None, tmax=None,
+                      vmin=None, vmax=None, ylim=None, colorbar=False,
+                      title=None, x_label=None, y_label=None, vline=None):
+    """Onpick callback that shows a single channel in a new figure"""
+
+    # make sure that the swipe gesture in OS-X doesn't open many figures
+    if event.mouseevent.inaxes is None or event.mouseevent.button != 1:
+        return
+
+    artist = event.artist
+    try:
+        import pylab as pl
+        ch_idx = artist.axes._mne_ch_idx
+        fig, ax = pl.subplots(1)
+        ax.set_axis_bgcolor('k')
+        show_func(pl, ch_idx, tmin, tmax, vmin, vmax, ylim=ylim,
+                  vline=vline)
+        if colorbar:
+            pl.colorbar()
+        if title is not None:
+            pl.title(title + ' ' + artist.axes._mne_ch_name)
+        else:
+            pl.title(artist.axes._mne_ch_name)
+        if x_label is not None:
+            pl.xlabel(x_label)
+        if y_label is not None:
+            pl.ylabel(y_label)
+    except Exception as err:
+        # matplotlib silently ignores exceptions in event handlers, so we print
+        # it here to know what went wrong
+        print err
+        raise
+
+
+def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, tfr=None,
+                freq=None, vline=None):
+    """ Aux function to show time-freq map on topo """
+    extent = (tmin, tmax, freq[0], freq[-1])
+    ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
+              vmin=vmin, vmax=vmax, picker=True)
+
+
+def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
+                     times, vline=None):
+    """ Aux function to show time series on topo """
+    picker_flag = False
+    for data_, color_ in zip(data, color):
+        if not picker_flag:
+            # use large tol for picker so we can click anywhere in the axes
+            ax.plot(times, data_[ch_idx], color_, picker=1e9)
+            picker_flag = True
+        else:
+            ax.plot(times, data_[ch_idx], color_)
+    if vline:
+        import pylab as pl
+        [pl.axvline(x, color='w', linewidth=0.5) for x in vline]
+
+
+def _check_vlim(vlim):
+    """Aux function"""
+    return not np.isscalar(vlim) and vlim is not None
+
+
+def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
+              border='none', ylim=None, scalings=None, title=None, proj=False,
+              vline=[0.0]):
+    """Plot 2D topography of evoked responses.
+
+    Clicking on the plot of an individual sensor opens a new figure showing
+    the evoked response for the selected sensor.
+
+    Parameters
+    ----------
+    evoked : list of Evoked | Evoked
+        The evoked response to plot.
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    color : list of color objects | color object | None
+        Everything matplotlib accepts to specify colors. If not list-like,
+        the color specified will be repeated. If None, colors are
+        automatically drawn.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    ylim : dict | None
+        ylim for plots. The value determines the upper and lower subplot
+        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+        mag, grad, misc. If None, the ylim parameter for each channel is
+        determined by the maximum absolute peak.
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    title : str
+        Title of the figure.
+    vline : list of floats | None
+        The values at which to show a vertical line.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of evoked responses at sensor locations
+    """
+
+    if not type(evoked) in (tuple, list):
+        evoked = [evoked]
+
+    if type(color) in (tuple, list):
+        if len(color) != len(evoked):
+            raise ValueError('Lists of evoked objects and colors'
+                             ' must have the same length')
+    elif color is None:
+        colors = ['w'] + COLORS
+        stop = (slice(len(evoked)) if len(evoked) < len(colors)
+                else slice(len(colors)))
+        color = cycle(colors[stop])
+        if len(evoked) > len(colors):
+            warnings.warn('More evoked objects than colors available. '
+                          'You should pass a list of unique colors.')
+    else:
+        color = cycle([color])
+
+    times = evoked[0].times
+    if not all([(e.times == times).all() for e in evoked]):
+        raise ValueError('All evoked.times must be the same')
+
+    info = evoked[0].info
+    ch_names = evoked[0].ch_names
+    if not all([e.ch_names == ch_names for e in evoked]):
+        raise ValueError('All evoked.picks must be the same')
+    ch_names = _clean_names(ch_names)
+
+    if layout is None:
+        from .layouts.layout import find_layout
+        layout = find_layout(info['chs'])
+
+    # XXX. at the moment we are committed to 1- / 2-sensor-types layouts
+    layout = copy.deepcopy(layout)
+    layout.names = _clean_names(layout.names)
+    chs_in_layout = set(layout.names) & set(ch_names)
+    types_used = set(channel_type(info, ch_names.index(ch))
+                     for ch in chs_in_layout)
+    # one check for all vendors
+    meg_types = (['mag'], ['grad'], ['mag', 'grad'])
+    is_meg = any(types_used == set(k) for k in meg_types)
+    if is_meg:
+        types_used = list(types_used)[::-1]  # -> restore kwarg order
+        picks = [pick_types(info, meg=k, exclude=[]) for k in types_used]
+    else:
+        types_used_kwargs = dict((t, True) for t in types_used)
+        picks = [pick_types(info, meg=False, **types_used_kwargs)]
+    assert isinstance(picks, list) and len(types_used) == len(picks)
+
+    scalings = _mutable_defaults(('scalings', scalings))[0]
+    evoked = [e.copy() for e in evoked]
+    for e in evoked:
+        for pick, t in zip(picks, types_used):
+            e.data[pick] = e.data[pick] * scalings[t]
+
+    if proj is True and all([e.proj is not True for e in evoked]):
+        evoked = [e.apply_proj() for e in evoked]
+    elif proj == 'interactive':  # let it fail early.
+        for e in evoked:
+            _check_delayed_ssp(e)
+
+    plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
+                       color=color, times=times, vline=vline)
+
+    if ylim is None:
+        set_ylim = lambda x: np.abs(x).max()
+        ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
+        ymax = np.array(ylim_)
+        ylim_ = (-ymax, ymax)
+    elif isinstance(ylim, dict):
+        ylim_ = _mutable_defaults(('ylim', ylim))[0]
+        ylim_ = [ylim_[k] for k in types_used]
+        ylim_ = zip(*[np.array(yl) for yl in ylim_])
+    else:
+        raise ValueError('ylim must be None or a dict')
+
+    fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
+                     decim=1, colorbar=False, ylim=ylim_, cmap=None,
+                     layout_scale=layout_scale, border=border, title=title,
+                     x_label='Time (s)', vline=vline)
+
+    if proj == 'interactive':
+        for e in evoked:
+            _check_delayed_ssp(e)
+        params = dict(evokeds=evoked, times=times,
+                      plot_update_proj_callback=_plot_update_evoked_topo,
+                      projs=evoked[0].info['projs'], fig=fig)
+        _draw_proj_checkbox(None, params)
+
+    return fig
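+
+# A usage sketch for plot_topo (illustrative; the average file name and the
+# condition number are hypothetical):
+#
+#     >>> from mne.fiff import Evoked                   # doctest: +SKIP
+#     >>> evoked = Evoked('sample-ave.fif', setno=0)    # doctest: +SKIP
+#     >>> fig = plot_topo(evoked, title='Evoked')       # doctest: +SKIP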
+
+
+def _plot_update_evoked_topo(params, bools):
+    """Helper function to update topo sensor plots"""
+    evokeds, times, fig = [params[k] for k in 'evokeds', 'times', 'fig']
+
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+
+    params['proj_bools'] = bools
+    evokeds = [e.copy() for e in evokeds]
+    for e in evokeds:
+        e.info['projs'] = []
+        e.add_proj(projs)
+        e.apply_proj()
+
+    # make sure to only modify the time courses, not the ticks
+    axes = fig.get_axes()
+    n_lines = len(axes[0].lines)
+    n_diff = len(evokeds) - n_lines
+    ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
+    for ax in axes:
+        lines = ax.lines[ax_slice]
+        for line, evoked in zip(lines, evokeds):
+            line.set_data(times, evoked.data[ax._mne_ch_idx])
+
+    fig.canvas.draw()
+
+
+def plot_topo_tfr(epochs, tfr, freq, layout=None, colorbar=True, vmin=None,
+                  vmax=None, cmap=None, layout_scale=0.945, title=None):
+    """Plot time-frequency data on sensor layout
+
+    Clicking on the time-frequency map of an individual sensor opens a
+    new figure showing the time-frequency map of the selected sensor.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs used to generate the power
+    tfr : 3D-array shape=(n_sensors, n_freqs, n_times)
+        The time-frequency data. Must have the same channels as Epochs.
+    freq : array-like
+        Frequencies of interest as passed to induced_power
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    colorbar : bool
+        If true, colorbar will be added to the plot
+    vmin : float
+        Minimum value mapped to lowermost color
+    vmax : float
+        Maximum value mapped to uppermost color
+    cmap : instance of matplotlib.pylab.colormap
+        Colors to be mapped to the values
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    title : str
+        Title of the figure.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of time-frequency data at sensor locations
+    """
+
+    if vmin is None:
+        vmin = tfr.min()
+    if vmax is None:
+        vmax = tfr.max()
+
+    if layout is None:
+        from .layouts.layout import find_layout
+        layout = find_layout(epochs.info['chs'])
+    layout = copy.deepcopy(layout)
+    layout.names = _clean_names(layout.names)
+
+    tfr_imshow = partial(_imshow_tfr, tfr=tfr.copy(), freq=freq)
+
+    fig = _plot_topo(info=epochs.info, times=epochs.times,
+                     show_func=tfr_imshow, layout=layout, border='w',
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title,
+                     x_label='Time (s)', y_label='Frequency (Hz)')
+
+    return fig
+
+
+def plot_topo_power(epochs, power, freq, layout=None, baseline=None,
+                    mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,
+                    cmap=None, layout_scale=0.945, dB=True, title=None):
+    """Plot induced power on sensor layout
+
+    Clicking on the induced power map of an individual sensor opens a
+    new figure showing the induced power map of the selected sensor.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs used to generate the power
+    power : 3D-array
+        First return value from mne.time_frequency.induced_power
+    freq : array-like
+        Frequencies of interest as passed to induced_power
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    baseline : tuple or list of length 2
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or z-score (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+        If None, no baseline correction will be performed.
+    decim : integer
+        Increment for selecting each nth time slice
+    colorbar : bool
+        If true, colorbar will be added to the plot
+    vmin : float
+        Minimum value mapped to lowermost color
+    vmax : float
+        Maximum value mapped to uppermost color
+    cmap : instance of matplotlib.pylab.colormap
+        Colors to be mapped to the values
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    dB : bool
+        If True, log10 will be applied to the data.
+    title : str
+        Title of the figure.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of induced power at sensor locations
+    """
+    times = epochs.times[::decim] * 1e3
+    if mode is not None:
+        if baseline is None:
+            baseline = epochs.baseline
+        power = rescale(power.copy(), times, baseline, mode)
+    if dB:
+        power = 20 * np.log10(power)
+    if vmin is None:
+        vmin = power.min()
+    if vmax is None:
+        vmax = power.max()
+    if layout is None:
+        from .layouts.layout import find_layout
+        layout = find_layout(epochs.info['chs'])
+    layout = copy.deepcopy(layout)
+    layout.names = _clean_names(layout.names)
+
+    power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)
+
+    fig = _plot_topo(info=epochs.info, times=times,
+                     show_func=power_imshow, layout=layout, decim=decim,
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title, border='w',
+                     x_label='Time (s)', y_label='Frequency (Hz)')
+
+    return fig
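+
+# A usage sketch for plot_topo_power (illustrative; assumes `epochs` and
+# `freqs` already exist and that induced_power accepts the arguments
+# sketched here):
+#
+#     >>> from mne.time_frequency import induced_power           # doctest: +SKIP
+#     >>> power, phase_lock = induced_power(                     # doctest: +SKIP
+#     ...     epochs.get_data(), Fs=epochs.info['sfreq'],
+#     ...     frequencies=freqs, n_cycles=2)
+#     >>> fig = plot_topo_power(epochs, power, freqs,            # doctest: +SKIP
+#     ...                       baseline=(None, 0), mode='ratio')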
+
+
+def plot_topo_phase_lock(epochs, phase, freq, layout=None, baseline=None,
+                         mode='mean', decim=1, colorbar=True, vmin=None,
+                         vmax=None, cmap=None, layout_scale=0.945,
+                         title=None):
+    """Plot phase locking values (PLV) on sensor layout
+
+    Clicking on the PLV map of an individual sensor opens a new figure
+    showing the PLV map of the selected sensor.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs used to generate the phase locking value
+    phase : 3D-array
+        Phase locking value, second return value from
+        mne.time_frequency.induced_power.
+    freq : array-like
+        Frequencies of interest as passed to induced_power
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    baseline : tuple or list of length 2
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
+        Do baseline correction with ratio (phase is divided by mean
+        phase during baseline) or z-score (phase is divided by standard
+        deviation of phase during baseline after subtracting the mean,
+        phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
+        If None, no baseline correction will be performed.
+    decim : integer
+        Increment for selecting each nth time slice
+    colorbar : bool
+        If true, colorbar will be added to the plot
+    vmin : float
+        Minimum value mapped to lowermost color
+    vmax : float
+        Maximum value mapped to uppermost color
+    cmap : instance of matplotlib.pylab.colormap
+        Colors to be mapped to the values
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    title : str
+        Title of the figure.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Phase lock images at sensor locations
+    """
+    times = epochs.times[::decim] * 1e3
+    if mode is not None:
+        if baseline is None:
+            baseline = epochs.baseline
+        phase = rescale(phase.copy(), times, baseline, mode)
+    if vmin is None:
+        vmin = phase.min()
+    if vmax is None:
+        vmax = phase.max()
+    if layout is None:
+        from .layouts.layout import find_layout
+        layout = find_layout(epochs.info['chs'])
+    layout = copy.deepcopy(layout)
+    layout.names = _clean_names(layout.names)
+
+    phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)
+
+    fig = _plot_topo(info=epochs.info, times=times,
+                     show_func=phase_imshow, layout=layout, decim=decim,
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title, border='w',
+                     x_label='Time (s)', y_label='Frequency (Hz)')
+
+    return fig
+
+
+def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
+                     data=None, epochs=None, sigma=None,
+                     order=None, scalings=None, vline=None):
+    """Aux function to plot erfimage on sensor topography"""
+
+    this_data = data[:, ch_idx, :].copy()
+    ch_type = channel_type(epochs.info, ch_idx)
+    if ch_type not in scalings:
+        raise KeyError('%s channel type not in scalings' % ch_type)
+    this_data *= scalings[ch_type]
+
+    if callable(order):
+        order = order(epochs.times, this_data)
+
+    if order is not None:
+        this_data = this_data[order]
+
+    this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
+
+    ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
+              origin='lower', vmin=vmin, vmax=vmax, picker=True)
+
+
+def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
+                           vmax=None, colorbar=True, order=None, cmap=None,
+                           layout_scale=.95, title=None, scalings=None):
+    """Plot Event Related Potential / Fields image on topographies
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    layout: instance of Layout
+        System specific sensor positions.
+    sigma : float
+        The standard deviation of the Gaussian smoothing to apply along
+        the epoch axis of the image.
+    vmin : float
+        The min value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    vmax : float
+        The max value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    colorbar : bool
+        Whether to display a colorbar.
+    order : None | array of int | callable
+        If not None, order is used to reorder the epochs on the y-axis
+        of the image. If it's an array of int it should be of length
+        the number of good epochs. If it's a callable the arguments
+        passed are the times vector and the data as 2d array
+        (data.shape[1] == len(times)).
+    cmap : instance of matplotlib.pylab.colormap
+        Colors to be mapped to the values.
+    layout_scale: float
+        scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    title : str
+        Title of the figure.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If
+        None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        Figure distributing one image per channel across sensor topography.
+    """
+    scalings = _mutable_defaults(('scalings', scalings))[0]
+    data = epochs.get_data()
+    if vmin is None:
+        vmin = data.min()
+    if vmax is None:
+        vmax = data.max()
+    if layout is None:
+        from .layouts.layout import find_layout
+        layout = find_layout(epochs.info['chs'])
+    layout = copy.deepcopy(layout)
+    layout.names = _clean_names(layout.names)
+
+    erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
+                         data=data, epochs=epochs, sigma=sigma)
+
+    fig = _plot_topo(info=epochs.info, times=epochs.times, show_func=erf_imshow,
+                     layout=layout, decim=1, colorbar=colorbar, vmin=vmin,
+                     vmax=vmax, cmap=cmap, layout_scale=layout_scale, title=title,
+                     border='w', x_label='Time (s)', y_label='Epoch')
+
+    return fig
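+
+# A usage sketch for plot_topo_image_epochs (illustrative; assumes an
+# `epochs` instance is available):
+#
+#     >>> fig = plot_topo_image_epochs(epochs, sigma=0.5,   # doctest: +SKIP
+#     ...                              vmin=-250, vmax=250)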
+
+
+def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
+                        vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
+                        scale=None, unit=None, res=256, size=1, format='%3.1f',
+                        proj=False, show=True):
+    """Plot topographic maps of specific time points of evoked data
+
+    Parameters
+    ----------
+    evoked : Evoked
+        The Evoked object.
+    times : float | array of floats | None
+        The time point(s) to plot. If None, 10 topographies will be shown
+        with a regular time spacing between the first and last time instant.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        The channel type to plot. For 'grad', the gradiometers are collected in
+        pairs and the RMS for each pair is plotted.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    vmax : scalar
+        The value specifying the range of the color scale (-vmax to +vmax). If
+        None, the largest absolute value in the data is used.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses).
+    colorbar : bool
+        Plot a colorbar.
+    scale : float | None
+        Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+        for grad and 1e15 for mag.
+    unit : str | None
+        The unit of the channel type used for colorbar labels. If
+        scale is None, the unit is automatically determined.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : float
+        Side length per topomap in inches.
+    format : str
+        String format for colorbar values.
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    show : bool
+        Call pylab.show() at the end.
+    """
+    import pylab as pl
+
+    if scale is None:
+        if ch_type.startswith('planar'):
+            key = 'grad'
+        else:
+            key = ch_type
+        scale = DEFAULTS['scalings'][key]
+        unit = DEFAULTS['units'][key]
+
+    if times is None:
+        times = np.linspace(evoked.times[0], evoked.times[-1], 10)
+    elif np.isscalar(times):
+        times = [times]
+    if len(times) > 20:
+        raise RuntimeError('Too many plots requested. Please pass fewer '
+                           'than 20 time instants.')
+    tmin, tmax = evoked.times[[0, -1]]
+    for ii, t in enumerate(times):
+        if not tmin <= t <= tmax:
+            raise ValueError('Times should be between %0.3f and %0.3f. (Got '
+                             '%0.3f).' % (tmin, tmax, t))
+
+    info = copy.deepcopy(evoked.info)
+
+    if layout is None:
+        from .layouts.layout import find_layout
+        layout = find_layout(info['chs'])
+    elif layout == 'auto':
+        layout = None
+
+    info['ch_names'] = _clean_names(info['ch_names'])
+    for ii, this_ch in enumerate(info['chs']):
+        this_ch['ch_name'] = info['ch_names'][ii]
+
+    if layout is not None:
+        layout = copy.deepcopy(layout)
+        layout.names = _clean_names(layout.names)
+
+    # special case for merging grad channels
+    if (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
+                    np.unique([ch['coil_type'] for ch in info['chs']])):
+        from .layouts.layout import _pair_grad_sensors, _merge_grad_data
+        picks, pos = _pair_grad_sensors(info, layout)
+        merge_grads = True
+    else:
+        merge_grads = False
+        picks = pick_types(info, meg=ch_type, exclude='bads')
+        if len(picks) == 0:
+            raise ValueError("No channels of type %r" % ch_type)
+
+        if layout is None:
+            chs = [info['chs'][i] for i in picks]
+            from .layouts.layout import _find_topomap_coords
+            pos = _find_topomap_coords(chs, layout)
+        else:
+            pos = [layout.pos[layout.names.index(info['ch_names'][k])] for k in
+                   picks]
+
+    n = len(times)
+    nax = n + bool(colorbar)
+    width = size * nax
+    height = size * 1. + max(0, 0.1 * (3 - size))
+    fig = pl.figure(figsize=(width, height))
+    w_frame = pl.rcParams['figure.subplot.wspace'] / (2 * nax)
+    top_frame = max(.05, .2 / size)
+    fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0, top=1 - top_frame)
+    time_idx = [np.where(evoked.times >= t)[0][0] for t in times]
+
+    if proj is True and evoked.proj is not True:
+        data = evoked.copy().apply_proj().data
+    else:
+        data = evoked.data
+
+    data = data[np.ix_(picks, time_idx)] * scale
+    if merge_grads:
+        data = _merge_grad_data(data)
+    vmax = vmax or np.max(np.abs(data))
+    images = []
+    for i, t in enumerate(times):
+        pl.subplot(1, nax, i + 1)
+        images.append(plot_topomap(data[:, i], pos, vmax=vmax, cmap=cmap,
+                      sensors=sensors, res=res))
+        pl.title('%i ms' % (t * 1000))
+
+    if colorbar:
+        cax = pl.subplot(1, n + 1, n + 1)
+        pl.colorbar(cax=cax, ticks=[-vmax, 0, vmax], format=format)
+        # resize the colorbar (by default the color fills the whole axes)
+        cpos = cax.get_position()
+        cpos.x0 = 1 - (.7 + .1 / size) / nax
+        cpos.x1 = cpos.x0 + .1 / nax
+        cpos.y0 = .1
+        cpos.y1 = .7
+        cax.set_position(cpos)
+        if unit is not None:
+            cax.set_title(unit)
+
+    if proj == 'interactive':
+        _check_delayed_ssp(evoked)
+        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
+                      picks=picks, images=images, time_idx=time_idx,
+                      scale=scale, merge_grads=merge_grads, res=res, pos=pos,
+                      plot_update_proj_callback=_plot_update_evoked_topomap)
+        _draw_proj_checkbox(None, params)
+
+    if show:
+        pl.show()
+
+    return fig
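+
+# A usage sketch for plot_evoked_topomap (illustrative; assumes an `evoked`
+# instance is available):
+#
+#     >>> fig = plot_evoked_topomap(evoked, times=[0.1, 0.2],  # doctest: +SKIP
+#     ...                           ch_type='mag', proj=True)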
+
+
+def _plot_update_evoked_topomap(params, bools):
+    """ Helper to update topomaps """
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+
+    params['proj_bools'] = bools
+    new_evoked = params['evoked'].copy()
+    new_evoked.info['projs'] = []
+    new_evoked.add_proj(projs)
+    new_evoked.apply_proj()
+
+    data = new_evoked.data[np.ix_(params['picks'], params['time_idx'])] \
+                            * params['scale']
+    if params['merge_grads']:
+        from .layouts.layout import _merge_grad_data
+        data = _merge_grad_data(data)
+
+    pos = np.asarray(params['pos'])
+    pos_x = pos[:, 0]
+    pos_y = pos[:, 1]
+    xmin, xmax = pos_x.min(), pos_x.max()
+    ymin, ymax = pos_y.min(), pos_y.max()
+    triang = delaunay.Triangulation(pos_x, pos_y)
+    x = np.linspace(xmin, xmax, params['res'])
+    y = np.linspace(ymin, ymax, params['res'])
+    xi, yi = np.meshgrid(x, y)
+
+    for ii, im in enumerate(params['images']):
+        interp = triang.linear_interpolator(data[:, ii])
+        im_ = interp[yi.min():yi.max():complex(0, yi.shape[0]),
+                     xi.min():xi.max():complex(0, xi.shape[1])]
+        im_ = np.ma.masked_array(im_, np.isnan(im_))
+        im.set_data(im_)
+    params['fig'].canvas.draw()
+
+
+def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
+                       colorbar=False, res=256, size=1, show=True):
+    """Plot topographic maps of SSP projections
+
+    Parameters
+    ----------
+    projs : list of Projection
+        The projections
+    layout : None | Layout | list of Layout
+        Layout instance specifying sensor positions (does not need to be
+        specified for Neuromag data). Or a list of Layout if projections
+        are from different sensor types.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses).
+    colorbar : bool
+        Plot a colorbar.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : scalar
+        Side length of the topomaps in inches (only applies when plotting
+        multiple topomaps at a time).
+    show : bool
+        Show figures if True
+    """
+    import pylab as pl
+
+    if layout is None:
+        from .layouts import read_layout
+        layout = read_layout('Vectorview-all')
+
+    if not isinstance(layout, list):
+        layout = [layout]
+
+    n_projs = len(projs)
+    nrows = int(math.floor(math.sqrt(n_projs)))
+    ncols = int(math.ceil(n_projs / float(nrows)))
+
+    pl.clf()
+    for k, proj in enumerate(projs):
+        ch_names = proj['data']['col_names']
+        data = proj['data']['data'].ravel()
+
+        idx = []
+        for l in layout:
+            is_vv = l.kind.startswith('Vectorview')
+            if is_vv:
+                from .layouts.layout import _pair_grad_sensors_from_ch_names
+                grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
+                if grad_pairs:
+                    ch_names = [ch_names[i] for i in grad_pairs]
+
+            idx = [l.names.index(c) for c in ch_names if c in l.names]
+            if len(idx) == 0:
+                continue
+
+            pos = l.pos[idx]
+            if is_vv and grad_pairs:
+                from .layouts.layout import _merge_grad_data
+                shape = (len(idx) / 2, 2, -1)
+                pos = pos.reshape(shape).mean(axis=1)
+                data = _merge_grad_data(data[grad_pairs]).ravel()
+
+            break
+
+        ax = pl.subplot(nrows, ncols, k + 1)
+        ax.set_title(proj['desc'])
+        if len(idx):
+            plot_topomap(data, pos, vmax=None, cmap=cmap,
+                         sensors=sensors, res=res)
+            if colorbar:
+                pl.colorbar()
+        else:
+            raise RuntimeError('Cannot find a proper layout for projection %s'
+                               % proj['desc'])
+
+    if show:
+        pl.show()
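+
+# A usage sketch for plot_projs_topomap (illustrative; the projection file
+# name is hypothetical and read_proj is assumed to be importable from the
+# package level):
+#
+#     >>> from mne import read_proj                 # doctest: +SKIP
+#     >>> projs = read_proj('sample-proj.fif')      # doctest: +SKIP
+#     >>> plot_projs_topomap(projs, colorbar=True)  # doctest: +SKIP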
+
+
+def plot_topomap(data, pos, vmax=None, cmap='RdBu_r', sensors='k,', res=100):
+    """Plot a topographic map as image
+
+    Parameters
+    ----------
+    data : array, length = n_points
+        The data values to plot.
+    pos : array, shape = (n_points, 2)
+        For each data point, the x and y coordinates.
+    vmax : scalar
+        The value specifying the range of the color scale (-vmax to +vmax). If
+        None, the largest absolute value in the data is used.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses).
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    """
+    import pylab as pl
+
+    data = np.asarray(data)
+    pos = np.asarray(pos)
+    if data.ndim > 1:
+        err = ("Data needs to be array of shape (n_sensors,); got shape "
+               "%s." % str(data.shape))
+        raise ValueError(err)
+    elif len(data) != len(pos):
+        err = ("Data and pos need to be of same length. Got data of shape %s, "
+               "pos of shape %s." % (str(data.shape), str(pos.shape)))
+        raise ValueError(err)
+
+    axes = pl.gca()
+    axes.set_frame_on(False)
+
+    vmax = vmax or np.abs(data).max()
+
+    pl.xticks(())
+    pl.yticks(())
+
+    pos_x = pos[:, 0]
+    pos_y = pos[:, 1]
+    if sensors:
+        if sensors is True:
+            sensors = 'k,'
+        pl.plot(pos_x, pos_y, sensors)
+
+    xmin, xmax = pos_x.min(), pos_x.max()
+    ymin, ymax = pos_y.min(), pos_y.max()
+    triang = delaunay.Triangulation(pos_x, pos_y)
+    interp = triang.linear_interpolator(data)
+    x = np.linspace(xmin, xmax, res)
+    y = np.linspace(ymin, ymax, res)
+    xi, yi = np.meshgrid(x, y)
+
+    im = interp[yi.min():yi.max():complex(0, yi.shape[0]),
+                xi.min():xi.max():complex(0, xi.shape[1])]
+    im = np.ma.masked_array(im, np.isnan(im))
+
+    im = pl.imshow(im, cmap=cmap, vmin=-vmax, vmax=vmax, origin='lower',
+                   aspect='equal', extent=(xmin, xmax, ymin, ymax))
+    return im
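+
+# A self-contained sketch for plot_topomap using synthetic sensor positions
+# and data (illustrative only):
+#
+#     >>> import numpy as np                          # doctest: +SKIP
+#     >>> rng = np.random.RandomState(42)             # doctest: +SKIP
+#     >>> pos, vals = rng.rand(32, 2), rng.randn(32)  # doctest: +SKIP
+#     >>> im = plot_topomap(vals, pos, res=64)        # doctest: +SKIP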
+
+
+def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
+                ylim=None, proj=False, xlim='tight', hline=None, units=None,
+                scalings=None, titles=None, axes=None):
+    """Plot evoked data
+
+    Note: If bad channels are not excluded they are shown in red.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked data
+    picks : None | array-like of int
+        The indices of channels to plot. If None show all.
+    exclude : list of str | 'bads'
+        Channel names to exclude from being shown. If 'bads', the
+        bad channels are excluded.
+    unit : bool
+        Scale plot with channel (SI) unit.
+    show : bool
+        Call pylab.show() at the end or not.
+    ylim : dict | None
+        ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
+        Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
+        for each channel equals the pylab default.
+    xlim : 'tight' | tuple | None
+        xlim for plots.
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    hline : list of floats | None
+        The values at which to show a horizontal line.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    titles : dict | None
+        The titles associated with the channels. If None, defaults to
+        `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+    axes : instance of Axes | list | None
+        The axes to plot to. If list, the list must be a list of Axes of
+        the same length as the number of channel types. If instance of
+        Axes, there must be only one channel type plotted.
+    """
+    import pylab as pl
+    if axes is not None and proj == 'interactive':
+        raise RuntimeError('Currently only single axis figures are supported'
+                           ' for interactive SSP selection.')
+
+    scalings, titles, units = _mutable_defaults(('scalings', scalings),
+                                                ('titles', titles),
+                                                ('units', units))
+
+    channel_types = set(key for d in [scalings, titles, units] for key in d)
+    if picks is None:
+        picks = range(evoked.info['nchan'])
+
+    bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
+                  if ch in evoked.ch_names]
+    if len(exclude) > 0:
+        if isinstance(exclude, basestring) and exclude == 'bads':
+            exclude = bad_ch_idx
+        elif (isinstance(exclude, list)
+              and all([isinstance(ch, basestring) for ch in exclude])):
+            exclude = [evoked.ch_names.index(ch) for ch in exclude]
+        else:
+            raise ValueError('exclude has to be a list of channel names or '
+                             '"bads"')
+
+        picks = list(set(picks).difference(exclude))
+
+    types = [channel_type(evoked.info, idx) for idx in picks]
+    n_channel_types = 0
+    ch_types_used = []
+    for t in channel_types:
+        if t in types:
+            n_channel_types += 1
+            ch_types_used.append(t)
+
+    if axes is None:
+        pl.clf()
+        axes = [pl.subplot(n_channel_types, 1, c + 1)
+                for c in range(n_channel_types)]
+    if not isinstance(axes, list):
+        axes = [axes]
+    if not len(axes) == n_channel_types:
+        raise ValueError('Number of axes (%g) must match number of channel '
+                         'types (%g)' % (len(axes), n_channel_types))
+
+    fig = axes[0].get_figure()
+
+    # instead of projecting during each iteration let's use the mixin here.
+    if proj is True and evoked.proj is not True:
+        evoked = evoked.copy()
+        evoked.apply_proj()
+
+    times = 1e3 * evoked.times  # time in milliseconds
+    for ax, t in zip(axes, ch_types_used):
+        ch_unit = units[t]
+        this_scaling = scalings[t]
+        if unit is False:
+            this_scaling = 1.0
+            ch_unit = 'NA'  # no unit
+        idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+        if len(idx) > 0:
+            if any([i in bad_ch_idx for i in idx]):
+                colors = ['k'] * len(idx)
+                for i in bad_ch_idx:
+                    if i in idx:
+                        colors[idx.index(i)] = 'r'
+
+                ax._get_lines.color_cycle = iter(colors)
+            else:
+                ax._get_lines.color_cycle = cycle(['k'])
+
+            D = this_scaling * evoked.data[idx, :]
+            pl.axes(ax)
+            ax.plot(times, D.T)
+            if xlim is not None:
+                if xlim == 'tight':
+                    xlim = (times[0], times[-1])
+                pl.xlim(xlim)
+            if ylim is not None and t in ylim:
+                pl.ylim(ylim[t])
+            pl.title(titles[t] + ' (%d channel%s)' % (
+                     len(D), 's' if len(D) > 1 else ''))
+            pl.xlabel('time (ms)')
+            pl.ylabel('data (%s)' % ch_unit)
+
+            if hline is not None:
+                for h in hline:
+                    pl.axhline(h, color='r', linestyle='--', linewidth=2)
+
+    pl.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
+    tight_layout()
+
+    if proj == 'interactive':
+        _check_delayed_ssp(evoked)
+        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
+                      axes=axes, types=types, units=units, scalings=scalings,
+                      unit=unit, ch_types_used=ch_types_used, picks=picks,
+                      plot_update_proj_callback=_plot_update_evoked)
+        _draw_proj_checkbox(None, params)
+
+    if show:
+        pl.show()
+
+    return fig
+
+
+def _plot_update_evoked(params, bools):
+    """ update the plot evoked lines
+    """
+    picks, evoked = [params[k] for k in ('picks', 'evoked')]
+    times = evoked.times * 1e3
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+    params['proj_bools'] = bools
+    new_evoked = evoked.copy()
+    new_evoked.info['projs'] = []
+    new_evoked.add_proj(projs)
+    new_evoked.apply_proj()
+    for ax, t in zip(params['axes'], params['ch_types_used']):
+        this_scaling = params['scalings'][t]
+        idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
+        D = this_scaling * new_evoked.data[idx, :]
+        [line.set_data(times, di) for line, di in zip(ax.lines, D)]
+    params['fig'].canvas.draw()
+
+
+def _draw_proj_checkbox(event, params, draw_current_state=True):
+    """Toggle options (projectors) dialog"""
+    import pylab as pl
+    projs = params['projs']
+    # turn on options dialog
+    fig_proj = figure_nobar()
+    fig_proj.canvas.set_window_title('SSP projection vectors')
+    ax_temp = pl.axes((0, 0, 1, 1))
+    ax_temp.get_yaxis().set_visible(False)
+    ax_temp.get_xaxis().set_visible(False)
+    fig_proj.add_axes(ax_temp)
+    labels = [p['desc'] for p in projs]
+    actives = [p['active'] for p in projs] if draw_current_state else \
+              [True] * len(params['projs'])
+    proj_checks = pl.mpl.widgets.CheckButtons(ax_temp, labels=labels,
+                                              actives=actives)
+    # change already-applied projectors to red
+    for ii, p in enumerate(projs):
+        if p['active'] is True:
+            for x in proj_checks.lines[ii]:
+                x.set_color('r')
+    # make minimal size
+    width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
+    height = len(projs) / 6.0 + 0.5
+    # have to try/catch when there's no toolbar
+    try:
+        fig_proj.set_size_inches((width, height), forward=True)
+    except Exception:
+        pass
+    # connect the check boxes to the projection-toggling callback
+    proj_checks.on_clicked(partial(_toggle_proj, params=params))
+    params['proj_checks'] = proj_checks
+    # this should work for non-test cases
+    try:
+        fig_proj.canvas.show()
+    except Exception:
+        pass
+
+
+def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
+                                 fontsize=18, bgcolor=(.05, 0, .1),
+                                 opacity=0.2, brain_color=(0.7,) * 3,
+                                 show=True, high_resolution=False,
+                                 fig_name=None, fig_number=None, labels=None,
+                                 modes=['cone', 'sphere'],
+                                 scale_factors=[1, 0.6],
+                                 verbose=None, **kwargs):
+    """Plot source estimates obtained with sparse solver
+
+    Active dipoles are represented in a "Glass" brain.
+    If the same source is active in multiple source estimates, it is
+    displayed with a sphere, otherwise with a cone in 3D.
+
+    Parameters
+    ----------
+    src : dict
+        The source space.
+    stcs : instance of SourceEstimate or list of instances of SourceEstimate
+        The source estimates (up to 3).
+    colors : list
+        List of colors.
+    linewidth : int
+        Line width in 2D plot.
+    fontsize : int
+        Font size.
+    bgcolor : tuple of length 3
+        Background color in 3D.
+    opacity : float in [0, 1]
+        Opacity of brain mesh.
+    brain_color : tuple of length 3
+        Brain color.
+    show : bool
+        Show figures if True.
+    fig_name : str | None
+        Mayavi figure name.
+    fig_number : int | None
+        Pylab figure number.
+    labels : ndarray or list of ndarrays
+        Labels used to show sources in clusters. Sources with the same
+        label are presented in the same color, as are the waveforms
+        within each cluster. labels should be a list of ndarrays when
+        stcs is a list, i.e., one label array per stc.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    kwargs : kwargs
+        Keyword arguments to pass to mlab.triangular_mesh.
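+
+    Examples
+    --------
+    A minimal usage sketch (`fwd` and `stcs` are assumed to come from a
+    sparse solver, e.g. a forward solution dict and mixed-norm estimates):
+
+        surface = plot_sparse_source_estimates(fwd['src'], stcs,
+                                               opacity=0.1, fig_name='MxNE')
+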
+    """
+    if not isinstance(stcs, list):
+        stcs = [stcs]
+    if labels is not None and not isinstance(labels, list):
+        labels = [labels]
+
+    if colors is None:
+        colors = COLORS
+
+    linestyles = ['-', '--', ':']
+
+    # Show 3D
+    lh_points = src[0]['rr']
+    rh_points = src[1]['rr']
+    points = np.r_[lh_points, rh_points]
+
+    lh_normals = src[0]['nn']
+    rh_normals = src[1]['nn']
+    normals = np.r_[lh_normals, rh_normals]
+
+    if high_resolution:
+        use_lh_faces = src[0]['tris']
+        use_rh_faces = src[1]['tris']
+    else:
+        use_lh_faces = src[0]['use_tris']
+        use_rh_faces = src[1]['use_tris']
+
+    use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
+
+    points *= 170
+
+    vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
+               for stc in stcs]
+    unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
+
+    try:
+        from mayavi import mlab
+    except ImportError:
+        from enthought.mayavi import mlab
+
+    from matplotlib.colors import ColorConverter
+    color_converter = ColorConverter()
+
+    f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
+    mlab.clf()
+    if mlab.options.backend != 'test':
+        f.scene.disable_render = True
+    surface = mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
+                                   use_faces, color=brain_color,
+                                   opacity=opacity, **kwargs)
+
+    import pylab as pl
+    # Show time courses
+    pl.figure(fig_number)
+    pl.clf()
+
+    colors = cycle(colors)
+
+    logger.info("Total number of active sources: %d" % len(unique_vertnos))
+
+    if labels is not None:
+        colors = [colors.next() for _ in
+                  range(np.unique(np.concatenate(labels).ravel()).size)]
+
+    for idx, v in enumerate(unique_vertnos):
+        # get indices of stcs it belongs to
+        ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
+        is_common = len(ind) > 1
+
+        if labels is None:
+            c = colors.next()
+        else:
+            # if vertex is in different stcs then take label from first one
+            c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
+
+        mode = modes[1] if is_common else modes[0]
+        scale_factor = scale_factors[1] if is_common else scale_factors[0]
+
+        if (isinstance(scale_factor, (np.ndarray, list, tuple))
+            and len(unique_vertnos) == len(scale_factor)):
+            scale_factor = scale_factor[idx]
+
+        x, y, z = points[v]
+        nx, ny, nz = normals[v]
+        mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
+                      mode=mode, scale_factor=scale_factor)
+
+        for k in ind:
+            vertno = vertnos[k]
+            mask = (vertno == v)
+            assert np.sum(mask) == 1
+            linestyle = linestyles[k]
+            pl.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
+                    c=c, linewidth=linewidth, linestyle=linestyle)
+
+    pl.xlabel('Time (ms)', fontsize=18)
+    pl.ylabel('Source amplitude (nAm)', fontsize=18)
+
+    if fig_name is not None:
+        pl.title(fig_name)
+
+    if show:
+        pl.show()
+
+    surface.actor.property.backface_culling = True
+    surface.actor.property.shading = True
+
+    return surface
+
+
+@verbose
+def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
+             show=True, verbose=None):
+    """Plot Covariance data
+
+    Parameters
+    ----------
+    cov : instance of Covariance
+        The covariance matrix.
+    info : dict
+        Measurement info.
+    exclude : list of string | str
+        List of channels to exclude. If empty, do not exclude any channel.
+        If 'bads', exclude info['bads'].
+    colorbar : bool
+        Show colorbar or not.
+    proj : bool
+        Apply projections or not.
+    show : bool
+        Call pylab.show() at the end or not.
+    show_svd : bool
+        Also plot the singular values of the noise covariance for each
+        sensor type. We show square roots, i.e., standard deviations.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
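+
+    Examples
+    --------
+    A minimal usage sketch (`cov` and `raw` are assumed to be loaded
+    Covariance and Raw instances, respectively):
+
+        plot_cov(cov, raw.info, exclude='bads', proj=True)
+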
+    """
+    if exclude == 'bads':
+        exclude = info['bads']
+    ch_names = [n for n in cov.ch_names if n not in exclude]
+    ch_idx = [cov.ch_names.index(n) for n in ch_names]
+    info_ch_names = info['ch_names']
+    sel_eeg = pick_types(info, meg=False, eeg=True, exclude=exclude)
+    sel_mag = pick_types(info, meg='mag', eeg=False, exclude=exclude)
+    sel_grad = pick_types(info, meg='grad', eeg=False, exclude=exclude)
+    idx_eeg = [ch_names.index(info_ch_names[c])
+               for c in sel_eeg if info_ch_names[c] in ch_names]
+    idx_mag = [ch_names.index(info_ch_names[c])
+               for c in sel_mag if info_ch_names[c] in ch_names]
+    idx_grad = [ch_names.index(info_ch_names[c])
+                for c in sel_grad if info_ch_names[c] in ch_names]
+
+    idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
+                 (idx_grad, 'Gradiometers', 'fT/cm', 1e13),
+                 (idx_mag, 'Magnetometers', 'fT', 1e15)]
+    idx_names = [(idx, name, unit, scaling)
+                 for idx, name, unit, scaling in idx_names if len(idx) > 0]
+
+    C = cov.data[ch_idx][:, ch_idx]
+
+    if proj:
+        projs = copy.deepcopy(info['projs'])
+
+        #   Activate the projection items
+        for p in projs:
+            p['active'] = True
+
+        P, ncomp, _ = make_projector(projs, ch_names)
+        if ncomp > 0:
+            logger.info('    Created an SSP operator (subspace dimension'
+                        ' = %d)' % ncomp)
+            C = np.dot(P, np.dot(C, P.T))
+        else:
+            logger.info('    The projection vectors do not apply to these '
+                        'channels.')
+
+    import pylab as pl
+
+    pl.figure(figsize=(2.5 * len(idx_names), 2.7))
+    for k, (idx, name, _, _) in enumerate(idx_names):
+        pl.subplot(1, len(idx_names), k + 1)
+        pl.imshow(C[idx][:, idx], interpolation="nearest")
+        pl.title(name)
+    pl.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
+    tight_layout()
+
+    if show_svd:
+        pl.figure()
+        for k, (idx, name, unit, scaling) in enumerate(idx_names):
+            _, s, _ = linalg.svd(C[idx][:, idx])
+            pl.subplot(1, len(idx_names), k + 1)
+            pl.ylabel('Noise std (%s)' % unit)
+            pl.xlabel('Eigenvalue index')
+            pl.semilogy(np.sqrt(s) * scaling)
+            pl.title(name)
+            tight_layout()
+
+    if show:
+        pl.show()
+
+
+def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
+                          colormap='hot', time_label='time=%0.2f ms',
+                          smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
+                          transparent=True, alpha=1.0, time_viewer=False,
+                          config_opts={}, subjects_dir=None, figure=None):
+    """Plot SourceEstimates with PySurfer
+
+    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
+    which will automatically be set by this function. Plotting multiple
+    SourceEstimates with different values for subjects_dir will cause
+    PySurfer to use the wrong FreeSurfer surfaces when using methods of
+    the returned Brain object. It is therefore recommended to set the
+    SUBJECTS_DIR environment variable or always use the same value for
+    subjects_dir (within the same Python session).
+
+    Parameters
+    ----------
+    stc : SourceEstimates
+        The source estimates to plot.
+    subject : str | None
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT. If None stc.subject will be used. If that
+        is None, the environment will be used.
+    surface : str
+        The type of surface (inflated, white etc.).
+    hemi : str, 'lh' | 'rh' | 'both'
+        The hemisphere to display. Using 'both' opens two separate figures,
+        one for each hemisphere.
+    colormap : str
+        The type of colormap to use.
+    time_label : str
+        How to print info about the time instant visualized.
+    smoothing_steps : int
+        The amount of smoothing.
+    fmin : float
+        The minimum value to display.
+    fmid : float
+        The middle value on the colormap.
+    fmax : float
+        The maximum value for the colormap.
+    transparent : bool
+        If True, use a linear transparency between fmin and fmid.
+    alpha : float
+        Alpha value to apply globally to the overlay.
+    time_viewer : bool
+        Display time viewer GUI.
+    config_opts : dict
+        Keyword arguments for Brain initialization.
+        See pysurfer.viz.Brain.
+    subjects_dir : str
+        The path to the FreeSurfer subjects reconstructions.
+        It corresponds to the FreeSurfer environment variable SUBJECTS_DIR.
+    figure : instance of mayavi.core.scene.Scene | None
+        If None, the last figure will be cleaned and a new figure will
+        be created.
+
+    Returns
+    -------
+    brain : Brain | list of Brain
+        An instance of surfer.viz.Brain from PySurfer. For hemi='both',
+        a list with Brain instances for the left and right hemisphere is
+        returned.
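+
+    Examples
+    --------
+    A minimal usage sketch (the subject name 'sample' and the
+    `subjects_dir` variable are assumptions for illustration):
+
+        brain = plot_source_estimates(stc, 'sample', hemi='lh',
+                                      subjects_dir=subjects_dir)
+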
+    """
+    from surfer import Brain, TimeViewer
+
+    if hemi not in ['lh', 'rh', 'both']:
+        raise ValueError('hemi has to be either "lh", "rh", or "both"')
+
+    if hemi == 'both' and figure is not None:
+        raise RuntimeError('`hemi` can\'t be `both` if the figure parameter'
+                           ' is supplied.')
+    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
+
+    subject = _check_subject(stc.subject, subject, False)
+    if subject is None:
+        if 'SUBJECT' in os.environ:
+            subject = os.environ['SUBJECT']
+        else:
+            raise ValueError('SUBJECT environment variable not set')
+
+    if hemi == 'both':
+        hemis = ['lh', 'rh']
+    else:
+        hemis = [hemi]
+
+    brains = list()
+    for hemi in hemis:
+        hemi_idx = 0 if hemi == 'lh' else 1
+
+        title = '%s-%s' % (subject, hemi)
+        args = inspect.getargspec(Brain.__init__)[0]
+        if 'subjects_dir' in args:
+            brain = Brain(subject, hemi, surface, title=title, figure=figure,
+                          config_opts=config_opts, subjects_dir=subjects_dir)
+        else:
+            # Current PySurfer versions need the SUBJECTS_DIR env. var.
+            # so we set it here. This is a hack as it can break other things
+            # XXX reminder to remove this once upstream pysurfer is changed
+            os.environ['SUBJECTS_DIR'] = subjects_dir
+            brain = Brain(subject, hemi, surface, config_opts=config_opts,
+                          title=title, figure=figure)
+
+        if hemi_idx == 0:
+            data = stc.data[:len(stc.vertno[0])]
+        else:
+            data = stc.data[len(stc.vertno[0]):]
+
+        vertices = stc.vertno[hemi_idx]
+
+        time = 1e3 * stc.times
+        brain.add_data(data, colormap=colormap, vertices=vertices,
+                       smoothing_steps=smoothing_steps, time=time,
+                       time_label=time_label, alpha=alpha)
+
+        # scale colormap and set time (index) to display
+        brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
+                                  transparent=transparent)
+        brains.append(brain)
+
+    if time_viewer:
+        viewer = TimeViewer(brains)
+
+    if len(brains) == 1:
+        return brains[0]
+    else:
+        return brains
+
+
+@deprecated('Use plot_source_estimates. Will be removed in v0.7.')
+def plot_source_estimate(src, stc, n_smooth=200, cmap='jet'):
+    """Plot source estimates
+    """
+    from enthought.tvtk.api import tvtk
+    from enthought.traits.api import HasTraits, Range, Instance, \
+                                     on_trait_change
+    from enthought.traits.ui.api import View, Item, Group
+
+    from enthought.mayavi.core.api import PipelineBase
+    from enthought.mayavi.core.ui.api import MayaviScene, SceneEditor, \
+                    MlabSceneModel
+
+    class SurfaceViewer(HasTraits):
+        n_times = Range(0, 100, 0,)
+
+        scene = Instance(MlabSceneModel, ())
+        surf = Instance(PipelineBase)
+        text = Instance(PipelineBase)
+
+        def __init__(self, src, data, times, n_smooth=20, cmap='jet'):
+            super(SurfaceViewer, self).__init__()
+            self.src = src
+            self.data = data
+            self.times = times
+            self.n_smooth = n_smooth
+            self.cmap = cmap
+
+            lh_points = src[0]['rr']
+            rh_points = src[1]['rr']
+            # lh_faces = src[0]['tris']
+            # rh_faces = src[1]['tris']
+            lh_faces = src[0]['use_tris']
+            rh_faces = src[1]['use_tris']
+            points = np.r_[lh_points, rh_points]
+            points *= 200
+            faces = np.r_[lh_faces, lh_points.shape[0] + rh_faces]
+
+            lh_idx = np.where(src[0]['inuse'])[0]
+            rh_idx = np.where(src[1]['inuse'])[0]
+            use_idx = np.r_[lh_idx, lh_points.shape[0] + rh_idx]
+
+            self.points = points[use_idx]
+            self.faces = np.searchsorted(use_idx, faces)
+
+        # When the scene is activated, or when the parameters are changed, we
+        # update the plot.
+        @on_trait_change('n_times,scene.activated')
+        def update_plot(self):
+            idx = int(self.n_times * len(self.times) / 100)
+            t = self.times[idx]
+            d = self.data[:, idx].astype(np.float)  # mayavi needs floats
+            points = self.points
+            faces = self.faces
+            info_time = "%d ms" % (1e3 * t)
+            if self.surf is None:
+                surface_mesh = self.scene.mlab.pipeline.triangular_mesh_source(
+                                    points[:, 0], points[:, 1], points[:, 2],
+                                    faces, scalars=d)
+                smooth_ = tvtk.SmoothPolyDataFilter(
+                                    number_of_iterations=self.n_smooth,
+                                    relaxation_factor=0.18,
+                                    feature_angle=70,
+                                    feature_edge_smoothing=False,
+                                    boundary_smoothing=False,
+                                    convergence=0.)
+                surface_mesh_smooth = self.scene.mlab.pipeline.user_defined(
+                                                surface_mesh, filter=smooth_)
+                self.surf = self.scene.mlab.pipeline.surface(
+                                    surface_mesh_smooth, colormap=self.cmap)
+
+                self.scene.mlab.colorbar()
+                self.text = self.scene.mlab.text(0.7, 0.9, info_time,
+                                                 width=0.2)
+                self.scene.background = (.05, 0, .1)
+            else:
+                self.surf.mlab_source.set(scalars=d)
+                self.text.set(text=info_time)
+
+        # The layout of the dialog created
+        view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
+                         height=800, width=800, show_label=False),
+                    Group('_', 'n_times',),
+                    resizable=True,)
+
+    viewer = SurfaceViewer(src, stc.data, stc.times, n_smooth=200)
+    viewer.configure_traits()
+    return viewer
+
+
+def _plot_ica_panel_onpick(event, sources=None, ylims=None):
+    """Onpick callback for plot_ica_panel"""
+
+    # make sure that the swipe gesture in OS-X doesn't open many figures
+    if event.mouseevent.inaxes is None or event.mouseevent.button != 1:
+        return
+
+    artist = event.artist
+    try:
+        import pylab as pl
+        pl.figure()
+        src_idx = artist._mne_src_idx
+        component = artist._mne_component
+        pl.plot(sources[src_idx], 'r')
+        pl.ylim(ylims)
+        pl.grid(linestyle='-', color='gray', linewidth=.25)
+        pl.title(component)
+    except Exception as err:
+        # matplotlib silently ignores exceptions in event handlers, so we print
+        # it here to know what went wrong
+        print err
+        raise err
+
+
+@verbose
+def plot_ica_panel(sources, start=None, stop=None, n_components=None,
+                   source_idx=None, ncol=3, nrow=10, verbose=None,
+                   title=None, show=True):
+    """Create panel plots of ICA sources
+
+    Note. Inspired by an example from Carl Vogel's stats blog 'Will it Python?'
+
+    Clicking on the plot of an individual source opens a new figure showing
+    the source.
+
+    Parameters
+    ----------
+    sources : ndarray
+        Sources as drawn from ica.get_sources.
+    start : int
+        x-axis start index. If None, plot from the beginning.
+    stop : int
+        x-axis stop index. If None, plot to the end.
+    n_components : int
+        Number of components fitted.
+    source_idx : array-like
+        Indices for subsetting the sources.
+    ncol : int
+        Number of panel-columns.
+    nrow : int
+        Number of panel-rows.
+    title : str
+        The figure title. If None, a default is provided.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    show : bool
+        If True, plot will be shown, else just the figure is returned.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
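+
+    Examples
+    --------
+    A minimal usage sketch (`sources` is assumed to be a 2D array of ICA
+    sources, as described for the sources parameter above):
+
+        fig = plot_ica_panel(sources, start=0, stop=1000, ncol=3, nrow=10)
+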
+    """
+    import pylab as pl
+
+    if source_idx is None:
+        source_idx = np.arange(len(sources))
+    else:
+        source_idx = np.array(source_idx)
+        sources = sources[source_idx]
+
+    if n_components is None:
+        n_components = len(sources)
+
+    hangover = n_components % ncol
+    nplots = nrow * ncol
+
+    if source_idx.size > nrow * ncol:
+        logger.info('More sources selected than rows and cols specified. '
+                    'Showing the first %i sources.' % nplots)
+        source_idx = np.arange(nplots)
+
+    sources = sources[:, start:stop]
+    ylims = sources.min(), sources.max()
+    fig, panel_axes = pl.subplots(nrow, ncol, sharey=True, figsize=(9, 10))
+    if title is None:
+        fig.suptitle('MEG signal decomposition'
+                     ' -- %i components.' % n_components, size=16)
+    elif title:
+        fig.suptitle(title, size=16)
+
+    pl.subplots_adjust(wspace=0.05, hspace=0.05)
+
+    iter_plots = ((row, col) for row in range(nrow) for col in range(ncol))
+
+    for idx, (row, col) in enumerate(iter_plots):
+        xs = panel_axes[row, col]
+        xs.grid(linestyle='-', color='gray', linewidth=.25)
+        if idx < n_components:
+            component = '[%i]' % idx
+            this_ax = xs.plot(sources[idx], linewidth=0.5, color='red',
+                              picker=1e9)
+            xs.text(0.05, .95, component,
+                    transform=panel_axes[row, col].transAxes,
+                    verticalalignment='top')
+            # embed idx and comp. name to use in callback
+            this_ax[0].__dict__['_mne_src_idx'] = idx
+            this_ax[0].__dict__['_mne_component'] = component
+            pl.ylim(ylims)
+        else:
+            # Make extra subplots invisible
+            pl.setp(xs, visible=False)
+
+        xtl = xs.get_xticklabels()
+        ytl = xs.get_yticklabels()
+        if row < nrow - 2 or (row < nrow - 1 and
+                              (hangover == 0 or col <= hangover - 1)):
+            pl.setp(xtl, visible=False)
+        if (col > 0) or (row % 2 == 1):
+            pl.setp(ytl, visible=False)
+        if (col == ncol - 1) and (row % 2 == 1):
+            xs.yaxis.tick_right()
+
+        pl.setp(xtl, rotation=90.)
+
+    # register callback
+    callback = partial(_plot_ica_panel_onpick, sources=sources, ylims=ylims)
+    fig.canvas.mpl_connect('pick_event', callback)
+
+    if show:
+        pl.show()
+
+    return fig
+
+
+def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
+                      vmax=None, colorbar=True, order=None, show=True,
+                      units=None, scalings=None):
+    """Plot Event Related Potential / Fields image
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs
+    picks : int | array of int | None
+        The indices of the channels to consider. If None, all good
+        data channels are plotted.
+    sigma : float
+        The standard deviation of the Gaussian smoothing to apply along
+        the epoch axis of the image.
+    vmin : float
+        The min value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    vmax : float
+        The max value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    colorbar : bool
+        Display a colorbar or not.
+    order : None | array of int | callable
+        If not None, order is used to reorder the epochs on the y-axis
+        of the image. If it's an array of int, it should have the same
+        length as the number of good epochs. If it's a callable, the
+        arguments passed are the times vector and the data as a 2d array
+        (data.shape[1] == len(times)).
+    show : bool
+        Show the figure at the end or not.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting.
+        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15)`.
+
+    Returns
+    -------
+    figs : list of matplotlib figures
+        One figure per channel displayed.
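+
+    Examples
+    --------
+    A minimal usage sketch (`epochs` is assumed to be a loaded Epochs
+    instance; channel index 0 is an arbitrary choice):
+
+        figs = plot_image_epochs(epochs, picks=[0], sigma=0.5)
+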
+    """
+    units, scalings = _mutable_defaults(('units', units),
+                                        ('scalings', scalings))
+
+    import pylab as pl
+    if picks is None:
+        picks = pick_types(epochs.info, meg=True, eeg=True, exclude='bads')
+
+    if units.keys() != scalings.keys():
+        raise ValueError('Scalings and units must have the same keys.')
+
+    picks = np.atleast_1d(picks)
+    evoked = epochs.average(picks)
+    data = epochs.get_data()[:, picks, :]
+    if vmin is None:
+        vmin = data.min()
+    if vmax is None:
+        vmax = data.max()
+
+    figs = list()
+    for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
+        this_fig = pl.figure()
+        figs.append(this_fig)
+
+        ch_type = channel_type(epochs.info, idx)
+        if ch_type not in scalings:
+            # We know it's not in either scalings or units since keys match
+            raise KeyError('%s type not in scalings and units' % ch_type)
+        this_data *= scalings[ch_type]
+
+        this_order = order
+        if callable(order):
+            this_order = order(epochs.times, this_data)
+
+        if this_order is not None:
+            this_data = this_data[this_order]
+
+        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
+
+        ax1 = pl.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
+        im = pl.imshow(this_data,
+                       extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
+                               0, len(data)],
+                       aspect='auto', origin='lower',
+                       vmin=vmin, vmax=vmax)
+        ax2 = pl.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
+        if colorbar:
+            ax3 = pl.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
+        ax1.set_title(epochs.ch_names[idx])
+        ax1.set_ylabel('Epochs')
+        ax1.axis('auto')
+        ax1.axis('tight')
+        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
+        ax2.plot(1e3 * evoked.times, scalings[ch_type] * evoked.data[i])
+        ax2.set_xlabel('Time (ms)')
+        ax2.set_ylabel(units[ch_type])
+        ax2.set_ylim([vmin, vmax])
+        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
+        if colorbar:
+            pl.colorbar(im, cax=ax3)
+            tight_layout()
+
+    if show:
+        pl.show()
+
+    return figs
+
+
+def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
+    """Return a colormap similar to that used by mne_analyze
+
+    Parameters
+    ----------
+    limits : list (or array) of length 3
+        Bounds for the colormap.
+    format : str
+        Type of colormap to return. If 'matplotlib', will return a
+        matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
+        return an RGBA array of shape (256, 4).
+
+    Returns
+    -------
+    cmap : instance of matplotlib.pylab.colormap | array
+        A teal->blue->gray->red->yellow colormap.
+
+    Notes
+    -----
+    This will return a colormap that displays correctly for data
+    that are scaled by the plotting function to span [-fmax, fmax].
+
+    Examples
+    --------
+    The following code will plot a STC using standard MNE limits:
+
+        colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
+        brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
+        brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
+
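+    The same limits can be used to build a matplotlib colormap for 2D
+    plots:
+
+        cmap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15],
+                                            format='matplotlib')
+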
+    """
+    l = np.asarray(limits, dtype='float')
+    if len(l) != 3:
+        raise ValueError('limits must have 3 elements')
+    if any(l < 0):
+        raise ValueError('limits must all be positive')
+    if any(np.diff(l) <= 0):
+        raise ValueError('limits must be monotonically increasing')
+    if format == 'matplotlib':
+        from matplotlib import colors
+        l = (np.concatenate((-np.flipud(l), l)) + l[-1]) / (2 * l[-1])
+        cdict = {'red': ((l[0], 0.0, 0.0),
+                         (l[1], 0.0, 0.0),
+                         (l[2], 0.5, 0.5),
+                         (l[3], 0.5, 0.5),
+                         (l[4], 1.0, 1.0),
+                         (l[5], 1.0, 1.0)),
+                 'green': ((l[0], 1.0, 1.0),
+                           (l[1], 0.0, 0.0),
+                           (l[2], 0.5, 0.5),
+                           (l[3], 0.5, 0.5),
+                           (l[4], 0.0, 0.0),
+                           (l[5], 1.0, 1.0)),
+                 'blue': ((l[0], 1.0, 1.0),
+                          (l[1], 1.0, 1.0),
+                          (l[2], 0.5, 0.5),
+                          (l[3], 0.5, 0.5),
+                          (l[4], 0.0, 0.0),
+                          (l[5], 0.0, 0.0))}
+        return colors.LinearSegmentedColormap('mne_analyze', cdict)
+    elif format == 'mayavi':
+        l = np.concatenate((-np.flipud(l), [0], l)) / l[-1]
+        r = np.array([0, 0, 0, 0, 1, 1, 1])
+        g = np.array([1, 0, 0, 0, 0, 0, 1])
+        b = np.array([1, 1, 1, 0, 0, 0, 0])
+        a = np.array([1, 1, 0, 0, 0, 1, 1])
+        xp = (np.arange(256) - 128) / 128.0
+        colormap = np.r_[[np.interp(xp, l, 255 * c) for c in [r, g, b, a]]].T
+        return colormap
+    else:
+        raise ValueError('format must be either matplotlib or mayavi')
+
+
+def circular_layout(node_names, node_order, start_pos=90, start_between=True):
+    """Create layout arranging nodes on a circle.
+
+    Parameters
+    ----------
+    node_names : list of str
+        Node names.
+    node_order : list of str
+        List with node names defining the order in which the nodes are
+        arranged. Must have the same elements as node_names, but the order
+        can be different. The nodes are arranged clockwise starting at
+        "start_pos" degrees.
+    start_pos : float
+        Angle in degrees that defines where the first node is plotted.
+    start_between : bool
+        If True, the layout starts with the position between the nodes. This is
+        the same as adding "180. / len(node_names)" to start_pos.
+
+    Returns
+    -------
+    node_angles : array, shape=(len(node_names),)
+        Node angles in degrees.
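+
+    Examples
+    --------
+    A self-contained sketch with four dummy node names:
+
+        node_names = ['a', 'b', 'c', 'd']
+        node_angles = circular_layout(node_names, node_names[::-1])
+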
+    """
+    n_nodes = len(node_names)
+
+    if len(node_order) != n_nodes:
+        raise ValueError('node_order has to be the same length as node_names')
+
+    # convert it to a list with indices
+    node_order = [node_order.index(name) for name in node_names]
+    node_order = np.array(node_order)
+    if len(np.unique(node_order)) != n_nodes:
+        raise ValueError('node_order has repeated entries')
+
+    if start_between:
+        start_pos += 180. / n_nodes
+
+    node_angles = start_pos + 360 * node_order / float(n_nodes)
+
+    return node_angles
+
+
+def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
+                             node_angles=None, node_width=None,
+                             node_colors=None, facecolor='black',
+                             textcolor='white', node_edgecolor='black',
+                             linewidth=1.5, colormap='hot', vmin=None,
+                             vmax=None, colorbar=True, title=None):
+    """Visualize connectivity as a circular graph.
+
+    Note: This code is based on the circle graph example by Nicolas P. Rougier
+    http://www.loria.fr/~rougier/coding/recipes.html
+
+    Parameters
+    ----------
+    con : array
+        Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
+        array is provided, "indices" has to be used to define the connection
+        indices.
+    node_names : list of str
+        Node names. The order corresponds to the order in con.
+    indices : tuple of arrays | None
+        Two arrays with indices of connections for which the connection
+        strengths are defined in con. Only needed if con is a 1D array.
+    n_lines : int | None
+        If not None, only the n_lines strongest connections (strength=abs(con))
+        are drawn.
+    node_angles : array, shape=(len(node_names),) | None
+        Array with node positions in degrees. If None, the nodes are equally
+        spaced on the circle. See mne.viz.circular_layout.
+    node_width : float | None
+        Width of each node in degrees. If None, "360. / len(node_names)" is
+        used.
+    node_colors : list of tuples | list of str
+        List with the color to use for each node. If fewer colors than nodes
+        are provided, the colors will be repeated. Any color supported by
+        matplotlib can be used, e.g., RGBA tuples, named colors.
+    facecolor : str
+        Color to use for background. See matplotlib.colors.
+    textcolor : str
+        Color to use for text. See matplotlib.colors.
+    node_edgecolor : str
+        Color to use for lines around nodes. See matplotlib.colors.
+    linewidth : float
+        Line width to use for connections.
+    colormap : str
+        Colormap to use for coloring the connections.
+    vmin : float | None
+        Minimum value for colormap. If None, it is determined automatically.
+    vmax : float | None
+        Maximum value for colormap. If None, it is determined automatically.
+    colorbar : bool
+        Display a colorbar or not.
+    title : str
+        The figure title.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+        The figure handle.
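+
+    Examples
+    --------
+    A self-contained sketch using random connectivity between four dummy
+    nodes:
+
+        import numpy as np
+        con = np.random.rand(4, 4)
+        fig = plot_connectivity_circle(con, ['a', 'b', 'c', 'd'], n_lines=3)
+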
+    """
+    import pylab as pl
+    import matplotlib.path as m_path
+    import matplotlib.patches as m_patches
+
+    n_nodes = len(node_names)
+
+    if node_angles is not None:
+        if len(node_angles) != n_nodes:
+            raise ValueError('node_angles has to be the same length '
+                             'as node_names')
+        # convert it to radians
+        node_angles = node_angles * np.pi / 180
+    else:
+        # uniform layout on unit circle
+        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
+
+    if node_width is None:
+        node_width = 2 * np.pi / n_nodes
+    else:
+        node_width = node_width * np.pi / 180
+
+    if node_colors is not None:
+        if len(node_colors) < n_nodes:
+            node_colors = cycle(node_colors)
+    else:
+        # assign colors using colormap
+        node_colors = [pl.cm.spectral(i / float(n_nodes))
+                       for i in range(n_nodes)]
+
+    # handle 1D and 2D connectivity information
+    if con.ndim == 1:
+        if indices is None:
+            raise ValueError('indices has to be provided if con.ndim == 1')
+    elif con.ndim == 2:
+        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
+            raise ValueError('con has to be 1D or a square matrix')
+        # we use the lower-triangular part
+        indices = tril_indices(n_nodes, -1)
+        con = con[indices]
+    else:
+        raise ValueError('con has to be 1D or a square matrix')
+
+    # get the colormap
+    if isinstance(colormap, basestring):
+        colormap = pl.get_cmap(colormap)
+
+    # Make figure background the same colors as axes
+    fig = pl.figure(figsize=(8, 8), facecolor=facecolor)
+
+    # Use a polar axes
+    axes = pl.subplot(111, polar=True, axisbg=facecolor)
+
+    # No ticks, we'll put our own
+    pl.xticks([])
+    pl.yticks([])
+
+    # Set y axes limit
+    pl.ylim(0, 10)
+
+    # Draw lines between connected nodes, only draw the strongest connections
+    if n_lines is not None and len(con) > n_lines:
+        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
+    else:
+        con_thresh = 0.
+
+    # get the connections which we are drawing and sort by connection strength
+    # this will allow us to draw the strongest connections first
+    con_abs = np.abs(con)
+    con_draw_idx = np.where(con_abs >= con_thresh)[0]
+
+    con = con[con_draw_idx]
+    con_abs = con_abs[con_draw_idx]
+    indices = [ind[con_draw_idx] for ind in indices]
+
+    # now sort them
+    sort_idx = np.argsort(con_abs)
+    con_abs = con_abs[sort_idx]
+    con = con[sort_idx]
+    indices = [ind[sort_idx] for ind in indices]
+
+    # Get vmin vmax for color scaling
+    if vmin is None:
+        vmin = np.min(con[np.abs(con) >= con_thresh])
+    if vmax is None:
+        vmax = np.max(con)
+    vrange = vmax - vmin
+
+    # We want to add some "noise" to the start and end position of the
+    # edges: We modulate the noise with the number of connections of the
+    # node and the connection strength, such that the strongest connections
+    # are closer to the node center
+    nodes_n_con = np.zeros(n_nodes, dtype=np.int)
+    for i, j in zip(indices[0], indices[1]):
+        nodes_n_con[i] += 1
+        nodes_n_con[j] += 1
+
+    # initialize random number generator so plot is reproducible
+    rng = np.random.mtrand.RandomState(seed=0)
+
+    n_con = len(indices[0])
+    noise_max = 0.25 * node_width
+    start_noise = rng.uniform(-noise_max, noise_max, n_con)
+    end_noise = rng.uniform(-noise_max, noise_max, n_con)
+
+    nodes_n_con_seen = np.zeros_like(nodes_n_con)
+    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
+        nodes_n_con_seen[start] += 1
+        nodes_n_con_seen[end] += 1
+
+        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start])
+                           / float(nodes_n_con[start]))
+        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end])
+                         / float(nodes_n_con[end]))
+
+    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
+    con_val_scaled = (con - vmin) / vrange
+
+    # Finally, we draw the connections
+    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
+        # Start point
+        t0, r0 = node_angles[i], 10
+
+        # End point
+        t1, r1 = node_angles[j], 10
+
+        # Some noise in start and end point
+        t0 += start_noise[pos]
+        t1 += end_noise[pos]
+
+        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
+        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
+                 m_path.Path.LINETO]
+        path = m_path.Path(verts, codes)
+
+        color = colormap(con_val_scaled[pos])
+
+        # Actual line
+        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
+                                    linewidth=linewidth, alpha=1.)
+        axes.add_patch(patch)
+
+    # Draw ring with colored nodes
+    radii = np.ones(n_nodes) * 10
+    bars = axes.bar(node_angles, radii, width=node_width, bottom=9,
+                    edgecolor=node_edgecolor, lw=2, facecolor='.9',
+                    align='center')
+
+    for bar, color in zip(bars, node_colors):
+        bar.set_facecolor(color)
+
+    # Draw node labels
+    angles_deg = 180 * node_angles / np.pi
+    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
+        if angle_deg >= 270:
+            ha = 'left'
+        else:
+            # Flip the label, so text is always upright
+            angle_deg += 180
+            ha = 'right'
+
+        pl.text(angle_rad, 10.4, name, size=10, rotation=angle_deg,
+                rotation_mode='anchor', horizontalalignment=ha,
+                verticalalignment='center', color=textcolor)
+
+    if title is not None:
+        pl.subplots_adjust(left=0.2, bottom=0.2, right=0.8, top=0.75)
+        pl.figtext(0.03, 0.95, title, color=textcolor, fontsize=14)
+    else:
+        pl.subplots_adjust(left=0.2, bottom=0.2, right=0.8, top=0.8)
+
+    if colorbar:
+        sm = pl.cm.ScalarMappable(cmap=colormap,
+                                  norm=pl.normalize(vmin=vmin, vmax=vmax))
+        sm.set_array(np.linspace(vmin, vmax))
+        ax = fig.add_axes([.92, 0.03, .015, .25])
+        cb = fig.colorbar(sm, cax=ax)
+        cb_yticks = pl.getp(cb.ax.axes, 'yticklabels')
+        pl.setp(cb_yticks, color=textcolor)
+
+    return fig
+
+
+def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
+                  color=(0.9, 0.9, 0.9), width=0.8):
+    """Show the channel stats based on a drop_log from Epochs
+
+    Parameters
+    ----------
+    drop_log : list of lists
+        Epoch drop log from Epochs.drop_log.
+    threshold : float
+        The percentage threshold to use to decide whether or not to
+        plot. Default is zero (always plot).
+    n_max_plot : int
+        Maximum number of channels to show stats for.
+    subject : str
+        The subject name to use in the title of the plot.
+    color : tuple | str
+        Color to use for the bars.
+    width : float
+        Width of the bars.
+
+    Returns
+    -------
+    perc : float
+        Total percentage of epochs dropped.
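+
+    Examples
+    --------
+    A minimal usage sketch (`epochs` is assumed to be an Epochs instance
+    whose bad epochs have already been dropped):
+
+        perc = plot_drop_log(epochs.drop_log, subject='sample')
+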
+    """
+    if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
+        raise ValueError('drop_log must be a list of lists')
+    import pylab as pl
+    scores = Counter([ch for d in drop_log for ch in d])
+    ch_names = np.array(scores.keys())
+    perc = 100 * np.mean([len(d) > 0 for d in drop_log])
+    if perc < threshold or len(ch_names) == 0:
+        return perc
+    counts = 100 * np.array(scores.values(), dtype=float) / len(drop_log)
+    n_plot = min(n_max_plot, len(ch_names))
+    order = np.flipud(np.argsort(counts))
+    pl.figure()
+    pl.title('%s: %0.1f%%' % (subject, perc))
+    x = np.arange(n_plot)
+    pl.bar(x, counts[order[:n_plot]], color=color, width=width)
+    pl.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
+              horizontalalignment='right')
+    pl.tick_params(axis='x', which='major', labelsize=10)
+    pl.ylabel('% of epochs rejected')
+    pl.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
+    pl.grid(True, axis='y')
+    pl.show()
+    return perc
+
+
+def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
+             bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
+             event_color='cyan', scalings=None, remove_dc=True, order='type',
+             show_options=False, title=None, show=True):
+    """Plot raw data
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data to plot.
+    events : array | None
+        Events to show with vertical bars.
+    duration : float
+        Time window (sec) to plot at a time.
+    start : float
+        Initial time to show (can be changed dynamically once plotted).
+    n_channels : int
+        Number of channels to plot at once.
+    bgcolor : color object
+        Color of the background.
+    color : dict | color object | None
+        Color for the data traces. If None, defaults to:
+        `dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r', emg='k',
+             ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k')`
+    bad_color : color object
+        Color to make bad channels.
+    event_color : color object
+        Color to use for events.
+    scalings : dict | None
+        Scale factors for the traces. If None, defaults to:
+        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
+             ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
+    remove_dc : bool
+        If True remove DC component when plotting data.
+    order : 'type' | 'original' | array
+        Order in which to plot data. 'type' groups by channel type,
+        'original' plots in the order of ch_names, array gives the
+        indices to use in plotting.
+    show_options : bool
+        If True, a dialog for options related to projection is shown.
+    title : str | None
+        The title of the window. If None, either the filename of the raw
+        object or '<unknown>' will be displayed as the title.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Raw traces.
+
+    Notes
+    -----
+    The arrow keys (up/down/left/right) can typically be used to navigate
+    between channels and time ranges, but this depends on the backend
+    matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
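+
+    Examples
+    --------
+    A minimal usage sketch (`raw` is assumed to be a loaded Raw instance):
+
+        fig = plot_raw(raw, duration=20.0, n_channels=20, order='type')
+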
+    """
+    import pylab as pl
+    color, scalings = _mutable_defaults(('color', color),
+                                        ('scalings_plot_raw', scalings))
+
+    # make a copy of info, remove projection (for now)
+    info = copy.deepcopy(raw.info)
+    projs = info['projs']
+    info['projs'] = []
+    n_times = raw.n_times
+
+    # allow for raw objects without filename, e.g., ICA
+    if title is None:
+        title = (raw.info['filenames'][0] if 'filenames' in raw.info
+                 else '<unknown>')
+    elif not isinstance(title, basestring):
+        raise TypeError('title must be None or a string')
+    if len(title) > 60:
+        title = '...' + title[-60:]
+    if 'filenames' in raw.info and len(raw.info['filenames']) > 1:
+        title += ' ... (+ %d more) ' % (len(raw.info['filenames']) - 1)
+    if events is not None:
+        events = events[:, 0].astype(float) - raw.first_samp
+        events /= info['sfreq']
+
+    # reorganize the data in plotting order
+    inds = list()
+    types = list()
+    for t in ['grad', 'mag']:
+        inds += [pick_types(info, meg=t, exclude=[])]
+        types += [t] * len(inds[-1])
+    pick_args = dict(meg=False, exclude=[])
+    for t in ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp',
+              'misc', 'chpi']:
+        pick_args[t] = True
+        inds += [pick_types(raw.info, **pick_args)]
+        types += [t] * len(inds[-1])
+        pick_args[t] = False
+    inds = np.concatenate(inds).astype(int)
+    if not len(inds) == len(info['ch_names']):
+        raise RuntimeError('Some channels not classified, please report '
+                           'this problem')
+
+    # put them back to original or modified order for natural plotting
+    reord = np.argsort(inds)
+    types = [types[ri] for ri in reord]
+    if isinstance(order, str):
+        if order == 'original':
+            inds = inds[reord]
+        elif order != 'type':
+            raise ValueError('Unknown order type %s' % order)
+    elif isinstance(order, np.ndarray):
+        if not np.array_equal(np.sort(order),
+                              np.arange(len(info['ch_names']))):
+            raise ValueError('order, if array, must have integers from '
+                             '0 to n_channels - 1')
+        # put back to original order first, then use new order
+        inds = inds[reord][order]
+
+    # set up projection and data parameters
+    params = dict(raw=raw, ch_start=0, t_start=start, duration=duration,
+                  info=info, projs=projs, remove_dc=remove_dc,
+                  n_channels=n_channels, scalings=scalings, types=types,
+                  n_times=n_times, events=events)
+
+    # set up plotting
+    fig = figure_nobar(facecolor=bgcolor)
+    fig.canvas.set_window_title('mne_browse_raw')
+    size = get_config('MNE_BROWSE_RAW_SIZE')
+    if size is not None:
+        size = size.split(',')
+        size = tuple([float(s) for s in size])
+        # have to try/catch when there's no toolbar
+        try:
+            fig.set_size_inches(size, forward=True)
+        except Exception:
+            pass
+    ax = pl.subplot2grid((10, 10), (0, 0), colspan=9, rowspan=9)
+    ax.set_title(title, fontsize=12)
+    ax_hscroll = pl.subplot2grid((10, 10), (9, 0), colspan=9)
+    ax_hscroll.get_yaxis().set_visible(False)
+    ax_hscroll.set_xlabel('Time (s)')
+    ax_vscroll = pl.subplot2grid((10, 10), (0, 9), rowspan=9)
+    ax_vscroll.set_axis_off()
+    ax_button = pl.subplot2grid((10, 10), (9, 9))
+    # store these so they can be fixed on resize
+    params['fig'] = fig
+    params['ax'] = ax
+    params['ax_hscroll'] = ax_hscroll
+    params['ax_vscroll'] = ax_vscroll
+    params['ax_button'] = ax_button
+
+    # populate vertical and horizontal scrollbars
+    for ci in xrange(len(info['ch_names'])):
+        this_color = bad_color if info['ch_names'][inds[ci]] in info['bads'] \
+                else color
+        if isinstance(this_color, dict):
+            this_color = this_color[types[inds[ci]]]
+        ax_vscroll.add_patch(pl.mpl.patches.Rectangle((0, ci), 1, 1,
+                                                      facecolor=this_color,
+                                                      edgecolor=this_color))
+    vsel_patch = pl.mpl.patches.Rectangle((0, 0), 1, n_channels, facecolor='w',
+                                          edgecolor='w', alpha=0.5)
+    ax_vscroll.add_patch(vsel_patch)
+    params['vsel_patch'] = vsel_patch
+    hsel_patch = pl.mpl.patches.Rectangle((start, 0), duration, 1, color='k',
+                                          edgecolor=None, alpha=0.5)
+    ax_hscroll.add_patch(hsel_patch)
+    params['hsel_patch'] = hsel_patch
+    ax_hscroll.set_xlim(0, n_times / float(info['sfreq']))
+    n_ch = len(info['ch_names'])
+    ax_vscroll.set_ylim(n_ch, 0)
+    ax_vscroll.set_title('Ch.')
+
+    # make shells for plotting traces
+    offsets = np.arange(n_channels) * 2 + 1
+    ax.set_yticks(offsets)
+    ax.set_ylim([n_channels * 2 + 1, 0])
+    # plot event_line first so it's in the back
+    event_line = ax.plot([np.nan], color=event_color)[0]
+    lines = [ax.plot([np.nan])[0] for _ in xrange(n_ch)]
+    ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
+
+    params['plot_fun'] = partial(_plot_traces, params=params, inds=inds,
+                                 color=color, bad_color=bad_color, lines=lines,
+                                 event_line=event_line, offsets=offsets)
+
+    # set up callbacks
+    opt_button = pl.mpl.widgets.Button(ax_button, 'Opt')
+    callback_option = partial(_toggle_options, params=params)
+    opt_button.on_clicked(callback_option)
+    callback_key = partial(_plot_raw_onkey, params=params)
+    fig.canvas.mpl_connect('key_press_event', callback_key)
+    callback_pick = partial(_mouse_click, params=params)
+    fig.canvas.mpl_connect('button_press_event', callback_pick)
+    callback_resize = partial(_helper_resize, params=params)
+    fig.canvas.mpl_connect('resize_event', callback_resize)
+
+    # As code here is shared with plot_evoked, some extra steps are needed:
+    # first, the actual plot update function
+    params['plot_update_proj_callback'] = _plot_update_raw_proj
+    # then the toggle handler
+    callback_proj = partial(_toggle_proj, params=params)
+    # store these for use by callbacks in the options figure
+    params['callback_proj'] = callback_proj
+    params['callback_key'] = callback_key
+    # have to store this, or it could get garbage-collected
+    params['opt_button'] = opt_button
+
+    # do initial plots
+    callback_proj('none')
+    _layout_raw(params)
+
+    # deal with projectors
+    params['fig_opts'] = None
+    if show_options is True:
+        _toggle_options(None, params)
+
+    if show:
+        pl.show()
+    return fig
+
+
+def _toggle_options(event, params):
+    """Toggle options (projectors) dialog"""
+    import pylab as pl
+    if len(params['projs']) > 0:
+        if params['fig_opts'] is None:
+            _draw_proj_checkbox(event, params, draw_current_state=False)
+        else:
+            # turn off options dialog
+            pl.close(params['fig_opts'])
+            del params['proj_checks']
+            params['fig_opts'] = None
+
+
+def _toggle_proj(event, params):
+    """Operation to perform when proj boxes clicked"""
+    # read options if possible
+    if 'proj_checks' in params:
+        bools = [x[0].get_visible() for x in params['proj_checks'].lines]
+        for bi, (b, p) in enumerate(zip(bools, params['projs'])):
+            # see if they tried to deactivate an active one
+            if not b and p['active']:
+                bools[bi] = True
+    else:
+        bools = [True] * len(params['projs'])
+
+    compute_proj = False
+    if 'proj_bools' not in params:
+        compute_proj = True
+    elif not np.array_equal(bools, params['proj_bools']):
+        compute_proj = True
+
+    # if projectors changed, update plots
+    if compute_proj is True:
+        params['plot_update_proj_callback'](params, bools)
+
+
+def _plot_update_raw_proj(params, bools):
+    """Helper only needs to be called when proj is changed"""
+    inds = np.where(bools)[0]
+    params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
+                               for ii in inds]
+    params['proj_bools'] = bools
+    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
+                                        verbose=False)
+    _update_raw_data(params)
+    params['plot_fun']()
+
+
+def _update_raw_data(params):
+    """Helper only needs to be called when time or proj is changed"""
+    start = params['t_start']
+    stop = params['raw'].time_as_index(start + params['duration'])[0]
+    start = params['raw'].time_as_index(start)[0]
+    data, times = params['raw'][:, start:stop]
+    if params['projector'] is not None:
+        data = np.dot(params['projector'], data)
+    # remove DC
+    if params['remove_dc'] is True:
+        data -= np.mean(data, axis=1)[:, np.newaxis]
+    # scale
+    for di in xrange(data.shape[0]):
+        data[di] /= params['scalings'][params['types'][di]]
+        # stim channels should be hard limited
+        if params['types'][di] == 'stim':
+            data[di] = np.minimum(data[di], 1.0)
+    params['data'] = data
+    params['times'] = times
+
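+# A toy illustration (hypothetical values, not part of the upstream
+# changeset) of the DC removal and scaling performed above: subtracting
+# each row's mean centers the channel, and dividing by a type-specific
+# scaling maps the traces into comparable display units.
+def _example_dc_and_scale():
+    import numpy as np
+    data = np.array([[1.0, 2.0, 3.0],
+                     [10.0, 10.0, 13.0]])  # two hypothetical channels
+    data -= np.mean(data, axis=1)[:, np.newaxis]  # remove DC per channel
+    scalings = np.array([1e-6, 1e-6])  # hypothetical display scalings
+    data /= scalings[:, np.newaxis]
+    return data
+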
+
+def _layout_raw(params):
+    """Set raw figure layout"""
+    s = params['fig'].get_size_inches()
+    scroll_width = 0.33
+    hscroll_dist = 0.33
+    vscroll_dist = 0.1
+    l_border = 1.2
+    r_border = 0.1
+    t_border = 0.33
+    b_border = 0.5
+
+    # only bother trying to reset layout if it's reasonable to do so
+    if s[0] < 2 * scroll_width or s[1] < 2 * scroll_width + hscroll_dist:
+        return
+
+    # convert to relative units
+    scroll_width_x = scroll_width / s[0]
+    scroll_width_y = scroll_width / s[1]
+    vscroll_dist /= s[0]
+    hscroll_dist /= s[1]
+    l_border /= s[0]
+    r_border /= s[0]
+    t_border /= s[1]
+    b_border /= s[1]
+    # main axis (traces)
+    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
+    ax_y = hscroll_dist + scroll_width_y + b_border
+    ax_height = 1.0 - ax_y - t_border
+    params['ax'].set_position([l_border, ax_y, ax_width, ax_height])
+    # vscroll (channels)
+    pos = [ax_width + l_border + vscroll_dist, ax_y,
+           scroll_width_x, ax_height]
+    params['ax_vscroll'].set_position(pos)
+    # hscroll (time)
+    pos = [l_border, b_border, ax_width, scroll_width_y]
+    params['ax_hscroll'].set_position(pos)
+    # options button
+    pos = [l_border + ax_width + vscroll_dist, b_border,
+           scroll_width_x, scroll_width_y]
+    params['ax_button'].set_position(pos)
+    params['fig'].canvas.draw()
+
+
+def _helper_resize(event, params):
+    """Helper for resizing"""
+    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
+    set_config('MNE_BROWSE_RAW_SIZE', size)
+    _layout_raw(params)
+
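+# The resize handler above persists the figure size through mne's config
+# utilities so a later browsing session can restore it. A hedged usage
+# sketch (the value is hypothetical; get_config is assumed to be the
+# matching reader for set_config):
+#
+#     >>> set_config('MNE_BROWSE_RAW_SIZE', '10.0,5.5')  # doctest: +SKIP
+#     >>> get_config('MNE_BROWSE_RAW_SIZE')  # doctest: +SKIP
+#     '10.0,5.5'
+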
+
+def _mouse_click(event, params):
+    """Vertical select callback"""
+    if event.inaxes is None or event.button != 1:
+        return
+    plot_fun = params['plot_fun']
+    # vertical scrollbar changed
+    if event.inaxes == params['ax_vscroll']:
+        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
+        if params['ch_start'] != ch_start:
+            params['ch_start'] = ch_start
+            plot_fun()
+    # horizontal scrollbar changed
+    elif event.inaxes == params['ax_hscroll']:
+        _plot_raw_time(event.xdata - params['duration'] / 2, params)
+
+
+def _plot_raw_time(value, params):
+    """Deal with changed time value"""
+    info = params['info']
+    max_times = params['n_times'] / float(info['sfreq']) - params['duration']
+    if value > max_times:
+        value = max_times
+    if value < 0:
+        value = 0
+    if params['t_start'] != value:
+        params['t_start'] = value
+        params['hsel_patch'].set_x(value)
+        _update_raw_data(params)
+        params['plot_fun']()
+
+
+def _plot_raw_onkey(event, params):
+    """Interpret key presses"""
+    import pylab as pl
+    # check for initial plot
+    plot_fun = params['plot_fun']
+    if event is None:
+        plot_fun()
+        return
+
+    # quit event
+    if event.key == 'escape':
+        pl.close(params['fig'])
+        return
+
+    # change plotting params
+    ch_changed = False
+    if event.key == 'down':
+        params['ch_start'] += params['n_channels']
+        ch_changed = True
+    elif event.key == 'up':
+        params['ch_start'] -= params['n_channels']
+        ch_changed = True
+    elif event.key == 'right':
+        _plot_raw_time(params['t_start'] + params['duration'], params)
+        return
+    elif event.key == 'left':
+        _plot_raw_time(params['t_start'] - params['duration'], params)
+        return
+    elif event.key in ['o', 'p']:
+        _toggle_options(None, params)
+        return
+
+    # deal with plotting changes
+    if ch_changed:
+        if params['ch_start'] >= len(params['info']['ch_names']):
+            params['ch_start'] = 0
+        elif params['ch_start'] < 0:
+            # wrap to end
+            rem = len(params['info']['ch_names']) % params['n_channels']
+            params['ch_start'] = len(params['info']['ch_names'])
+            params['ch_start'] -= rem if rem != 0 else params['n_channels']
+        plot_fun()
+
+
+def _plot_traces(params, inds, color, bad_color, lines, event_line, offsets):
+    """Helper for plotting raw"""
+
+    info = params['info']
+    n_channels = params['n_channels']
+
+    # do the plotting
+    tick_list = []
+    for ii in xrange(n_channels):
+        ch_ind = ii + params['ch_start']
+        # let's be generous here and allow users to pass
+        # n_channels per view >= the number of traces available
+        if ii >= len(lines):
+            break
+        elif ch_ind < len(info['ch_names']):
+            # scale to fit
+            ch_name = info['ch_names'][inds[ch_ind]]
+            tick_list += [ch_name]
+            offset = offsets[ii]
+
+            # do NOT operate in-place, or the stored data would be modified
+            this_data = params['data'][inds[ch_ind]]
+            this_color = bad_color if ch_name in info['bads'] else color
+            if isinstance(this_color, dict):
+                this_color = this_color[params['types'][inds[ch_ind]]]
+
+            # subtraction here gets the correct orientation for flipped ylim
+            lines[ii].set_ydata(offset - this_data)
+            lines[ii].set_xdata(params['times'])
+            lines[ii].set_color(this_color)
+        else:
+            # "remove" lines
+            lines[ii].set_xdata([])
+            lines[ii].set_ydata([])
+    # deal with event lines
+    if params['events'] is not None:
+        t = params['events']
+        t = t[np.where(np.logical_and(t >= params['times'][0],
+                       t <= params['times'][-1]))[0]]
+        if len(t) > 0:
+            xs = list()
+            ys = list()
+            for tt in t:
+                xs += [tt, tt, np.nan]
+                ys += [0, 2 * n_channels + 1, np.nan]
+            event_line.set_xdata(xs)
+            event_line.set_ydata(ys)
+        else:
+            event_line.set_xdata([])
+            event_line.set_ydata([])
+    # finalize plot
+    params['ax'].set_xlim(params['times'][0],
+                          params['times'][0] + params['duration'], False)
+    params['ax'].set_yticklabels(tick_list)
+    params['vsel_patch'].set_y(params['ch_start'])
+    params['fig'].canvas.draw()
+
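+# The event markers above are drawn as one Line2D: each vertical segment
+# contributes three points (x, x, NaN), and the NaN breaks the line so
+# consecutive segments stay disconnected. A minimal sketch of the trick
+# (hypothetical data, not part of the upstream changeset):
+def _example_nan_segments():
+    import numpy as np
+    import pylab as pl
+    xs, ys = [], []
+    for tt in [1.0, 2.5, 4.0]:  # hypothetical event times
+        xs += [tt, tt, np.nan]
+        ys += [0.0, 1.0, np.nan]
+    pl.plot(xs, ys, color='k')  # three disjoint vertical segments
+    pl.show()
+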
+
+def figure_nobar(*args, **kwargs):
+    """Make matplotlib figure with no toolbar"""
+    import pylab as pl
+    old_val = pl.mpl.rcParams['toolbar']
+    try:
+        pl.mpl.rcParams['toolbar'] = 'none'
+        fig = pl.figure(*args, **kwargs)
+        # remove button press catchers (for toolbar)
+        for key in fig.canvas.callbacks.callbacks['key_press_event'].keys():
+            fig.canvas.callbacks.disconnect(key)
+    finally:
+        pl.mpl.rcParams['toolbar'] = old_val
+    return fig
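+
+# Hypothetical usage (not part of the upstream changeset); the toolbar
+# rcParam is restored even if figure creation fails:
+#
+#     >>> fig = figure_nobar(figsize=(8, 6))  # doctest: +SKIP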
+
+
+@verbose
+def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
+                 read_limit=np.inf, max_str=30, verbose=None):
+    """Compare the contents of two fiff files using diff and show_fiff
+
+    Parameters
+    ----------
+    fname_1 : str
+        First file to compare.
+    fname_2 : str
+        Second file to compare.
+    fname_out : str | None
+        Filename to store the resulting diff. If None, a temporary
+        file will be created.
+    show : bool
+        If True, show the resulting diff in a new tab in a web browser.
+    indent : str
+        How to indent the lines.
+    read_limit : int
+        Max number of bytes of data to read from a tag. Can be np.inf
+        to always read all data (helps test read completion).
+    max_str : int
+        Max number of characters of string representation to print for
+        each tag's data.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fname_out : str
+        The filename used for storing the diff; useful when a
+        temporary file is created.
+    """
+    file_1 = show_fiff(fname_1, output=list, indent=indent,
+                       read_limit=read_limit, max_str=max_str)
+    file_2 = show_fiff(fname_2, output=list, indent=indent,
+                       read_limit=read_limit, max_str=max_str)
+    diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
+    if fname_out is not None:
+        f = open(fname_out, 'w')
+    else:
+        f = tempfile.NamedTemporaryFile('w', delete=False)
+        fname_out = f.name
+    with f as fid:
+        fid.write(diff)
+    if show is True:
+        webbrowser.open_new_tab(fname_out)
+    return fname_out
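+
+# Hypothetical usage (file names are placeholders, not part of the
+# upstream changeset): write the HTML diff to a named file without
+# opening a browser tab.
+#
+#     >>> compare_fiff('raw_a.fif', 'raw_b.fif',
+#     ...              fname_out='diff.html', show=False)  # doctest: +SKIP
+#     'diff.html'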
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..299c8be
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,25 @@
+[aliases]
+release = egg_info -RDb ''
+# Make sure the sphinx docs are built each time we do a dist.
+# bdist = build_sphinx bdist
+# sdist = build_sphinx sdist
+# Make sure a zip file is created each time we build the sphinx docs
+# build_sphinx = generate_help build_sphinx zip_help
+# Make sure the docs are uploaded when we do an upload
+# upload = upload upload_help
+
+[egg_info]
+# tag_build = .dev
+
+[bdist_rpm]
+doc-files = doc
+
+[nosetests]
+verbosity = 2
+detailed-errors = 1
+with-coverage = 1
+cover-package = mne
+#pdb = 1
+#pdb-failures = 1
+with-doctest = 1
+doctest-extension=rst
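+
+# A hedged note, not part of the upstream changeset: with the [nosetests]
+# section above, running the suite through the nose setuptools command,
+#
+#     python setup.py nosetests
+#
+# should pick up the coverage and rst-doctest options configured here
+# (assuming nose is installed).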
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..2253d7e
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,76 @@
+#! /usr/bin/env python
+#
+# Copyright (C) 2011 Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
+
+import os
+import mne
+
+import setuptools  # we are using a setuptools namespace
+from numpy.distutils.core import setup
+
+descr = """MNE python project for MEG and EEG data analysis."""
+
+DISTNAME            = 'mne'
+DESCRIPTION         = descr
+MAINTAINER          = 'Alexandre Gramfort'
+MAINTAINER_EMAIL    = 'gramfort@nmr.mgh.harvard.edu'
+URL                 = 'http://martinos.org/mne'
+LICENSE             = 'BSD (3-clause)'
+DOWNLOAD_URL        = 'http://github.com/mne-tools/mne-python'
+VERSION             = mne.__version__
+
+
+if __name__ == "__main__":
+    if os.path.exists('MANIFEST'):
+        os.remove('MANIFEST')
+
+    setup(name=DISTNAME,
+          maintainer=MAINTAINER,
+          include_package_data=True,
+          maintainer_email=MAINTAINER_EMAIL,
+          description=DESCRIPTION,
+          license=LICENSE,
+          url=URL,
+          version=VERSION,
+          download_url=DOWNLOAD_URL,
+          long_description=open('README.rst').read(),
+          zip_safe=False,  # the package cannot run from a zipped .egg file
+          classifiers=['Intended Audience :: Science/Research',
+                       'Intended Audience :: Developers',
+                       'License :: OSI Approved',
+                       'Programming Language :: Python',
+                       'Topic :: Software Development',
+                       'Topic :: Scientific/Engineering',
+                       'Operating System :: Microsoft :: Windows',
+                       'Operating System :: POSIX',
+                       'Operating System :: Unix',
+                       'Operating System :: MacOS'],
+          platforms='any',
+          packages=['mne', 'mne.tests',
+                    'mne.beamformer', 'mne.beamformer.tests',
+                    'mne.connectivity', 'mne.connectivity.tests',
+                    'mne.data',
+                    'mne.datasets',
+                    'mne.datasets.sample',
+                    'mne.datasets.megsim',
+                    'mne.fiff', 'mne.fiff.tests',
+                    'mne.fiff.bti', 'mne.fiff.bti.tests',
+                    'mne.fiff.kit', 'mne.fiff.kit.tests',
+                    'mne.layouts', 'mne.layouts.tests',
+                    'mne.minimum_norm', 'mne.minimum_norm.tests',
+                    'mne.mixed_norm',
+                    'mne.inverse_sparse', 'mne.inverse_sparse.tests',
+                    'mne.preprocessing', 'mne.preprocessing.tests',
+                    'mne.simulation', 'mne.simulation.tests',
+                    'mne.tests',
+                    'mne.transforms',
+                    'mne.stats', 'mne.stats.tests',
+                    'mne.time_frequency', 'mne.time_frequency.tests'],
+          package_data={'mne': ['data/*.sel',
+                                'data/icos.fif.gz',
+                                'layouts/*.lout']},
+          scripts=['bin/mne_clean_eog_ecg.py', 'bin/mne_flash_bem_model.py',
+                   'bin/mne_surf2bem.py', 'bin/mne_compute_proj_ecg.py',
+                   'bin/mne_compute_proj_eog.py', 'bin/mne_maxfilter.py',
+                   'bin/mne_bti2fiff.py', 'bin/mne_kit2fiff.py',
+                   'bin/mne_browse_raw.py'])
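+
+# Hypothetical usage (not part of the upstream changeset): a development
+# install links the source tree so local edits take effect immediately,
+#
+#     python setup.py develop
+#
+# while a regular install copies the package into site-packages:
+#
+#     python setup.py install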

-- 
Alioth's /git/debian-med/git-commit-notice on /srv/git.debian.org/git/debian-med/mne-python.git


