[med-svn] [python-mne] 200/376: pep8
Yaroslav Halchenko
debian at onerussian.com
Fri Nov 27 17:22:36 UTC 2015
This is an automated email from the git hooks/post-receive script.
yoh pushed a commit to annotated tag v0.1
in repository python-mne.
commit fe4fbeca9738b946668e52bce771573acf6524fd
Author: Alexandre Gramfort <alexandre.gramfort at inria.fr>
Date: Sun Apr 17 18:25:43 2011 -0400
pep8
---
examples/plot_compute_mne_inverse.py | 4 +-
examples/plot_from_raw_to_epochs_to_evoked.py | 12 +--
examples/plot_minimum_norm_estimate.py | 4 +-
examples/plot_read_and_write_raw_data.py | 8 +-
examples/plot_read_epochs.py | 4 +-
examples/plot_read_evoked.py | 2 +-
examples/plot_read_forward.py | 8 +-
examples/plot_read_source_space.py | 9 ++-
examples/plot_read_stc.py | 2 +-
examples/plot_whitened_evoked_data.py | 10 +--
examples/read_bem_surfaces.py | 6 +-
examples/read_inverse.py | 6 +-
.../plot_cluster_1samp_test_time_frequency.py | 22 ++---
examples/stats/plot_cluster_stats_evoked.py | 12 +--
.../stats/plot_cluster_stats_time_frequency.py | 28 +++----
examples/stats/plot_sensor_permutation_test.py | 4 +-
examples/time_frequency/plot_time_frequency.py | 22 ++---
mne/bem_surfaces.py | 8 +-
mne/fiff/__init__.py | 1 -
mne/fiff/bunch.py | 2 +-
mne/fiff/channels.py | 3 +-
mne/fiff/constants.py | 94 +++++++++++-----------
mne/fiff/ctf.py | 21 ++---
mne/fiff/diff.py | 4 +-
mne/fiff/evoked.py | 42 +++++-----
mne/fiff/matrix.py | 22 ++---
mne/fiff/open.py | 11 ++-
mne/fiff/pick.py | 10 ++-
mne/fiff/proj.py | 59 +++++++-------
mne/fiff/raw.py | 36 ++++-----
mne/fiff/tests/test_evoked.py | 12 +--
mne/fiff/tree.py | 18 ++---
mne/label.py | 2 +-
mne/layouts/__init__.py | 2 +-
mne/layouts/layout.py | 16 ++--
mne/misc.py | 18 +++--
mne/source_space.py | 51 ++++++------
mne/stats/cluster_level.py | 41 +++++-----
mne/stats/parametric.py | 12 +--
mne/stats/permutations.py | 21 +++--
mne/stc.py | 14 ++--
mne/tests/test_bem_surfaces.py | 1 +
mne/tests/test_inverse.py | 6 +-
mne/tests/test_label.py | 1 +
mne/time_frequency/tfr.py | 35 ++++----
mne/viz.py | 8 +-
46 files changed, 375 insertions(+), 359 deletions(-)
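The whole commit is a mechanical PEP8 pass: spaces around arithmetic operators, a space after commas in slices, two spaces before inline comments, and raise statements rewritten in the parenthesized call form. The warnings being silenced can be reproduced with the pep8 checker; this is a minimal sketch assuming the pep8 tool (since renamed pycodestyle) is installed, and the codes and file positions shown are illustrative:

    # $ pip install pep8
    # $ pep8 mne/ examples/
    # typical complaints fixed in this commit:
    #   plot_compute_mne_inverse.py:29:18: E226 missing whitespace around arithmetic operator
    #   plot_compute_mne_inverse.py:53:22: E231 missing whitespace after ','
    #   plot_from_raw_to_epochs_to_evoked.py:35:13: E261 at least two spaces before inline comment
    snr = 3.0
    lambda2 = 1.0/snr**2      # flagged: missing whitespace around operator
    lambda2 = 1.0 / snr ** 2  # clean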
diff --git a/examples/plot_compute_mne_inverse.py b/examples/plot_compute_mne_inverse.py
index 431f06e..7c1fbbf 100755
--- a/examples/plot_compute_mne_inverse.py
+++ b/examples/plot_compute_mne_inverse.py
@@ -26,7 +26,7 @@ fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
setno = 0
snr = 3.0
-lambda2 = 1.0 / snr**2
+lambda2 = 1.0 / snr ** 2
dSPM = True
# Load data
@@ -50,7 +50,7 @@ mne.write_stc('mne_dSPM_inverse-rh.stc', tmin=res['tmin'], tstep=res['tstep'],
###############################################################################
# View activation time-series
times = res['tmin'] + res['tstep'] * np.arange(lh_data.shape[1])
-pl.plot(1e3*times, res['sol'][::100,:].T)
+pl.plot(1e3 * times, res['sol'][::100, :].T)
pl.xlabel('time (ms)')
pl.ylabel('dSPM value')
pl.show()
diff --git a/examples/plot_from_raw_to_epochs_to_evoked.py b/examples/plot_from_raw_to_epochs_to_evoked.py
index 5914cfa..e8b37f1 100755
--- a/examples/plot_from_raw_to_epochs_to_evoked.py
+++ b/examples/plot_from_raw_to_epochs_to_evoked.py
@@ -32,8 +32,8 @@ raw = fiff.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
-include = [] # or stim channels ['STI 014']
-exclude = raw.info['bads'] + ['EEG 053'] # bads + 1 more
+include = []  # or stim channels ['STI 014']
+exclude = raw.info['bads'] + ['EEG 053']  # bads + 1 more
# pick EEG channels
picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
@@ -41,16 +41,16 @@ picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(eeg=40e-6, eog=150e-6))
-evoked = epochs.average() # average epochs and get an Evoked dataset.
+evoked = epochs.average()  # average epochs and get an Evoked dataset.
-evoked.save('sample_audvis_eeg-ave.fif') # save evoked data to disk
+evoked.save('sample_audvis_eeg-ave.fif')  # save evoked data to disk
###############################################################################
# View evoked response
-times = 1e3 * epochs.times # time in miliseconds
+times = 1e3 * epochs.times  # time in milliseconds
import pylab as pl
pl.clf()
-pl.plot(times, 1e6*evoked.data.T)
+pl.plot(times, 1e6 * evoked.data.T)
pl.xlim([times[0], times[-1]])
pl.xlabel('time (ms)')
pl.ylabel('Potential (uV)')
diff --git a/examples/plot_minimum_norm_estimate.py b/examples/plot_minimum_norm_estimate.py
index 490bb20..6316017 100644
--- a/examples/plot_minimum_norm_estimate.py
+++ b/examples/plot_minimum_norm_estimate.py
@@ -27,7 +27,7 @@ fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
setno = 0
snr = 3.0
-lambda2 = 1.0 / snr**2
+lambda2 = 1.0 / snr ** 2
dSPM = True
# Load data
@@ -57,7 +57,7 @@ mne.write_stc('mne_dSPM_inverse-rh.stc', tmin=stc['tmin'], tstep=stc['tstep'],
# View activation time-series
times = stc['tmin'] + stc['tstep'] * np.arange(stc['sol'].shape[1])
pl.close('all')
-pl.plot(1e3*times, stc['sol'][::100,:].T)
+pl.plot(1e3 * times, stc['sol'][::100, :].T)
pl.xlabel('time (ms)')
pl.ylabel('dSPM value')
pl.show()
diff --git a/examples/plot_read_and_write_raw_data.py b/examples/plot_read_and_write_raw_data.py
index 6a6b9e5..510b3b2 100755
--- a/examples/plot_read_and_write_raw_data.py
+++ b/examples/plot_read_and_write_raw_data.py
@@ -26,15 +26,15 @@ want_meg = True
want_eeg = False
want_stim = False
include = ['STI 014']
-exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bad channels + 2 more
+exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bad channels + 2 more
picks = fiff.pick_types(raw.info, meg=want_meg, eeg=want_eeg,
stim=want_stim, include=include,
exclude=exclude)
-some_picks = picks[:5] # take 5 first
-start, stop = raw.time_to_index(0, 15) # read the first 15s of data
-data, times = raw[some_picks, start:(stop+1)]
+some_picks = picks[:5]  # take 5 first
+start, stop = raw.time_to_index(0, 15)  # read the first 15s of data
+data, times = raw[some_picks, start:(stop + 1)]
# save 150s of MEG data in FIF file
raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks)
diff --git a/examples/plot_read_epochs.py b/examples/plot_read_epochs.py
index feeed3b..c1d3588 100755
--- a/examples/plot_read_epochs.py
+++ b/examples/plot_read_epochs.py
@@ -32,7 +32,7 @@ raw = fiff.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
-exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
+exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude=exclude)
@@ -40,7 +40,7 @@ picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
-evoked = epochs.average() # average epochs to get the evoked response
+evoked = epochs.average()  # average epochs to get the evoked response
###############################################################################
# Show result
diff --git a/examples/plot_read_evoked.py b/examples/plot_read_evoked.py
index 8573406..d81ebe3 100755
--- a/examples/plot_read_evoked.py
+++ b/examples/plot_read_evoked.py
@@ -24,5 +24,5 @@ evoked = fiff.Evoked(fname, setno=0, baseline=(None, 0))
###############################################################################
# Show result
picks = fiff.pick_types(evoked.info, meg=True, eeg=True,
- exclude=evoked.info['bads']) # Pick channels to view
+ exclude=evoked.info['bads'])  # Pick channels to view
plot_evoked(evoked, picks=picks)
diff --git a/examples/plot_read_forward.py b/examples/plot_read_forward.py
index 308f3b9..faf1f30 100755
--- a/examples/plot_read_forward.py
+++ b/examples/plot_read_forward.py
@@ -24,7 +24,7 @@ print "Leadfield size : %d x %d" % leadfield.shape
# Show result
import pylab as pl
-pl.matshow(leadfield[:,:500])
+pl.matshow(leadfield[:, :500])
pl.xlabel('sources')
pl.ylabel('sensors')
pl.title('Lead field matrix')
@@ -36,5 +36,7 @@ lh_faces = fwd['src'][0]['use_tris']
rh_points = fwd['src'][1]['rr']
rh_faces = fwd['src'][1]['use_tris']
from enthought.mayavi import mlab
-mlab.triangular_mesh(lh_points[:,0], lh_points[:,1], lh_points[:,2], lh_faces)
-mlab.triangular_mesh(rh_points[:,0], rh_points[:,1], rh_points[:,2], rh_faces)
+mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+ lh_faces)
+mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
+ rh_faces)
diff --git a/examples/plot_read_source_space.py b/examples/plot_read_source_space.py
index 5c13713..d7000ac 100755
--- a/examples/plot_read_source_space.py
+++ b/examples/plot_read_source_space.py
@@ -15,10 +15,9 @@ import mne
from mne.datasets import sample
data_path = sample.data_path('.')
-# fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-eeg-oct-6-fwd.fif')
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-eeg-oct-6p-fwd.fif')
-add_geom = True # include high resolution source space
+add_geom = True  # include high resolution source space
src = mne.read_source_spaces(fname, add_geom=add_geom)
# 3D source space (high sampling)
@@ -27,5 +26,7 @@ lh_faces = src[0]['tris']
rh_points = src[1]['rr']
rh_faces = src[1]['tris']
from enthought.mayavi import mlab
-mlab.triangular_mesh(lh_points[:,0], lh_points[:,1], lh_points[:,2], lh_faces)
-mlab.triangular_mesh(rh_points[:,0], rh_points[:,1], rh_points[:,2], rh_faces)
+mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+ lh_faces)
+mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
+ rh_faces)
diff --git a/examples/plot_read_stc.py b/examples/plot_read_stc.py
index 374c614..4c58577 100755
--- a/examples/plot_read_stc.py
+++ b/examples/plot_read_stc.py
@@ -31,7 +31,7 @@ print "stc data size: %s (nb of vertices) x %s (nb of samples)" % (
# View source activations
times = stc['tmin'] + stc['tstep'] * np.arange(n_samples)
import pylab as pl
-pl.plot(times, stc['data'][::100,:].T)
+pl.plot(times, stc['data'][::100, :].T)
pl.xlabel('time (ms)')
pl.ylabel('Source amplitude')
pl.show()
diff --git a/examples/plot_whitened_evoked_data.py b/examples/plot_whitened_evoked_data.py
index cd88035..50432f3 100755
--- a/examples/plot_whitened_evoked_data.py
+++ b/examples/plot_whitened_evoked_data.py
@@ -34,23 +34,23 @@ raw = fiff.Raw(raw_fname)
events = mne.find_events(raw)
# pick EEG channels - bad channels (modify to your needs)
-exclude = raw.info['bads'] + ['EEG 053'] # bads + 1 more
+exclude = raw.info['bads'] + ['EEG 053']  # bads + 1 more
picks = fiff.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
exclude=exclude)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(eeg=40e-6, eog=150e-6))
-evoked = epochs.average() # average epochs and get an Evoked dataset.
+evoked = epochs.average()  # average epochs and get an Evoked dataset.
cov = mne.Covariance(cov_fname)
# Whiten data
-whitener = cov.get_whitener(evoked.info, pca=False) # get whitening matrix
+whitener = cov.get_whitener(evoked.info, pca=False)  # get whitening matrix
sel = mne.fiff.pick_channels(evoked.ch_names, include=whitener.ch_names)
-whitened_data = np.dot(whitener.W, evoked.data[sel]) # apply whitening
+whitened_data = np.dot(whitener.W, evoked.data[sel])  # apply whitening
###############################################################################
# Show result
-times = 1e3 * epochs.times # in ms
+times = 1e3 * epochs.times  # in ms
import pylab as pl
pl.clf()
pl.plot(times, whitened_data.T)
diff --git a/examples/read_bem_surfaces.py b/examples/read_bem_surfaces.py
index dcd3b28..d70d3c1 100755
--- a/examples/read_bem_surfaces.py
+++ b/examples/read_bem_surfaces.py
@@ -22,9 +22,9 @@ print "Number of surfaces : %d" % len(surfaces)
###############################################################################
# Show result
-head_col = (0.95, 0.83, 0.83) # light pink
+head_col = (0.95, 0.83, 0.83)  # light pink
skull_col = (0.91, 0.89, 0.67)
-brain_col = (0.67, 0.89, 0.91) # light blue
+brain_col = (0.67, 0.89, 0.91)  # light blue
colors = [head_col, skull_col, brain_col]
# 3D source space
@@ -33,5 +33,5 @@ mlab.clf()
for c, surf in zip(colors, surfaces):
points = surf['rr']
faces = surf['tris']
- mlab.triangular_mesh(points[:,0], points[:,1], points[:,2], faces,
+ mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces,
color=c, opacity=0.3)
diff --git a/examples/read_inverse.py b/examples/read_inverse.py
index df58d6b..2c71692 100755
--- a/examples/read_inverse.py
+++ b/examples/read_inverse.py
@@ -32,6 +32,8 @@ lh_faces = inv['src'][0]['use_tris']
rh_points = inv['src'][1]['rr']
rh_faces = inv['src'][1]['use_tris']
from enthought.mayavi import mlab
-mlab.triangular_mesh(lh_points[:,0], lh_points[:,1], lh_points[:,2], lh_faces)
-mlab.triangular_mesh(rh_points[:,0], rh_points[:,1], rh_points[:,2], rh_faces)
+mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+ lh_faces)
+mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
+ rh_faces)
mlab.show()
diff --git a/examples/stats/plot_cluster_1samp_test_time_frequency.py b/examples/stats/plot_cluster_1samp_test_time_frequency.py
index 20ddeb4..03a7ba5 100755
--- a/examples/stats/plot_cluster_1samp_test_time_frequency.py
+++ b/examples/stats/plot_cluster_1samp_test_time_frequency.py
@@ -42,7 +42,7 @@ raw = fiff.Raw(raw_fname)
events = mne.find_events(raw)
include = []
-exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
+exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
# picks MEG gradiometers
picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
@@ -52,22 +52,22 @@ picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
-data = epochs.get_data() # as 3D matrix
-data *= 1e13 # change unit to fT / cm
+data = epochs.get_data()  # as 3D matrix
+data *= 1e13  # change unit to fT / cm
# Time vector
-times = 1e3 * epochs.times # change unit to ms
+times = 1e3 * epochs.times  # change unit to ms
# Take only one channel
ch_name = raw.info['ch_names'][97]
-data = data[:,97:98,:]
+data = data[:, 97:98, :]
evoked_data = np.mean(data, 0)
# data -= evoked_data[None,:,:] # remove evoked component
# evoked_data = np.mean(data, 0)
-frequencies = np.arange(8, 40, 2) # define frequencies of interest
-Fs = raw.info['sfreq'] # sampling in Hz
+frequencies = np.arange(8, 40, 2)  # define frequencies of interest
+Fs = raw.info['sfreq']  # sampling in Hz
epochs_power = single_trial_power(data, Fs=Fs, frequencies=frequencies,
n_cycles=4, use_fft=False, n_jobs=1,
baseline=(-100, 0), times=times,
@@ -75,12 +75,12 @@ epochs_power = single_trial_power(data, Fs=Fs, frequencies=frequencies,
# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
-epochs_power = epochs_power[:,:,:,time_mask]
-evoked_data = evoked_data[:,time_mask]
+epochs_power = epochs_power[:, :, :, time_mask]
+evoked_data = evoked_data[:, time_mask]
times = times[time_mask]
-epochs_power = epochs_power[:,0,:,:]
-epochs_power = np.log10(epochs_power) # take log of ratio
+epochs_power = epochs_power[:, 0, :, :]
+epochs_power = np.log10(epochs_power)  # take log of ratio
# under the null hypothesis epochs_power should be now be 0
###############################################################################
diff --git a/examples/stats/plot_cluster_stats_evoked.py b/examples/stats/plot_cluster_stats_evoked.py
index 5ab6d02..34a4ad1 100755
--- a/examples/stats/plot_cluster_stats_evoked.py
+++ b/examples/stats/plot_cluster_stats_evoked.py
@@ -43,15 +43,15 @@ event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
-condition1 = epochs1.get_data() # as 3D matrix
+condition1 = epochs1.get_data()  # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
-condition2 = epochs2.get_data() # as 3D matrix
+condition2 = epochs2.get_data()  # as 3D matrix
-condition1 = condition1[:,0,:] # take only one channel to get a 2D array
-condition2 = condition2[:,0,:] # take only one channel to get a 2D array
+condition1 = condition1[:, 0, :]  # take only one channel to get a 2D array
+condition2 = condition2[:, 0, :]  # take only one channel to get a 2D array
###############################################################################
# Compute statistic
@@ -75,9 +75,9 @@ pl.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
- h = pl.axvspan(times[c.start], times[c.stop-1], color='r', alpha=0.3)
+ h = pl.axvspan(times[c.start], times[c.stop - 1], color='r', alpha=0.3)
else:
- pl.axvspan(times[c.start], times[c.stop-1], color=(0.3, 0.3, 0.3),
+ pl.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = pl.plot(times, T_obs, 'g')
pl.legend((h, ), ('cluster p-value < 0.05', ))
diff --git a/examples/stats/plot_cluster_stats_time_frequency.py b/examples/stats/plot_cluster_stats_time_frequency.py
index bf0392d..f9a6d30 100755
--- a/examples/stats/plot_cluster_stats_time_frequency.py
+++ b/examples/stats/plot_cluster_stats_time_frequency.py
@@ -44,7 +44,7 @@ raw = fiff.Raw(raw_fname)
events = mne.read_events(event_fname)
include = []
-exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
+exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
# picks MEG gradiometers
picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
@@ -58,26 +58,26 @@ event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject)
-data_condition_1 = epochs_condition_1.get_data() # as 3D matrix
-data_condition_1 *= 1e13 # change unit to fT / cm
+data_condition_1 = epochs_condition_1.get_data()  # as 3D matrix
+data_condition_1 *= 1e13  # change unit to fT / cm
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject)
-data_condition_2 = epochs_condition_2.get_data() # as 3D matrix
-data_condition_2 *= 1e13 # change unit to fT / cm
+data_condition_2 = epochs_condition_2.get_data()  # as 3D matrix
+data_condition_2 *= 1e13  # change unit to fT / cm
# Take only one channel
-data_condition_1 = data_condition_1[:,97:98,:]
-data_condition_2 = data_condition_2[:,97:98,:]
+data_condition_1 = data_condition_1[:, 97:98, :]
+data_condition_2 = data_condition_2[:, 97:98, :]
# Time vector
-times = 1e3 * epochs_condition_1.times # change unit to ms
+times = 1e3 * epochs_condition_1.times  # change unit to ms
-frequencies = np.arange(7, 30, 3) # define frequencies of interest
-Fs = raw.info['sfreq'] # sampling in Hz
+frequencies = np.arange(7, 30, 3)  # define frequencies of interest
+Fs = raw.info['sfreq']  # sampling in Hz
n_cycles = 1.5
epochs_power_1 = single_trial_power(data_condition_1, Fs=Fs,
frequencies=frequencies,
@@ -87,12 +87,12 @@ epochs_power_2 = single_trial_power(data_condition_2, Fs=Fs,
frequencies=frequencies,
n_cycles=n_cycles, use_fft=False)
-epochs_power_1 = epochs_power_1[:,0,:,:] # only 1 channel to get a 3D matrix
-epochs_power_2 = epochs_power_2[:,0,:,:] # only 1 channel to get a 3D matrix
+epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
+epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix
# do ratio with baseline power:
-epochs_power_1 /= np.mean(epochs_power_1[:,:,times < 0], axis=2)[:,:,None]
-epochs_power_2 /= np.mean(epochs_power_2[:,:,times < 0], axis=2)[:,:,None]
+epochs_power_1 /= np.mean(epochs_power_1[:, :, times < 0], axis=2)[:, :, None]
+epochs_power_2 /= np.mean(epochs_power_2[:, :, times < 0], axis=2)[:, :, None]
###############################################################################
# Compute statistic
diff --git a/examples/stats/plot_sensor_permutation_test.py b/examples/stats/plot_sensor_permutation_test.py
index c219469..65084a1 100755
--- a/examples/stats/plot_sensor_permutation_test.py
+++ b/examples/stats/plot_sensor_permutation_test.py
@@ -36,8 +36,8 @@ raw = fiff.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
-include = [] # or stim channel ['STI 014']
-exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
+include = []  # or stim channel ['STI 014']
+exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
# pick MEG Magnetometers
picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
diff --git a/examples/time_frequency/plot_time_frequency.py b/examples/time_frequency/plot_time_frequency.py
index df35773..21dcbff 100755
--- a/examples/time_frequency/plot_time_frequency.py
+++ b/examples/time_frequency/plot_time_frequency.py
@@ -35,7 +35,7 @@ raw = fiff.Raw(raw_fname)
events = mne.read_events(event_fname)
include = []
-exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
+exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
# picks MEG gradiometers
picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
@@ -43,22 +43,22 @@ picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
-data = epochs.get_data() # as 3D matrix
-evoked = epochs.average() # compute evoked fields
+data = epochs.get_data()  # as 3D matrix
+evoked = epochs.average()  # compute evoked fields
-times = 1e3 * epochs.times # change unit to ms
-evoked_data = evoked.data * 1e13 # change unit to fT / cm
+times = 1e3 * epochs.times  # change unit to ms
+evoked_data = evoked.data * 1e13  # change unit to fT / cm
# Take only one channel
-data = data[:,97:98,:]
-evoked_data = evoked_data[97:98,:]
+data = data[:, 97:98, :]
+evoked_data = evoked_data[97:98, :]
-frequencies = np.arange(7, 30, 3) # define frequencies of interest
-Fs = raw.info['sfreq'] # sampling in Hz
+frequencies = np.arange(7, 30, 3)  # define frequencies of interest
+Fs = raw.info['sfreq']  # sampling in Hz
power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
n_cycles=2, n_jobs=1, use_fft=False)
-power /= np.mean(power[:,:,times<0], axis=2)[:,:,None] # baseline ratio
+power /= np.mean(power[:, :, times < 0], axis=2)[:, :, None]  # baseline ratio
###############################################################################
# View time-frequency plots
@@ -74,7 +74,7 @@ pl.xlim(times[0], times[-1])
pl.ylim(-150, 300)
pl.subplot(3, 1, 2)
-pl.imshow(20*np.log10(power[0]), extent=[times[0], times[-1],
+pl.imshow(20 * np.log10(power[0]), extent=[times[0], times[-1],
frequencies[0], frequencies[-1]],
aspect='auto', origin='lower')
pl.xlabel('Time (s)')
diff --git a/mne/bem_surfaces.py b/mne/bem_surfaces.py
index c9fc3db..90fbb0f 100755
--- a/mne/bem_surfaces.py
+++ b/mne/bem_surfaces.py
@@ -178,8 +178,8 @@ def _complete_surface_info(this):
#
# Main triangulation
#
- print '\tCompleting triangulation info...'
- print 'triangle normals...'
+ print '\tCompleting triangulation info...',
+ print 'triangle normals...',
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
@@ -197,7 +197,7 @@ def _complete_surface_info(this):
#
# Accumulate the vertex normals
#
- print 'vertex normals...'
+ print 'vertex normals...',
this['nn'] = np.zeros((this['np'], 3))
for p in range(this['ntri']):
this['nn'][this['tris'][p, :], :] = this['nn'][this['tris'][p, :], :] \
@@ -205,7 +205,7 @@ def _complete_surface_info(this):
#
# Compute the lengths of the vertex normals and scale
#
- print 'normalize...'
+ print 'normalize...',
for p in range(this['np']):
size = linalg.norm(this['nn'][p, :])
if size > 0:
diff --git a/mne/fiff/__init__.py b/mne/fiff/__init__.py
index 60aef75..67a9b03 100755
--- a/mne/fiff/__init__.py
+++ b/mne/fiff/__init__.py
@@ -12,4 +12,3 @@ from .raw import Raw, read_raw_segment, read_raw_segment_times, \
start_writing_raw, write_raw_buffer, finish_writing_raw
from .pick import pick_types, pick_channels
from .compensator import get_current_comp
-
diff --git a/mne/fiff/bunch.py b/mne/fiff/bunch.py
index 5d226fe..3e4545e 100755
--- a/mne/fiff/bunch.py
+++ b/mne/fiff/bunch.py
@@ -3,6 +3,7 @@
#
# License: BSD (3-clause)
+
class Bunch(dict):
""" Container object for datasets: dictionnary-like object that
exposes its keys as attributes.
@@ -11,4 +12,3 @@ class Bunch(dict):
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
-
diff --git a/mne/fiff/channels.py b/mne/fiff/channels.py
index 7b6b530..9e3c708 100755
--- a/mne/fiff/channels.py
+++ b/mne/fiff/channels.py
@@ -26,11 +26,10 @@ def _read_bad_channels(fid, node):
"""
nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)
- bads = [];
+ bads = []
if len(nodes) > 0:
for node in nodes:
tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)
if tag.data is not None:
bads = tag.data.split(':')
return bads
-
diff --git a/mne/fiff/constants.py b/mne/fiff/constants.py
index 363f5e1..c1242b2 100755
--- a/mne/fiff/constants.py
+++ b/mne/fiff/constants.py
@@ -90,16 +90,16 @@ FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring
#
# Quaternion channels for head position monitoring
#
-FIFF.FIFFV_QUAT_0 = 700 # Quaternion parameter q0 obsolete for unit quaternion
-FIFF.FIFFV_QUAT_1 = 701 # Quaternion parameter q1 rotation
-FIFF.FIFFV_QUAT_2 = 702 # Quaternion parameter q2 rotation
-FIFF.FIFFV_QUAT_3 = 703 # Quaternion parameter q3 rotation
-FIFF.FIFFV_QUAT_4 = 704 # Quaternion parameter q4 translation
-FIFF.FIFFV_QUAT_5 = 705 # Quaternion parameter q5 translation
-FIFF.FIFFV_QUAT_6 = 706 # Quaternion parameter q6 translation
-FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi
-FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi
-FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi
+FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion
+FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation
+FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation
+FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation
+FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation
+FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation
+FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation
+FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi
+FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi
+FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi
#
# Coordinate frames
#
@@ -119,22 +119,22 @@ FIFF.FIFFV_COORD_IMAGING_DEVICE = 9
FIFF.FIFF_FIRST_SAMPLE = 208
FIFF.FIFF_LAST_SAMPLE = 209
FIFF.FIFF_ASPECT_KIND = 210
-FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data
-FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers
-FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel
-FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples
+FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data
+FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers
+FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel
+FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples
#
# Different aspects of data
#
-FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs
-FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. error of mean
-FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data
+FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs
+FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. error of mean
+FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data
FIFF.FIFFV_ASPECT_SUBAVERAGE = 103
-FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage
-FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph
-FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum
-FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve
+FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage
+FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph
+FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum
+FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve
#
# BEM surface IDs
#
@@ -217,46 +217,46 @@ FIFF.FIFF_MNE_ROW_NAMES = 3502
FIFF.FIFF_MNE_COL_NAMES = 3503
FIFF.FIFF_MNE_NROW = 3504
FIFF.FIFF_MNE_NCOL = 3505
-FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults:
+FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults:
# FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI
# FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD
# FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD
FIFF.FIFF_MNE_CH_NAME_LIST = 3507
-FIFF.FIFF_MNE_FILE_NAME = 3508 # This removes the collision with fiff_file.h (used to be 3501)
+FIFF.FIFF_MNE_FILE_NAME = 3508 # This removes the collision with fiff_file.h (used to be 3501)
#
# 3510... 3590... Source space or surface
#
-FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices
-FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals
-FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices
-FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space
-FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use
-FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = 3515 # Nearest source space vertex for all vertices
-FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = 3516 # Distance to the Nearest source space vertex for all vertices
-FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier
-FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume
+FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals
+FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space
+FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use
+FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = 3515 # Nearest source space vertex for all vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = 3516 # Distance to the Nearest source space vertex for all vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier
+FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume
-FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles
-FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation
-FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = 3592 # Number of triangles corresponding to the number of vertices in use
-FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593 # The triangulation of the used vertices in the source space
+FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles
+FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation
+FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = 3592 # Number of triangles corresponding to the number of vertices in use
+FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593 # The triangulation of the used vertices in the source space
#
# 3520... Forward solution
#
FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520
-FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free
+FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free
FIFF.FIFF_MNE_INCLUDED_METHODS = 3522
FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523
#
# 3530... Covariance matrix
#
-FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix
-FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension
-FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle)
-FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix
-FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above
+FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix
+FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension
+FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle)
+FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix
+FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above
FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535
-FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom
+FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom
#
# 3540... Inverse operator
#
@@ -428,10 +428,10 @@ FIFF.FIFF_UNIT_LX = 116
#
# Others we need
#
-FIFF.FIFF_UNIT_T_M = 201 # T/m
-FIFF.FIFF_UNIT_AM = 202 # Am
-FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2
-FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3
+FIFF.FIFF_UNIT_T_M = 201 # T/m
+FIFF.FIFF_UNIT_AM = 202 # Am
+FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2
+FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3
#
# Multipliers
#
diff --git a/mne/fiff/ctf.py b/mne/fiff/ctf.py
index 6386520..d59f9a1 100755
--- a/mne/fiff/ctf.py
+++ b/mne/fiff/ctf.py
@@ -31,17 +31,18 @@ def _read_named_matrix(fid, node, matkind):
node = node.children(k)
break
else:
- raise ValueError, 'Desired named matrix (kind = %d) not' \
- ' available' % matkind
+ raise ValueError('Desired named matrix (kind = %d) not'
+ ' available' % matkind)
else:
if not has_tag(node, matkind):
- raise 'Desired named matrix (kind = %d) not available' % matkind
+ raise ValueError('Desired named matrix (kind = %d) not available'
+ % matkind)
# Read everything we need
tag = find_tag(fid, node, matkind)
if tag is None:
- raise ValueError, 'Matrix data missing'
+ raise ValueError('Matrix data missing')
else:
data = tag.data
@@ -49,14 +50,14 @@ def _read_named_matrix(fid, node, matkind):
tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
if tag is not None:
if tag.data != nrow:
- raise ValueError, 'Number of rows in matrix data and ' \
- 'FIFF_MNE_NROW tag do not match'
+ raise ValueError('Number of rows in matrix data and '
+ 'FIFF_MNE_NROW tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
if tag is not None:
if tag.data != ncol:
- raise ValueError, 'Number of columns in matrix data and ' \
- 'FIFF_MNE_NCOL tag do not match'
+ raise ValueError('Number of columns in matrix data and '
+ 'FIFF_MNE_NCOL tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
if tag is not None:
@@ -110,7 +111,7 @@ def read_ctf_comp(fid, node, chs):
for node in comps:
# XXX
- raise NotImplementedError, "CTF data processing is not supported yet"
+ raise NotImplementedError("CTF data processing is not supported yet")
#
# # Read the data we need
@@ -223,7 +224,7 @@ def write_ctf_comp(fid, comps):
return
# XXX
- raise NotImplementedError, "CTF data processing is not supported yet"
+ raise NotImplementedError("CTF data processing is not supported yet")
# # This is very simple in fact
# start_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
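The raise rewrites in this file are more than style: the comma form raise ValueError, 'msg' is Python 2-only syntax that Python 3 rejects outright, and the bare string exception raise 'Desired named matrix ...' was removed from the language entirely. The parenthesized call form works everywhere; a minimal runnable sketch (the message text is illustrative):

    # valid under both Python 2 and Python 3
    try:
        raise ValueError('Desired named matrix (kind = %d) not available' % 3)
    except ValueError as err:
        print(err)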
diff --git a/mne/fiff/diff.py b/mne/fiff/diff.py
index 88e4f61..2305d4f 100755
--- a/mne/fiff/diff.py
+++ b/mne/fiff/diff.py
@@ -4,6 +4,7 @@
import numpy as np
+
def is_equal(first, second):
""" Says if 2 python structures are the same. Designed to
handle dict, list, np.ndarray etc.
@@ -14,7 +15,7 @@ def is_equal(first, second):
all_equal = False
if isinstance(first, dict):
for key in first.keys():
- if (not second.has_key(key)):
+ if (not key in second):
print "Missing key %s in %s" % (key, second)
all_equal = False
else:
@@ -33,4 +34,3 @@ def is_equal(first, second):
print '%s and\n%s are different' % (first, second)
all_equal = False
return all_equal
-
diff --git a/mne/fiff/evoked.py b/mne/fiff/evoked.py
index 915c866..aadd7a0 100755
--- a/mne/fiff/evoked.py
+++ b/mne/fiff/evoked.py
@@ -70,23 +70,25 @@ class Evoked(object):
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
if len(processed) == 0:
fid.close()
- raise ValueError, 'Could not find processed data'
+ raise ValueError('Could not find processed data')
evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
if len(evoked_node) == 0:
fid.close()
- raise ValueError, 'Could not find evoked data'
+ raise ValueError('Could not find evoked data')
if setno is None:
if len(evoked_node) > 1:
fid.close()
- raise ValueError, '%d datasets present. setno parameter mush be set'
+ raise ValueError('%d datasets present. '
+ 'setno parameter must be set'
+ % len(evoked_node))
else:
setno = 0
if setno >= len(evoked_node) or setno < 0:
fid.close()
- raise ValueError, 'Data set selector out of range'
+ raise ValueError('Data set selector out of range')
my_evoked = evoked_node[setno]
@@ -130,26 +132,28 @@ class Evoked(object):
if nchan > 0:
if chs is None:
fid.close()
- raise ValueError, 'Local channel information was not found ' \
- 'when it was expected.'
+ raise ValueError('Local channel information was not found '
+ 'when it was expected.')
if len(chs) != nchan:
fid.close()
- raise ValueError, 'Number of channels and number of channel ' \
- 'definitions are different'
+ raise ValueError('Number of channels and number of channel '
+ 'definitions are different')
info['chs'] = chs
info['nchan'] = nchan
- print '\tFound channel information in evoked data. nchan = %d' % nchan
+ print ('\tFound channel information in evoked data. nchan = %d'
+ % nchan)
if sfreq > 0:
info['sfreq'] = sfreq
nsamp = last - first + 1
print '\tFound the data of interest:'
print '\t\tt = %10.2f ... %10.2f ms (%s)' % (
- 1000*first/info['sfreq'], 1000*last/info['sfreq'], comment)
+ 1000 * first / info['sfreq'], 1000 * last / info['sfreq'], comment)
if info['comps'] is not None:
- print '\t\t%d CTF compensation matrices available' % len(info['comps'])
+ print ('\t\t%d CTF compensation matrices available'
+ % len(info['comps']))
# Read the data in the aspect block
nave = 1
@@ -175,8 +179,8 @@ class Evoked(object):
nepoch = len(epoch)
if nepoch != 1 and nepoch != info.nchan:
fid.close()
- raise ValueError, 'Number of epoch tags is unreasonable '\
- '(nepoch = %d nchan = %d)' % (nepoch, info.nchan)
+ raise ValueError('Number of epoch tags is unreasonable '
+ '(nepoch = %d nchan = %d)' % (nepoch, info.nchan))
if nepoch == 1:
# Only one epoch
@@ -193,12 +197,12 @@ class Evoked(object):
if all_data.shape[1] != nsamp:
fid.close()
- raise ValueError, 'Incorrect number of samples (%d instead of %d)' % (
- all_data.shape[1], nsamp)
+ raise ValueError('Incorrect number of samples (%d instead of %d)'
+ % (all_data.shape[1], nsamp))
# Calibrate
cals = np.array([info['chs'][k].cal for k in range(info['nchan'])])
- all_data = np.dot(np.diag(cals.ravel()), all_data) # XXX : can be better
+ all_data = cals[:, None] * all_data
times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
@@ -215,7 +219,7 @@ class Evoked(object):
imax = len(times)
else:
imax = int(np.where(times <= bmax)[0][-1]) + 1
- all_data -= np.mean(all_data[:, imin:imax], axis=1)[:,None]
+ all_data -= np.mean(all_data[:, imin:imax], axis=1)[:, None]
else:
print "No baseline correction applied..."
@@ -270,7 +274,7 @@ class Evoked(object):
write_int(fid, FIFF.FIFF_NAVE, self.nave)
decal = np.zeros((self.info['nchan'], self.info['nchan']))
- for k in range(self.info['nchan']): # XXX : can be improved
+ for k in range(self.info['nchan']):
decal[k, k] = 1.0 / self.info['chs'][k]['cal']
write_float_matrix(fid, FIFF.FIFF_EPOCH, np.dot(decal, self.data))
@@ -303,7 +307,7 @@ class Evoked(object):
self.times = times[mask]
self.first = - int(np.sum(self.times < 0))
self.last = int(np.sum(self.times > 0))
- self.data = self.data[:,mask]
+ self.data = self.data[:, mask]
def read_evoked(fname, setno=0, baseline=None):
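The calibration change above (cals[:, None] * all_data in place of np.dot(np.diag(cals.ravel()), all_data)) is a genuine improvement, not just style: broadcasting scales each channel's row directly instead of first materializing an nchan x nchan diagonal matrix. A small self-contained sketch with made-up numbers:

    import numpy as np
    cals = np.array([2.0, 0.5, 4.0])     # per-channel calibration factors
    data = np.arange(12.).reshape(3, 4)  # nchan x nsamp
    slow = np.dot(np.diag(cals), data)   # builds a 3 x 3 temporary first
    fast = cals[:, None] * data          # broadcasts over rows, no temporary
    assert np.allclose(slow, fast)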
diff --git a/mne/fiff/matrix.py b/mne/fiff/matrix.py
index 595a432..ebf4823 100755
--- a/mne/fiff/matrix.py
+++ b/mne/fiff/matrix.py
@@ -31,28 +31,32 @@ def _read_named_matrix(fid, node, matkind):
node = node.children[k]
break
else:
- raise ValueError, 'Desired named matrix (kind = %d) not available' % matkind
+ raise ValueError('Desired named matrix (kind = %d) not available'
+ % matkind)
else:
- if not has_tag(node, matkind):
- raise ValueError, 'Desired named matrix (kind = %d) not available' % matkind
+ if not has_tag(node, matkind):
+ raise ValueError('Desired named matrix (kind = %d) not available'
+ % matkind)
# Read everything we need
tag = find_tag(fid, node, matkind)
if tag is None:
- raise ValueError, 'Matrix data missing'
+ raise ValueError('Matrix data missing')
else:
- data = tag.data;
+ data = tag.data
nrow, ncol = data.shape
tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
if tag is not None:
- if tag.data != nrow:
- raise ValueError, 'Number of rows in matrix data and FIFF_MNE_NROW tag do not match'
+ if tag.data != nrow:
+ raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW '
+ 'tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
if tag is not None:
- if tag.data != ncol:
- raise ValueError, 'Number of columns in matrix data and FIFF_MNE_NCOL tag do not match'
+ if tag.data != ncol:
+ raise ValueError('Number of columns in matrix data and '
+ 'FIFF_MNE_NCOL tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
if tag is not None:
diff --git a/mne/fiff/open.py b/mne/fiff/open.py
index 7ee6ec6..4d27c7b 100755
--- a/mne/fiff/open.py
+++ b/mne/fiff/open.py
@@ -32,25 +32,24 @@ def fiff_open(fname, verbose=False):
list of nodes.
"""
-
- fid = open(fname, "rb") # Open in binary mode
+ fid = open(fname, "rb")  # Open in binary mode
tag = read_tag_info(fid)
# Check that this looks like a fif file
if tag.kind != FIFF.FIFF_FILE_ID:
- raise ValueError, 'file does not start with a file id tag'
+ raise ValueError('file does not start with a file id tag')
if tag.type != FIFF.FIFFT_ID_STRUCT:
- raise ValueError, 'file does not start with a file id tag'
+ raise ValueError('file does not start with a file id tag')
if tag.size != 20:
- raise ValueError, 'file does not start with a file id tag'
+ raise ValueError('file does not start with a file id tag')
tag = read_tag(fid)
if tag.kind != FIFF.FIFF_DIR_POINTER:
- raise ValueError, 'file does have a directory pointer'
+ raise ValueError('file does not have a directory pointer')
# Read or create the directory tree
if verbose:
diff --git a/mne/fiff/pick.py b/mne/fiff/pick.py
index 22a7dc8..262bc6f 100755
--- a/mne/fiff/pick.py
+++ b/mne/fiff/pick.py
@@ -8,6 +8,7 @@ from copy import copy
import numpy as np
from .constants import FIFF
+
def channel_type(info, idx):
"""Get channel type
@@ -159,7 +160,7 @@ def pick_info(info, sel=[]):
res = copy(info)
if len(sel) == 0:
- raise ValueError, 'Warning : No channels match the selection.'
+ raise ValueError('Warning : No channels match the selection.')
res['chs'] = [res['chs'][k] for k in sel]
res['ch_names'] = [res['ch_names'][k] for k in sel]
@@ -195,7 +196,7 @@ def pick_channels_evoked(orig, include=[], exclude=[]):
exclude=exclude)
if len(sel) == 0:
- raise ValueError, 'Warning : No channels match the selection.'
+ raise ValueError('Warning : No channels match the selection.')
res = copy(orig)
#
@@ -205,7 +206,7 @@ def pick_channels_evoked(orig, include=[], exclude=[]):
#
# Create the reduced data set
#
- res.data = res.data[sel,:]
+ res.data = res.data[sel, :]
return res
@@ -242,7 +243,7 @@ def pick_channels_forward(orig, include=[], exclude=[]):
# Do we have something?
nuse = len(sel)
if nuse == 0:
- raise ValueError, 'Nothing remains after picking'
+ raise ValueError('Nothing remains after picking')
print '\t%d out of %d channels remain after picking' % (nuse,
fwd['nchan'])
@@ -262,6 +263,7 @@ def pick_channels_forward(orig, include=[], exclude=[]):
return fwd
+
def channel_indices_by_type(info):
"""Get indices of channels by type
"""
diff --git a/mne/fiff/proj.py b/mne/fiff/proj.py
index 920ef76..7f8b41b 100755
--- a/mne/fiff/proj.py
+++ b/mne/fiff/proj.py
@@ -60,37 +60,38 @@ def read_proj(fid, node):
if tag is not None:
desc = tag.data
else:
- raise ValueError, 'Projection item description missing'
+ raise ValueError('Projection item description missing')
- tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
- if tag is not None:
- namelist = tag.data
- else:
- raise ValueError, 'Projection item channel list missing'
+ # XXX : is this useful ?
+ # tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
+ # if tag is not None:
+ # namelist = tag.data
+ # else:
+ # raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)
if tag is not None:
kind = int(tag.data)
else:
- raise ValueError, 'Projection item kind missing'
+ raise ValueError('Projection item kind missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)
if tag is not None:
nvec = int(tag.data)
else:
- raise ValueError, 'Number of projection vectors not specified'
+ raise ValueError('Number of projection vectors not specified')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
if tag is not None:
names = tag.data.split(':')
else:
- raise ValueError, 'Projection item channel list missing'
+ raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)
if tag is not None:
data = tag.data
else:
- raise ValueError, 'Projection item data missing'
+ raise ValueError('Projection item data missing')
tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)
if tag is not None:
@@ -99,8 +100,8 @@ def read_proj(fid, node):
active = False
if data.shape[1] != len(names):
- raise ValueError, ('Number of channel names does not match the '
- 'size of data matrix')
+ raise ValueError('Number of channel names does not match the '
+ 'size of data matrix')
# Use exactly the same fields in data as in a named matrix
one = Bunch(kind=kind, active=active, desc=desc,
@@ -162,6 +163,7 @@ def write_proj(fid, projs):
end_block(fid, FIFF.FIFFB_PROJ)
+
###############################################################################
# Utils
@@ -188,7 +190,7 @@ def make_projector(projs, ch_names, bads=[]):
"""
nchan = len(ch_names)
if nchan == 0:
- raise ValueError, 'No channel names specified'
+ raise ValueError('No channel names specified')
proj = np.eye(nchan, nchan)
nproj = 0
@@ -214,45 +216,44 @@ def make_projector(projs, ch_names, bads=[]):
nonzero = 0
for k, p in enumerate(projs):
if p.active:
- one = p # XXX really necessary?
- if len(one['data']['col_names']) != \
- len(np.unique(one['data']['col_names'])):
- raise ValueError, ('Channel name list in projection item %d'
- ' contains duplicate items' % k)
+ if len(p['data']['col_names']) != \
+ len(np.unique(p['data']['col_names'])):
+ raise ValueError('Channel name list in projection item %d'
+ ' contains duplicate items' % k)
# Get the two selection vectors to pick correct elements from
# the projection vectors omitting bad channels
sel = []
vecsel = []
for c, name in enumerate(ch_names):
- if name in one['data']['col_names']:
+ if name in p['data']['col_names']:
sel.append(c)
- vecsel.append(one['data']['col_names'].index(name))
+ vecsel.append(p['data']['col_names'].index(name))
# If there is something to pick, pickit
if len(sel) > 0:
- for v in range(one['data']['nrow']):
- vecs[sel, nvec+v] = one['data']['data'][v, vecsel].T
+ for v in range(p['data']['nrow']):
+ vecs[sel, nvec + v] = p['data']['data'][v, vecsel].T
# Rescale for better detection of small singular values
- for v in range(one['data']['nrow']):
- onesize = sqrt(np.sum(vecs[:, nvec + v] * vecs[:, nvec + v]))
- if onesize > 0:
- vecs[:, nvec+v] /= onesize
+ for v in range(p['data']['nrow']):
+ psize = sqrt(np.sum(vecs[:, nvec + v] * vecs[:, nvec + v]))
+ if psize > 0:
+ vecs[:, nvec + v] /= psize
nonzero += 1
- nvec += one['data']['nrow']
+ nvec += p['data']['nrow']
# Check whether all of the vectors are exactly zero
if nonzero == 0:
return proj, nproj, U
# Reorthogonalize the vectors
- U, S, V = linalg.svd(vecs[:,:nvec], full_matrices=False)
+ U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)
# Throw away the linearly dependent guys
nproj = np.sum((S / S[0]) > 1e-2)
- U = U[:,:nproj]
+ U = U[:, :nproj]
# Here is the celebrated result
proj -= np.dot(U, U.T)
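The tail of make_projector is the standard SSP construction: the collected projection vectors are orthonormalized by SVD, directions whose singular value falls below 1e-2 of the largest are dropped as linearly dependent, and the operator I - U U.T projects the rest out. A small sketch with synthetic vectors (numpy only, data made up):

    import numpy as np
    rng = np.random.RandomState(0)
    vecs = rng.randn(10, 3)
    vecs[:, 2] = vecs[:, 0]            # make one vector linearly dependent
    U, S, V = np.linalg.svd(vecs, full_matrices=False)
    nproj = np.sum((S / S[0]) > 1e-2)  # the dependent direction is dropped
    U = U[:, :nproj]
    proj = np.eye(10) - np.dot(U, U.T)
    assert np.allclose(np.dot(proj, vecs), 0)  # projected-out directions vanish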
diff --git a/mne/fiff/raw.py b/mne/fiff/raw.py
index 4ee74b9..702da96 100755
--- a/mne/fiff/raw.py
+++ b/mne/fiff/raw.py
@@ -100,13 +100,13 @@ class Raw(dict):
elif ent.kind == FIFF.FIFF_DATA_BUFFER:
# Figure out the number of samples in this buffer
if ent.type == FIFF.FIFFT_DAU_PACK16:
- nsamp = ent.size / (2*nchan)
+ nsamp = ent.size / (2 * nchan)
elif ent.type == FIFF.FIFFT_SHORT:
- nsamp = ent.size / (2*nchan)
+ nsamp = ent.size / (2 * nchan)
elif ent.type == FIFF.FIFFT_FLOAT:
- nsamp = ent.size / (4*nchan)
+ nsamp = ent.size / (4 * nchan)
elif ent.type == FIFF.FIFFT_INT:
- nsamp = ent.size / (4*nchan)
+ nsamp = ent.size / (4 * nchan)
else:
fid.close()
raise ValueError('Cannot handle data buffers of type %d' %
@@ -122,9 +122,9 @@ class Raw(dict):
if nskip > 0:
import pdb; pdb.set_trace()
rawdir.append(dict(ent=None, first=first_samp,
- last=first_samp + nskip*nsamp - 1,
- nsamp=nskip*nsamp))
- first_samp += nskip*nsamp
+ last=first_samp + nskip * nsamp - 1,
+ nsamp=nskip * nsamp))
+ first_samp += nskip * nsamp
nskip = 0
# Add a data buffer
@@ -156,8 +156,8 @@ class Raw(dict):
def __getitem__(self, item):
"""getting raw data content with python slicing"""
- if isinstance(item, tuple): # slicing required
- if len(item) == 2: # channels and time instants
+ if isinstance(item, tuple):  # slicing required
+ if len(item) == 2:  # channels and time instants
time_slice = item[1]
if isinstance(item[0], slice):
start = item[0].start if item[0].start is not None else 0
@@ -321,11 +321,11 @@ def read_raw_segment(raw, start=0, stop=None, sel=None):
cal = np.diag(raw.cals[sel].ravel())
else:
if raw.proj is None:
- mult = raw.comp[sel,:] * cal
+ mult = raw.comp[sel, :] * cal
elif raw.comp is None:
- mult = raw.proj[sel,:] * cal
+ mult = raw.proj[sel, :] * cal
else:
- mult = raw.proj[sel,:] * raw.comp * cal
+ mult = raw.proj[sel, :] * raw.comp * cal
do_debug = False
# do_debug = True
@@ -361,7 +361,7 @@ def read_raw_segment(raw, start=0, stop=None, sel=None):
else:
one = tag.data.reshape(this['nsamp'],
nchan).astype(np.float).T
- one = cal * one[sel,:]
+ one = cal * one[sel, :]
else:
one = mult * tag.data.reshape(this['nsamp'],
nchan).astype(np.float).T
@@ -396,17 +396,17 @@ def read_raw_segment(raw, start=0, stop=None, sel=None):
# Now we are ready to pick
picksamp = last_pick - first_pick
if picksamp > 0:
- data[:, dest:dest+picksamp] = one[:, first_pick:last_pick]
+ data[:, dest:(dest + picksamp)] = one[:, first_pick:last_pick]
dest += picksamp
# Done?
- if this['last'] >= stop-1:
+ if this['last'] >= stop - 1:
print ' [done]'
break
times = (np.arange(start, stop) - raw.first_samp) / raw.info['sfreq']
- raw.fid.seek(0, 0) # Go back to beginning of the file
+ raw.fid.seek(0, 0)  # Go back to beginning of the file
return data, times
@@ -580,7 +580,7 @@ def start_writing_raw(name, info, sel=None):
#
# Scan numbers may have been messed up
#
- chs[k].scanno = k + 1 # scanno starts at 1 in FIF format
+ chs[k].scanno = k + 1  # scanno starts at 1 in FIF format
chs[k].range = 1.0
cals.append(chs[k]['cal'])
write_ch_info(fid, chs[k])
@@ -611,7 +611,7 @@ def write_raw_buffer(fid, buf, cals):
if buf.shape[0] != len(cals):
raise ValueError('buffer and calibration sizes do not match')
- write_float(fid, FIFF.FIFF_DATA_BUFFER, # XXX can do better
+ write_float(fid, FIFF.FIFF_DATA_BUFFER,  # XXX can do better
np.dot(np.diag(1.0 / np.ravel(cals)), buf))
diff --git a/mne/fiff/tests/test_evoked.py b/mne/fiff/tests/test_evoked.py
index d14bbcb..386fe30 100755
--- a/mne/fiff/tests/test_evoked.py
+++ b/mne/fiff/tests/test_evoked.py
@@ -16,9 +16,9 @@ def test_io_evoked():
write_evoked('evoked.fif', ave)
ave2 = read_evoked('evoked.fif')
- print assert_array_almost_equal(ave.data, ave2.data)
- print assert_array_almost_equal(ave.times, ave2.times)
- print assert_equal(ave.nave, ave2.nave)
- print assert_equal(ave.aspect_kind, ave2.aspect_kind)
- print assert_equal(ave.last, ave2.last)
- print assert_equal(ave.first, ave2.first)
+ assert_array_almost_equal(ave.data, ave2.data)
+ assert_array_almost_equal(ave.times, ave2.times)
+ assert_equal(ave.nave, ave2.nave)
+ assert_equal(ave.aspect_kind, ave2.aspect_kind)
+ assert_equal(ave.last, ave2.last)
+ assert_equal(ave.first, ave2.first)
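The test cleanup above fixes a quiet bug as well: the numpy.testing and nose helpers raise on failure and return None on success, so print assert_equal(...) could only ever print "None". Calling them bare is the correct usage; a minimal sketch:

    import numpy as np
    from numpy.testing import assert_array_almost_equal
    a = np.array([1.0, 2.0])
    assert_array_almost_equal(a, a + 1e-9)  # passes silently, raises on mismatch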
diff --git a/mne/fiff/tree.py b/mne/fiff/tree.py
index ddd2cfa..bccd879 100755
--- a/mne/fiff/tree.py
+++ b/mne/fiff/tree.py
@@ -46,7 +46,7 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=False):
block = 0
if verbose:
- print '\t'*indent + 'start { %d' % block
+ print '\t' * indent + 'start { %d' % block
this = start
@@ -63,7 +63,7 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=False):
if directory[this].kind == FIFF_BLOCK_START:
if this != start:
child, this = make_dir_tree(fid, directory, this,
- indent+1, verbose)
+ indent + 1, verbose)
tree.nchild += 1
tree.children.append(child)
elif directory[this].kind == FIFF_BLOCK_END:
@@ -96,9 +96,9 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=False):
tree.directory = None
if verbose:
- print '\t'*(indent+1) + 'block = %d nent = %d nchild = %d' % (
+ print '\t' * (indent + 1) + 'block = %d nent = %d nchild = %d' % (
tree.block, tree.nent, tree.nchild)
- print '\t'*indent, 'end } %d' % block
+ print '\t' * indent, 'end } %d' % block
last = this
return tree, last
@@ -114,13 +114,7 @@ from .write import write_id, start_block, end_block, _write
def copy_tree(fidin, in_id, nodes, fidout):
- """
- %
- % fiff_copy_tree(fidin, in_id, nodes, fidout)
- %
- % Copies directory subtrees from fidin to fidout
- %
- """
+ """Copies directory subtrees from fidin to fidout"""
if len(nodes) <= 0:
return
@@ -147,7 +141,7 @@ def copy_tree(fidin, in_id, nodes, fidout):
# Read and write tags, pass data through transparently
fidin.seek(d.pos, 0)
- s = fidin.read(4*4)
+ s = fidin.read(4 * 4)
tag = Tag(*struct.unpack(">iIii", s))
tag.data = np.fromfile(fidin, dtype='>B', count=tag.size)
diff --git a/mne/label.py b/mne/label.py
index 8634606..b5ab431 100755
--- a/mne/label.py
+++ b/mne/label.py
@@ -67,7 +67,7 @@ def label_time_courses(labelfile, stcfile):
if stc['vertices'][k] in vertices]
if len(vertices) == 0:
- raise ValueError, 'No vertices match the label in the stc file'
+ raise ValueError('No vertices match the label in the stc file')
values = stc['data'][idx]
times = stc['tmin'] + stc['tstep'] * np.arange(stc['data'].shape[1])
diff --git a/mne/layouts/__init__.py b/mne/layouts/__init__.py
index 481ce9b..9109855 100755
--- a/mne/layouts/__init__.py
+++ b/mne/layouts/__init__.py
@@ -1 +1 @@
-from .layout import Layout
\ No newline at end of file
+from .layout import Layout
diff --git a/mne/layouts/layout.py b/mne/layouts/layout.py
index 6c55fd8..8ebf3ca 100755
--- a/mne/layouts/layout.py
+++ b/mne/layouts/layout.py
@@ -20,7 +20,7 @@ class Layout(object):
lout_fname = op.join(path, kind + '.lout')
f = open(lout_fname)
- f.readline() # skip first line
+ f.readline()  # skip first line
names = []
pos = []
@@ -41,9 +41,9 @@ class Layout(object):
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
- pos[:,:2] += 0.03
- pos[:,:2] *= 0.97 / 1.03
- pos[:,2:] *= 0.94
+ pos[:, :2] += 0.03
+ pos[:, :2] *= 0.97 / 1.03
+ pos[:, 2:] *= 0.94
f.close()
@@ -52,14 +52,14 @@ class Layout(object):
self.names = names
# if __name__ == '__main__':
-#
+#
# layout = Layout()
-#
+#
# import pylab as pl
# pl.rcParams['axes.edgecolor'] = 'w'
# pl.close('all')
# pl.figure(facecolor='k', )
-#
+#
# for i in range(5):
# # for i in range(len(pos)):
# ax = pl.axes(layout.pos[i], axisbg='k')
@@ -67,5 +67,5 @@ class Layout(object):
# pl.xticks([], ())
# pl.yticks([], ())
# pl.gca().grid(color='w')
-#
+#
# pl.show()
diff --git a/mne/misc.py b/mne/misc.py
index 1ca6a2f..36dffd7 100644
--- a/mne/misc.py
+++ b/mne/misc.py
@@ -3,6 +3,7 @@
#
# License: BSD (3-clause)
+
def parse_config(fname):
"""Parse a config file (like .ave and .cov files)
@@ -25,12 +26,12 @@ def parse_config(fname):
with open(fname, 'r') as f:
lines = f.readlines()
except:
- print("Error while reading %s" % fname)
+ raise ValueError("Error while reading %s" % fname)
cat_ind = [i for i, x in enumerate(lines) if "category {" in x]
event_dict = dict()
for ind in cat_ind:
- for k in range(ind+1, ind+7):
+ for k in range(ind + 1, ind + 7):
words = lines[k].split()
if len(words) >= 2:
key = words[0]
@@ -40,7 +41,7 @@ def parse_config(fname):
else:
raise ValueError('Could not find event id.')
event_dict[event] = dict(**reject_params)
- for k in range(ind+1, ind+7):
+ for k in range(ind + 1, ind + 7):
words = lines[k].split()
if len(words) >= 2:
key = words[0]
@@ -55,6 +56,7 @@ def parse_config(fname):
event_dict[event][key] = float(words[1])
return event_dict
+
def read_reject_parameters(fname):
"""Read rejection parameters from .cov or .ave config file"""
@@ -62,20 +64,21 @@ def read_reject_parameters(fname):
with open(fname, 'r') as f:
lines = f.readlines()
except:
- print("Error while reading %s" % fname)
+ raise ValueError("Error while reading %s" % fname)
- reject_names = ['gradReject', 'magReject', 'eegReject', 'eogReject', 'ecgReject']
+ reject_names = ['gradReject', 'magReject', 'eegReject', 'eogReject',
+ 'ecgReject']
reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
reject = dict()
for line in lines:
words = line.split()
- print words
if words[0] in reject_names:
reject[reject_pynames[reject_names.index(words[0])]] = \
float(words[1])
return reject
+
def read_flat_parameters(fname):
"""Read flat channel rejection parameters from .cov or .ave config file"""
@@ -83,14 +86,13 @@ def read_flat_parameters(fname):
with open(fname, 'r') as f:
lines = f.readlines()
except:
- print("Error while reading %s" % fname)
+ raise ValueError("Error while reading %s" % fname)
reject_names = ['gradFlat', 'magFlat', 'eegFlat', 'eogFlat', 'ecgFlat']
reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
flat = dict()
for line in lines:
words = line.split()
- print words
if words[0] in reject_names:
flat[reject_pynames[reject_names.index(words[0])]] = \
float(words[1])
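All three readers share one pattern: map the MNE config keys onto short
Python names and keep the float values. A stand-alone sketch (with a guard
for blank lines added, which the committed loop does not have):

    def read_reject_sketch(lines):
        reject_names = ['gradReject', 'magReject', 'eegReject', 'eogReject',
                        'ecgReject']
        reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
        reject = dict()
        for line in lines:
            words = line.split()
            if words and words[0] in reject_names:  # `words and` is the guard
                reject[reject_pynames[reject_names.index(words[0])]] = \
                    float(words[1])
        return reject

    print(read_reject_sketch(['gradReject 4000e-13', 'eegReject 40e-6']))
    # {'grad': 4e-10, 'eeg': 4e-05}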
diff --git a/mne/source_space.py b/mne/source_space.py
index a2cb303..5240b48 100755
--- a/mne/source_space.py
+++ b/mne/source_space.py
@@ -70,7 +70,7 @@ def read_source_spaces_from_tree(fid, tree, add_geom=False):
# Find all source spaces
spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
if len(spaces) == 0:
- raise ValueError, 'No source spaces found'
+ raise ValueError('No source spaces found')
src = list()
for s in spaces:
@@ -123,7 +123,7 @@ def _read_one_source_space(fid, this):
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
if tag is None:
- raise ValueError, 'Number of vertices not found'
+ raise ValueError('Number of vertices not found')
res['np'] = tag.data
@@ -139,40 +139,40 @@ def _read_one_source_space(fid, this):
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
- raise ValueError, 'Coordinate frame information not found'
+ raise ValueError('Coordinate frame information not found')
res['coord_frame'] = tag.data
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
if tag is None:
- raise ValueError, 'Vertex data not found'
+ raise ValueError('Vertex data not found')
- res['rr'] = tag.data.astype(np.float) # make it double precision for mayavi
+ res['rr'] = tag.data.astype(np.float) # double precision for mayavi
if res['rr'].shape[0] != res['np']:
- raise ValueError, 'Vertex information is incorrect'
+ raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
- raise ValueError, 'Vertex normals not found'
+ raise ValueError('Vertex normals not found')
res['nn'] = tag.data
if res['nn'].shape[0] != res['np']:
- raise ValueError, 'Vertex normal information is incorrect'
+ raise ValueError('Vertex normal information is incorrect')
if res['ntri'] > 0:
tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
if tag is None:
- raise ValueError, 'Triangulation not found'
+ raise ValueError('Triangulation not found')
else:
- res['tris'] = tag.data - 1 # index start at 0 in Python
+ res['tris'] = tag.data - 1 # index start at 0 in Python
else:
- res['tris'] = tag.data - 1 # index start at 0 in Python
+ res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
- raise ValueError, 'Triangulation information is incorrect'
+ raise ValueError('Triangulation information is incorrect')
else:
res['tris'] = None
@@ -186,12 +186,12 @@ def _read_one_source_space(fid, this):
res['nuse'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
if tag is None:
- raise ValueError, 'Source selection information missing'
+ raise ValueError('Source selection information missing')
res['inuse'] = tag.data.astype(np.int).T
if len(res['inuse']) != res['np']:
- raise ValueError, 'Incorrect number of entries in source space ' \
- 'selection'
+ raise ValueError('Incorrect number of entries in source space '
+ 'selection')
res['vertno'] = np.where(res['inuse'])[0]
@@ -203,7 +203,7 @@ def _read_one_source_space(fid, this):
res['use_tris'] = None
else:
res['nuse_tri'] = tag1.data
- res['use_tris'] = tag2.data - 1 # index start at 0 in Python
+ res['use_tris'] = tag2.data - 1 # index start at 0 in Python
# Patch-related information
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
@@ -233,24 +233,25 @@ def complete_source_space_info(this):
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
- this['tri_nn'] = np.cross((r2-r1), (r3-r1))
+ this['tri_nn'] = np.cross((r2 - r1), (r3 - r1))
- for p in range(this['ntri']): # XXX : can do better
- size = sqrt(np.sum(this['tri_nn'][p,:]**2))
+ for p in range(this['ntri']): # XXX : can do better
+ size = sqrt(np.sum(this['tri_nn'][p, :] ** 2))
this['tri_area'][p] = size / 2.0
- this['tri_nn'][p,:] = this['tri_nn'][p,:] / size
+ this['tri_nn'][p, :] = this['tri_nn'][p, :] / size
print '[done]'
# Selected triangles
print '\tCompleting selection triangulation info...',
if this['nuse_tri'] > 0:
- r1 = this['rr'][this['use_tris'][:, 0],:]
- r2 = this['rr'][this['use_tris'][:, 1],:]
- r3 = this['rr'][this['use_tris'][:, 2],:]
+ r1 = this['rr'][this['use_tris'][:, 0], :]
+ r2 = this['rr'][this['use_tris'][:, 1], :]
+ r3 = this['rr'][this['use_tris'][:, 2], :]
this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
- this['use_tri_nn'] = np.cross((r2-r1), (r3-r1))
- this['use_tri_area'] = np.sqrt(np.sum(this['use_tri_nn']**2, axis=1)) / 2.0
+ this['use_tri_nn'] = np.cross((r2 - r1), (r3 - r1))
+ this['use_tri_area'] = np.sqrt(np.sum(this['use_tri_nn'] ** 2, axis=1)
+ ) / 2.0
print '[done]'
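The per-triangle loop above is flagged "XXX : can do better": the same
geometry vectorizes directly. On a single toy triangle:

    import numpy as np

    rr = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    tris = np.array([[0, 1, 2]])
    r1, r2, r3 = rr[tris[:, 0]], rr[tris[:, 1]], rr[tris[:, 2]]
    tri_nn = np.cross(r2 - r1, r3 - r1)           # unnormalized normals
    size = np.sqrt(np.sum(tri_nn ** 2, axis=1))   # twice the triangle area
    tri_area = size / 2.0                         # -> array([ 0.5])
    tri_nn = tri_nn / size[:, None]               # unit normals, here [0, 0, 1]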
diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py
index 599da57..57e693b 100755
--- a/mne/stats/cluster_level.py
+++ b/mne/stats/cluster_level.py
@@ -51,14 +51,15 @@ def _find_clusters(x, threshold, tail=0):
if x.ndim == 1:
clusters = ndimage.find_objects(labels, n_labels)
- sums = ndimage.measurements.sum(x, labels, index=range(1, n_labels+1))
+ sums = ndimage.measurements.sum(x, labels,
+ index=range(1, n_labels + 1))
else:
clusters = list()
sums = np.empty(n_labels)
- for l in range(1, n_labels+1):
+ for l in range(1, n_labels + 1):
c = labels == l
clusters.append(c)
- sums[l-1] = np.sum(x[c])
+ sums[l - 1] = np.sum(x[c])
return clusters, sums
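A worked 1-D example of what _find_clusters computes: contiguous
supra-threshold runs and the summed statistic inside each run (toy numbers):

    import numpy as np
    from scipy import ndimage

    x = np.array([0., 2., 3., 0., 0., 4., 0.])
    labels, n_labels = ndimage.label(x > 1.67)
    clusters = ndimage.find_objects(labels, n_labels)   # x[1:3] and x[5:6]
    sums = ndimage.measurements.sum(x, labels,
                                    index=range(1, n_labels + 1))
    # sums -> array([ 5.,  4.])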
@@ -73,11 +74,11 @@ def _pval_from_histogram(T, H0, tail):
raise ValueError('invalid tail parameter')
# from pct to fraction
- if tail == -1: # up tail
- pval = np.array([np.mean(H0 <= t) for t in T])
- elif tail == 1: # low tail
+ if tail == -1: # up tail
+ pval = np.array([np.mean(H0 <= t) for t in T])
+ elif tail == 1: # low tail
pval = np.array([np.mean(H0 >= t) for t in T])
- elif tail == 0: # both tails
+ elif tail == 0: # both tails
pval = np.array([np.mean(H0 >= abs(t)) for t in T])
pval += np.array([np.mean(H0 <= -abs(t)) for t in T])
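The corrected p-value is just the fraction of the permutation histogram at
least as extreme as each observed statistic (note the "up tail"/"low tail"
comments above appear swapped relative to the computations). With toy
values:

    import numpy as np

    H0 = np.array([0.1, 0.5, 1.2, 2.0, 3.1])   # max statistics under H0
    T = np.array([1.5])                         # observed statistic
    pval = np.array([np.mean(H0 >= t) for t in T])   # -> array([ 0.4])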
@@ -142,13 +143,13 @@ def permutation_cluster_test(X, stat_fun=f_oneway, threshold=1.67,
# make list of indices for random data split
splits_idx = np.append([0], np.cumsum(n_samples_per_condition))
- slices = [slice(splits_idx[k], splits_idx[k+1])
+ slices = [slice(splits_idx[k], splits_idx[k + 1])
for k in range(len(X))]
# Step 2: If we have some clusters, repeat process on permuted data
# -------------------------------------------------------------------
if len(clusters) > 0:
- H0 = np.zeros(n_permutations) # histogram
+ H0 = np.zeros(n_permutations) # histogram
for i_s in range(n_permutations):
np.random.shuffle(X_full)
X_shuffle_list = [X_full[s] for s in slices]
@@ -226,7 +227,7 @@ def permutation_cluster_t_test(X, threshold=1.67, n_permutations=1000, tail=0):
# Step 2: If we have some clusters, repeat process on permuted data
# -------------------------------------------------------------------
if len(clusters) > 0:
- H0 = np.empty(n_permutations) # histogram
+ H0 = np.empty(n_permutations) # histogram
for i_s in range(n_permutations):
# new surrogate data with random sign flip
signs = np.sign(0.5 - np.random.rand(n_samples, *shape_ones))
@@ -238,7 +239,7 @@ def permutation_cluster_t_test(X, threshold=1.67, n_permutations=1000, tail=0):
if len(perm_clusters_sums) > 0:
idx_max = np.argmax(np.abs(perm_clusters_sums))
- H0[i_s] = perm_clusters_sums[idx_max] # get max with sign info
+ H0[i_s] = perm_clusters_sums[idx_max] # get max with sign info
else:
H0[i_s] = 0
@@ -254,7 +255,7 @@ permutation_cluster_t_test.__test__ = False
# if __name__ == "__main__":
# noiselevel = 30
# np.random.seed(0)
-#
+#
# # 1D
# normfactor = np.hanning(20).sum()
# condition1 = np.random.randn(50, 300) * noiselevel
@@ -268,7 +269,7 @@ permutation_cluster_t_test.__test__ = False
# pseudoekp = 5 * np.hanning(150)[None,:]
# condition1[:, 100:250] += pseudoekp
# condition2[:, 100:250] -= pseudoekp
-#
+#
# # Make it 2D
# condition1 = np.tile(condition1[:,100:275,None], (1, 1, 15))
# condition2 = np.tile(condition2[:,100:275,None], (1, 1, 15))
@@ -278,26 +279,26 @@ permutation_cluster_t_test.__test__ = False
# condition2[..., :3] = np.random.randn(*shape2) * noiselevel
# condition1[..., -3:] = np.random.randn(*shape1) * noiselevel
# condition2[..., -3:] = np.random.randn(*shape2) * noiselevel
-#
+#
# # X, threshold, tail = condition1, 1.67, 1
# # X, threshold, tail = -condition1, -1.67, -1
# # # X, threshold, tail = condition1, 1.67, 0
# # fs, clusters, cluster_p_values, histogram = permutation_cluster_t_test(
# # condition1, n_permutations=500, tail=tail,
# # threshold=threshold)
-#
+#
# # import pylab as pl
# # pl.close('all')
# # pl.hist(histogram)
# # pl.show()
-#
+#
# fs, clusters, cluster_p_values, histogram = permutation_cluster_test(
# [condition1, condition2], n_permutations=1000)
-#
+#
# # Plotting for a better understanding
# import pylab as pl
# pl.close('all')
-#
+#
# if condition1.ndim == 2:
# pl.subplot(211)
# pl.plot(condition1.mean(axis=0), label="Condition 1")
@@ -319,7 +320,7 @@ permutation_cluster_t_test.__test__ = False
# for c, p_val in zip(clusters, cluster_p_values):
# if p_val <= 0.05:
# fs_plot[c] = fs[c]
-#
+#
# pl.imshow(fs.T, cmap=pl.cm.gray)
# pl.imshow(fs_plot.T, cmap=pl.cm.jet)
# # pl.imshow(fs.T, cmap=pl.cm.gray, alpha=0.6)
@@ -327,5 +328,5 @@ permutation_cluster_t_test.__test__ = False
# pl.xlabel('time')
# pl.ylabel('Freq')
# pl.colorbar()
-#
+#
# pl.show()
diff --git a/mne/stats/parametric.py b/mne/stats/parametric.py
index a489a4e..2e807d4 100755
--- a/mne/stats/parametric.py
+++ b/mne/stats/parametric.py
@@ -1,6 +1,7 @@
import numpy as np
from scipy import stats
+
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
@@ -46,7 +47,8 @@ def _f_oneway(*args):
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
- Statistics". Chapter 14. http://faculty.vassar.edu/lowry/ch14pt1.html
+ Statistics". Chapter 14.
+ http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
@@ -54,10 +56,11 @@ def _f_oneway(*args):
n_classes = len(args)
n_samples_per_class = np.array([len(a) for a in args])
n_samples = np.sum(n_samples_per_class)
- ss_alldata = reduce(lambda x, y: x+y, [np.sum(a**2, axis=0) for a in args])
+ ss_alldata = reduce(lambda x, y: x + y,
+ [np.sum(a ** 2, axis=0) for a in args])
sums_args = [np.sum(a, axis=0) for a in args]
- square_of_sums_alldata = reduce(lambda x, y: x+y, sums_args)**2
- square_of_sums_args = [s**2 for s in sums_args]
+ square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
+ square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0
for k, _ in enumerate(args):
@@ -76,4 +79,3 @@ def _f_oneway(*args):
def f_oneway(*args):
"""Call scipy.stats.f_oneway, but return only f-value"""
return _f_oneway(*args)[0]
-
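The sums-of-squares algebra in _f_oneway matches scipy.stats.f_oneway while
avoiding the data copy; checked by hand on a toy pair of groups:

    import numpy as np
    from scipy import stats

    a, b = np.array([1., 2., 3.]), np.array([2., 4., 6.])
    n = a.size + b.size
    square_of_sums_alldata = (a.sum() + b.sum()) ** 2
    sstot = np.sum(a ** 2) + np.sum(b ** 2) - square_of_sums_alldata / float(n)
    ssbn = (a.sum() ** 2 / a.size + b.sum() ** 2 / b.size
            - square_of_sums_alldata / float(n))
    sswn = sstot - ssbn
    f = (ssbn / (2 - 1)) / (sswn / (n - 2))   # 2 groups -> df = (1, n - 2)
    assert np.allclose(f, stats.f_oneway(a, b)[0])   # both give 2.4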
diff --git a/mne/stats/permutations.py b/mne/stats/permutations.py
index 8b1cda7..53eaa0d 100755
--- a/mne/stats/permutations.py
+++ b/mne/stats/permutations.py
@@ -31,14 +31,14 @@ def bin_perm_rep(ndim, a=0, b=1):
"""
# Create the leftmost column as 0,0,...,1,1,...
- nperms = 2**ndim
+ nperms = 2 ** ndim
perms = np.empty((nperms, ndim), type(a))
perms.fill(a)
half_point = nperms / 2
perms[half_point:, 0] = b
# Fill the rest of the table by sampling the previous column every 2 items
for j in range(1, ndim):
- half_col = perms[::2, j-1]
+ half_col = perms[::2, j - 1]
perms[:half_point, j] = half_col
perms[half_point:, j] = half_col
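For intuition, calling the function above enumerates all 2 ** ndim patterns
in lexicographic order, including the sign-flip table that
permutation_t_test uses below:

    >>> bin_perm_rep(2)
    array([[0, 0],
           [0, 1],
           [1, 0],
           [1, 1]])
    >>> bin_perm_rep(2, a=1, b=-1)
    array([[ 1,  1],
           [ 1, -1],
           [-1,  1],
           [-1, -1]])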
@@ -101,24 +101,24 @@ def permutation_t_test(X, n_permutations=10000, tail=0):
n_samples, n_tests = X.shape
do_exact = False
- if n_permutations is 'all' or (n_permutations >= 2**n_samples - 1):
+ if n_permutations is 'all' or (n_permutations >= 2 ** n_samples - 1):
do_exact = True
- n_permutations = 2**n_samples - 1
+ n_permutations = 2 ** n_samples - 1
- X2 = np.mean(X**2, axis=0) # precompute moments
+ X2 = np.mean(X ** 2, axis=0) # precompute moments
mu0 = np.mean(X, axis=0)
dof_scaling = sqrt(n_samples / (n_samples - 1.0))
- std0 = np.sqrt(X2 - mu0**2) * dof_scaling # get std with variance splitting
+ std0 = np.sqrt(X2 - mu0 ** 2) * dof_scaling # get std with var splitting
T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples))
if do_exact:
- perms = bin_perm_rep(n_samples, a=1, b=-1)[1:,:]
+ perms = bin_perm_rep(n_samples, a=1, b=-1)[1:, :]
else:
perms = np.sign(0.5 - np.random.rand(n_permutations, n_samples))
mus = np.dot(perms, X) / float(n_samples)
- stds = np.sqrt(X2[None,:] - mus**2) * dof_scaling # std with splitting
- max_abs = np.max(np.abs(mus) / (stds / sqrt(n_samples)), axis=1) # t-max
+ stds = np.sqrt(X2[None, :] - mus ** 2) * dof_scaling # std with splitting
+ max_abs = np.max(np.abs(mus) / (stds / sqrt(n_samples)), axis=1) # t-max
H0 = np.sort(max_abs)
scaling = float(n_permutations + 1)
@@ -132,5 +132,4 @@ def permutation_t_test(X, n_permutations=10000, tail=0):
return T_obs, p_values, H0
-permutation_t_test.__test__ = False # for nosetests
-
+permutation_t_test.__test__ = False # for nosetests
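A usage sketch, assuming the function is exposed as
mne.stats.permutation_t_test (note also that `n_permutations is 'all'`
above only works through CPython string interning; `== 'all'` would be the
robust comparison):

    import numpy as np
    from mne.stats import permutation_t_test

    rng = np.random.RandomState(0)
    X = rng.randn(10, 5) + 0.5   # 10 samples, 5 tests, true mean shifted
    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999)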
diff --git a/mne/stc.py b/mne/stc.py
index 9184ac2..129a1ae 100755
--- a/mne/stc.py
+++ b/mne/stc.py
@@ -29,9 +29,9 @@ def read_stc(filename):
stc = dict()
- fid.seek(0, 2) # go to end of file
+ fid.seek(0, 2) # go to end of file
file_length = fid.tell()
- fid.seek(0, 0) # go to beginning of file
+ fid.seek(0, 0) # go to beginning of file
# read tmin in ms
stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1))
@@ -50,11 +50,11 @@ def read_stc(filename):
# read the number of timepts
data_n = np.fromfile(fid, dtype=">I4", count=1)
- if ((file_length/4 -4 - vertices_n) % (data_n*vertices_n)) != 0:
- raise ValueError, 'incorrect stc file size'
+ if ((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0:
+ raise ValueError('incorrect stc file size')
# read the data matrix
- stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n*data_n)
+ stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n * data_n)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
# close the file
@@ -81,9 +81,9 @@ def write_stc(filename, tmin, tstep, vertices, data):
fid = open(filename, 'wb')
# write start time in ms
- fid.write(np.array(1000*tmin, dtype='>f4').tostring())
+ fid.write(np.array(1000 * tmin, dtype='>f4').tostring())
# write sampling rate in ms
- fid.write(np.array(1000*tstep, dtype='>f4').tostring())
+ fid.write(np.array(1000 * tstep, dtype='>f4').tostring())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>I4').tostring())
# write the vertex indices
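Since the on-disk format is plain big-endian float32/uint32, a write/read
round-trip loses only float64 precision. A sketch, assuming the helpers are
exposed at the package top level as mne.write_stc / mne.read_stc:

    import numpy as np
    import mne

    vertices = np.array([0, 1, 2], dtype=np.uint32)
    data = np.random.randn(3, 10)                 # n_vertices x n_times
    mne.write_stc('toy-lh.stc', tmin=0.0, tstep=1e-3, vertices=vertices,
                  data=data)
    stc = mne.read_stc('toy-lh.stc')
    assert np.allclose(stc['data'], data, atol=1e-6)   # float32 on disk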
diff --git a/mne/tests/test_bem_surfaces.py b/mne/tests/test_bem_surfaces.py
index 7400776..96bd526 100755
--- a/mne/tests/test_bem_surfaces.py
+++ b/mne/tests/test_bem_surfaces.py
@@ -10,6 +10,7 @@ data_path = sample.data_path(examples_folder)
fname = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-5120-5120-5120-bem-sol.fif')
+
def test_io_bem_surfaces():
"""Testing reading of bem surfaces
"""
diff --git a/mne/tests/test_inverse.py b/mne/tests/test_inverse.py
index 6321089..d6753b0 100755
--- a/mne/tests/test_inverse.py
+++ b/mne/tests/test_inverse.py
@@ -24,7 +24,7 @@ def test_apply_mne_inverse_operator():
setno = 0
snr = 3.0
- lambda2 = 1.0 / snr**2
+ lambda2 = 1.0 / snr ** 2
dSPM = True
evoked = mne.fiff.Evoked(fname_data, setno=setno, baseline=(None, 0))
@@ -46,7 +46,7 @@ def test_compute_minimum_norm():
evoked = mne.fiff.Evoked(fname_data, setno=setno, baseline=(None, 0))
whitener = noise_cov.get_whitener(evoked.info, mag_reg=0.1,
grad_reg=0.1, eeg_reg=0.1, pca=True)
- stc, K, W = mne.minimum_norm(evoked, forward, whitener, orientation='loose',
- method='dspm', snr=3, loose=0.2)
+ stc, K, W = mne.minimum_norm(evoked, forward, whitener,
+ orientation='loose', method='dspm', snr=3, loose=0.2)
# XXX : test something
diff --git a/mne/tests/test_label.py b/mne/tests/test_label.py
index 4ea5894..54fe29b 100755
--- a/mne/tests/test_label.py
+++ b/mne/tests/test_label.py
@@ -9,6 +9,7 @@ stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
label = 'Aud-lh'
label_fname = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
+
def test_label_io_and_time_course_estimates():
"""Test IO for STC files
"""
diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py
index b5b4ee5..063f051 100755
--- a/mne/time_frequency/tfr.py
+++ b/mne/time_frequency/tfr.py
@@ -50,10 +50,10 @@ def morlet(Fs, freqs, n_cycles=7, sigma=None):
sigma_t = n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
- t = np.arange(0, 5*sigma_t, 1.0 / Fs)
+ t = np.arange(0, 5 * sigma_t, 1.0 / Fs)
t = np.r_[-t[::-1], t[1:]]
- W = np.exp(2.0 * 1j * np.pi * f *t)
- W *= np.exp(-t**2 / (2.0 * sigma_t**2))
+ W = np.exp(2.0 * 1j * np.pi * f * t)
+ W *= np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
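Per frequency this builds a complex sinusoid under a Gaussian envelope;
with the default sigma=None the envelope width is tied to the frequency as
sigma_t = n_cycles / (2 * pi * f). A stand-alone sketch for one 10 Hz
wavelet:

    import numpy as np
    from math import sqrt

    Fs, f, n_cycles = 1000.0, 10.0, 7
    sigma_t = n_cycles / (2.0 * np.pi * f)
    t = np.arange(0, 5 * sigma_t, 1.0 / Fs)
    t = np.r_[-t[::-1], t[1:]]                    # symmetric around t = 0
    W = np.exp(2.0 * 1j * np.pi * f * t)          # complex carrier
    W *= np.exp(-t ** 2 / (2.0 * sigma_t ** 2))   # Gaussian envelope
    W /= sqrt(0.5) * np.linalg.norm(W.ravel())    # same normalization as above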
@@ -82,7 +82,7 @@ def _cwt_fft(X, Ws, mode="same"):
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
- fsize = 2**np.ceil(np.log2(size))
+ fsize = 2 ** np.ceil(np.log2(size))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
@@ -168,6 +168,7 @@ def cwt_morlet(X, Fs, freqs, use_fft=True, n_cycles=7.0):
return tfrs
+
def cwt(X, Ws, use_fft=True, mode='same'):
"""Compute time freq decomposition with continuous wavelet transform
@@ -210,8 +211,8 @@ def _time_frequency(X, Ws, use_fft):
"""
n_epochs, n_times = X.shape
n_frequencies = len(Ws)
- psd = np.zeros((n_frequencies, n_times)) # PSD
- plf = np.zeros((n_frequencies, n_times), dtype=np.complex) # phase lock
+ psd = np.zeros((n_frequencies, n_times)) # PSD
+ plf = np.zeros((n_frequencies, n_times), dtype=np.complex) # phase lock
mode = 'same'
if use_fft:
@@ -221,7 +222,7 @@ def _time_frequency(X, Ws, use_fft):
for tfr in tfrs:
tfr_abs = np.abs(tfr)
- psd += tfr_abs**2
+ psd += tfr_abs ** 2
plf += tfr / tfr_abs
return psd, plf
@@ -234,7 +235,7 @@ def single_trial_power(epochs, Fs, frequencies, use_fft=True, n_cycles=7,
Parameters
----------
- epochs : instance of Epochs | array of shape [n_epochs x n_channels x n_times]
+ epochs : instance Epochs | array of shape [n_epochs, n_channels, n_times]
The epochs
Fs : float
Sampling rate
@@ -290,12 +291,12 @@ def single_trial_power(epochs, Fs, frequencies, use_fft=True, n_cycles=7,
dtype=np.float)
if n_jobs == 1:
for k, e in enumerate(epochs):
- power[k] = np.abs(cwt(e, Ws, mode))**2
+ power[k] = np.abs(cwt(e, Ws, mode)) ** 2
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, Ws, use_fft, mode) for e in epochs)
for k, tfr in enumerate(tfrs):
- power[k] = np.abs(tfr)**2
+ power[k] = np.abs(tfr) ** 2
# Run baseline correction
if baseline is not None:
@@ -312,12 +313,12 @@ def single_trial_power(epochs, Fs, frequencies, use_fft=True, n_cycles=7,
imax = len(times)
else:
imax = int(np.where(times <= bmax)[0][-1]) + 1
- mean_baseline_power = np.mean(power[:,:,:,imin:imax], axis=3)
+ mean_baseline_power = np.mean(power[:, :, :, imin:imax], axis=3)
if baseline_mode is 'ratio':
- power /= mean_baseline_power[:,:,:,None]
+ power /= mean_baseline_power[:, :, :, None]
elif baseline_mode is 'zscore':
- power -= mean_baseline_power[:,:,:,None]
- power /= np.std(power[:,:,:,imin:imax], axis=3)[:,:,:,None]
+ power -= mean_baseline_power[:, :, :, None]
+ power /= np.std(power[:, :, :, imin:imax], axis=3)[:, :, :, None]
else:
print "No baseline correction applied..."
@@ -377,20 +378,20 @@ def induced_power(epochs, Fs, frequencies, use_fft=True, n_cycles=7,
plf = np.empty((n_channels, n_frequencies, n_times), dtype=np.complex)
for c in range(n_channels):
- X = np.squeeze(epochs[:,c,:])
+ X = np.squeeze(epochs[:, c, :])
psd[c], plf[c] = _time_frequency(X, Ws, use_fft)
else:
from joblib import Parallel, delayed
psd_plf = Parallel(n_jobs=n_jobs)(
delayed(_time_frequency)(
- np.squeeze(epochs[:,c,:]), Ws, use_fft)
+ np.squeeze(epochs[:, c, :]), Ws, use_fft)
for c in range(n_channels))
psd = np.zeros((n_channels, n_frequencies, n_times))
plf = np.zeros((n_channels, n_frequencies, n_times), dtype=np.complex)
for c, (psd_c, plf_c) in enumerate(psd_plf):
- psd[c,:,:], plf[c,:,:] = psd_c, plf_c
+ psd[c, :, :], plf[c, :, :] = psd_c, plf_c
psd /= n_epochs
plf = np.abs(plf) / n_epochs
diff --git a/mne/viz.py b/mne/viz.py
index 7b3bc04..6354089 100755
--- a/mne/viz.py
+++ b/mne/viz.py
@@ -22,7 +22,7 @@ def plot_topo(evoked, layout):
if name in ch_names:
idx = ch_names.index(name)
ax = pl.axes(layout.pos[idx], axisbg='k')
- ax.plot(times, data[idx,:], 'w')
+ ax.plot(times, data[idx, :], 'w')
pl.xticks([], ())
pl.yticks([], ())
@@ -53,18 +53,18 @@ def plot_evoked(evoked, picks=None, unit=True):
channel_types.append(t)
counter = 1
- times = 1e3 * evoked.times # time in milliseconds
+ times = 1e3 * evoked.times  # time in milliseconds
for t, scaling, name, ch_unit in zip(['eeg', 'grad', 'mag'],
[1e6, 1e13, 1e15],
['EEG', 'Gradiometers', 'Magnetometers'],
['uV', 'fT/cm', 'fT']):
if unit is False:
scaling = 1.0
- ch_unit = 'NA' # no unit
+ ch_unit = 'NA' # no unit
idx = [picks[i] for i in range(len(picks)) if types[i] is t]
if len(idx) > 0:
pl.subplot(n_channel_types, 1, counter)
- pl.plot(times, scaling*evoked.data[idx,:].T)
+ pl.plot(times, scaling * evoked.data[idx, :].T)
pl.title(name)
pl.xlabel('time (ms)')
counter += 1
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-mne.git