[pymvpa] searchlight analysis

Yaroslav Halchenko debian at onerussian.com
Fri Sep 8 16:26:59 UTC 2017


On Fri, 08 Sep 2017, Pegah Kassraian Fard wrote:




> from glob import glob
> import os
> import numpy as np

> from mvpa2.suite import *

> %matplotlib inline


> # enable debug output for searchlight call
> if __debug__:
>     debug.active += ["SLC"]


> # change working directory to 'WB'
> os.chdir('mypath/WB')

> # use glob to get the filenames of .nii data into a list
> nii_fns = glob('beta*.nii')

glob order is not guaranteed (it is filesystem-dependent), so I would sort the list to make the order deterministic:

nii_fns = sorted(glob('beta*.nii'))

> # read data

> labels = [
>     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
>     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
>     7, 7, 7, 7, 7, 7, 7, 
>     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
>     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
>     7, 7, 7, 7, 7, 7, 7
> ]

and make sure that above order of the volumes correspond to those labels

> grps = np.repeat([0, 1], 37, axis=0) # used for `chunks`

> db = mvpa2.datasets.mri.fmri_dataset(
>     nii_fns, targets=labels, chunks=grps, mask=None, sprefix='vxl', tprefix='tpref', add_fa=None
> )

> # use only the samples of which labels are 1 or 2
> db12 = db[np.array([label in [1, 2] for label in labels], dtype='bool')]

more concise way

db12 = db.select(sadict={'targets': [1,2]})

> # in-place z-score normalization
> zscore(db12)

> # choose classifier
> clf = LinearNuSVMC()

> # setup measure to be computed by Searchlight
> # cross-validated mean transfer using an N-fold dataset splitter
> cv = CrossValidation(clf, NFoldPartitioner())

> # define searchlight methods
> radius_ = 1
> sl = sphere_searchlight(
>     cv, radius=radius_, space='vxl_indices',
>     postproc=mean_sample()
> )

> # stripping all attributes from the dataset that are not required for the searchlight analysis
> db12s = db12.copy(
>     deep=False,
>     sa=['targets', 'chunks'],
>     fa=['vxl_indices'],
>     a=['mapper']
> )

> # run searchlight
> sl_map = sl(db12s)

> # toggle between error rate (searchlight output) and accuracy (for plotting)
> sl_map.samples *= -1
> sl_map.samples += 1

> # The result dataset is fully aware of the original dataspace. 
> # Using this information we can map the 1D accuracy maps back into “brain-space” (using NIfTI image header information from the original input timeseries).
> niftiresults = map2nifti(sl_map, imghdr=db12.a.imghdr)

> # find the 3-d coordinates of extrema of error rate or accuracy
> extremum_i = np.argmax(sl_map.samples[0]) # max
> extremum_i = np.argmin(sl_map.samples[0]) # min

note that above you overwrite the same extremum_i with the 'min' result, so what you end up plotting is the location of *minimal* accuracy ;) !!!

> coord = db12s.fa[list(db.fa.keys())[0]][extremum_i]

.keys() order is also not guaranteed... why not specify
'vxl_indices' explicitly?

> # plotting
> plot_args = {
> #     'background' : 'Subject2_Struct.nii',
> #     'background_mask' : 'brain.nii.gz',
> #     'overlay_mask' : 'S1_mask.nii',
>     'do_stretch_colors' : False,
>     'cmap_bg' : 'gray',
>     'cmap_overlay' : 'autumn', # YlOrRd_r # pl.cm.autumn
>     'interactive' : cfg.getboolean('examples', 'interactive', True)
> }
> fig = pl.figure(figsize=(12, 4), facecolor='white')
> subfig = plot_lightbox(
>     overlay=niftiresults,
>     vlim=(0.5, None), slices=range(23,31),
>     fig=fig, 
>     background='Subject2_Struct.nii',
> #     background_mask='brain.nii.gz',
> #     overlay_mask='S1_mask.nii',
>     **plot_args
> )
> pl.title('Accuracy distribution for radius %i' % radius_)







-- 
Yaroslav O. Halchenko
Center for Open Neuroscience     http://centerforopenneuroscience.org
Dartmouth College, 419 Moore Hall, Hinman Box 6207, Hanover, NH 03755
Phone: +1 (603) 646-9834                       Fax: +1 (603) 646-1419
WWW:   http://www.linkedin.com/in/yarik        



More information about the Pkg-ExpPsy-PyMVPA mailing list