[med-svn] [python-mne] 302/353: ENH : add better debiasing as suggested by Daniel
Yaroslav Halchenko
debian@onerussian.com
Fri Nov 27 17:25:20 UTC 2015
This is an automated email from the git hooks/post-receive script.
yoh pushed a commit to tag 0.4
in repository python-mne.
commit 903a20c14ab0aa99a56217fc02cdd3c2333c9be3
Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
Date: Wed Jul 18 09:43:38 2012 +0200
ENH : add better debiasing as suggested by Daniel
---
mne/mixed_norm/debiasing.py | 116 +++++++++++++++++++++++++++++++++
mne/mixed_norm/optim.py | 9 ++-
mne/mixed_norm/tests/test_debiasing.py | 22 +++++++
3 files changed, 142 insertions(+), 5 deletions(-)
diff --git a/mne/mixed_norm/debiasing.py b/mne/mixed_norm/debiasing.py
new file mode 100755
index 0000000..e962a4e
--- /dev/null
+++ b/mne/mixed_norm/debiasing.py
@@ -0,0 +1,116 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
+#          Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from math import sqrt
+import numpy as np
+from scipy import linalg
+
+from ..utils import check_random_state
+
+
+def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0):
+ """Find the largest singular value for the matrix kron(C.T, A)
+
+ It uses power iterations.
+
+ Parameters
+ ----------
+ A : array
+ An array
+ C : array
+ An array
+ max_iter : int
+ Maximum number of iterations
+ random_state : int | RandomState | None
+ Random state for random number generation
+
+ Returns
+ -------
+ L : float
+ largest singular value
+ """
+    AS_size = C.shape[0]
+    rng = check_random_state(random_state)
+    B = rng.randn(AS_size, AS_size)
+    B /= linalg.norm(B, 'fro')
+    ATA = np.dot(A.T, A)
+    CCT = np.dot(C, C.T)
+    L0 = np.inf
+    for _ in range(max_iter):
+        Y = np.dot(np.dot(ATA, B), CCT)
+        L = linalg.norm(Y, 'fro')
+
+        if abs(L - L0) < tol:
+            break
+
+        B = Y / L
+        L0 = L
+    return L
+
+
+def compute_bias(M, G, X, max_iter=1000, tol=1e-4, n_orient=1):
+ """Compute scaling to correct amplitude bias
+
+ It solves the following optimization problem using FISTA:
+
+ min 1/2 * (|| M - GDX ||fro)^2
+ s.t. D >= 0 and D is a diagonal matrix
+
+ Parameters
+ ----------
+ M : array
+ measurement data
+ G : array
+ leadfield matrix
+ X : array
+ reconstructed time courses with amplitude bias
+ max_iter : int
+ Maximum number of iterations
+ tol : float
+ The tolerance on convergence
+ n_orient : int
+ The number of orientations (1 for fixed and 3 otherwise)
+
+ Returns
+ -------
+ D : array
+ Debiasing weights
+ """
+    n_sources = X.shape[0]
+
+    lipschitz_constant = 1.1 * power_iteration_kron(G, X)
+
+    # initializations
+    D = np.ones(n_sources)
+    Y = np.ones(n_sources)
+    t = 1.0
+
+    for i in range(max_iter):
+        D0 = D
+
+        # gradient step
+        R = M - np.dot(G * Y, X)
+        D = Y + np.sum(np.dot(G.T, R) * X, axis=1) / lipschitz_constant
+        # Equivalent but faster than:
+        # D = Y + np.diag(np.dot(np.dot(G.T, R), X.T)) / lipschitz_constant
+
+        # prox, i.e. projection on the constraint set
+        if n_orient != 1:  # take care of orientations
+            # The scaling has to be the same for all orientations
+            D = np.mean(D.reshape(-1, n_orient), axis=1)
+            D = np.tile(D, [n_orient, 1]).T.ravel()
+        D = np.maximum(D, 0.0)
+
+        # FISTA momentum update (Y is fully overwritten here)
+        t0 = t
+        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
+        dt = (t0 - 1.0) / t
+        Y = D + dt * (D - D0)
+        if linalg.norm(D - D0, np.inf) < tol:
+            print("Debiasing converged after %d iterations" % i)
+            break
+    else:
+        print("Debiasing did not converge")
+    return D
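For intuition about what power_iteration_kron returns: the update Y = ATA * B * CCT is a power iteration with the operator kron(C * C.T, A.T * A), so L converges to that operator's largest eigenvalue, i.e. the squared largest singular value of kron(C.T, A); scaled by 1.1 in compute_bias, it serves as a conservative Lipschitz constant for the FISTA gradient step. A minimal standalone check, not part of the commit, with sizes chosen arbitrarily:

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    A = rng.randn(10, 4)
    C = rng.randn(4, 20)

    # Reference value from the explicit (small) Kronecker product.
    s_max = linalg.svdvals(np.kron(C.T, A))[0]

    # Power iteration, mirroring power_iteration_kron above.
    B = rng.randn(C.shape[0], C.shape[0])
    B /= linalg.norm(B, 'fro')
    ATA, CCT = np.dot(A.T, A), np.dot(C, C.T)
    L0 = np.inf
    for _ in range(1000):
        Y = np.dot(np.dot(ATA, B), CCT)
        L = linalg.norm(Y, 'fro')
        if abs(L - L0) < 1e-12:
            break
        B, L0 = Y / L, L

    print(L, s_max ** 2)  # the two numbers should agree closely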
diff --git a/mne/mixed_norm/optim.py b/mne/mixed_norm/optim.py
index efe5ac3..147c114 100644
--- a/mne/mixed_norm/optim.py
+++ b/mne/mixed_norm/optim.py
@@ -6,6 +6,8 @@ from math import sqrt
 import numpy as np
 from scipy import linalg
 
+from .debiasing import compute_bias
+
 
 def groups_norm2(A, n_orient):
     """compute squared L2 norms of groups inplace"""
@@ -295,10 +297,7 @@ def mixed_norm_solver(M, G, alpha, maxit=200, tol=1e-8, verbose=True,
                                       n_orient=n_orient)
 
     if (active_set.sum() > 0) and debias:
-        # Run ordinary least square on active set
-        GX = np.dot(G[:, active_set], X)
-        k, _, _, _ = linalg.lstsq(GX.reshape(-1, 1), M.reshape(-1, 1))
-        X *= k
-        GX *= k
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
 
     return X, active_set, E
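For context on this optim.py change: the removed code rescaled all active sources by a single least-squares scalar k, whereas compute_bias fits one nonnegative weight per source (shared across orientations). A small standalone sketch, not from the repository, of why one global scalar is not enough:

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    G = rng.randn(10, 2)
    X_true = rng.randn(2, 20)
    # Two sources whose amplitudes are shrunk by different factors.
    X_biased = X_true / np.array([2.0, 5.0])[:, np.newaxis]
    M = np.dot(G, X_true)

    # Old approach: one global scalar fitted by least squares.
    GX = np.dot(G, X_biased)
    k, _, _, _ = linalg.lstsq(GX.reshape(-1, 1), M.reshape(-1, 1))
    print(k.ravel())  # a single compromise value, neither 2 nor 5

    # Per-source weights, as compute_bias estimates, can instead recover
    # both the factor 2 and the factor 5 at once.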
diff --git a/mne/mixed_norm/tests/test_debiasing.py b/mne/mixed_norm/tests/test_debiasing.py
new file mode 100755
index 0000000..6ecda6d
--- /dev/null
+++ b/mne/mixed_norm/tests/test_debiasing.py
@@ -0,0 +1,22 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
+#          Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+
+from ..debiasing import compute_bias
+
+
+def test_compute_debiasing():
+    """Test source amplitude debiasing"""
+    rng = np.random.RandomState(42)
+    G = rng.randn(10, 4)
+    X = rng.randn(4, 20)
+    debias_true = np.arange(1, 5, dtype=float)
+    M = np.dot(G, X * debias_true[:, np.newaxis])
+    debias = compute_bias(M, G, X, max_iter=10000, n_orient=1, tol=1e-7)
+    assert_almost_equal(debias, debias_true, decimal=5)
+    debias = compute_bias(M, G, X, max_iter=10000, n_orient=2, tol=1e-5)
+    assert_almost_equal(debias, [1.8, 1.8, 3.72, 3.72], decimal=2)
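A note on the n_orient=2 expectation above: when the weight is shared inside each pair, the optimum is not simply the mean of the true factors [1, 2] and [3, 4] but the least-squares compromise per pair. Assuming the nonnegativity constraint is inactive at the optimum, the expected values can be cross-checked with a plain two-unknown least-squares fit (standalone sketch, not part of the commit):

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(42)
    G = rng.randn(10, 4)
    X = rng.randn(4, 20)
    debias_true = np.arange(1, 5, dtype=float)
    M = np.dot(G, X * debias_true[:, np.newaxis])

    # One regressor per pair of sources: M ~ d1 * P1 + d2 * P2.
    P1 = np.dot(G[:, :2], X[:2]).ravel()
    P2 = np.dot(G[:, 2:], X[2:]).ravel()
    d, _, _, _ = linalg.lstsq(np.c_[P1, P2], M.ravel())
    print(d)  # expected to land near [1.8, 3.72]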