[med-svn] [pycorrfit] 02/09: Imported Upstream version 0.9.1+dfsg

Alex Mestiashvili malex-guest at moszumanska.debian.org
Mon Oct 26 15:09:37 UTC 2015


This is an automated email from the git hooks/post-receive script.

malex-guest pushed a commit to branch master
in repository pycorrfit.

commit 2367df30d0e0e05c9c12347c8515b1b8a9287561
Author: Alexandre Mestiashvili <alex at biotec.tu-dresden.de>
Date:   Thu Oct 22 16:31:10 2015 +0200

    Imported Upstream version 0.9.1+dfsg
---
 ChangeLog.txt                                      |   36 +-
 doc/PyCorrFit_doc_content.tex                      |   28 +-
 .../CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id          |    1 +
 pycorrfit/__init__.py                              |    1 +
 pycorrfit/doc.py                                   |    2 -
 pycorrfit/edclasses.py                             |   34 +-
 pycorrfit/fcs_data_set.py                          | 1218 ++++++++++++++++++++
 pycorrfit/fitting.py                               |  440 -------
 pycorrfit/frontend.py                              |  305 +++--
 pycorrfit/models/MODEL_TIRF_1C.py                  |    2 +-
 pycorrfit/models/MODEL_TIRF_3D2Dkin_Ries.py        |    8 +-
 pycorrfit/models/MODEL_TIRF_gaussian_1C.py         |    4 +-
 pycorrfit/models/MODEL_TIRF_gaussian_3D2D.py       |    4 +-
 pycorrfit/models/MODEL_TIRF_gaussian_3D3D.py       |    2 +-
 pycorrfit/models/MODEL_classic_gaussian_2D.py      |    4 +-
 pycorrfit/models/MODEL_classic_gaussian_3D.py      |    6 +-
 pycorrfit/models/MODEL_classic_gaussian_3D2D.py    |    8 +-
 pycorrfit/models/__init__.py                       |   69 +-
 pycorrfit/openfile.py                              |  118 +-
 pycorrfit/page.py                                  |  921 +++++++--------
 pycorrfit/plotting.py                              |  207 ++--
 pycorrfit/readfiles/read_ASC_ALV.py                |  221 +++-
 pycorrfit/readfiles/read_CSV_PyCorrFit.py          |   65 +-
 pycorrfit/tools/average.py                         |   71 +-
 pycorrfit/tools/background.py                      |  118 +-
 pycorrfit/tools/batchcontrol.py                    |   12 +-
 pycorrfit/tools/comment.py                         |   32 +-
 pycorrfit/tools/datarange.py                       |   57 +-
 pycorrfit/tools/globalfit.py                       |  215 +---
 pycorrfit/tools/info.py                            |  213 ++--
 pycorrfit/tools/overlaycurves.py                   |   58 +-
 pycorrfit/tools/parmrange.py                       |   20 +-
 pycorrfit/tools/statistics.py                      |  261 +++--
 pycorrfit/tools/trace.py                           |   37 +-
 pycorrfit/usermodel.py                             |    6 +-
 setup.py                                           |   22 +-
 tests/README.md                                    |   21 +
 tests/test_fit_models.py                           |  127 ++
 tests/test_global_fit.py                           |   63 +
 tests/test_simple.py                               |   64 +
 40 files changed, 3153 insertions(+), 1948 deletions(-)

diff --git a/ChangeLog.txt b/ChangeLog.txt
index aef12fe..5b9dabd 100644
--- a/ChangeLog.txt
+++ b/ChangeLog.txt
@@ -1,5 +1,39 @@
+0.9.1
+- Tool 'Overlay curves': improve UI (#117)
+- Tool 'Statistics view': improve UI (#113)
+- Tool 'Trace view': display countrate (#121)
+- Bugfixes:
+  - Unicode errors in statistics tool (#131)
+  - Load session errors with empty pages
+0.9.0
+- Improve parameter display (#52, #114)
+- Display Chi2 on each page (#115)
+- The displayed Chi2-value for non-weighted fits is now
+  normalized to the expected values of the fit. The
+  documentation has been updated accordingly.
+- Add "All files" option in save dialogs (#97)
+- Improved plot export dialog (#99)
+0.8.9
+- Improved support for "ALV-7004" files (#104)
+- Increase resolution for image export
+- Load weights from PyCorrFit csv files 
+- Tool 'Overlay Curves': show cropped correlation curves
+- Tool 'Trace view': increase size of window (#93)
+- Tool 'Global fitting': remove forced, joint weights
+- Session comment dialog: more intuitive behavior (#116)
+- Improve plot export (#95)
+- Bugfixes:
+   - Weighted fits at borders of fit interval were
+     computed incorrectly due to integer division
+   - Fitting algorithms did not work (#94)
+   - Creating averages did not work (#123)
+   - ASCII errors in statistics tool (#112)
+- Under the hood:
+   - Introduce new classes: Correlation, Fit, Trace
+   - Code cleanup and rewrite to support planned features
+   - In some cases support older versions of NumPy
 0.8.8
-- Improved support for "ALV-7004" files.
+- Improved support for "ALV-7004" files
 - If you install the GUI with pip, you now need to include
   the `GUI` requirement: `pip install pycorrfit[GUI]`.
   The GUI depends on matplotlib and wxPython which is not
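
The 0.8.9 bugfix above, "weighted fits at borders of fit interval were
computed incorrectly due to integer division", refers to a classic Python 2
pitfall. A minimal sketch of the failure mode with illustrative numbers
(`reference` and `dividor` are the variable names used in the new
fcs_data_set.py below):

    # Python 2: "/" truncates when both operands are ints, so a
    # border-correction factor silently becomes 1 instead of ~1.17.
    reference = 7             # full window size, 2*weight_spread + 1
    dividor = 6               # bins actually available at the border
    print(reference / dividor)        # 1   (integer division, wrong)
    print(1.0 * reference / dividor)  # 1.1666... (intended factor)

The new fcs_data_set.py sidesteps this with
`from __future__ import division`.
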
diff --git a/doc/PyCorrFit_doc_content.tex b/doc/PyCorrFit_doc_content.tex
index 3c67f7f..a7b071d 100755
--- a/doc/PyCorrFit_doc_content.tex
+++ b/doc/PyCorrFit_doc_content.tex
@@ -696,11 +696,35 @@ The minimum distance $\chi^2$ is used to characterize the success of a fit. Note
 
 \subsubsection{Weighted fitting}
 \label{sec:theor.weigh}
-In certain cases, it is useful to implement weights (standard deviation) $\sigma_i$ for the calculation of $\chi^2$. For example, very noisy parts of a correlation curve can falsify the resulting fit. In \textit{PyCorrFit}, weighting is implemented as follows:
+In certain cases, it is useful to perform weighted fitting with a known variance $\sigma_i^2$ at the data points $\tau_i$. In \textit{PyCorrFit}, weighted fitting is implemented as follows:
 \begin{equation}
 \chi^2_\mathrm{weighted} = \min_{\alpha_1, \dots, \alpha_k} \sum_{i=1}^n  \frac{\left[ G(\tau_i,\alpha_1, \dots, \alpha_k) - H(\tau_i) \right]^2}{\sigma_i^2}
 \end{equation}
-\textit{PyCorrFit} is able to calculate the weights $\sigma_i$ from the experimental data. The different approaches of this calculation of weights implemented in \textit{PyCorrFit} are explained in \hyref{Section}{sec:intro.graph}.
+
+Besides importing the variance alongside experimental data, \textit{PyCorrFit} is able to estimate the variance from the experimental data via several different approaches. A recommended approach is averaging over several curves. Other approaches such as estimation of the variance from spline fits or from the model function (see \hyref{Section}{sec:intro.graph}) cannot be considered unbiased. 
+Note that when performing global fits (see \hyref{Section}{sec:menub.tools.globa}), different types of weights for different correlation curves can strongly influence the result of the fit. In particular, mixing curves with and without weights will most likely result in unphysical fits.
+
+\subsubsection{Displayed $\chi^2$ values}
+The displayed value of $\chi^2$ depends on the type of fit performed. It is normalized by the degrees of freedom $\nu = N - n - 1$, where $N$ is the number of observations (data points) and $n$ is the number of fitting parameters.
+\begin{itemize}
+
+\item \textbf{reduced expected sum of squares}: This value is used when no variance is available; the squared deviations are normalized by the expected (model) values.
+\begin{equation}
+\chi^2_\mathrm{red,exp} = \frac{1}{\nu}\sum_{i=1}^N \frac{\left[ G(\tau_i,\alpha_\mathrm{min}) - H(\tau_i) \right]^2}{G(\tau_i,\alpha_\mathrm{min})}
+\end{equation}
+
+\item \textbf{reduced weighted sum of squares}: This value is used when the fit was performed with variances $\sigma_i^2$.
+\begin{equation}
+\chi^2_\mathrm{red,weight} = \frac{1}{\nu}\sum_{i=1}^N \frac{\left[ G(\tau_i,\alpha_\mathrm{min}) - H(\tau_i) \right]^2}{\sigma_i^2}
+\end{equation}
+
+\item \textbf{reduced global sum of squares}: This value is used for global fits. The weights are computed as in the weighted case, except that the variance $\sigma_\textrm{glob}^2$ may result in non-physical weighting (hence the emphasis on global).
+\begin{equation}
+\chi^2_\mathrm{red,glob} = \frac{1}{\nu}\sum_{i=1}^N \frac{\left[ G(\tau_i,\alpha_\mathrm{min}) - H(\tau_i) \right]^2}{\sigma_\textrm{glob}^2}
+\end{equation}
+
+\end{itemize}
+
 
 \subsubsection{Algorithms}
 \label{sec:theor.alg}
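
In NumPy terms, the displayed values defined above are one-liners; the
following minimal sketch mirrors what `Fit.chi_squared` in the new
pycorrfit/fcs_data_set.py computes, using synthetic stand-ins for the model
G(tau_i, alpha_min), the data H(tau_i), and the standard deviations sigma_i
(the global variant uses the same formula with sigma_glob):

    import numpy as np

    tau = 10**np.linspace(-6, 1, 200)
    fitted = 1.0/(1.0 + tau/1e-3)              # G(tau_i, alpha_min)
    y = fitted + 0.01*np.random.randn(200)     # H(tau_i)
    sigma = np.full(200, 0.01)                 # sigma_i

    n_free = 3                                 # free fit parameters
    nu = len(y) - n_free - 1                   # degrees of freedom

    chi2_exp = np.sum((fitted - y)**2/np.abs(fitted))/nu  # no variance
    chi2_weight = np.sum((fitted - y)**2/sigma**2)/nu     # weighted fit
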
diff --git a/examples/sample_sessions/CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id b/examples/sample_sessions/CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id
new file mode 100644
index 0000000..9a57f40
--- /dev/null
+++ b/examples/sample_sessions/CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id
@@ -0,0 +1 @@
+2a7849e9f8ef288a92575788e773684d7db1d8e9
\ No newline at end of file
diff --git a/pycorrfit/__init__.py b/pycorrfit/__init__.py
index cf8204a..8116907 100644
--- a/pycorrfit/__init__.py
+++ b/pycorrfit/__init__.py
@@ -8,6 +8,7 @@ from . import doc
 from . import models
 from . import openfile
 from . import readfiles
+from . import fcs_data_set
 
 from .main import Main
 
diff --git a/pycorrfit/doc.py b/pycorrfit/doc.py
index 1de9cc0..e9e1e43 100755
--- a/pycorrfit/doc.py
+++ b/pycorrfit/doc.py
@@ -31,8 +31,6 @@ import os
 import platform
 import scipy
 
-
-
 try:
     import sympy
 except ImportError:
diff --git a/pycorrfit/edclasses.py b/pycorrfit/edclasses.py
index 7a1ad07..c6752f7 100644
--- a/pycorrfit/edclasses.py
+++ b/pycorrfit/edclasses.py
@@ -23,35 +23,11 @@ with warnings.catch_warnings():
     except:
         pass
 
-
-import numpy as np
 import sys
 import traceback
-from wx.lib.agw import floatspin        # Float numbers in spin fields
 import wx 
 
 
-class FloatSpin(floatspin.FloatSpin):
-    def __init__(self, parent, digits=10, increment=.01):
-        floatspin.FloatSpin.__init__(self, parent, digits=digits,
-                                     increment = increment)
-        self.Bind(wx.EVT_SPINCTRL, self.increment)
-        #self.Bind(wx.EVT_SPIN, self.increment)
-        #self.increment()
-
-
-    def increment(self, event=None):
-        # Find significant digit
-        # and use it as the new increment
-        x = self.GetValue()
-        if x == 0:
-            incre = 0.1
-        else:
-            digit = int(np.ceil(np.log10(abs(x)))) - 2
-            incre = 10**digit
-        self.SetIncrement(incre)
-
-
 class ChoicesDialog(wx.Dialog):
     def __init__(self, parent, dropdownlist, title, text):
         # parent is main frame
@@ -107,7 +83,7 @@ def save_figure(self, evt=None):
         Page = self.canvas.HACK_Page
         add = self.canvas.HACK_append
         dirname = parent.dirname
-        filename = Page.tabtitle.GetValue().strip()+Page.counter[:2]+add
+        filename = self.canvas.get_window_title().replace(" ", "_").lower()+add
         formats = fig.canvas.get_supported_filetypes()
     except:
         dirname = "."
@@ -129,12 +105,8 @@ def save_figure(self, evt=None):
     if dlg.ShowModal() == wx.ID_OK:
         wildcard = keys[dlg.GetFilterIndex()]
         filename = dlg.GetPath()
-        haswc = False
-        for key in keys:
-            if filename.lower().endswith("."+key) is True:
-                haswc = True
-        if haswc == False:
-            filename = filename+"."+wildcard
+        if not filename.endswith(wildcard):
+            filename += "."+wildcard
         dirname = dlg.GetDirectory()
         #savename = os.path.join(dirname, filename)
         savename = filename
diff --git a/pycorrfit/fcs_data_set.py b/pycorrfit/fcs_data_set.py
new file mode 100644
index 0000000..0fe981b
--- /dev/null
+++ b/pycorrfit/fcs_data_set.py
@@ -0,0 +1,1218 @@
+# -*- coding: utf-8 -*-
+""" PyCorrFit data set
+
+Classes for FCS data evaluation.
+"""
+from __future__ import print_function, division
+
+import hashlib
+import numpy as np
+import scipy.interpolate as spintp
+import scipy.optimize as spopt
+import warnings
+
+from . import models as mdls
+from . import plotting
+
+class Trace(object):
+    """ unifies trace handling
+    """
+    def __init__(self, trace=None, countrate=None, duration=None,
+                 name=None):
+        """ Load trace data
+        
+        Parameters
+        ----------
+        trace : ndarray of shape (N, 2)
+            The array contains time [ms] and count rate [kHz].
+        countrate : float
+            Average count rate [kHz].
+            Mandatory if `trace` is None. 
+        duration : float
+            Duration of measurement in milliseconds.
+            Mandatory if `trace` is None.
+        name : str
+            The name of the trace.
+        """
+        self._countrate = None
+        self._duration = None
+        self._trace = None
+        self._uid = None
+        
+        if trace is None:
+            self.countrate = countrate
+            self.duration = duration
+        else:
+            self.trace = trace
+        
+        if name is None:
+            name = "{:.2f}kHz, {:.0f}s".format(self.countrate,
+                                               self.duration/1000)
+        self.name = name
+    
+    def __getitem__(self, idx):
+        return self.trace[idx]
+    
+    def __repr__(self):
+        text = "Trace of length {:.3f}s and countrate {:.3f}kHz".format(
+                self.duration/1000, self.countrate)
+        return text
+    
+    @property
+    def countrate(self):
+        if self._countrate is None:
+            self._countrate = np.average(self._trace[:,1])
+        return self._countrate
+    
+    @countrate.setter
+    def countrate(self, value):
+        assert value is not None, "Cannot set countrate to None!"
+        assert self._trace is None, "Cannot set countrate "+\
+                                    "if `self.trace` is set."
+        self._countrate = value
+
+    @property
+    def duration(self):
+        if not hasattr(self, "_duration") or self._duration is None:
+            self._duration = self._trace[-1,0] - self._trace[0,0]
+        return self._duration
+    
+    @duration.setter
+    def duration(self, value):
+        assert value is not None, "Cannot set duration to None!"
+        assert self._trace is None, "Cannot set duration "+\
+                                    "if `self.trace` is set."
+        self._duration = value
+    
+    @property
+    def uid(self):
+        if self._uid is None:
+            hasher = hashlib.sha256()
+            hasher.update(str(np.random.random()))
+            hasher.update(str(self.trace))
+            hasher.update(self.name)
+            self._uid = hasher.hexdigest()
+        return self._uid
+    
+    @property
+    def trace(self):
+        if self._trace is None:
+            self._trace = np.array([ [0,             self.countrate],
+                                     [self.duration, self.countrate] 
+                                    ])
+        return self._trace
+    
+    @trace.setter
+    def trace(self, value):
+        assert value is not None, "Setting value with None forbidden!"
+        assert isinstance(value, np.ndarray), "value must be array!"
+        assert value.shape[1] == 2, "shape of array must be (N,2)!"
+        self._trace = value
+        # countrate and duration are derived lazily from the trace
+
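+# Example: Trace(countrate=10.0, duration=30000.0) describes a 30 s
+# measurement at an average of 10 kHz; accessing `.trace` then
+# synthesizes the constant two-point array [[0, 10], [30000, 10]].
+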
+
+class Correlation(object):
+    """ unifies correlation curve handling
+    """
+    def __init__(self, backgrounds=[], correlation=None, corr_type="AC", 
+                 filename=None, fit_algorithm="Lev-Mar",
+                 fit_model=6000, fit_ival=(0,0),
+                 fit_weight_data=None, fit_weight_type="none", 
+                 normparm=None, title=None, traces=[], verbose=1):
+        """
+        Parameters
+        ----------
+        backgrounds: list of instances of Trace
+            background traces
+        correlation: ndarray of shape (N,2)
+            correlation data (time [s], correlation)
+        corr_type: str
+            type of correlation, e.g. "AC", "AC1", "cc12"
+        filename: str
+            path to filename of correlation
+        fit_algorithm: str
+            valid fit algorithm identifier (Algorithms.keys())
+        fit_ival: tuple of int
+            fitting interval of lag times given as indices
+        fit_model: instance of FitModel
+            the model used for fitting
+        fit_weight_data: any
+            data for the given fit_weight_type
+        fit_weight_type: str
+            Reserved keywords or user-defined strings:
+             - "none" : no weights are used
+             - "splineX" : compute weights from spline with X knots
+                   and a spread of `fit_weight_data` bins.
+             - "model function" : compute weights from difference
+                   to model function
+             - user-defined : other weights (e.g. previously computed 
+                   averages given in fit_weight_data)
+        normparm: int
+            identifier of normalization parameter
+        title: str
+            user-editable title of this correlation
+        traces: list of instances of Trace
+            traces of the current correlation
+        verbose : int
+            increase verbosity by incrementing this number
+        """
+        # must be created before setting properties
+        self._backgrounds = []
+        self._correlation = None
+        self._fit_algorithm = None   
+        self._fit_model = None
+        self._fit_parameters = None
+        self._fit_parameters_range = None
+        self._fit_parameters_variable = None
+        self._fit_weight_memory = dict()
+        self._lag_time = None
+        self._model_memory = dict()
+        self._traces = []
+        self._uid = None
+
+        self.verbose = verbose
+
+        self.backgrounds = backgrounds
+        self.bg_correction_enabled = True
+        self.correlation = correlation
+        self.corr_type = corr_type
+        self.filename = filename
+        
+        self.fit_algorithm = fit_algorithm
+        self.fit_ival = fit_ival
+        self.fit_model = fit_model
+        # Do not change order:
+        self.fit_weight_type = fit_weight_type
+        self.fit_weight_data = fit_weight_data
+    
+        # lock prevents any changes to the parameters
+        self.lock_parameters = False
+        self.normparm = normparm
+        self.title = title
+        self.traces = traces
+
+    def __repr__(self):
+        if self.is_ac:
+            c = "AC"
+        else:
+            c = "CC"
+        text = "{} correlation '{}' with {} traces".format(
+                c, self.title, len(self._traces))
+        return text
+
+
+    def background_replace(self, channel, background):
+        """
+        Replace a background.
+        Channel must be 0 or 1.
+        background must be instance of `Trace`
+        """
+        assert channel in [0, 1]
+        assert isinstance(background, Trace)
+        
+        if self.is_ac:
+            if channel == 1:
+                raise ValueError("Cannot set second background for AC.")
+            self._backgrounds = [background]
+        else:
+            if len(self._backgrounds) == 0:
+                self._backgrounds = [Trace(countrate=0, duration=0), Trace(countrate=0, duration=0)]
+            elif len(self._backgrounds) == 1:
+                self._backgrounds.append(Trace(countrate=0, duration=0))
+            self._backgrounds[channel] = background
+
+    @property
+    def backgrounds(self):
+        """
+        The background trace(s) of this correlation in a list.
+        """
+        return self._backgrounds
+    
+    @backgrounds.setter
+    def backgrounds(self, value):
+        """
+        Set the backgrounds. The value may be a list of Trace
+        instances or ndarrays, or a single trace as an ndarray.
+        """
+        backgrounds = list()
+        if not isinstance(value, list):
+            value = [value]
+        assert len(value) in [0,1,2], "Backgrounds must be list with up to two elements."
+        for v in value:
+            if isinstance(v, np.ndarray):
+                backgrounds.append(Trace(trace=v))
+            elif isinstance(v, Trace):
+                backgrounds.append(v)
+            else:
+                raise ValueError("Each background must be instance of Trace or ndarray")
+        self._backgrounds = backgrounds
+
+
+    @property
+    def bg_correction_factor(self):
+        """
+        Returns background correction factor for
+        self._correlation
+        
+        Notes
+        -----
+        Thompson, N. Lakowicz, J.;
+        Geddes, C. D. & Lakowicz, J. R. (ed.)
+        Fluorescence Correlation Spectroscopy
+        Topics in Fluorescence Spectroscopy,
+        Springer US, 2002, 1, 337-378
+        """
+        if not self.bg_correction_enabled:
+            # bg correction disabled
+            return 1
+
+        if self.is_ac:
+            # Autocorrelation
+            if len(self.traces) == 1 and len(self.backgrounds) == 1:
+                S = self.traces[0].countrate
+                B = self.backgrounds[0].countrate
+                bgfactor = (S/(S-B))**2
+            else:
+                if self.verbose >= 1:
+                    warnings.warn("Correlation {}: no bg-correction".
+                                  format(self.uid))
+                bgfactor = 1
+        else:
+            # Crosscorrelation
+            if len(self.traces) == 2 and len(self.backgrounds) == 2:
+                S = self.traces[0].countrate
+                S2 = self.traces[1].countrate
+                B = self.backgrounds[0].countrate
+                B2 = self.backgrounds[1].countrate
+                bgfactor = (S/(S-B)) * (S2/(S2-B2))
+            else:
+                warnings.warn("Correlation {}: no bg-correction".
+                              format(self))
+                bgfactor = 1
+        return bgfactor
+
+    def check_parms(self, parms):
+        """ Check parameters using self.fit_model.func_verification and the user defined
+            boundaries self.fit_parameters_range for each parameter.
+        """
+        p = 1.*np.array(parms)
+        p = self.fit_model.func_verification(p)
+        r = self.fit_parameters_range
+        # TODO:
+        # - add potentials such that parameters don't stick to boundaries
+        for i in range(len(p)):
+            if r[i][0] == r[i][1]:
+                pass
+            elif p[i] < r[i][0]:
+                p[i] = r[i][0]
+            elif p[i] > r[i][1]:
+                p[i] = r[i][1]
+        return p
+
+    @property
+    def correlation(self):
+        """the correlation data, shape (N,2) with (time, correlation) """
+        if self._correlation is not None:
+            corr = self._correlation.copy()
+            return corr
+    
+    @correlation.setter
+    def correlation(self, value):
+        if value is None:
+            warnings.warn("Setting correlation to `None`.")
+        elif not isinstance(value, np.ndarray):
+            raise ValueError("Correlation must be 2d array!")
+        elif not value.shape[1] == 2:
+            raise ValueError("Correlation array must have shape (N,2)!")
+        self._correlation = value
+
+    @property
+    def correlation_fit(self):
+        """ returns correlation data for fitting (fit_ivald)
+        - background correction
+        - fitting interval cropping
+        """
+        corr = self.correlation
+        if corr is not None:
+            # perform background correction
+            corr[:,1] *= self.bg_correction_factor
+            # crop to the fitting interval
+            return corr[self.fit_ival[0]:self.fit_ival[1],:]
+    
+    @property
+    def correlation_plot(self):
+        """ returns correlation data for plotting (normalized, fit_ivald)
+        - background correction
+        - fitting interval cropping
+        - parameter normalization
+        """
+        corr = self.correlation_fit
+        if corr is not None:
+            # perform parameter normalization
+            corr[:,1] *= self.normalize_factor
+            return corr
+    
+    
+    @property
+    def is_ac(self):
+        """True if instance contains autocorrelation"""
+        return self.corr_type.lower().count("ac") > 0
+
+    @property
+    def is_cc(self):
+        """True if instance contains crosscorrelation"""
+        return not self.is_ac
+
+    @property
+    def is_weighted_fit(self):
+        """True if a weighted fit was performed"""
+        return self.fit_weight_type != "none"
+
+    @property
+    def fit_algorithm(self):
+        """The string representing the fitting algorithm"""
+        return self._fit_algorithm
+
+    @fit_algorithm.setter
+    def fit_algorithm(self, value):
+        # TODO:
+        # - allow lower-case fitting algorithm
+        assert value in list(Algorithms.keys()), "Invalid fit algorithm: "+value
+        self._fit_algorithm = value
+
+    @property
+    def fit_model(self):
+        """instance of a fit model"""
+        return self._fit_model
+
+    @fit_model.setter
+    def fit_model(self, value):
+        """set the fit model
+        """
+        if isinstance(value, (int, long)):
+            newmodel = mdls.modeldict[value]
+        elif isinstance(value, mdls.Model):
+            newmodel = value
+        else:
+            raise NotImplementedError("Unknown model identifier")
+        
+        if newmodel != self._fit_model:
+            self._fit_model = newmodel
+            # overwrite fitting parameters
+            self._fit_parameters = self._fit_model.default_values
+            self._fit_parameters_variable = self._fit_model.default_variables
+            self._fit_parameters_range = np.zeros((len(self._fit_parameters), 2))
+            self.normparm = None
+
+    @property
+    def fit_ival(self):
+        """lag time interval for fitting"""
+        lag = self.lag_time
+        if lag is not None:
+            if self._fit_ival[1] <= 0 or self._fit_ival[1] > lag.shape[0]:
+                self._fit_ival[1] = lag.shape[0]
+        return self._fit_ival
+    
+    @fit_ival.setter
+    def fit_ival(self, value):
+        value = list(value)
+        if value[1] <= 0:
+            if self.lag_time is not None:
+                value[1] = self.lag_time.shape[0]
+            else:
+                # no data yet: warn and use a huge sentinel index
+                warnings.warn("No data available.")
+                value[1] = 10**16
+        self._fit_ival = value
+
+    @property
+    def fit_weight_data(self):
+        """data of weighted fitting"""
+        try:
+            data = self._fit_weight_memory[self.fit_weight_type]
+        except KeyError:
+            # Standard variables for weights
+            if self.fit_weight_type.count("spline"):
+                # Default area for weighting with spline fit
+                data = 3
+            else:
+                data = None
+        return data
+
+    @fit_weight_data.setter
+    def fit_weight_data(self, value):
+        self._fit_weight_memory[self.fit_weight_type] = value
+
+    @property
+    def fit_parameters(self):
+        """parameters that were fitted/will be used for fitting"""
+        return self._fit_parameters
+
+    @fit_parameters.setter
+    def fit_parameters(self, value):
+        # must unlock parameters, if change is required
+        value = np.array(value)
+        if not self.lock_parameters:
+            self._fit_parameters = self.check_parms(value)
+        else:
+            warnings.warn("Correlation {}: fixed parameters unchanged.".
+                          format(self.uid))
+
+    @property
+    def fit_parameters_range(self):
+        """valid fitting ranges for fit parameters"""
+        return self._fit_parameters_range
+
+    @fit_parameters_range.setter
+    def fit_parameters_range(self, value):
+        value = np.array(value)
+        assert value.shape[1] == 2
+        assert value.shape[0] == self.fit_parameters.shape[0]
+        self._fit_parameters_range = value
+
+    @property
+    def fit_parameters_variable(self):
+        """which parameters are variable during fitting"""
+        if self._fit_parameters_variable is None:
+            self._fit_parameters_variable = np.array(self.fit_model.default_variables, dtype=bool)
+        return self._fit_parameters_variable
+
+    @fit_parameters_variable.setter
+    def fit_parameters_variable(self, value):
+        value = np.array(value, dtype=bool)
+        assert value.shape[0] == self.fit_parameters.shape[0]
+        self._fit_parameters_variable = value
+
+    @property
+    def lag_time(self):
+        """logarithmic lag time axis"""
+        if self.correlation is not None:
+            return self._correlation[:,0]
+        elif self._lag_time is not None:
+            return self._lag_time
+        else:
+            # some default lag time
+            return 10**np.linspace(-6,8,1001)
+
+    @lag_time.setter
+    def lag_time(self, value):
+        if self.correlation is not None:
+            warnings.warn("Setting lag time not possible, because of existing correlation")
+        else:
+            self._lag_time = value
+
+    @property
+    def lag_time_fit(self):
+        """lag time as used for fitting"""
+        return self.lag_time[self.fit_ival[0]:self.fit_ival[1]]
+
+    @property
+    def modeled(self):
+        """fitted data values, same shape as self.correlation"""
+        # perform parameter normalization
+        lag = self.lag_time
+        modeled = np.zeros((lag.shape[0], 2))
+        modeled[:,0] = lag
+        modeled[:,1] = self.fit_model(self.fit_parameters, lag)
+        return modeled.copy()
+
+    @property
+    def modeled_fit(self):
+        """fitted data values, same shape as self.correlation_fit"""
+        toplot = self.modeled[self.fit_ival[0]:self.fit_ival[1], :]
+        return toplot
+
+    @property
+    def modeled_plot(self):
+        """fitted data values, same shape as self.correlation_fit"""
+        toplot = self.modeled_fit
+        toplot[:,1] *= self.normalize_factor
+        return toplot
+
+    @property
+    def normalize_factor(self):
+        """plot normalization according to self.normparm"""
+        if self.normparm is None:
+            # nothing to do
+            return 1
+        
+        if self.normparm < self.fit_parameters.shape[0]:
+            nfactor = self.fit_parameters[self.normparm]
+        else:
+            # get supplementary parameters
+            alt = self.fit_model.get_supplementary_values(self.fit_parameters)
+            nfactor = alt[self.normparm - self.fit_parameters.shape[0]]
+        
+        return nfactor
+
+    @property
+    def residuals(self):
+        """fit residuals, same shape as self.correlation"""
+        if self.correlation is None:
+            raise ValueError("Cannot compute residuals; No correlation given!") 
+        residuals = self.correlation.copy()
+        residuals[:,1] -= self.modeled[:,1]
+        return residuals 
+    
+    @property
+    def residuals_fit(self):
+        """fit residuals, same shape as self.correlation_fit"""
+        residuals_fit = self.correlation_fit.copy()
+        residuals_fit[:,1] -= self.modeled_fit[:,1]
+        return residuals_fit
+
+    @property
+    def residuals_plot(self):
+        """fit residuals, same shape as self.correlation_fit"""
+        cp = self.correlation_plot
+        if cp is not None:
+            residuals_plot = self.correlation_plot.copy()
+            residuals_plot[:,1] -= self.modeled_plot[:,1]
+            return residuals_plot
+
+    def set_weights(self, type_name, data):
+        """
+        Add weights for fitting.
+        Example:
+        type_name : "Average"
+        data : 1d ndarray of the same length as self.lag_time
+        """
+        if data is not None:
+            self._fit_weight_memory[type_name] = data
+
+    @property
+    def traces(self):
+        """
+        The trace(s) of this correlation in a list.
+        """
+        return self._traces
+    
+    @traces.setter
+    def traces(self, value):
+        """
+        Set the traces. The value may be a list of Trace
+        instances or ndarrays, or a single trace as an ndarray.
+        """
+        traces = list()
+        if not isinstance(value, list):
+            value = [value]
+        assert len(value) in [0,1,2], "Traces must be list with up to two elements."
+        for v in value:
+            if isinstance(v, np.ndarray):
+                traces.append(Trace(trace=v))
+            elif isinstance(v, Trace):
+                traces.append(v)
+            else:
+                raise ValueError("Each trace must be instance of Trace or ndarray")
+        self._traces = traces
+        
+        if len(self._traces) == 2:
+            if self._traces[0].duration != self._traces[1].duration:
+                warnings.warn("Unequal lenght of traces: {} and {}".format(
+                              self._traces[0].duration,
+                              self._traces[1].duration))
+
+    @property
+    def uid(self):
+        """
+        unique identifier of this instance, computed once from
+        the correlation data, filename, and title
+        """
+        if self._uid is None:
+            hasher = hashlib.sha256()
+            hasher.update(str(np.random.random()))
+            hasher.update(str(self._correlation))
+            hasher.update(str(self.filename))
+            hasher.update(str(self.title))
+            self._uid = hasher.hexdigest()
+        return self._uid
+
+
+class Fit(object):
+    """ Used for fitting FCS data to models.
+    """
+    def __init__(self, correlations=[], global_fit=False,
+                 global_fit_variables=[],
+                 uselatex=False, verbose=0):
+        """ Using an FCS model, fit the data of shape (N,2).
+
+
+        Parameters
+        ----------
+        correlations: list of instances of Correlation
+            Correlations to fit.
+        global_fit : bool
+            Perform global fit. The default behavior is
+            to fit all parameters that are selected for
+            fitting in each correlation. Parameters with
+            the same name in different models are treated
+            as one global parameter. 
+        global_fit_variables: list of list of strings
+            Each item contains a list of strings that are names
+            of parameters which will be treated as a common
+            parameter. This overrides the default behavior.
+            NOT IMPLEMENTED YET!
+        verbose: int
+            Increase verbosity by incrementing this number.
+        uselatex: bool
+            If verbose > 0, plotting will be performed with LaTeX.
+        """
+        assert len(global_fit_variables)==0, "not implemented"
+        
+        if isinstance(correlations, Correlation):
+            correlations = [correlations]
+        
+        self.correlations = correlations
+        self.global_fit_variables = global_fit_variables
+        self.verbose = verbose
+        self.uselatex = uselatex
+        self.is_weighted_fit = False
+        
+        if not global_fit:
+            # Fit each correlation separately
+            for corr in self.correlations:
+                # Set fitting options
+                self.fit_algorithm = corr.fit_algorithm
+                # Get the data required for fitting
+                self.x = corr.correlation_fit[:,0]
+                self.y = corr.correlation_fit[:,1]
+                # fit_bool: True for variable
+                self.fit_bool = corr.fit_parameters_variable.copy()
+                self.fit_parm = corr.fit_parameters.copy()
+                self.is_weighted_fit = corr.is_weighted_fit
+                self.fit_weights = Fit.compute_weights(corr,
+                                                       verbose=verbose,
+                                                       uselatex=uselatex)
+                self.func = corr.fit_model.function
+                self.check_parms = corr.check_parms
+                # Directly perform the fit and set the "fit" attribute
+                self.minimize()
+                # update correlation model parameters
+                corr.fit_parameters = self.fit_parm
+                # save fit instance in correlation class
+                corr.fit_results = self.get_fit_results(corr)
+        else:
+            # TODO:
+            # - allow detaching of parameters,
+            #   i.e. fitting "n" separately for two models
+            # Initiate all arrays
+            self.fit_algorithm = self.correlations[0].fit_algorithm
+            xtemp = list()      # x
+            ytemp = list()      # y
+            weights = list()    # weights
+            ids = [0]           # ids in big fitting array
+            cmodels = list()    # correlation model info
+            initpar = list()    # initial parameters
+            varin = list()      # names of variable fitting parameters
+            variv = list()      # values of variable fitting parameters
+            varmap = list()     # list of indices of fitted parameters
+            self.is_weighted_fit = None
+            for corr in self.correlations:
+                xtemp.append(corr.correlation_fit[:,0])
+                ytemp.append(corr.correlation_fit[:,1])
+                weights.append(Fit.compute_weights(corr))
+                ids.append(len(xtemp[-1])+ids[-1])
+                cmodels.append(corr.fit_model)
+                initpar.append(corr.fit_parameters)
+                # Create list of variable parameters
+                varthis = list()
+                for ipm, par in enumerate(corr.fit_model.parameters[0]):
+                    if corr.fit_parameters_variable[ipm]:
+                        varthis.append(ipm)
+                        varin.append(par)
+                        variv.append(corr.fit_parameters[ipm])
+                varmap.append(varthis)
+
+            # These are the variable fitting parameters
+            __, varidx = np.unique(varin, return_index=True)
+            varidx.sort()
+            varin = np.array(varin)[varidx]
+            variv = np.array(variv)[varidx]
+            
+            self.x = np.concatenate(xtemp)
+            self.y = np.concatenate(ytemp)
+            self.fit_bool = np.ones(len(variv), dtype=bool)
+            self.fit_parm = variv
+            self.fit_weights = np.concatenate(weights)
+            self.fit_parm_names = varin
+            
+            
+            def parameters_global_to_local(parameters, iicorr, varin=varin,
+                                          initpar=initpar,
+                                          correlations=correlations):
+                """
+                With global `parameters` and an id `iicorr` pointing at
+                the correlation in `self.correlations`, return the
+                updated parameters of the corresponding model.
+                """
+                fit_parm = initpar[iicorr].copy()
+                corr = correlations[iicorr]
+                mod = corr.fit_model
+                for kk, pn in enumerate(mod.parameters[0]):
+                    if pn in varin:
+                        # edit that parameter
+                        fit_parm[kk] = parameters[np.where(np.array(varin)==pn)[0]]
+                return fit_parm
+            
+            def parameters_local_to_global(parameters, iicorr, fit_parm,
+                                           varin=varin,
+                                           correlations=correlations):
+                """
+                inverse of parameters_global_to_local
+                """
+                corr = correlations[iicorr]
+                mod = corr.fit_model
+                for kk, pn in enumerate(mod.parameters[0]):
+                    if pn in varin:
+                        # edit that parameter
+                        parameters[np.where(np.array(varin)==pn)[0]] = fit_parm[kk]
+                return parameters
+            
+            # Create function for fitting using ids
+            def global_func(parameters, tau,
+                            glob2loc=parameters_global_to_local):
+                out = list()
+                # ids start at 0
+                for ii, mod in enumerate(cmodels):
+                    # Update parameters
+                    fit_parm = glob2loc(parameters, ii)
+                    # return function
+                    out.append(mod.function(fit_parm, tau[ids[ii]:ids[ii+1]]))
+                return np.concatenate(out)
+
+            self.func = global_func
+            
+            # Create function for checking
+            def global_check_parms(parameters,
+                                   glob2loc=parameters_global_to_local,
+                                   loc2glob=parameters_local_to_global):
+
+                for ii, corr in enumerate(self.correlations):
+                    # create new initpar
+                    fit_parm = glob2loc(parameters, ii)
+                    fit_parm = corr.check_parms(fit_parm)
+                    # update parameters
+                    parameters = loc2glob(parameters, ii, fit_parm)
+
+                return parameters
+            
+            self.check_parms = global_check_parms
+
+            # Directly perform the fit and set the "fit" attribute
+            self.minimize()
+            # Update correlations
+            for ii, corr in enumerate(self.correlations):
+                # write new model parameters
+                corr.fit_parameters = parameters_global_to_local(self.fit_parm,
+                                                                 ii)
+                # save fit instance in correlation class
+                corr.fit_results = self.get_fit_results(corr)
+
+
+    def get_fit_results(self, correlation):
+        """
+        Return a dictionary with all information about the performed fit.
+        
+        This function must be called immediately after `self.minimize`.
+        """
+        c = correlation
+        d = {
+             "chi2" : self.chi_squared,
+             "chi2 type" : self.chi_squared_type,
+             "weighted fit" : c.is_weighted_fit,
+             "fit algorithm" : c.fit_algorithm,
+             "fit result" : c.fit_parameters.copy(),
+             "fit parameters" : np.where(c.fit_parameters_variable)[0],
+             "fit weights" : self.compute_weights(c)
+             }
+        
+        
+        if c.is_weighted_fit:
+            d["weighted fit type"] = c.fit_weight_type
+            if isinstance(c.fit_weight_data, (int, float)):
+                d["weighted fit bins"] = c.fit_weight_data
+
+        if d["fit algorithm"] == "Lev-Mar" and self.parmoptim_error is not None:
+            d["fit error estimation"] = self.parmoptim_error
+        
+        
+        return d
+        
+
+    @property
+    def chi_squared(self):
+        """ Calculate displayed Chi²
+        
+            Calculate reduced Chi² for the current class.
+        """
+        # Calculate degrees of freedom
+        dof = len(self.x) - np.sum(self.fit_bool) - 1
+        # This is exactly what is minimized by the scalar minimizers
+        chi2 = self.fit_function_scalar(self.fit_parm[self.fit_bool], self.x)
+        if self.chi_squared_type == "reduced expected sum of squares":
+            fitted = self.func(self.fit_parm, self.x)
+            chi2 = np.sum((self.y-fitted)**2/np.abs(fitted)) / dof
+        elif self.chi_squared_type == "reduced weighted sum of squares":
+            fitted = self.func(self.fit_parm, self.x)
+            variance = self.fit_weights**2
+            chi2 = np.sum((self.y-fitted)**2/variance) / dof
+        elif self.chi_squared_type == "reduced global sum of squares":
+            fitted = self.func(self.fit_parm, self.x)
+            variance = self.fit_weights**2
+            chi2 = np.sum((self.y-fitted)**2/variance) / dof
+        return chi2
+
+
+    @property
+    def chi_squared_type(self):
+        """ The type of Chi² that currently applies.
+        
+        Returns
+        -------
+        "reduced" - if variance of data was used for fitting
+        "reduced Pearson" - if variance of data is not available
+        """
+        if self.is_weighted_fit is None:
+            # global fitting
+            return "reduced global sum of squares"
+        elif self.is_weighted_fit == True:
+            return "reduced weighted sum of squares"
+        elif self.is_weighted_fit == False:
+            return "reduced expected sum of squares"
+        else:
+            raise ValueError("Unknown weight type!")
+
+
+    @staticmethod
+    def compute_weights(correlation, verbose=0, uselatex=False):
+        """ computes and returns weights of the same length as 
+        `correlation.correlation_fit`
+        
+        `correlation` is an instance of Correlation
+        """
+        corr = correlation
+        model = corr.fit_model
+        model_parms = corr.fit_parameters
+        ival = corr.fit_ival
+        weight_data = corr.fit_weight_data
+        weight_type = corr.fit_weight_type
+        #parameters = corr.fit_parameters
+        #parameters_range = corr.fit_parameters_range
+        #parameters_variable = corr.fit_parameters_variable
+        
+        cdat = corr.correlation
+        if cdat is None:
+            raise ValueError("Cannot compute weights; No correlation given!")
+        cdatfit = corr.correlation_fit
+        x_full = cdat[:,0]
+        y_full = cdat[:,1]
+        x_fit = cdatfit[:,0]
+        #y_fit = cdatfit[:,1]
+        
+        dataweights = np.ones_like(x_fit)
+
+        try:
+            weight_spread = int(weight_data)
+        except:
+            if verbose > 1:
+                warnings.warn("Could not get weight spread for spline. Setting it to 3.")
+            weight_spread = 3
+
+        if weight_type[:6] == "spline":
+            # Number of knots to use for spline
+            try:
+                knotnumber = int(weight_type[6:])
+            except:
+                if verbose > 1:
+                    print("Could not get knot number. Setting it to 5.")
+                knotnumber = 5
+            
+            # Compute borders for spline fit.
+            if ival[0] < weight_spread:
+                # optimal case
+                pmin = ival[0]
+            else:
+                # non-optimal case
+                # we need to cut pmin
+                pmin = weight_spread
+            if x_full.shape[0] - ival[1] < weight_spread:
+                # optimal case
+                pmax = x_full.shape[0] - ival[1]
+            else:
+                # non-optimal case
+                # we need to cut pmax
+                pmax = weight_spread
+
+            x = x_full[ival[0]-pmin:ival[1]+pmax]
+            y = y_full[ival[0]-pmin:ival[1]+pmax]
+            # we are fitting knots on a base 10 logarithmic scale.
+            xs = np.log10(x)
+            knots = np.linspace(xs[1], xs[-1], knotnumber+2)[1:-1]
+            try:
+                tck = spintp.splrep(xs, y, s=0, k=3, t=knots, task=-1)
+                ys = spintp.splev(xs, tck, der=0)
+            except:
+                if verbose > 0:
+                    raise ValueError("Could not find spline fit with "+\
+                                     "{} knots.".format(knotnumber))
+                return
+            if verbose > 0:
+                try:
+                    # If plotting module is available:
+                    name = "spline fit: "+str(knotnumber)+" knots"
+                    plotting.savePlotSingle(name, 1*x, 1*y, 1*ys,
+                                             dirname=".",
+                                             uselatex=uselatex)
+                except:
+                    # use matplotlib.pylab
+                    try:
+                        from matplotlib import pylab as plt
+                        plt.xscale("log")
+                        plt.plot(x, ys, x, y)
+                        plt.show()
+                    except ImportError:
+                        # Tell the user to install matplotlib
+                        print("Couldn't import pylab! - not Plotting")
+
+            ## Calculation of variance
+            # In some cases, the actual cropping interval from ival[0]
+            # to ival[1] is chosen, such that the dataweights must be
+            # calculated from unknown datapoints.
+            # (e.g. points+endcrop > len(correlation)
+            # We deal with this by multiplying dataweights with a factor
+            # corresponding to the missed points.
+            for i in range(x_fit.shape[0]):
+                # Define start and end positions of the sections from
+                # where we wish to calculate the dataweights.
+                # Offset at beginning:
+                if  i + ival[0] <  weight_spread:
+                    # The offset that occurs
+                    offsetstart = weight_spread - i - ival[0]
+                    offsetcrop = 0
+                elif ival[0] > weight_spread:
+                    offsetstart = 0
+                    offsetcrop = ival[0] - weight_spread
+                else:
+                    offsetstart = 0
+                    offsetcrop = 0
+                # i: counter on correlation array
+                # start: counter on y array
+                start = i - weight_spread + offsetstart + ival[0] - offsetcrop
+                end = start + 2*weight_spread + 1 - offsetstart
+                dataweights[i] = (y[start:end] - ys[start:end]).std()
+                # The standard deviation at the end and the start of the
+                # array are multiplied by a factor corresponding to the
+                # number of bins that were not used for calculation of the
+                # standard deviation.
+                if offsetstart != 0:
+                    reference = 2*weight_spread + 1
+                    dividor = reference - offsetstart
+                    dataweights[i] *= reference/dividor   
+                # Do not substitute len(y[start:end]) with end-start!
+                # It is not the same!
+                backset =  2*weight_spread + 1 - len(y[start:end]) - offsetstart
+                if backset != 0:
+                    reference = 2*weight_spread + 1
+                    dividor = reference - backset
+                    dataweights[i] *= reference/dividor
+        elif weight_type == "model function":
+            # Number of neighboring (left and right) points to include
+            if ival[0] < weight_spread:
+                pmin = ival[0]
+            else:
+                pmin = weight_spread
+            if x_full.shape[0] - ival[1] <  weight_spread:
+                pmax = x_full.shape[0] - ival[1]
+            else:
+                pmax = weight_spread
+            x = x_full[ival[0]-pmin:ival[1]+pmax]
+            y = y_full[ival[0]-pmin:ival[1]+pmax]
+            # Calculated dataweights
+            for i in np.arange(x_fit.shape[0]):
+                # Define start and end positions of the sections from
+                # where we wish to calculate the dataweights.
+                # Offset at beginning:
+                if  i + ival[0] <  weight_spread:
+                    # The offset that occurs
+                    offsetstart = weight_spread - i - ival[0]
+                    offsetcrop = 0
+                elif ival[0] > weight_spread:
+                    offsetstart = 0
+                    offsetcrop = ival[0] - weight_spread
+                else:
+                    offsetstart = 0
+                    offsetcrop = 0
+                # i: counter on correlation array
+                # start: counter on correlation array
+                start = i - weight_spread + offsetstart + ival[0] - offsetcrop
+                end = start + 2*weight_spread + 1 - offsetstart
+                #start = ival[0] - weight_spread + i
+                #end = ival[0] + weight_spread + i + 1
+                diff = y - model(model_parms, x)
+                dataweights[i] = diff[start:end].std()
+                # The standard deviation at the end and the start of the
+                # array are multiplied by a factor corresponding to the
+                # number of bins that were not used for calculation of the
+                # standard deviation.
+                if offsetstart != 0:
+                    reference = 2*weight_spread + 1
+                    dividor = reference - offsetstart
+                    dataweights[i] *= reference/dividor   
+                # Do not substitute len(diff[start:end]) with end-start!
+                # It is not the same!
+                backset =  2*weight_spread + 1 - len(diff[start:end]) - offsetstart
+                if backset != 0:
+                    reference = 2*weight_spread + 1
+                    dividor = reference - backset
+                    dataweights[i] *= reference/dividor
+        elif weight_type == "none":
+            pass
+        else:
+            # This means that the user knows the dataweights and already
+            # gave it to us.
+            weights = weight_data
+            assert weights is not None, "User defined weights not given: "+weight_type
+            
+            # Check if these other weights have length of the cropped
+            # or the full array.
+            if weights.shape[0] == x_fit.shape[0]:
+                dataweights = weights
+            elif weights.shape[0] == x_full.shape[0]:
+                dataweights = weights[ival[0]:ival[1]]
+            else:
+                raise ValueError("`weights` must have length of "
+                                 "full or cropped array.")
+        
+        return dataweights
+        
+
+    def fit_function(self, parms, x):
+        """ Create the function to be minimized. The old function
+            `function` has more parameters than we need for the fitting.
+            So we use this function to set only the necessary 
+            parameters. Returns what `function` would have done.
+        """
+        # We reorder the needed variables to only use these that are
+        # not fixed for minimization
+        index = 0
+        for i in np.arange(len(self.fit_parm)):
+            if self.fit_bool[i]:
+                self.fit_parm[i] = parms[index]
+                index += 1
+        # Only allow physically correct parameters
+        self.fit_parm = self.check_parms(self.fit_parm)
+        tominimize = (self.func(self.fit_parm, x) - self.y)
+        # Check dataweights for zeros and don't use these
+        # values for the least squares method.
+        with np.errstate(divide='ignore'):
+            tominimize = np.where(self.fit_weights!=0, 
+                                  tominimize/self.fit_weights, 0)
+        ## There might be NaN values because of zero weights:
+        #tominimize = tominimize[~np.isinf(tominimize)]
+        return tominimize
+
+    def fit_function_scalar(self, parms, x):
+        """
+            Wrapper of `fit_function` for scalar minimization methods.
+            Returns the sum of squares of the input data.
+            (Methods that are not "Lev-Mar")
+        """
+        e = self.fit_function(parms, x)
+        return np.sum(e*e)
+
+    def minimize(self):
+        """ This will run the minimization process
+        """
+        assert (np.sum(self.fit_bool) != 0), "No parameter selected for fitting."
+        # Get algorithm
+        algorithm = Algorithms[self.fit_algorithm][0]
+
+        # Begin fitting
+        
+        if self.fit_algorithm == "Lev-Mar":
+            res = algorithm(self.fit_function, self.fit_parm[self.fit_bool],
+                            args=(self.x,), full_output=1)
+        else:
+            disp = self.verbose > 0 # print convergence message
+            res = algorithm(self.fit_function_scalar, self.fit_parm[self.fit_bool],
+                            args=(self.x,), full_output=1, disp=disp)
+
+        # The optimal parameters
+        parmoptim = res[0]
+        # Now write the optimal parameters to our values:
+        index = 0
+        for i in range(len(self.fit_parm)):
+            if self.fit_bool[i]:
+                self.fit_parm[i] = parmoptim[index]
+                index = index + 1
+        # Only allow physically correct parameters
+        self.fit_parm = self.check_parms(self.fit_parm)
+        # Write optimal parameters back to this class.
+        # Must be called after `self.fitparm = ...`
+        chi = self.chi_squared
+        # Compute error estimates for fit (Only "Lev-Mar")
+        if self.fit_algorithm == "Lev-Mar":
+            # This is the standard way to minimize the data. Therefore,
+            # we are a little bit more verbose.
+            if res[4] not in [1,2,3,4]:
+                warnings.warn("Optimal parameters not found: " + res[3])
+            try:
+                self.covar = res[1] * chi # The covariance matrix
+            except:
+                warnings.warn("PyCorrFit Warning: Error estimate not "+\
+                              "possible, because we could not "+\
+                              "calculate covariance matrix. Please "+\
+                              "try reducing the number of fitting "+\
+                              "parameters.")
+                self.parmoptim_error = None
+            else:
+                # Error estimation of fitted parameters
+                if self.covar is not None:
+                    self.parmoptim_error = np.diag(self.covar)
+        else:
+            self.parmoptim_error = None
+
+
+def GetAlgorithmStringList():
+    """
+        Get the supported fitting algorithms.
+        Returns two key-sorted lists: the algorithm keys and their
+        human-readable names.
+    """
+    A = Algorithms
+    out1 = list()
+    out2 = list()
+    a = list(A.keys())
+    a.sort()
+    for key in a:
+        out1.append(key)
+        out2.append(A[key][1])
+    return out1, out2
+    
+
+# As of version 0.8.3, we support several minimization methods for
+# fitting data to experimental curves.
+# These functions must be callable like scipy.optimize.leastsq, e.g.
+# res = spopt.leastsq(self.fit_function, self.fit_parm[self.fit_bool],
+#                     args=(self.x,), full_output=1)
+Algorithms = dict()
+
+# the original one is the least squares fit "leastsq"
+Algorithms["Lev-Mar"] = [spopt.leastsq, 
+           "Levenberg-Marquardt"]
+
+# simplex 
+Algorithms["Nelder-Mead"] = [spopt.fmin,
+           "Nelder-Mead (downhill simplex)"]
+
+# quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno
+Algorithms["BFGS"] = [spopt.fmin_bfgs,
+           "BFGS (quasi-Newton)"]
+
+# modified Powell method
+Algorithms["Powell"] = [spopt.fmin_powell,
+           "modified Powell (conjugate direction)"]
+
+# nonlinear conjugate gradient method by Polak and Ribiere
+Algorithms["Polak-Ribiere"] = [spopt.fmin_cg,
+           "Polak-Ribiere (nonlinear conjugate gradient)"]
diff --git a/pycorrfit/fitting.py b/pycorrfit/fitting.py
deleted file mode 100644
index 97bddf8..0000000
--- a/pycorrfit/fitting.py
+++ /dev/null
@@ -1,440 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-PyCorrFit
-
-Module fitting
-Here are the necessary functions for computing a fit with given parameters.
-See included class "Fit" for more information.
-"""
-
-try:
-    import matplotlib.pyplot as plt
-except:
-    pass
-    
-import numpy as np
-from scipy import interpolate as spintp
-from scipy import optimize as spopt
-
-# If we use this module with PyCorrFit, we can plot things with latex using
-# our own special thing.
-try:
-    from . import plotting
-except:
-    pass
-
-
-class Fit(object):
-    """
-        The class Fit needs the following parameters to perform a fit:
-        check_parms - A function checking the parameters for plausibility.
-        dataexpfull - Full experimental data *array of tuples*
-        function - function to be used for fitting f(parms, x) 
-        interval - interval of dataexpfull to fit in. [a, b]
-        values - starting parameters *parms* for fitting. *array*
-        valuestofit - which parameter to use for fitting. *bool array*
-        weights - no. of datapoints from left and right to use for weighting
-        fittype - type of fit. Can be one of the following
-                  - "None" (standard) - no weights. (*weights* is ignored)
-                  - "splineX" - fit a Xth order spline and calulate standard
-                               deviation from that difference
-                  - "model function" - calculate std. dev. from difference
-                                        of fit function and dataexpfull.
-                  - "other" - use an external std. dev.. The variable
-                              self.external_deviations has to be set before
-                              self.ApplyParameters is called. Cropping with
-                              *interval* is performed here.
-        fit_algorithm - The fitting algorithm to be used for minimization
-                        Have a look at the PyCorrFit documentation for more
-                        information.
-                        - "Lev-Mar" Least squares minimization
-                        - "Nelder-Mead" Simplex
-                        - "BFGS" quasi-Newton method of Broyden,
-                                 Fletcher, Goldfarb and Shanno
-                        - "Powell"
-                        - "Polak-Ribiere"
-    """
-    def __init__(self):
-        """ Initial setting of needed variables via the given *fitset* """   
-        self.check_parms = None
-        self.dataexpfull = None
-        self.function = None
-        self.interval = None
-        # Optionally use LaTeX. This is passed
-        # to each plotting command. Only when plotting
-        # module is available.
-        self.uselatex = False 
-        self.values = None
-        self.valuestofit = None
-
-        self.verbose = False # Verbose mode (shows e.g. spline fit)
-        # The weights (data points from left and right of data array)
-        # must be chosen such that the interval +/- weights does not
-        # exceed self.dataexpfull!
-        self.weights = None
-        # Changing fittype will change calculation of variances=dataweights**2.
-        # None means dataweights is 1.
-        self.fittype = "None"
-        # Chi**2 Value
-        self.chi = None
-        # Messages from fit algorithm
-        self.mesg = None
-        # Optimal parameters found by fit algorithm
-        self.parmoptim = None
-        self.covar = None # covariance matrix 
-        self.parmoptim_error = None # Errors of fit
-        # Variances for fitting
-        self.dataweights = None
-        # External std defined by the user
-        self.external_deviations = None
-        # It is possible to edit tolerance for fitting
-        # ftol, xtol and gtol.
-        # Those parameters could be added to the fitting routine later.
-        # Should we do a weighted fit?
-        # Standard is yes. If there are no weights
-        # (self.fittype not set) then this value becomes False
-        self.weightedfit=True
-        # Set the standard method for minimization
-        self.fit_algorithm = "Lev-Mar"
-        
-
-
-    def ApplyParameters(self):
-        if self.interval is None:
-            self.startcrop = self.endcrop = 0
-        else:
-            [self.startcrop, self.endcrop] = self.interval
-        # Get self.dataexp
-        if self.startcrop == self.endcrop:
-            self.dataexp = 1*self.dataexpfull
-            self.startcrop = 0
-            self.endcrop = len(self.dataexpfull)
-        else:
-            self.dataexp = 1*self.dataexpfull[self.startcrop:self.endcrop]
-            # If startcrop is larger than the length of dataexp,
-            # we will not have an array. Prevent that.
-            if len(self.dataexp) == 0:
-                self.dataexp = 1*self.dataexpfull
-        # Calculate x-values
-        # (Extract tau-values from dataexp)
-        self.x = self.dataexp[:, 0]
-        # Experimental data
-        self.data = self.dataexp[:,1]
-        # Set fit parameters
-        self.fitparms = np.zeros(sum(self.valuestofit))
-        index = 0
-        for i in np.arange(len(self.values)):
-            if self.valuestofit[i]:
-                self.fitparms[index] = np.float(self.values[i])
-                index = index + 1
-        # Assume we have a weighted fit. If this is not the case,
-        # it is changed in the else branch of the following
-        # "if" statement:
-        self.weightedfit=True
-        if self.fittype[:6] == "spline":
-            # Number of knots to use for spline
-            try:
-                knotnumber = int(self.fittype[6:])
-            except:
-                print "Could not get knotnumber. Setting to 5."
-                knotnumber = 5
-            # Number of neighbouring (left and right) points to include
-            points = self.weights
-            # Calculated dataweights
-            datalen = len(self.dataexp[:,1])
-            dataweights = np.zeros(datalen)
-            if self.startcrop < points:
-                pmin = self.startcrop
-            else:
-                pmin = points
-            if len(self.dataexpfull) - self.endcrop <  points:
-                pmax = (len(self.dataexpfull) - self.endcrop)
-            else:
-                pmax = points
-            x = self.dataexpfull[self.startcrop-pmin:self.endcrop+pmax,0]
-            xs = np.log10(x)
-            y = self.dataexpfull[self.startcrop-pmin:self.endcrop+pmax,1]
-            knots = np.linspace(xs[1], xs[-1], knotnumber+2)[1:-1]
-            try:
-                tck = spintp.splrep(xs,y,s=0,k=3,t=knots,task=-1)
-                ys = spintp.splev(xs,tck,der=0)
-            except:
-                print "Could not find spline with "+str(knotnumber)+" knots."
-                return
-            if self.verbose == True:
-                try:
-                    # If plotting module is available:
-                    name = "Spline fit: "+str(knotnumber)+" knots"
-                    plotting.savePlotSingle(name, 1*x, 1*y, 1*ys, dirname = ".",
-                                            uselatex=self.uselatex)
-                except:
-                    try:
-                        plt.xscale("log")
-                        plt.plot(x,ys, x,y)
-                        plt.show()
-                    except ImportError:
-                        # Tell the user to install matplotlib
-                        print "Matplotlib not found!"
-                        
-            ## Calculation of variance
-            # In some cases, the actual cropping interval from self.startcrop to
-            # self.endcrop is chosen, such that the dataweights must be
-            # calculated from unknown datapoints.
-            # (e.g. points+endcrop > len(dataexpfull))
-            # We deal with this by multiplying dataweights with a factor
-            # corresponding to the missed points.
-            for i in np.arange(datalen):
-                # Define start and end positions of the sections from
-                # where we wish to calculate the dataweights.
-                # Offset at beginning:
-                if  i + self.startcrop <  points:
-                    # The offset that occurs
-                    offsetstart = points - i - self.startcrop
-                    offsetcrop = 0
-                elif self.startcrop > points:
-                    offsetstart = 0
-                    offsetcrop = self.startcrop - points
-                else:
-                    offsetstart = 0
-                    offsetcrop = 0
-                # i: counter on dataexp array
-                # start: counter on y array
-                start = i - points + offsetstart + self.startcrop - offsetcrop
-                end = start + 2*points + 1 - offsetstart
-                dataweights[i] = (y[start:end] - ys[start:end]).std()
-                # The standard deviation at the end and the start of the
-                # array are multiplied by a factor corresponding to the
-                # number of bins that were not used for calculation of the
-                # standard deviation.
-                if offsetstart != 0:
-                    reference = 2*points + 1
-                    dividor = reference - offsetstart
-                    dataweights[i] *= reference/dividor   
-                # Do not substitute len(y[start:end]) with end-start!
-                # It is not the same!
-                backset =  2*points + 1 - len(y[start:end]) - offsetstart
-                if backset != 0:
-                    reference = 2*points + 1
-                    dividor = reference - backset
-                    dataweights[i] *= reference/dividor
-        elif self.fittype == "model function":
-            # Number of neighbouring (left and right) points to include
-            points = self.weights
-            if self.startcrop < points:
-                pmin = self.startcrop
-            else:
-                pmin = points
-            if len(self.dataexpfull) - self.endcrop <  points:
-                pmax = (len(self.dataexpfull) - self.endcrop)
-            else:
-                pmax = points
-            x = self.dataexpfull[self.startcrop-pmin:self.endcrop+pmax,0]
-            y = self.dataexpfull[self.startcrop-pmin:self.endcrop+pmax,1]
-            # Calculated dataweights
-            datalen = len(self.dataexp[:,1])
-            dataweights = np.zeros(datalen)
-            for i in np.arange(datalen):
-                # Define start and end positions of the sections from
-                # where we wish to calculate the dataweights.
-                # Offset at beginning:
-                if  i + self.startcrop <  points:
-                    # The offset that occurs
-                    offsetstart = points - i - self.startcrop
-                    offsetcrop = 0
-                elif self.startcrop > points:
-                    offsetstart = 0
-                    offsetcrop = self.startcrop - points
-                else:
-                    offsetstart = 0
-                    offsetcrop = 0
-                # i: counter on dataexp array
-                # start: counter on dataexpfull array
-                start = i - points + offsetstart + self.startcrop - offsetcrop
-                end = start + 2*points + 1 - offsetstart
-                #start = self.startcrop - points + i
-                #end = self.startcrop + points + i + 1
-                diff = y - self.function(self.values, x)
-                dataweights[i] = diff[start:end].std()
-                # The standard deviation at the end and the start of the
-                # array are multiplied by a factor corresponding to the
-                # number of bins that were not used for calculation of the
-                # standard deviation.
-                if offsetstart != 0:
-                    reference = 2*points + 1
-                    dividor = reference - offsetstart
-                    dataweights[i] *= reference/dividor   
-                # Do not substitute len(diff[start:end]) with end-start!
-                # It is not the same!
-                backset =  2*points + 1 - len(diff[start:end]) - offsetstart
-                if backset != 0:
-                    reference = 2*points + 1
-                    dividor = reference - backset
-                    dataweights[i] *= reference/dividor
-        elif self.fittype == "other":
-            # This means that the user knows the dataweights and already
-            # gave them to us.
-            if self.external_deviations is not None:
-                dataweights = \
-                           self.external_deviations[self.startcrop:self.endcrop]
-            else:
-                raise ValueError, \
-                      "self.external_deviations not set for fit type 'other'."
-        else:
-            # The fit.Fit() class will divide the function to minimize
-            # by the dataweights only if we have weights
-            self.weightedfit=False
-            dataweights=None
-        self.dataweights = dataweights
-
-
-    def fit_function(self, parms, x):
-        """ Create the function to be minimized. The old function
-            `function` has more parameters than we need for the fitting.
-            So we use this function to set only the necessary 
-            parameters. Returns what `function` would have done.
-        """
-        # We reorder the needed variables to only use these that are
-        # not fixed for minimization
-        index = 0
-        for i in np.arange(len(self.values)):
-            if self.valuestofit[i]:
-                self.values[i] = parms[index]
-                index = index + 1
-        # Only allow physically correct parameters
-        self.values = self.check_parms(self.values)
-        tominimize = (self.function(self.values, x) - self.data)
-        # Check if we have a weighted fit
-        if self.weightedfit is True:
-            # Check dataweights for zeros and don't use these
-            # values for the least squares method.
-            with np.errstate(divide='ignore'):
-                tominimize = np.where(self.dataweights!=0, 
-                                      tominimize/self.dataweights, 0)
-            ## There might be NaN values because of zero weights:
-            #tominimize = tominimize[~np.isinf(tominimize)]
-        return tominimize
-
-
-    def fit_function_scalar(self, parms, x):
-        """
-            Wrapper of `fit_function` for scalar minimization methods.
-            Returns the sum of squares of the input data.
-            (Methods that are not "Lev-Mar")
-        """
-        e = self.fit_function(parms,x)
-        return np.sum(e*e)
-        
-
-    def get_chi_squared(self):
-        """
-            Calculate Chi² for the current class.
-        """
-        # Calculate degrees of freedom
-        dof = len(self.x) - len(self.parmoptim) - 1
-        # This is exactly what is minimized by the scalar minimizers
-        chi2 = self.fit_function_scalar(self.parmoptim, self.x)
-        return chi2 / dof
-
-
-    def minimize(self):
-        """ This will minimize *self.fit_function()* using least squares.
-            *self.values*: The values with which the function is called.
-            *valuestofit*: A list with bool values that indicate which values
-            should be used for fitting.
-            Function *self.fit_function()* takes two parameters:
-            self.fit_function(parms, x) where *x* are x-values of *dataexp*.
-        """
-        if np.sum(self.valuestofit) == 0:
-            print "No fitting parameters selected."
-            self.valuesoptim = 1*self.values
-            return
-        # Get algorithm
-        algorithm = Algorithms[self.fit_algorithm][0]
-
-        # Begin fitting
-        if self.fit_algorithm == "Lev-Mar":
-            res = algorithm(self.fit_function, self.fitparms[:],
-                            args=(self.x), full_output=1)
-        else:
-            res = algorithm(self.fit_function_scalar, self.fitparms[:],
-                            args=([self.x]), full_output=1)
-
-        # The optimal parameters
-        self.parmoptim = res[0]
-
-        # Now write the optimal parameters to our values:
-        index = 0
-        for i in range(len(self.values)):
-            if self.valuestofit[i]:
-                self.values[i] = self.parmoptim[index]
-                index = index + 1
-        # Only allow physically correct parameters
-        self.values = self.check_parms(self.values)
-        # Write optimal parameters back to this class.
-        self.valuesoptim = 1*self.values # This is actually a redundant copy
-        self.chi = self.get_chi_squared()
-        
-        # Compute error estimates for fit (Only "Lev-Mar")
-        if self.fit_algorithm == "Lev-Mar":
-            # This is the standard way to minimize the data. Therefore,
-            # we are a little bit more verbose.
-            if res[4] not in [1,2,3,4]:
-                print "Optimal parameters not found: " + res[3]
-            try:
-                self.covar = res[1] * self.chi # The covariance matrix
-            except:
-                print "PyCorrFit Warning: Error estimate not possible, because we"
-                print "          could not calculate covariance matrix. Please try"
-                print "          reducing the number of fitting parameters."
-                self.parmoptim_error = None
-            else:
-                # Error estimation of fitted parameters
-                if self.covar is not None:
-                    self.parmoptim_error = np.diag(self.covar)
-        else:
-            self.parmoptim_error = None
-
-
-def GetAlgorithmStringList():
-    """
-        Get supported fitting algorithms as strings.
-        Returns two lists (that are key-sorted) for key and string.
-    """
-    A = Algorithms
-    out1 = list()
-    out2 = list()
-    a = list(A.keys())
-    a.sort()
-    for key in a:
-        out1.append(key)
-        out2.append(A[key][1])
-    return out1, out2
-    
-
-# As of version 0.8.3, we support several minimization methods for
-# fitting data to experimental curves.
-# These functions must be callable like scipy.optimize.leastsq. e.g.
-# res = spopt.leastsq(self.fit_function, self.fitparms[:],
-#                     args=(self.x), full_output=1)
-Algorithms = dict()
-
-# the original one is the least squares fit "leastsq"
-Algorithms["Lev-Mar"] = [spopt.leastsq, 
-           "Levenberg-Marquardt"]
-
-# simplex 
-Algorithms["Nelder-Mead"] = [spopt.fmin,
-           "Nelder-Mead (downhill simplex)"]
-
-# quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno
-Algorithms["BFGS"] = [spopt.fmin_bfgs,
-           "BFGS (quasi-Newton)"]
-
-# modified Powell-method
-Algorithms["Powell"] = [spopt.fmin_powell,
-           "modified Powell (conjugate direction)"]
-
-# nonlinear conjugate gradient method by Polak and Ribiere
-Algorithms["Polak-Ribiere"] = [spopt.fmin_cg,
-           "Polak-Ribiere (nonlinear conjugate gradient)"]
-
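For reference, the ApplyParameters method removed above estimated data
weights as the local standard deviation of the residuals between the
measured curve and a cubic spline fitted on a log10 lag-time axis; the
edge-correction factors in the original compensate for truncated windows
near the cropping boundaries. A simplified sketch of the core idea, with
the boundary handling omitted and an illustrative function name:

    # Simplified sketch of spline-based weight estimation; the edge
    # corrections of the removed code are intentionally left out.
    import numpy as np
    from scipy import interpolate as spintp

    def spline_weights(x, y, points=5, knotnumber=5):
        # x are positive lag times; fit the spline on a log10 axis
        xs = np.log10(x)
        knots = np.linspace(xs[1], xs[-1], knotnumber + 2)[1:-1]
        tck = spintp.splrep(xs, y, s=0, k=3, t=knots, task=-1)
        ys = spintp.splev(xs, tck, der=0)
        resid = y - ys
        weights = np.zeros(len(y))
        for i in range(len(y)):
            # standard deviation over a window of +/- `points` bins
            start = max(0, i - points)
            end = min(len(y), i + points + 1)
            weights[i] = resid[start:end].std()
        return weights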
diff --git a/pycorrfit/frontend.py b/pycorrfit/frontend.py
index aac5592..b57c58e 100644
--- a/pycorrfit/frontend.py
+++ b/pycorrfit/frontend.py
@@ -69,6 +69,12 @@ class FlatNotebookDemo(fnb.FlatNotebook):
 
 
 class MyApp(wx.App):
+    def __init__(self, args):
+        wx.App.__init__(self, args)
+        # Suppress WXDEBUG assertions, which wx2.8 raises by default.
+        # http://anonscm.debian.org/cgit/collab-maint/wx-migration-tools.git/tree/README#n30
+        self.SetAssertMode(wx.PYAPP_ASSERT_SUPPRESS)
+        
     def MacOpenFile(self,filename):
         """
         """
@@ -88,8 +94,6 @@ class MyFrame(wx.Frame):
 
         sys.excepthook = MyExceptionHook
         ## Set initial variables that make sense
-        tau = 10**np.linspace(-6,8,1001)
-
         self.version = version
         wx.Frame.__init__(self, parent, anid, "PyCorrFit " + self.version)
         self.CreateStatusBar() # A Statusbar in the bottom of the window
@@ -120,18 +124,11 @@ class MyFrame(wx.Frame):
         self.value_set = mdls.values
         self.valuedict = mdls.valuedict
 
-        # Some standard time scale
-        # We need this for the functions inside the "FittingPanel"s
-        self.tau = tau 
-
         # Tab Counter
         self.tabcounter = 1
 
         # Background Correction List
-        # Here, each item is a list containing three elements:
-        # [0] average signal [kHz]
-        # [1] signal name (edited by user)
-        # [2] signal trace (tuple) ([ms], [kHz])
+        # Contains instances of `Trace`
         self.Background = list()
 
         # A dictionary for all the opened tool windows
@@ -225,8 +222,7 @@ class MyFrame(wx.Frame):
         active_parms = [active_labels, active_values, active_fitting]
         model = mdls.modeldict[modelid][1]
         # Create New Tab
-        Newtab = page.FittingPanel(self, counter, modelid, active_parms,
-                                   self.tau)
+        Newtab = page.FittingPanel(self, counter, modelid, active_parms)
         #self.Freeze()
         self.notebook.AddPage(Newtab, counter+model, select=select)
         if select:
@@ -705,7 +701,7 @@ class MyFrame(wx.Frame):
 
 
     def OnImportData(self,e=None):
-        """Import experimental data from a all filetypes specified in 
+        """Import experimental data from all filetypes specified in 
            *opf.Filetypes*.
            Is called by the curmenu and applies to currently opened model.
            Calls self.ImportData.
@@ -749,6 +745,12 @@ class MyFrame(wx.Frame):
                 trace = Stuff["Trace"]
                 curvelist = Stuff["Type"]
                 filename = Stuff["Filename"]
+                if "Weight" in Stuff:
+                    Weight = Stuff["Weight"]
+                    WeightName = Stuff["Weight Name"]
+                else:
+                    Weight = [None] * len(Stuff["Type"])
+                    WeightName = [None] * len(Stuff["Type"])
                 # If curvelist is a list with more than one item, we are
                 # importing more than one curve per file. Therefore, we
                 # need to create more pages for this file.
@@ -775,6 +777,8 @@ class MyFrame(wx.Frame):
                     newfilename = list()
                     newdataexp = list()
                     newtrace = list()
+                    newWeight = list()
+                    newWeightName = list()
                     if Chosen.ShowModal() == wx.ID_OK:
                         keys = Chosen.keys
                         if len(keys) == 0:
@@ -787,10 +791,14 @@ class MyFrame(wx.Frame):
                                 newfilename.append(filename[index])
                                 newdataexp.append(dataexp[index])
                                 newtrace.append(trace[index])
+                                newWeight.append(Weight[index])
+                                newWeightName.append(WeightName[index])
                         curvelist = newcurvelist
                         filename = newfilename
                         dataexp = newdataexp
                         trace = newtrace
+                        Weight = newWeight
+                        WeightName = newWeightName
                     else:
                         return
                     Chosen.Destroy()
@@ -810,6 +818,7 @@ class MyFrame(wx.Frame):
                     # Fill Page with data
                     self.ImportData(CurPage, dataexp[i], trace[i],
                                    curvetype=curvelist[i], filename=filename[i],
+                                   weights=Weight[i], weight_type=WeightName[i],
                                    curveid=i)
                     # Let the user abort, if he wants to:
                     # We want to do this here before an empty page is added
@@ -821,7 +830,7 @@ class MyFrame(wx.Frame):
                         # Create new page.
                         # (Add n-1 pages while importing.)
                         CurPage = self.add_fitting_tab(event=None, 
-                                             modelid=CurPage.modelid,
+                                             modelid=CurPage.corr.fit_model.id,
                                              counter=None)
                 # We are finished here:
                 return
@@ -868,7 +877,8 @@ class MyFrame(wx.Frame):
 
 
     def ImportData(self, Page, dataexp, trace, curvetype="",
-                   filename="", curveid="", run="", trigger=None):
+                   filename="", curveid="", run="0", 
+                   weights=None, weight_type=None, trigger=None):
         """
             Import data into the current page.
             
@@ -876,30 +886,17 @@ class MyFrame(wx.Frame):
             submodule `tools`.
         """
         CurPage = Page
+        # Set name of correlation
+        CurPage.corr.filename = filename
         # Import traces. Traces are usually put into a list, even if there
         # is only one trace. The reason is that for cross-correlation we
         # have two traces and thus have to import both.
-        # In case of cross correlation, save that list of (two) traces
-        # in the page.tracecc variable. Else, save the trace for auto-
-        # correlations directly into the page.trace variable. We are
-        # doing this in order to keep data types clean.
-        if curvetype[0:2] == "CC":
-            # For cross correlation, the trace has two components
-            CurPage.SetCorrelationType(True, init=True)
-            CurPage.tracecc = trace
-            CurPage.trace = None
-        else:
-            CurPage.SetCorrelationType(False, init=True)
-            CurPage.tracecc = None
-            if trace is not None:
-                CurPage.trace = trace
-                CurPage.traceavg = trace[:,1].mean()
+        if trace is not None:
+            CurPage.corr.traces = trace
         # Import correlation function
-        CurPage.dataexpfull = dataexp
-        # We need this to be able to work with the data.
-        # It actually does nothing to the data right now.
-        CurPage.startcrop = None
-        CurPage.endcrop = None
+        CurPage.corr.correlation = dataexp
+        CurPage.corr.corr_type = curvetype
+        CurPage.OnAmplitudeCheck()
         # It might be possible, that we want the channels to be
         # fixed to some interval. This is the case if the 
         # checkbox on the "Channel selection" dialog is checked.
@@ -912,8 +909,21 @@ class MyFrame(wx.Frame):
             title = "{} r{:03d}-{}".format(filename, int(run), curvetype)
         else:
             title = "{} id{:03d}-{}".format(filename, int(curveid), curvetype)
-        CurPage.tabtitle.SetValue(title.strip())
+        CurPage.title = title
+        # set weights
+        if weights is not None:
+            CurPage.corr.set_weights(weight_type, weights)
+            List = CurPage.Fitbox[1].GetItems()
+            if not weight_type in List:
+                List.append(weight_type)
+                CurPage.Fitbox[1].SetItems(List)
+                CurPage.Fitbox[1].SetSelection(len(List)-1)
+            else:
+                listid = List.index(weight_type)
+                CurPage.Fitbox[1].SetSelection(listid)
+            
         # Plot everything
+        CurPage.OnAmplitudeCheck()
         CurPage.PlotAll(trigger=trigger)
         # Call this function to allow the "Channel Selection" window that
         # might be open to update itself.
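Note that ImportData forwards `weights` arrays of either length: matching
the cropped fit interval or the full correlation curve, as the
weight-cropping check at the top of this patch shows. A small sketch of
that normalization; the helper name is invented here:

    # Sketch of normalizing user-supplied weights to the fit interval.
    import numpy as np

    def crop_weights(weights, ival, n_full):
        # ival = [a, b] is the fit interval of a curve with n_full points
        weights = np.asarray(weights)
        n_crop = ival[1] - ival[0]
        if weights.shape[0] == n_crop:
            return weights                      # already cropped
        elif weights.shape[0] == n_full:
            return weights[ival[0]:ival[1]]     # crop the full array
        else:
            raise ValueError(
                "`weights` must have length of full or cropped array.")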
@@ -1015,6 +1025,8 @@ class MyFrame(wx.Frame):
         Filename = list()   # there might be zipfiles with additional name info
         #Run = list()        # Run number connecting AC1 AC2 CC12 CC21
         Curveid = list()    # Curve ID of each curve in a file
+        Weight = list()
+        WeightName = list()
         
         # Display a progress dialog for file import
         N = len(Datafiles)
@@ -1046,7 +1058,13 @@ class MyFrame(wx.Frame):
                     Trace.append(Stuff["Trace"][i])
                     Type.append(Stuff["Type"][i])
                     Filename.append(Stuff["Filename"][i])
-                    #Curveid.append(str(i+1))
+                    if "Weight" in Stuff:
+                        Weight.append(Stuff["Weight"][i])
+                        WeightName.append(Stuff["Weight Name"][i])
+                    else:
+                        Weight.append(None)
+                        WeightName.append(None)
+                        #Curveid.append(str(i+1))                    
         dlgi.Destroy()
         
         # Add number of the curve within a file.
@@ -1150,6 +1168,8 @@ class MyFrame(wx.Frame):
         modelList = list()
         newCurveid = list()
         newRun = list()
+        newWeight = list()
+        newWeightName = list()
         if Chosen.ShowModal() == wx.ID_OK:
             keys = Chosen.typekeys
             # Keepdict is a list of indices pointing to Type or Correlation
@@ -1172,12 +1192,16 @@ class MyFrame(wx.Frame):
                         modelList.append(modelids[index])
                         newCurveid.append(Curveid[index])
                         newRun.append(Run[index])
+                        newWeight.append(Weight[index])
+                        newWeightName.append(WeightName[index])
             Correlation = newCorrelation
             Trace = newTrace
             Type = newType
             Filename = newFilename
             Curveid = newCurveid
             Run = newRun
+            Weight = newWeight
+            WeightName = newWeightName
         else:
             return
         Chosen.Destroy()
@@ -1201,6 +1225,7 @@ class MyFrame(wx.Frame):
             self.ImportData(CurPage, Correlation[i], Trace[i],
                             curvetype=Type[i], filename=Filename[i],
                             curveid=str(Curveid[i]), run=str(Run[i]),
+                            weights=Weight[i], weight_type=WeightName[i],
                             trigger="page_add_batch")
             # Let the user abort, if he wants to:
             # We want to do this here before an empty page is added
@@ -1317,63 +1342,85 @@ class MyFrame(wx.Frame):
             number = counter.strip().strip(":").strip("#")
             pageid = int(number)
             dataexp = Infodict["Correlations"][pageid][1]
-            if dataexp is not None:
-                # Write experimental data
-                Newtab.dataexpfull = dataexp
-                Newtab.dataexp = True # not None
+
+            if Infodict["Parameters"][0][7]:
+                curvetype = "cc"
+            else:
+                curvetype = "ac"
+
             # As of 0.7.3: Add external weights to page
             try:
-                Newtab.external_std_weights = \
-                               Infodict["External Weights"][pageid]
+                for key in Infodict["External Weights"][pageid].keys():
+                    Newtab.corr.set_weights(key, Infodict["External Weights"][pageid][key])
             except KeyError:
-                # No data
                 pass
-            else:
-                # Add external weights to fitbox
-                WeightKinds = Newtab.Fitbox[1].GetItems()
-                wkeys = Newtab.external_std_weights.keys()
-                wkeys.sort()
-                for wkey in wkeys:
-                    WeightKinds += [wkey]
-                Newtab.Fitbox[1].SetItems(WeightKinds)
+
+            # Default: no external weights
+            weight_type = None
+            weights = None
+            if len(Infodict["Parameters"][i]) >= 6:
+                if Infodict["Parameters"][i][5][0] >= 3:
+                    # we have a weighted fit with external weights;
+                    # these are usually averages.
+                    keys = sorted(Infodict["External Weights"][pageid].keys())
+                    key = keys[Infodict["Parameters"][i][5][0]-3]
+                    weights = Infodict["External Weights"][pageid][key]
+                    weight_type = key
+
+            self.ImportData(Newtab, 
+                            dataexp, 
+                            trace=Infodict["Traces"][pageid],
+                            curvetype=curvetype,
+                            weights=weights,
+                            weight_type=weight_type)
+           
+            # Set Title of the Page
+            try:
+                Newtab.tabtitle.SetValue(Infodict["Comments"][pageid])
+            except:
+                pass # no page title
+
+            # Parameters
             self.UnpackParameters(Infodict["Parameters"][i], Newtab,
                                   init=True)
             # Supplementary data
+            fit_results = dict()
+            fit_results["weighted fit"] = Infodict["Parameters"][i][5][0] > 0
             try:
                 Sups = Infodict["Supplements"][pageid]
             except KeyError:
                 pass
             else:
-                errdict = dict()
-                for errInfo in Sups["FitErr"]:
-                    errkey = mdls.valuedict[modelid][0][int(errInfo[0])]
-                    errval = float(errInfo[1])
-                    errdict[errkey] = errval
-                Newtab.parmoptim_error = errdict
+                if Sups.has_key("FitErr"):
+                    ervals = list()
+                    for errInfo in Sups["FitErr"]:
+                        ervals.append(float(errInfo[1]))
+                    fit_results["fit error estimation"] = ervals
                 try:
-                    Newtab.GlobalParameterShare = Sups["Global Share"]
-                except:
+                    if len(Sups["Global Share"]) > 0: 
+                        fit_results["global pages"] = Sups["Global Share"]
+                except KeyError:
                     pass
                 try:
-                    Newtab.chi2 = Sups["Chi sq"]
+                    fit_results["chi2"] = Sups["Chi sq"]
                 except:
                     pass
-            # Set Title of the Page
-            try:
-                Newtab.tabtitle.SetValue(Infodict["Comments"][pageid])
-            except:
-                pass # no page title
-            # Import the intensity trace
-            try:
-                trace = Infodict["Traces"][pageid]
-            except:
-                trace = None
-            if trace is not None:
-                if Newtab.IsCrossCorrelation is False:
-                    Newtab.trace = trace[0]
-                    Newtab.traceavg = trace[0][:,1].mean()
-                else:
-                    Newtab.tracecc = trace
+                # also set fit parameters
+                fit_results["fit parameters"] = np.where(Infodict["Parameters"][i][3])[0]
+                # set fit weights for plotting
+                if fit_results["weighted fit"]:
+                    # these were already imported:
+                    try:
+                        weights = Infodict["External Weights"][pageid]
+                        for w in weights.keys():
+                            fit_results["weighted fit type"] = w
+                            fit_results["fit weights"] = weights[w]
+                    except KeyError:
+                        pass
+            Newtab.corr.fit_results = fit_results
+
             # Plot everything
             Newtab.PlotAll(trigger="page_add_batch")
         # Set Session Comment
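The session loader above gathers everything fit-related into one
`fit_results` dict on the correlation object instead of scattered page
attributes. A toy assembly of the keys seen in this hunk; all values are
invented:

    import numpy as np

    fit_results = dict()
    fit_results["weighted fit"] = True
    fit_results["chi2"] = 1.07
    # indices of the varied parameters, from the boolean fit flags
    fit_results["fit parameters"] = np.where(
        np.array([True, False, True]))[0]
    fit_results["fit error estimation"] = [0.01, 0.2]
    fit_results["weighted fit type"] = "Average"   # invented label
    fit_results["fit weights"] = np.ones(100)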
@@ -1407,8 +1454,8 @@ class MyFrame(wx.Frame):
         # Export CSV data
         filename = Page.tabtitle.GetValue().strip()+Page.counter[:2]+".csv"
         dlg = wx.FileDialog(self, "Save curve", self.dirname, filename, 
-              "Correlation with trace (*.csv)|*.csv;*.CSV"+\
-              "|Correlation only (*.csv)|*.csv;*.CSV",
+              "Correlation with trace (*.csv)|*.csv;*.*"+\
+              "|Correlation only (*.csv)|*.csv;*.*",
                wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
         # user cannot do anything until he clicks "OK"
         if dlg.ShowModal() == wx.ID_OK:
@@ -1502,41 +1549,52 @@ class MyFrame(wx.Frame):
             Page.apply_parameters()
             # Set parameters
             Infodict["Parameters"][counter] = self.PackParameters(Page)
+            corr = Page.corr
             # Set supplementary information, such as errors of fit
-            if Page.parmoptim_error is not None: # == if Page.chi2 is not None
+            if hasattr(corr, "fit_results"):
                 Infodict["Supplements"][counter] = dict()
-                Infodict["Supplements"][counter]["Chi sq"] = float(Page.chi2)
+                if corr.fit_results.has_key("chi2"):
+                    Infodict["Supplements"][counter]["Chi sq"] = float(corr.fit_results["chi2"])
+                else:
+                    Infodict["Supplements"][counter]["Chi sq"] = 0
                 PageList = list()
                 for pagei in Page.GlobalParameterShare:
                     PageList.append(int(pagei))
                 Infodict["Supplements"][counter]["Global Share"] = PageList
-                                                
+
+                # optimization error
                 Alist = list()
-                for key in Page.parmoptim_error.keys():
-                    position = mdls.GetPositionOfParameter(Page.modelid, key)
-                    Alist.append([ int(position),
-                                   float(Page.parmoptim_error[key]) ])
-                    Infodict["Supplements"][counter]["FitErr"] = Alist
+                if (corr.fit_results.has_key("fit error estimation") and 
+                    len(corr.fit_results["fit error estimation"]) != 0):
+                    for ii, fitpid in enumerate(corr.fit_results["fit parameters"]):
+                        Alist.append([ int(fitpid),
+                                   float(corr.fit_results["fit error estimation"][ii]) ])
+                Infodict["Supplements"][counter]["FitErr"] = Alist
+                
             # Set exp data
-            Infodict["Correlations"][counter] = [Page.tau, Page.dataexpfull]
+            Infodict["Correlations"][counter] = [corr.lag_time, corr.correlation]
             # Also save the trace
-            if Page.IsCrossCorrelation is False:
-                Infodict["Traces"][counter] = Page.trace
-                # #Function_trace.append(Page.trace)
-            else:
-                # #Function_trace.append(Page.tracecc)
-                Infodict["Traces"][counter] = Page.tracecc
+            Infodict["Traces"][counter] = corr.traces
             # Append title to Comments
             # #Comments.append(Page.tabtitle.GetValue())
             Infodict["Comments"][counter] = Page.tabtitle.GetValue()
             # Add additional weights to Info["External Weights"]
-            if len(Page.external_std_weights) != 0:
-                Infodict["External Weights"][counter] = Page.external_std_weights
+            external_weights = dict()
+            for key in corr._fit_weight_memory.keys():
+                if isinstance(corr._fit_weight_memory[key], np.ndarray):
+                    external_weights[key] = corr._fit_weight_memory[key]
+            # also save current weights
+            if hasattr(corr, "fit_results"):
+                if corr.fit_results.has_key("weighted fit type"):
+                    fittype = corr.fit_results["weighted fit type"]
+                    fitweight = corr.fit_results["fit weights"]
+                    external_weights[fittype] = fitweight
+            Infodict["External Weights"][counter] = external_weights
         # Append Session Comment:
         Infodict["Comments"]["Session"] = self.SessionComment
         # File dialog
         dlg = wx.FileDialog(self, "Save session file", self.dirname, "",
-                            "PyCorrFit session (*.pcfs)|*.pcfs",
+                            "PyCorrFit session (*.pcfs)|*.pcfs|All files (*.*)|*.*",
                             wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
         if dlg.ShowModal() == wx.ID_OK:
             # Save everything
@@ -1603,12 +1661,12 @@ class MyFrame(wx.Frame):
         """
         Page.apply_parameters()
         # Get Model ID
-        modelid = Page.modelid
+        modelid = Page.corr.fit_model.id
         # Get Page number
         counter = Page.counter
-        active_numbers = Page.active_parms[1]       # Array, Parameters
-        active_fitting = Page.active_parms[2]
-        crop = [Page.startcrop, Page.endcrop]
+        active_numbers = Page.corr.fit_parameters   # Array, Parameters
+        active_fitting = Page.corr.fit_parameters_variable
+        crop = Page.corr.fit_ival
         Parms = [counter, modelid, active_numbers, active_fitting, crop]
         # Weighting:
         # Additional parameters as of v.0.2.0
@@ -1628,7 +1686,7 @@ class MyFrame(wx.Frame):
             knots = int(knots)
         weighted = Page.weighted_fittype_id
         weights = Page.weighted_nuvar
-        algorithm = Page.fit_algorithm
+        algorithm = Page.corr.fit_algorithm
         Parms.append([weighted, weights, knots, algorithm])
         # Additional parameters as of v.0.2.9
         # Which Background signal is selected?
@@ -1639,13 +1697,9 @@ class MyFrame(wx.Frame):
         Parms.append(Page.IsCrossCorrelation)
         # Additional parameter as of v.0.7.8
         # The selection of a normalization parameter (None or integer)
-        if Page.normparm is not None:
-            # We need to do this because yaml export would not work
-            # in safe mode.
-            Page.normparm=int(Page.normparm)
-        Parms.append(Page.normparm)
+        Parms.append(Page.corr.normparm)
         # Parameter ranges
-        Parms.append(Page.parameter_range)
+        Parms.append(Page.corr.fit_parameters_range)
         return Parms
 
 
@@ -1659,8 +1713,8 @@ class MyFrame(wx.Frame):
             (Autocorrelation/Cross-Correlation) of the page.
         """
         modelid = Parms[1]
-        if Page.modelid != modelid:
-            print "Wrong model: "+str(Page.modelid)+" vs. "+str(modelid)
+        if Page.corr.fit_model.id != modelid:
+            print "Wrong model: "+str(Page.corr.fit_model.id)+" vs. "+str(modelid)
             return
         active_values = Parms[2]
         active_fitting = Parms[3]
@@ -1690,17 +1744,13 @@ class MyFrame(wx.Frame):
             active_values[lindex] = sigma
             active_values = np.delete(active_values,lindex+1)
             active_fitting = np.delete(active_fitting, lindex+1)
-        # Cropping: What part of dataexp should be displayed.
-        [cropstart, cropend] = Parms[4]
+        # Cropping: What part of the correlation should be displayed.
+        Page.corr.fit_ival = Parms[4]
         # Add parameters and fitting to the created page.
         # We need to run Newtab.apply_parameters_reverse() in order
         # for the data to be displayed in the user interface.
-        Page.active_parms[1] = active_values
-        Page.active_parms[2] = active_fitting
-        # Cropping
-        Page.startcrop = cropstart
-        Page.endcrop = cropend
-        Page.crop_data()
+        Page.corr.fit_parameters = active_values
+        Page.corr.fit_parameters_variable = active_fitting
         # Weighted fitting
         if len(Parms) >= 6:
             if len(Parms[5]) == 2:
@@ -1712,7 +1762,7 @@ class MyFrame(wx.Frame):
             else:
                 # We have different fitting algorithms as of v. 0.8.3
                 [weighted, weights, knots, algorithm] = Parms[5]
-                Page.fit_algorithm = algorithm
+                Page.corr.fit_algorithm = algorithm
             if knots is not None:
                 # This is done with apply_parameters_reverse:
                 #       text = Page.Fitbox[1].GetValue()
@@ -1731,14 +1781,9 @@ class MyFrame(wx.Frame):
             Page.weighted_nuvar = weights
         Page.apply_parameters_reverse()
 
-        if Page.dataexp is not None:
+        if Page.corr.correlation is not None:
             Page.Fit_enable_fitting()
             Page.Fit_WeightedFitCheck()
-            Page.Fit_create_instance()
-        if Page.weighted_fit_was_performed:
-            # We need this to plot std-dev
-            Page.calculate_corr()
-            Page.data4weight = 1.*Page.datacorr
         # Set which background correction the Page uses:
         if len(Parms) >= 7:
             # causality check:
@@ -1751,14 +1796,18 @@ class MyFrame(wx.Frame):
                 Page.OnAmplitudeCheck("init")
         # Set if Newtab is of type cross-correlation:
         if len(Parms) >= 8:
-            Page.SetCorrelationType(Parms[7], init)
+            if Parms[7]:
+                Page.corr.corr_type = "cc"
+            else:
+                Page.corr.corr_type = "ac"
+            Page.OnAmplitudeCheck()
         if len(Parms) >= 9:
             # New feature in 0.7.8 includes normalization to a fitting
             # parameter.
-            Page.normparm = Parms[8]
+            Page.corr.normparm = Parms[8]
             Page.OnAmplitudeCheck("init")
         if len(Parms) >= 10:
-            Page.parameter_range = np.array(Parms[9])
+            Page.corr.fit_parameters_range = np.array(Parms[9])
         ## If we want to add more stuff, we should do something like:
         ##   if len(Parms) >= 11:
         ##       nextvalue = Parms[10]
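The trailing comment makes the convention explicit: sessions stay
backward compatible because PackParameters only appends to the `Parms`
list and UnpackParameters guards every read with a `len(Parms) >= n`
check. A toy illustration of the pattern; the fields are invented and not
the real session layout:

    # Versioned-list pattern: new fields are appended, so old readers
    # and old session files keep working.
    def pack(page):
        parms = [page["counter"], page["modelid"]]
        parms.append(page["algorithm"])   # field added in a later version
        return parms

    def unpack(parms, page):
        page["counter"] = parms[0]
        page["modelid"] = parms[1]
        if len(parms) >= 3:               # older sessions lack this entry
            page["algorithm"] = parms[2]

    page = {"counter": "#1:", "modelid": 6000, "algorithm": "Lev-Mar"}
    old_session = pack(page)[:2]          # simulate an old session file
    restored = {"algorithm": "Lev-Mar"}   # default survives the round trip
    unpack(old_session, restored)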
diff --git a/pycorrfit/models/MODEL_TIRF_1C.py b/pycorrfit/models/MODEL_TIRF_1C.py
index c237592..52832cd 100755
--- a/pycorrfit/models/MODEL_TIRF_1C.py
+++ b/pycorrfit/models/MODEL_TIRF_1C.py
@@ -110,7 +110,7 @@ def CF_Gxyz_TIR_square(parms, tau, wixi=wixi):
     return G
 
 
-def MoreInfo_6000(parms, countrate):
+def MoreInfo_6000(parms, countrate=None):
     u"""Supplementary parameters:
         For a>>sigma, the correlation function at tau=0 corresponds to:
         [4] G(τ=0) = 1/(N_eff) * ( 1-2*σ/(sqrt(π)*a) )²
diff --git a/pycorrfit/models/MODEL_TIRF_3D2Dkin_Ries.py b/pycorrfit/models/MODEL_TIRF_3D2Dkin_Ries.py
index d4764da..9ea2697 100755
--- a/pycorrfit/models/MODEL_TIRF_3D2Dkin_Ries.py
+++ b/pycorrfit/models/MODEL_TIRF_3D2Dkin_Ries.py
@@ -27,7 +27,7 @@ def wixi(x):
 
 # Lateral correlation function
 def CF_gxy_square(parms, tau):
-    """ 2D free diffusion measured with a square pinhole.
+    u""" 2D free diffusion measured with a square pinhole.
         For the square pinhole, the correlation function can readily be
         calculated for a TIR-FCS setup.
         This function is called by other functions within this module.
@@ -61,7 +61,7 @@ def CF_gxy_square(parms, tau):
 
 
 def CF_gz_CC(parms, tau, wixi=wixi):
-    """ Axial (1D) diffusion in a TIR-FCS setup.
+    u""" Axial (1D) diffusion in a TIR-FCS setup.
         From Two species (bound/unbound) this is the bound part.
         This function is called by other functions within this module.
 
@@ -114,7 +114,7 @@ def CF_gz_CC(parms, tau, wixi=wixi):
 
 
 def CF_gz_AC(parms, tau, wixi=wixi):
-    """ Axial (1D) diffusion in a TIR-FCS setup.
+    u""" Axial (1D) diffusion in a TIR-FCS setup.
         From Two species (bound/unbound) this is the cross correlation part.
         This function is called by other functions within this module.
 
@@ -173,7 +173,7 @@ def CF_gz_AC(parms, tau, wixi=wixi):
 
 
 def CF_gz_AA(parms, tau, wixi=wixi):
-    """ Axial (1D) diffusion in a TIR-FCS setup.
+    u""" Axial (1D) diffusion in a TIR-FCS setup.
         From Two species (bound/unbound) this is the unbound part.
         This function is called by other functions within this module.
 
diff --git a/pycorrfit/models/MODEL_TIRF_gaussian_1C.py b/pycorrfit/models/MODEL_TIRF_gaussian_1C.py
index b74ad2b..7f30460 100755
--- a/pycorrfit/models/MODEL_TIRF_gaussian_1C.py
+++ b/pycorrfit/models/MODEL_TIRF_gaussian_1C.py
@@ -142,7 +142,7 @@ def CF_Gxyz_TIR_gauss_trip(parms, tau):
 
 
 
-def MoreInfo_6013(parms, countrate):
+def MoreInfo_6013(parms, countrate=None):
     u"""Supplementary variables:
         Beware that the effective volume is chosen arbitrarily.
         Correlation function at lag time τ=0:
@@ -172,7 +172,7 @@ def MoreInfo_6013(parms, countrate):
     return Info
 
 
-def MoreInfo_6014(parms, countrate):
+def MoreInfo_6014(parms, countrate=None):
     u"""Supplementary variables:
         Beware that the effective volume is chosen arbitrarily.
         Correlation function at lag time τ=0:
diff --git a/pycorrfit/models/MODEL_TIRF_gaussian_3D2D.py b/pycorrfit/models/MODEL_TIRF_gaussian_3D2D.py
index 64a8b6a..e1694b2 100755
--- a/pycorrfit/models/MODEL_TIRF_gaussian_3D2D.py
+++ b/pycorrfit/models/MODEL_TIRF_gaussian_3D2D.py
@@ -25,7 +25,7 @@ def wixi(x):
 
 # 3D + 2D + T
 def CF_Gxyz_3d2dT_gauss(parms, tau):
-    """ Two-component, two- and three-dimensional diffusion
+    u""" Two-component, two- and three-dimensional diffusion
         with a Gaussian lateral detection profile and
         an exponentially decaying profile in axial direction,
         including a triplet component.
@@ -136,7 +136,7 @@ def Checkme(parms):
     return parms
 
 
-def MoreInfo(parms, countrate):
+def MoreInfo(parms, countrate=None):
     u"""Supplementary parameters:
         Effective number of freely diffusing particles in 3D:
         [10] n3D = n*F
diff --git a/pycorrfit/models/MODEL_TIRF_gaussian_3D3D.py b/pycorrfit/models/MODEL_TIRF_gaussian_3D3D.py
index 751e9e7..2a5684a 100755
--- a/pycorrfit/models/MODEL_TIRF_gaussian_3D3D.py
+++ b/pycorrfit/models/MODEL_TIRF_gaussian_3D3D.py
@@ -153,7 +153,7 @@ def Checkme(parms):
     return parms
 
 
-def MoreInfo(parms, countrate):
+def MoreInfo(parms, countrate=None):
     u"""Supplementary parameters:
         Effective number of particle species 1:
         [10] n₁ = n*F₁
diff --git a/pycorrfit/models/MODEL_classic_gaussian_2D.py b/pycorrfit/models/MODEL_classic_gaussian_2D.py
index 5601c49..58b6a47 100755
--- a/pycorrfit/models/MODEL_classic_gaussian_2D.py
+++ b/pycorrfit/models/MODEL_classic_gaussian_2D.py
@@ -173,7 +173,7 @@ def Check_6031(parms):
     return parms
 
 
-def MoreInfo_6001(parms, countrate):
+def MoreInfo_6001(parms, countrate=None):
     # We can only give you the effective particle number
     n = parms[0]
     Info = list()
@@ -184,7 +184,7 @@ def MoreInfo_6001(parms, countrate):
     return Info
     
     
-def MoreInfo_6031(parms, countrate):
+def MoreInfo_6031(parms, countrate=None):
     u"""Supplementary parameters:
         [8] n₁ = n*F₁     Particle number of species 1
         [9] n₂ = n*(1-F₁) Particle number of species 2
diff --git a/pycorrfit/models/MODEL_classic_gaussian_3D.py b/pycorrfit/models/MODEL_classic_gaussian_3D.py
index ac494d7..163ee2e 100755
--- a/pycorrfit/models/MODEL_classic_gaussian_3D.py
+++ b/pycorrfit/models/MODEL_classic_gaussian_3D.py
@@ -184,7 +184,7 @@ def Check_3D3DT(parms):
     return parms
 
 
-def MoreInfo_1C(parms, countrate):
+def MoreInfo_1C(parms, countrate=None):
     # We can only give you the effective particle number
     n = parms[0]
     Info = list()
@@ -195,7 +195,7 @@ def MoreInfo_1C(parms, countrate):
     return Info
 
 
-def MoreInfo_6030(parms, countrate):
+def MoreInfo_6030(parms, countrate=None):
     u"""Supplementary parameters:
         [9]  n₁ = n*F₁     Particle number of species 1
         [10] n₂ = n*(1-F₁) Particle number of species 2
@@ -248,7 +248,7 @@ parms_6012 = [labels_6012, values_6012, valuestofit_6012]
 
 # 3D + 3D + T model gauss
 m_gauss_3d_3d_t_mix_6030 = [6030, "T+3D+3D",
-                            "Separate 3D diffusion + triplet, Gauß",
+                            u"Separate 3D diffusion + triplet, Gauß",
                             CF_Gxyz_gauss_3D3DT]
 labels_6030  = [u"n",
                 u"τ"+u"\u2081"+" [ms]",
diff --git a/pycorrfit/models/MODEL_classic_gaussian_3D2D.py b/pycorrfit/models/MODEL_classic_gaussian_3D2D.py
index 2de7b6a..742d5f6 100755
--- a/pycorrfit/models/MODEL_classic_gaussian_3D2D.py
+++ b/pycorrfit/models/MODEL_classic_gaussian_3D2D.py
@@ -98,7 +98,7 @@ def Checkme(parms):
     return parms
 
 
-def MoreInfo(parms, countrate):
+def MoreInfo(parms, countrate=None):
     u"""Supplementary parameters:
         Effective number of freely diffusing particles in 3D solution:
         [9]  n3D = n*F
@@ -122,8 +122,8 @@ def MoreInfo(parms, countrate):
 
 # 3D + 3D + T model gauss
 m_gauss_3d_2d_t = [6032, u"T+3D+2D",
-                            "Separate 3D and 2D diffusion + triplet, Gauß",
-                            CF_Gxyz_3d2dT_gauss]
+                   u"Separate 3D and 2D diffusion + triplet, Gauß",
+                   CF_Gxyz_3d2dT_gauss]
 labels  = [ u"n",
             u"τ_2D [ms]",
             u"τ_3D [ms]",
@@ -136,7 +136,7 @@ labels  = [ u"n",
                 ]
 values = [ 
                 25,      # n
-                100,     # taud2D
+                240,     # taud2D
                 0.1,     # taud3D
                 0.5,     # F3D
                 7,       # SP
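The countrate=None defaults added to the MoreInfo_* helpers above let
supplementary parameters be computed even when no intensity trace is
available; countrate-dependent quantities are then simply skipped. A
sketch of the convention with an invented parameter layout:

    # Sketch of the countrate=None convention (toy parameter layout).
    def more_info(parms, countrate=None):
        n = parms[0]                    # effective particle number
        info = [[u"n3D", n * parms[1]]]
        if countrate is not None:
            # counts per particle need the measured countrate [kHz]
            info.append([u"cpp [kHz]", countrate / n])
        return info

    print(more_info([25.0, 0.5]))           # works without a trace
    print(more_info([25.0, 0.5], 120.0))    # adds CPP when available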
diff --git a/pycorrfit/models/__init__.py b/pycorrfit/models/__init__.py
index 238ad3c..45ef025 100644
--- a/pycorrfit/models/__init__.py
+++ b/pycorrfit/models/__init__.py
@@ -66,6 +66,12 @@ class Model(object):
         """Emulate old list behavior of models"""
         return self._definitions[key]
 
+    def __repr__(self):
+        text = "Model {} - {}".format(
+                self.id,
+                self.description_short)
+        return text
+
     def apply(self, parameters, tau):
         """ 
         Apply the model with `parameters` and lag
@@ -114,7 +120,7 @@ class Model(object):
     def func_verification(self):
         return self._verification
     
-    def get_supplementary_parameters(self, values, countrate):
+    def get_supplementary_parameters(self, values, countrate=None):
         """
         Compute additional information for the model
         
@@ -123,11 +129,11 @@ class Model(object):
         values: list-like of same length as `self.default_values`
             parameters for the model
         countrate: float
-            count rate in Hz
+            countrate in kHz
         """
-        return self.func_supplements(values, countrate*1e-3)
+        return self.func_supplements(values, countrate)
 
-    def get_supplementary_values(self, values, countrate):
+    def get_supplementary_values(self, values, countrate=None):
         """
         Returns only the values of
         self.get_supplementary_parameters
@@ -145,6 +151,10 @@ class Model(object):
         return out
 
     @property
+    def name(self):
+        return self.description_short
+
+    @property
     def parameters(self):
         return self._parameters
 
@@ -302,7 +312,6 @@ def GetModelParametersFromId(modelid):
 def GetModelFitBoolFromId(modelid):
     return valuedict[modelid][2]
 
-
 def GetMoreInfo(modelid, Page):
     """ This functino is called by someone who has already calculated
         some stuff or wants to know more about the model he is looking at.
@@ -313,48 +322,49 @@ def GetMoreInfo(modelid, Page):
     """
     # Background signal average
     bgaverage = None
-    # Signal countrate/average:
-    # might become countrate - bgaverage
-    countrate = Page.traceavg
     # Get the parameters from the current page.
     parms = Page.active_parms[1]
     Info = list()
-    if Page.IsCrossCorrelation is False:
+    corr = Page.corr
+    if corr.is_ac:
+        if len(corr.traces)==1:
+            countrate = corr.traces[0].countrate
+        else:
+            countrate = None
         ## First import the supplementary parameters of the model
         ## The order is important for plot normalization and session
         ## saving as of version 0.7.8
         # Try to get the dictionary entry of a model
         # Background information
-        if Page.bgselected is not None:
-            # Background list consists of items with
-            #  [0] average
-            #  [1] name
-            #  [2] trace
-            bgaverage = Page.parent.Background[Page.bgselected][0]
+        if len(corr.backgrounds)==1:
+            bgaverage = corr.backgrounds[0].countrate
             # Now set the correct countrate
             # We already printed the countrate, so there's no harm done.
-            if countrate is not None:
-                # might be that there is no countrate.
-                countrate = countrate - bgaverage
+        if countrate is not None and bgaverage is not None:
+            # might be that there is no countrate.
+            relativecountrate = countrate - bgaverage
+        else:
+            relativecountrate = countrate
+        # In case of cross correlation, we don't show this kind of
+        # information.
         try:
             # This function should return all important information
             # that can be calculated from the given parameters.
+            # We need the relativecountrate to compute the CPP.
             func_info = supplement[modelid]
-            data = func_info(parms, countrate)
+            data = func_info(parms, relativecountrate)
             for item in data:
                 Info.append([item[0], item[1]])
         except KeyError:
             # No information available
             pass
-        # In case of cross correlation, we don't show this kind of
-        # information.
-        if Page.traceavg is not None:
+        if countrate is not None:
             # Measurement time
-            duration = Page.trace[-1,0]/1000
+            duration = corr.traces[0].duration/1000
             Info.append(["duration [s]", duration])
             # countrate has to be printed before background.
             # Background might overwrite countrate.
-            Info.append(["avg. signal [kHz]", Page.traceavg])
+            Info.append(["avg. signal [kHz]", corr.traces[0].countrate])
     else:
         ## Cross correlation curves usually have two traces. Since we
         ## do not know how to compute the cpp, we will pass the argument
@@ -373,17 +383,14 @@ def GetMoreInfo(modelid, Page):
         except KeyError:
             # No information available
             pass
-        if Page.tracecc is not None:
+        if len(corr.traces)==2:
             # Measurement time
-            duration = Page.tracecc[0][-1,0]/1000
+            duration = corr.traces[0].duration/1000
             Info.append(["duration [s]", duration])
             # countrate has to be printed before background.
             # Background might overwrite countrate.
-            avg0 = Page.tracecc[0][:,1].mean()
-            avg1 = Page.tracecc[1][:,1].mean()
-            Info.append(["avg. signal A [kHz]", avg0])
-            Info.append(["avg. signal B [kHz]", avg1])
-
+            Info.append(["avg. signal A [kHz]", corr.traces[0].countrate])
+            Info.append(["avg. signal B [kHz]", corr.traces[1].countrate])
 
     if len(Info) == 0:
         # If nothing matched until now:
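
(A condensed sketch of the countrate handling introduced above, assuming
only the `corr` attributes that appear in this hunk.)

    def effective_countrate(corr):
        # condensed version of the logic above; corr as in this patch
        if corr.is_ac and len(corr.traces) == 1:
            countrate = corr.traces[0].countrate        # [kHz]
        else:
            countrate = None
        if countrate is not None and len(corr.backgrounds) == 1:
            # background-corrected signal, used e.g. for the CPP
            return countrate - corr.backgrounds[0].countrate
        return countrate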
diff --git a/pycorrfit/openfile.py b/pycorrfit/openfile.py
index 19f12b0..170fa8d 100644
--- a/pycorrfit/openfile.py
+++ b/pycorrfit/openfile.py
@@ -6,7 +6,7 @@ This file contains definitions for opening PyCorrFit sessions and
 saving PyCorrFit correlation curves.
 """
 
-
+import codecs
 import csv
 import numpy as np
 import os
@@ -21,7 +21,7 @@ from . import doc
 # These imports are required for loading data
 from .readfiles import Filetypes  # @UnusedImport
 from .readfiles import BGFiletypes  # @UnusedImport
-
+from .fcs_data_set import Trace
 
 
 def LoadSessionData(sessionfile, parameters_only=False):
@@ -224,7 +224,8 @@ def LoadSessionData(sessionfile, parameters_only=False):
                 if (str(row[0])[0:1] != '#'):
                     bgtrace.append((np.float(row[0]), np.float(row[1])))
             bgtrace = np.array(bgtrace)
-            Infodict["Backgrounds"].append([np.float(bgrow[0]), str(bgrow[1]), bgtrace])
+            newbackground = Trace(trace=bgtrace, name=str(bgrow[1]), countrate=np.float(bgrow[0]))
+            Infodict["Backgrounds"].append(newbackground)
             i = i + 1
         bgfile.close()
     # Get external weights if they exist
@@ -330,7 +331,7 @@ def SaveSessionData(sessionfile, Infodict):
     # Save external functions
     for key in Infodict["External Functions"].keys():
         funcfilename = "model_"+str(key)+".txt"
-        funcfile =  open(funcfilename, 'wb')
+        funcfile =  codecs.open(funcfilename, 'w', encoding="utf-8")
         funcfile.write(Infodict["External Functions"][key])
         funcfile.close()
         Arc.write(funcfilename)
@@ -371,7 +372,7 @@ def SaveSessionData(sessionfile, Infodict):
         # Since *Trace* and *Parms* are in the same order, which is the
         # Page order, we will identify the filename by the Page title 
         # number.
-        if Infodict["Traces"][pageid] is not None:
+        if Infodict["Traces"][pageid] is not None and len(Infodict["Traces"][pageid]) != 0:
             if Parms[pageid][7] is True:
                 # We have cross correlation: save two traces
                 ## A
@@ -411,8 +412,8 @@ def SaveSessionData(sessionfile, Infodict):
                 tracefilename = "trace"+number+".csv"
                 tracefile = open(tracefilename, 'wb')
                 traceWriter = csv.writer(tracefile, delimiter=',')
-                time = Infodict["Traces"][pageid][:,0]
-                rate = Infodict["Traces"][pageid][:,1]
+                time = Infodict["Traces"][pageid][0][:,0]
+                rate = Infodict["Traces"][pageid][0][:,1]
                 # Names of Columns
                 traceWriter.writerow(['# time', 'count rate'])
                 # Actual Data
@@ -445,15 +446,15 @@ def SaveSessionData(sessionfile, Infodict):
         bgfile = open(bgfilename, 'wb')
         bgwriter = csv.writer(bgfile, delimiter='\t')
         for i in np.arange(len(Background)):
-            bgwriter.writerow([str(Background[i][0]), Background[i][1]])
+            bgwriter.writerow([str(Background[i].countrate), Background[i].name])
             # Traces
             bgtracefilename = "bg_trace"+str(i)+".csv"
             bgtracefile = open(bgtracefilename, 'wb')
             bgtraceWriter = csv.writer(bgtracefile, delimiter=',')
             bgtraceWriter.writerow(['# time', 'count rate'])
             # Actual Data
-            time = Background[i][2][:,0]
-            rate = Background[i][2][:,1]
+            time = Background[i][:,0]
+            rate = Background[i][:,1]
             for j in np.arange(len(time)):
                 bgtraceWriter.writerow(["%.20e" % time[j],
                                         "%.20e" % rate[j]])
@@ -526,38 +527,48 @@ def ExportCorrelation(exportfile, Page, info, savetrace=True):
         Append the trace to the file
     """
 
-    openedfile = open(exportfile, 'wb')
+    openedfile = codecs.open(exportfile, 'w', encoding="utf-8")
     ## First, some doc text
     openedfile.write(ReadmeCSV.replace('\n', '\r\n'))
     # The infos
     InfoMan = info.InfoClass(CurPage=Page)
     PageInfo = InfoMan.GetCurFancyInfo()
     for line in PageInfo.splitlines():
-        openedfile.write("# "+line+"\r\n")
-    openedfile.write("#\r\n#\r\n")
+        openedfile.write(u"# "+line+"\r\n")
+    openedfile.write(u"#\r\n#\r\n")
     # Get all the data we need from the Page
     # Modeled data
-    # Since 0.7.8 the user may normalize the curves. The normalization
-    # factor is set in *Page.normfactor*.
-    corr = Page.datacorr[:,1]*Page.normfactor
-    if Page.dataexp is not None:
+    corr = Page.corr
+    mod = corr.modeled_plot[:,1]
+    if corr.correlation is not None:
         # Experimental data
-        tau = Page.dataexp[:,0]
-        exp = Page.dataexp[:,1]*Page.normfactor
-        res = Page.resid[:,1]*Page.normfactor
+        tau = corr.correlation_fit[:,0]
+        exp = corr.correlation_fit[:,1]
+        res = corr.residuals_fit[:,1]
         # Plotting! Because we only export plotted area.
-        weight = Page.weights_used_for_plotting
-        if weight is None:
-            pass
-        elif len(weight) != len(exp):
-            text = "Weights have not been calculated for the "+\
-                   "area you want to export. Pressing 'Fit' "+\
-                   "again should solve this issue. Weights will "+\
-                   "not be saved."
-            warnings.warn(text)
+        
+        if corr.is_weighted_fit:
+            weightname = corr.fit_weight_type
+            try:
+                weight = corr.fit_results["fit weights"]
+            except KeyError:
+                weight = corr.fit_weight_data
+    
+            if weight is None:
+                pass
+            
+            elif len(weight) != len(exp):
+                text = "Weights have not been calculated for the "+\
+                       "area you want to export. Pressing 'Fit' "+\
+                       "again should solve this issue. Weights will "+\
+                       "not be saved."
+                warnings.warn(text)
+                weight = None
+        else:
             weight = None
+            weightname = None
     else:
-        tau = Page.datacorr[:,0]
+        tau = corr.lag_time_fit
         exp = None
         res = None
     # Include weights in data saving:
@@ -569,19 +580,19 @@ def ExportCorrelation(exportfile, Page, info, savetrace=True):
     ## Correlation curve
     dataWriter = csv.writer(openedfile, delimiter='\t')
     if exp is not None:
-        header = '# Channel (tau [s])'+"\t"+ \
+        header = '# Lag time [s]'+"\t"+ \
                  'Experimental correlation'+"\t"+ \
                  'Fitted correlation'+ "\t"+ \
                  'Residuals'+"\r\n"
-        data = [tau, exp, corr, res]
-        if Page.weighted_fit_was_performed is True \
-        and weight is not None:
-            header = header.strip() + "\t"+'Weights (fit)'+"\r\n"
+        data = [tau, exp, mod, res]
+        if corr.is_weighted_fit and weight is not None:
+            header = "{} \t Weights [{}] \r\n".format(
+                      header.strip(), weightname)
             data.append(weight)
     else:
-        header = '# Channel (tau [s])'+"\t"+ \
+        header = '# Lag time [s]'+"\t"+ \
                  'Correlation function'+"\r\n"
-        data = [tau, corr]
+        data = [tau, mod]
     # Write header
     openedfile.write(header)
     # Write data
@@ -589,7 +600,7 @@ def ExportCorrelation(exportfile, Page, info, savetrace=True):
         # row-wise, data may have more than two elements per row
         datarow = list()
         for j in np.arange(len(data)):
-            rowcoli = str("%.10e") % data[j][i]
+            rowcoli = "{:.10e}".format(data[j][i])
             datarow.append(rowcoli)
         dataWriter.writerow(datarow)
     ## Trace
@@ -597,42 +608,31 @@ def ExportCorrelation(exportfile, Page, info, savetrace=True):
     if savetrace:
         # We will also save the trace in [s]
         # Intensity trace in kHz may stay the same
-        if Page.trace is not None:
+        if len(corr.traces) > 0:
             # Mark beginning of Trace
             openedfile.write('#\r\n#\r\n# BEGIN TRACE\r\n#\r\n')
             # Columns
-            time = Page.trace[:,0]*timefactor
-            intensity = Page.trace[:,1]
+            time = corr.traces[0][:,0]*timefactor
+            intensity = corr.traces[0][:,1]
             # Write
             openedfile.write('# Time [s]'+"\t" 
                                  'Intensity trace [kHz]'+" \r\n")
             for i in np.arange(len(time)):
-                dataWriter.writerow([str("%.10e") % time[i],
-                                     str("%.10e") % intensity[i]])
-        elif Page.tracecc is not None:
+                dataWriter.writerow(["{:.10e}".format(time[i]),
+                                     "{:.10e}".format(intensity[i])])
+        if len(corr.traces) > 1:
             # We have some cross-correlation here:
-            # Mark beginning of Trace A
-            openedfile.write('#\r\n#\r\n# BEGIN TRACE\r\n#\r\n')
-            # Columns
-            time = Page.tracecc[0][:,0]*timefactor
-            intensity = Page.tracecc[0][:,1]
-            # Write
-            openedfile.write('# Time [s]'+"\t" 
-                                 'Intensity trace [kHz]'+" \r\n")
-            for i in np.arange(len(time)):
-                dataWriter.writerow([str("%.10e") % time[i],
-                                     str("%.10e") % intensity[i]])
             # Mark beginning of Trace B
             openedfile.write('#\r\n#\r\n# BEGIN SECOND TRACE\r\n#\r\n')
             # Columns
-            time = Page.tracecc[1][:,0]*timefactor
-            intensity = Page.tracecc[1][:,1]
+            time = corr.traces[1][:,0]*timefactor
+            intensity = corr.traces[1][:,1]
             # Write
             openedfile.write('# Time [s]'+"\t" 
                                  'Intensity trace [kHz]'+" \r\n")
             for i in np.arange(len(time)):
-                dataWriter.writerow([str("%.10e") % time[i],
-                                     str("%.10e") % intensity[i]])
+                dataWriter.writerow(["{:.10e}".format(time[i]),
+                                     "{:.10e}".format(intensity[i])])
 
         openedfile.close()
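
(The switch from open(..., 'wb') to codecs.open above avoids encoding
errors when non-ASCII text such as "Gauß" is written; a minimal sketch,
with a hypothetical file name.)

    import codecs
    # Python 2: a plain open(..., 'w') would need a manual .encode("utf-8")
    with codecs.open("model_sketch.txt", "w", encoding="utf-8") as fd:
        fd.write(u"Separate 3D diffusion + triplet, Gau\u00df")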
 
diff --git a/pycorrfit/page.py b/pycorrfit/page.py
index b60c3ba..94fcc9b 100644
--- a/pycorrfit/page.py
+++ b/pycorrfit/page.py
@@ -6,69 +6,214 @@ Module frontend
 The frontend displays the GUI (Graphic User Interface).
 All functions and modules are called from here.
 """
+import numpy as np                      # NumPy
+import re
+import string
+import warnings
 import wx                               # GUI interface wxPython
 from wx.lib.agw import floatspin        # Float numbers in spin fields
 import wx.lib.plot as plot              # Plotting in wxPython
 import wx.lib.scrolledpanel as scrolled
-import numpy as np                      # NumPy
 
-from . import edclasses                    # Cool stuff like better floatspin
-from . import fitting as fit       # For fitting
+
 from . import models as mdls
 from . import tools
+from . import fcs_data_set as pcfbase
+from .fcs_data_set import Correlation, Fit
+
+
+def float2string_nsf(fval, n=7):
+    """
+    Truncate a float to n significant figures and return a nice string.
+    Arguments:
+      fval : a float
+      n    : desired number of significant figures
+    Returns:
+      String with n significant figures, possibly with trailing zeros.
+    """
+    #sgn=np.sign(fval)
+    if fval == 0:
+        npoint=n
+    else:
+        q=abs(fval)
+        k=int(np.ceil(np.log10(q/n)))
+        npoint = n-k
+    string="{:.{}f}".format(fval, npoint)
+    return string
+
+def nice_string(string):
+    """
+    Convert a string of a float created by `float2string_nsf`
+    to something nicer.
+    
+    e.g.
+    - 1.000000 -> 1
+    - 1.010000 -> 1.010
+    """
+    if len(string.split(".")[1].replace("0", "")) == 0:
+        return "{:d}".format(int(float(string)))
+    else:
+        olen = len(string)
+        newstring = string.rstrip("0")
+        if olen > len(newstring):
+            string=newstring+"0"
+        return string
+
+class PCFFloatValidator(wx.PyValidator):
+    def __init__(self, flag=None, pyVar=None):
+        wx.PyValidator.__init__(self)
+        self.flag = flag
+        self.Bind(wx.EVT_CHAR, self.OnChar)
+
+    def Clone(self):
+        return PCFFloatValidator(self.flag)
+
+    def Validate(self, win):
+        tc = self.GetWindow()
+        val = tc.GetValue()
+        
+        for x in val:
+            if x not in string.digits:
+                return False
+
+        return True
+
+    def OnChar(self, event):
+        """
+        Filter the characters that are put in the control.
+        
+        TODO:
+        - check for strings that do not make sense
+          - 2e-4.4
+          - 2e--3
+          - 3-1+5
+        """
+        key = event.GetKeyCode()
+        ctrl = event.GetEventObject()
+        # Get the actual string from the object
+        curval = wx.TextCtrl.GetValue(ctrl)
+
+        if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
+            event.Skip()
+            return
+
+        char = chr(key)
+        char = char.replace(",", ".")
+        
+        onlyonce = [".", "e"]
+        if char in onlyonce and curval.count(char):
+            # not allowed
+            return
+
+        if ( char in string.digits or
+             char in ["+", "-", ".", "e"]):
+            event.Skip()
+            return
+
+        if not wx.Validator_IsSilent():
+            wx.Bell()
+
+        # Returning without calling event.Skip eats the event before it
+        # gets to the text control
+        return
+
+
+class PCFFloatTextCtrl(wx.TextCtrl):
+    def __init__(self, *args, **kwargs):
+        wx.TextCtrl.__init__(self, *args, validator=PCFFloatValidator(), size=(110,-1),
+                             style=wx.TE_PROCESS_ENTER, **kwargs)
+        self.Bind(wx.EVT_ENTER_WINDOW, self.OnMouseEnter)
+        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeave)
+        self._PCFvalue = 0.0
+
+    def OnMouseEnter(self, e):
+        self.SetFocus()
+        self.SetSelection(-1,0)
+
+    def OnMouseLeave(self, e):
+        self.SetSelection(0,0)
+        self.SetInsertionPoint(0)
+
+    def SetValue(self, value):
+        self._PCFvalue = value
+        string = PCFFloatTextCtrl.float2string(value)
+        wx.TextCtrl.SetValue(self, string)
+    
+    def GetValue(self):
+        string = wx.TextCtrl.GetValue(self)
+        if string == PCFFloatTextCtrl.float2string(self._PCFvalue):
+            # use internal value: more accurate
+            #print("internal", self._PCFvalue)
+            return self._PCFvalue
+        else:
+            # new value
+            #print("external", string)
+            return PCFFloatTextCtrl.string2float(string)
+        
+    @staticmethod
+    def float2string(value):
+        """
+        inverse of string2float with some tweaks
+        """
+        value = float2string_nsf(value)
+        value = nice_string(value)
+        return value
+        
+    @staticmethod
+    def string2float(string):
+        """
+        Remove any characters that are not in
+        [+-0-9.,] and return a decent float
+        value.
+        """
+        # allow comma
+        string = string.replace(",", ".")
+        # allow only one decimal point
+        string = string[::-1].replace(".", "", string.count(".")-1)[::-1]
+        try:
+            string = "{:.12f}".format(float(string))
+        except:
+            pass
+        # remove letters
+        string = re.sub(r'[^\d.-]+', '', string)
+        if len(string) == 0:
+            string = "0"
+        return float(string)
+
 
 
 class FittingPanel(wx.Panel):
     """
     Those are the Panels that show the fitting dialogs with the Plots.
     """
-    def __init__(self, parent, counter, modelid, active_parms, tau):
+    def __init__(self, parent, counter, modelid, active_parms, tau=None):
         """ Initialize with given parameters. """
         wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
         self.parent = parent
-        self.filename = "None"
-        ## If IsCrossCorrelation is set to True, the trace and traceavg 
-        ## variables will not be used. Instead tracecc a list, of traces
-        ## will be used.
-        self.IsCrossCorrelation = False
-        ## Setting up variables for plotting
-        self.trace = None        # The intensity trace, tuple
-        self.traceavg = None     # Average trace intensity
-        self.tracecc = None      # List of traces (in CC mode only)
-        self.bgselected = None   # integer, index for parent.Background
-        self.bg2selected = None  # integer, index for parent.Background
-        #                          -> for cross-correlation
-        self.bgcorrect = 1.      # Background correction factor for dataexp
-        self.normparm = None     # Parameter number used for graph normalization
-        #                          if greater than number of fitting parms,
-        #                          then supplementary parm is used.
-        self.normfactor = 1.     # Graph normalization factor (e.g. value of n)
-        self.startcrop = None    # Where cropping of dataexp starts
-        self.endcrop = None      # Where cropping of dataexp ends
-        self.dataexp = None      # Experimental data (cropped)
-        self.dataexpfull = None  # Experimental data (not cropped)
-        self.datacorr = None     # Calculated data
-        self.resid = None        # Residuals
-        self.data4weight = None  # Data used for weight calculation 
-        # Fitting:
-        #self.Fitbox = [ fitbox, weightedfitdrop, fittext, fittext2,
-        #                fittextvar, fitspin, buttonfit, textalg,
-        #                self.AlgorithmDropdown]
-        # chi squared - is also an indicator, if something had been fitted
+        
+        self.corr = Correlation(fit_model=modelid)
+        if tau is not None:
+            self.corr.lag_time = tau
+        # active_parameters:
+        # [0] labels
+        # [1] values
+        # [2] bool values to fit
+        self.corr.fit_parameters = active_parms[1]
+        self.corr.fit_parameters_variable = active_parms[2]
+
+        self._bgselected = None
+        self._bg2selected = None
+
         self.FitKnots = 5 # number of knots for spline fit or similar
-        self.chi2 = None
-        self.weighted_fit_was_performed = False # default is no weighting
-        self.weights_used_for_fitting = None # weights used for fitting
-        self.weights_used_for_plotting = None # weights used for plotting
-        self.weights_plot_fill_area = None # weight area in plot
+
         self.weighted_fittype_id = 0 # integer (drop down item)
-        self.weighted_fittype = "Unknown" # type of fit used
         self.weighted_nuvar = 3 # bins for std-dev. (left and right)
-        self.fit_algorithm ="Lev-Mar" # Least squares min. is standard
-        # dictionary for alternative variances from e.g. averaging
-        self.external_std_weights = dict()
-        # Errors of fit dictionary
-        self.parmoptim_error = None
+
+        
+        # The weights that are plotted in the page
+        # This is set by the PlotAll function
+        self.weights_plot_fill_area = None
+        
         # A list containing page numbers that share parameters with this page.
         # This parameter is defined by the global fitting tool and is saved in
         # sessions.
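
(Usage sketch for the float parsing added above, assuming wxPython is
available so the module imports; the return values were checked only for
the inputs shown.)

    # Illustrative round trips through the static helper:
    PCFFloatTextCtrl.string2float("1,5e-3")   # -> 0.0015 (comma accepted)
    PCFFloatTextCtrl.string2float("abc")      # -> 0.0    (letters stripped)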
@@ -80,33 +225,7 @@ class FittingPanel(wx.Panel):
         # nothing will be plotted if called with "init"
         self.InitialPlot = False
         # Model we are using
-        self.modelid = modelid
-        # modelpack:
-        # [0] labels
-        # [1] values
-        # [2] bool values to fit
-        # [3] labels human readable (optional)
-        # [4] factors human readable (optional)
-        modelpack = mdls.modeldict[modelid]
-        # The string of the model in the menu
-        self.model = modelpack[1]
-        # Some more useless text about the model
-        self.modelname = modelpack[2]
-        # Function for fitting
-        self.active_fct = modelpack[3]
-        # Parameter verification function.
-        # This checks parameters concerning their physical meaningfullness :)
-        self.check_parms_model = mdls.verification[modelid]
-        # active_parameters:
-        # [0] labels
-        # [1] values
-        # [2] bool values to fit
-        self.active_parms = active_parms
-        # Parameter range for fitting (defaults to zero)
-        self.parameter_range = np.zeros((len(active_parms[0]),2))
-        # Some timescale
-        self.taufull = tau
-        self.tau = 1*self.taufull
+
         # Tool statistics uses this list:
         self.StatisticsCheckboxes = None
         ### Splitter window
@@ -156,44 +275,137 @@ class FittingPanel(wx.Panel):
         # Bind resizing to resizing function.
         wx.EVT_SIZE(self, self.OnSize)
 
+    @property
+    def active_parms(self):
+        names = self.corr.fit_model.parameters[0]
+        parms = self.corr.fit_parameters
+        bool = self.corr.fit_parameters_variable
+        return [names, parms, bool]
+
+    @property
+    def IsCrossCorrelation(self):
+        return self.corr.is_cc
+    
+    @property
+    def modelid(self):
+        return self.corr.fit_model.id
+    
+    @property
+    def title(self):
+        return self.tabtitle.GetValue()
+
+    @title.setter
+    def title(self, title):
+        self.tabtitle.SetValue(title.strip())
+        self.corr.title = title.strip()
+    
+    @property
+    def traceavg(self):
+        warnings.warn("Trace average always set to none!")
+        return None
+    
+    @property
+    def tracecc(self):
+        if self.corr.is_cc and len(self.corr.traces) != 0:
+            return self.corr.traces
+        else:
+            return None
+
+    @property
+    def bgselected(self):
+        return self._bgselected
+    
+    @bgselected.setter
+    def bgselected(self, value):
+        if value is None:
+            self.corr.backgrounds=[]
+            return
+        # check parent.Background and get id
+        background = self.parent.Background[value]
+        self.corr.background_replace(0, background)
+        self._bgselected = value
+
+    @property
+    def bg2selected(self):
+        return self._bg2selected
+    
+    @bg2selected.setter
+    def bg2selected(self, value):
+        if value is None:
+            if self.corr.is_cc:
+                self.corr.backgrounds=[]
+            return
+        # check parent.Background and get id
+        background = self.parent.Background[value]
+        self.corr.background_replace(1, background)
+        self._bg2selected = value
 
     def apply_parameters(self, event=None):
         """ Read the values from the form and write it to the
             pages parameters.
             This function is called when the "Apply" button is hit.
         """
+        modelid = self.corr.fit_model.id
         parameters = list()
+        parameters_variable = list()
         # Read parameters from form and update self.active_parms[1]
-        for i in np.arange(len(self.active_parms[1])):
+        for i in np.arange(len(self.spincontrol)):
             parameters.append(1*self.spincontrol[i].GetValue())
-            self.active_parms[2][i] = self.checkboxes[i].GetValue()
+            parameters_variable.append(self.checkboxes[i].GetValue())
+
+        self.corr.fit_parameters_variable = np.array(parameters_variable,
+                                                     dtype=bool)
         # As of version 0.7.5: we want the units to be displayed
         # human readable - the way they are displayed 
         # in the Page info tool.
         # Here: Convert human readable units to program internal
         # units
-        self.active_parms[1] = mdls.GetInternalFromHumanReadableParm(
-                                  self.modelid, np.array(parameters))[1]
-        self.active_parms[1] = self.check_parms(1*self.active_parms[1])
+        parmsconv = mdls.GetInternalFromHumanReadableParm(
+                        modelid, np.array(parameters))[1]
+        self.corr.fit_parameters = parmsconv
+
         # Fitting parameters
         self.weighted_nuvar = self.Fitbox[5].GetValue()
+        
         self.weighted_fittype_id = self.Fitbox[1].GetSelection()
-        if self.Fitbox[1].GetSelection() == -1:
+
+        fitbox_value = self.Fitbox[1].GetValue()
+        
+        if self.weighted_fittype_id == -1:
             # User edited knot number
-            Knots = self.Fitbox[1].GetValue()
+            Knots = fitbox_value
             Knots = filter(lambda x: x.isdigit(), Knots)
             if Knots == "":
                 Knots = "5"
             self.weighted_fittype_id = 1
             self.FitKnots = str(Knots)
-        elif self.Fitbox[1].GetSelection() == 1:
-            Knots = self.Fitbox[1].GetValue()
+            fit_weight_type = "spline{}".format(self.FitKnots)
+            fit_weight_data = self.weighted_nuvar
+        elif self.weighted_fittype_id == 1:
+            Knots = fitbox_value
             Knots = filter(lambda x: x.isdigit(), Knots)
             self.FitKnots = int(Knots)
+            fit_weight_type = "spline{}".format(self.FitKnots)
+            fit_weight_data = self.weighted_nuvar
+        elif self.weighted_fittype_id == 0:
+            fit_weight_type = "none"
+            fit_weight_data = None
+        elif self.weighted_fittype_id == 2:
+            fit_weight_type = "model function"
+            fit_weight_data = self.weighted_nuvar
+        else: # fitbox_selection > 2:
+            fit_weight_type = fitbox_value
+            self.corr.fit_weight_type = fitbox_value
+            fit_weight_data = self.corr.fit_weight_data
+        
         # Fitting algorithm
-        keys = fit.GetAlgorithmStringList()[0]
+        keys = pcfbase.GetAlgorithmStringList()[0]
         idalg = self.AlgorithmDropdown.GetSelection()
-        self.fit_algorithm = keys[idalg]
+        
+        self.corr.fit_algorithm = keys[idalg]
+        self.corr.fit_weight_type = fit_weight_type
+        self.corr.fit_weight_data = fit_weight_data
+        
         # If parameters have been changed because of the check_parms
         # function, write them back.
         self.apply_parameters_reverse()
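
(The drop-down-to-weight mapping implemented above, condensed into a
hypothetical helper for clarity; the function name is not part of the
module.)

    def weight_from_selection(sel, knots=5, nuvar=3, label=u"Average"):
        # mirrors apply_parameters: fitbox index -> (weight type, data)
        if sel == 0:
            return ("none", None)
        elif sel in (-1, 1):                  # (user-edited) spline weights
            return ("spline{}".format(knots), nuvar)
        elif sel == 2:
            return ("model function", nuvar)
        else:                                 # >2: externally stored weights
            return (label, None)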
@@ -203,8 +415,7 @@ class FittingPanel(wx.Panel):
         """ Read the values from the pages parameters and write
             it to the form.
         """
-        # check parameters
-        self.active_parms[1] = self.check_parms(self.active_parms[1])
+        modelid = self.corr.fit_model.id
         #
         # As of version 0.7.5: we want the units to be displayed
         # human readable - the way they are displayed 
@@ -212,158 +423,32 @@ class FittingPanel(wx.Panel):
         # 
         # Here: Convert program internal units to
         # human readable units
-        parameters = \
-                     mdls.GetHumanReadableParms(self.modelid,
-                                        self.active_parms[1])[1]
+        parameters = mdls.GetHumanReadableParms(modelid,
+                                        self.corr.fit_parameters)[1]
+        parameters_variable = self.corr.fit_parameters_variable
         # Write parameters to the form on the Page
         for i in np.arange(len(self.active_parms[1])):
             self.spincontrol[i].SetValue(parameters[i])
-            self.checkboxes[i].SetValue(self.active_parms[2][i])
+            self.checkboxes[i].SetValue(parameters_variable[i])
         # Fitting parameters
         self.Fitbox[5].SetValue(self.weighted_nuvar)
         idf = self.weighted_fittype_id
         List = self.Fitbox[1].GetItems()
-        List[1] = "Spline ("+str(self.FitKnots)+" knots)"
+        List[1] = "spline ("+str(self.FitKnots)+" knots)"
         self.Fitbox[1].SetItems(List)
         self.Fitbox[1].SetSelection(idf)
         # Fitting algorithm
-        keys = fit.GetAlgorithmStringList()[0]
-        idalg = keys.index(self.fit_algorithm)
+        keys = pcfbase.GetAlgorithmStringList()[0]
+        idalg = keys.index(self.corr.fit_algorithm)
         self.AlgorithmDropdown.SetSelection(idalg)
+        self.updateChi2()
 
 
     def calculate_corr(self):
-        """ Calculate correlation function
-            Returns an array of tuples (tau, correlation)
-            *self.active_f*: A function that is being calculated using
-            *self.active_parms*: A list of parameters
-    
-            Uses variables:
-            *self.datacorr*: Plotting data (tuples) of the correlation curve
-            *self.dataexp*: Plotting data (tuples) of the experimental curve
-            *self.tau*: "tau"-values for plotting (included) in dataexp.
-    
-            Returns:
-            Nothing. Recalculation of the mentioned global variables is done.
-        """
-        parameters = self.active_parms[1]
-        # calculate correlation values
-        y = self.active_fct(parameters, self.tau)
-        # Create new plotting data
-        self.datacorr = np.zeros((len(self.tau), 2))
-        self.datacorr[:, 0] = self.tau
-        self.datacorr[:, 1] = y
-
-
-    def check_parms(self, parms):
-        """ Check parameters using self.check_parms_model and the user defined
-            borders for each parameter.
-        """
-        p = 1.*np.array(parms)
-        p = self.check_parms_model(p)
-        r = self.parameter_range
-        for i in range(len(p)):
-            if r[i][0] == r[i][1]:
-                pass
-            elif p[i] < r[i][0]:
-                p[i] = r[i][0]
-            elif p[i] > r[i][1]:
-                p[i] = r[i][1]
-        return p
-            
-        
-    def crop_data(self):
-        """ Crop the pages data for plotting
-            This will create slices from
-            *self.taufull* and *self.dataexpfull* using the values from
-            *self.startcrop* and *self.endcrop*, creating
-            *self.tau* and *self.dataexp*.
-        """
-        if self.dataexpfull is not None:
-            if self.startcrop == self.endcrop:
-                # self.bgcorrect is background correction
-                self.dataexp = 1*self.dataexpfull
-                self.taufull = self.dataexpfull[:,0]
-                self.tau = 1*self.taufull
-                self.startcrop = 0
-                self.endcrop = len(self.taufull)
-            else:
-                self.dataexp = 1*self.dataexpfull[self.startcrop:self.endcrop]
-                self.taufull = self.dataexpfull[:,0]
-                self.tau = 1*self.dataexp[:,0]
-                # If startcrop is larger than the lenght of dataexp,
-                # We will not have an array. Prevent that.
-                if len(self.tau) == 0:
-                    self.tau = 1*self.taufull
-                    self.dataexp = 1*self.dataexpfull
-            try:
-                self.taufull[self.startcrop]
-                self.taufull[self.endcrop-1]
-            except:
-                self.startcrop = 0
-                self.endcrop = len(self.taufull)
-                self.tau = 1*self.taufull
-                self.dataexp = 1*self.dataexpfull
-        else:
-            # We have to check if the startcrop and endcrop parameters are
-            # inside the taufull array.
-            try:
-                # Raises IndexError if index out of bounds
-                self.taufull[self.startcrop]
-                # Raises TypeError if self.endcrop is not an int.
-                self.taufull[self.endcrop-1]
-            except (IndexError, TypeError):
-                self.tau = 1*self.taufull
-                self.endcrop = len(self.taufull)
-                self.startcrop = 0
-            else:
-                self.tau = 1*self.taufull[self.startcrop:self.endcrop]
-
-
-    def CorrectDataexp(self, dataexp):
         """ 
-            Background correction
-            Changes *self.bgcorrect*.
-            Overwrites *self.dataexp*.
-            For details see:
-            
-                Thompson, N. Lakowicz, J.;
-                Geddes, C. D. & Lakowicz, J. R. (ed.)
-                Fluorescence Correlation Spectroscopy
-                Topics in Fluorescence Spectroscopy,
-                Springer US, 2002, 1, 337-378
-            
-            The cross-correlation background correction can be derived in the
-            same manner.
+        Calculate model correlation function
         """
-        # Make a copy. Do not overwrite the original.
-        if dataexp is not None:
-            modified = 1 * dataexp
-            if self.IsCrossCorrelation:
-                # Cross-Correlation
-                if (self.bgselected is not None and
-                    self.bg2selected is not None    ):
-                    if self.tracecc is not None:
-                        S = self.tracecc[0][:,1].mean()
-                        S2 = self.tracecc[1][:,1].mean()
-                        B = self.parent.Background[self.bgselected][0]
-                        B2 = self.parent.Background[self.bg2selected][0]
-                        self.bgcorrect = (S/(S-B)) * (S2/(S2-B2))
-                        modified[:,1] *= self.bgcorrect
-            else:
-                # Autocorrelation
-                if self.bgselected is not None:
-                    # self.bgselected 
-                    if self.traceavg is not None:
-                        S = self.traceavg
-                        B = self.parent.Background[self.bgselected][0]
-                        # Calculate correction factor
-                        self.bgcorrect = (S/(S-B))**2
-                        # self.dataexp should be set, since we have self.trace
-                        modified[:,1] *= self.bgcorrect
-            return modified
-        else:
-            return None
+        return self.corr.modeled
 
 
     def Fit_enable_fitting(self):
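
(The background correction that used to live in CorrectDataexp, removed
above, is now handled inside the Correlation class; for reference, the
underlying factor with made-up numbers.)

    S, B = 25.0, 3.0                  # signal / background countrate [kHz]
    bg_ac = (S / (S - B)) ** 2        # autocorrelation amplitude correction
    # cross-correlation: (S1/(S1-B1)) * (S2/(S2-B2)), as in the removed code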
@@ -377,67 +462,6 @@ class FittingPanel(wx.Panel):
         self.Fitbox[7].Enable()
         self.Fitbox[8].Enable()
 
-
-    def Fit_create_instance(self, noplots=False):
-        """ *noplots* prohibits plotting (e.g. splines) """
-        ### If you change anything here, make sure you
-        ### take a look at the global fit tool!
-        ## Start fitting class and fill with information.
-        self.apply_parameters()
-        Fitting = fit.Fit()
-        # Verbose mode?
-        if noplots is False:
-            Fitting.verbose = self.parent.MenuVerbose.IsChecked()
-        Fitting.uselatex = self.parent.MenuUseLatex.IsChecked()
-        Fitting.check_parms = self.check_parms
-        Fitting.dataexpfull = self.CorrectDataexp(self.dataexpfull)
-        if self.Fitbox[1].GetSelection() == 1:
-            # Knots = self.Fitbox[1].GetValue()
-            # Knots = filter(lambda x: x.isdigit(), Knots)
-            # self.FitKnots = Knots
-            Fitting.fittype = "spline"+str(self.FitKnots)
-            self.parent.StatusBar.SetStatusText("You can change the number"+
-               " of knots. Check 'Preference>Verbose Mode' to view the spline.")
-        elif self.Fitbox[1].GetSelection() == 2:
-            Fitting.fittype = "model function"
-            if self is self.parent.notebook.GetCurrentPage():
-                self.parent.StatusBar.SetStatusText("This is iterative. Press"+
-                 " 'Fit' multiple times. If it does not converge, use splines.")
-        elif self.Fitbox[1].GetSelection() > 2:
-            # This means we have some user defined std, for example from
-            # averaging. This std is stored in self.external_std_weights
-            # list, which looks like this:
-            # self.external_std_weights["from average"] = 1D np.array std
-            Fitting.fittype = "other"
-            Fitlist = self.Fitbox[1].GetItems()
-            FitValue = Fitlist[self.Fitbox[1].GetSelection()]
-            Fitting.external_deviations = self.external_std_weights[FitValue]
-            # Fitting will crop the variances according to
-            # the Fitting.interval that we set below.
-            if self is self.parent.notebook.GetCurrentPage():
-                self.parent.StatusBar.SetStatusText("")
-        else:
-            self.parent.StatusBar.SetStatusText("")
-        Fitting.function = self.active_fct
-        Fitting.interval = [self.startcrop, self.endcrop]
-        Fitting.values = 1*self.active_parms[1]
-        Fitting.valuestofit = 1*self.active_parms[2]
-        Fitting.weights = self.Fitbox[5].GetValue()
-        Fitting.ApplyParameters()
-        # Set weighted_fit_was_performed variables
-        if self.Fitbox[1].GetSelection() == 0:
-            self.weighted_fit_was_performed = False
-            self.weights_used_for_fitting = None
-            self.tauweight = None
-        else:
-            self.weighted_fit_was_performed = True
-            self.weights_used_for_fitting = Fitting.dataweights
-        self.weighted_fittype_id = idf = self.Fitbox[1].GetSelection()
-        self.weighted_fittype = self.Fitbox[1].GetItems()[idf]
-        # Set fitting algorithm
-        Fitting.fit_algorithm = self.fit_algorithm
-        return Fitting
-
         
     def Fit_function(self, event=None, noplots=False, trigger=None):
         """ Calls the fit function.
@@ -449,49 +473,33 @@ class FittingPanel(wx.Panel):
                       to `True`.
         
         """
-        if trigger in ["fit_batch"]:
-            noplots = True
         # Make a busy cursor
         wx.BeginBusyCursor()
         # Apply parameters
         # This also applies the background correction, if present
         self.apply_parameters()
         # Create instance of fitting class
-        Fitting = self.Fit_create_instance(noplots)
-        # Reset page counter
+        
+        # TODO:
+        # 
         self.GlobalParameterShare = list()
-        Fitting.minimize()
+
         try:
-            Fitting.minimize()
+            Fit(self.corr)
         except ValueError:
             # I sometimes had this on Windows. It is caused by fitting to
             # a .SIN file without selection proper channels first.
             print "There was an Error fitting. Please make sure that you\n"+\
                   "are fitting in a proper channel domain."
             wx.EndBusyCursor()
-            return
-        parms = Fitting.valuesoptim
-        # create an error dictionary
-        p_error = Fitting.parmoptim_error
-        if p_error is None:
-            self.parmoptim_error = None
-        else:
-            self.parmoptim_error = dict()
-            errcount = 0
-            for i in np.arange(len(parms)):
-                if self.active_parms[2][i]:
-                    self.parmoptim_error[self.active_parms[0][i]] =p_error[errcount]
-                    errcount += 1
-        self.chi2 = Fitting.chi
-        for i in np.arange(len(parms)):
-            self.active_parms[1][i] = parms[i]
-        # We need this for plotting
-        self.calculate_corr()
-        self.data4weight = 1.*self.datacorr
+            raise
+
         # Update spin-control values
         self.apply_parameters_reverse()
         # Plot everything
         self.PlotAll(trigger=trigger)
+        # update displayed chi2
+        self.updateChi2()
         # Return cursor to normal
         wx.EndBusyCursor()
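
(A minimal sketch of the new fitting entry point used in Fit_function;
apart from Correlation, Fit and the attributes shown in this patch,
everything here, in particular `exp_data`, is an assumed placeholder.)

    from pycorrfit.fcs_data_set import Correlation, Fit

    corr = Correlation(fit_model=6030)   # T+3D+3D, defined earlier
    corr.correlation = exp_data          # assumed (N, 2) array [lag, G]
    Fit(corr)                            # runs the minimization in-place
    print corr.fit_parameters            # optimized parameter values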
 
@@ -530,6 +538,7 @@ class FittingPanel(wx.Panel):
             *check*: The (un)set checkboxes
             *spin*: The spin text fields
         """
+        modelid = self.corr.fit_model.id
         box = wx.StaticBox(self.panelsettings, label=boxlabel)
         sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
         check = list()
@@ -539,14 +548,13 @@ class FittingPanel(wx.Panel):
         # human readable - the way they are displayed 
         # in the Page info tool.
         # 
-        labels = mdls.GetHumanReadableParms(self.modelid,
-                                            self.active_parms[1])[0]
+        labels = mdls.GetHumanReadableParms(modelid,
+                                            self.corr.fit_parameters)[0]
         for label in labels:
             sizerh = wx.BoxSizer(wx.HORIZONTAL)
             checkbox = wx.CheckBox(self.panelsettings, label=label)
             # We needed to "from wx.lib.agw import floatspin" to get this:
-            spinctrl = edclasses.FloatSpin(self.panelsettings, digits=10,
-                                           increment=.01)
+            spinctrl = PCFFloatTextCtrl(self.panelsettings)
             sizerh.Add(spinctrl)
             sizerh.Add(checkbox)
             sizer.Add(sizerh)
@@ -560,6 +568,7 @@ class FittingPanel(wx.Panel):
         """ Enable/Disable BG rate text line.
             New feature introduced in 0.7.8
         """
+        modelid = self.corr.fit_model.id
         ## Normalization to a certain parameter in plots
         # Find all parameters that start with an "N"
         # ? and "C" ?
@@ -570,64 +579,28 @@ class FittingPanel(wx.Panel):
         parameterlist = list()
         for i in np.arange(len(self.active_parms[0])):
             label = self.active_parms[0][i]
-            if label[0] == "n" or label[0] == "N":
+            if label[0].lower() == "n":
                 normlist.append("*"+label)
                 parameterlist.append(i)
         ## Add supplementary parameters
         # Get them from models
-        supplement = mdls.GetMoreInfo(self.modelid, self)
+        supplement = mdls.GetMoreInfo(modelid, self)
         if supplement is not None:
             for i in np.arange(len(supplement)):
                 label = supplement[i][0]
-                if label[0] == "n" or label[0] == "N":
+                if label[0].lower() == "n":
                     normlist.append("*"+label)
                     # Add the id of the supplement starting at the
                     # number of fitting parameters of current page.
                     parameterlist.append(i+len(self.active_parms[0]))
         normsel = self.AmplitudeInfo[2].GetSelection()
-        if event == "init":
-            # Read everything from the page not from the panel
-            # self.normparm was set and we need to set
-            #  self.normfactor
-            #  self.AmplitudeInfo[2]
-            if self.normparm is not None:
-                if self.normparm < len(self.active_parms[1]):
-                    # use fitting parameter from page
-                    self.normfactor =  self.active_parms[1][self.normparm]
-                else:
-                    # use supplementary parameter
-                    supnum = self.normparm - len(self.active_parms[1])
-                    self.normfactor =  supplement[supnum][1]
-                # Set initial selection
-                for j in np.arange(len(parameterlist)):
-                    if parameterlist[j] == self.normparm:
-                        normsel = j+1
-            else:
-                self.normfactor = 1.
-                normsel = 0
+        if normsel in [0, -1]:
+            # init or no normalization selected
+            self.corr.normparm = None
+            normsel = 0 
         else:
-            if normsel > 0:
-                # Make sure we are not normalizing with a background
-                # Use the parameter id from the internal parameterlist
-                parameterid = parameterlist[normsel-1]
-                if parameterid < len(self.active_parms[1]):
-                    # fitting parameter
-                    self.normfactor = self.active_parms[1][parameterid]
-                else:
-                    # supplementary parameter
-                    supnum = parameterid - len(self.active_parms[1])
-                    self.normfactor =  supplement[supnum][1]
-                
-                #### supplement are somehow sorted !!!!
-                # For parameter export:
-                self.normparm = parameterid
-                # No internal parameters will be changed
-                # Only the plotting
-            else:
-                self.normfactor = 1.
-                normsel = 0
-                # For parameter export
-                self.normparm = None
+            self.corr.normparm = parameterlist[normsel-1]
+
         if len(parameterlist) > 0:
             self.AmplitudeInfo[2].Enable()
             self.AmplitudeInfo[3].Enable()
@@ -643,40 +616,35 @@ class FittingPanel(wx.Panel):
         #                       [bgspin1, bgspin2],
         #                       normtoNDropdown, textnor]
         # Signal
-        if self.IsCrossCorrelation:
-            if self.tracecc is not None:
-                S1 = self.tracecc[0][:,1].mean()
-                S2 = self.tracecc[1][:,1].mean()
-                self.AmplitudeInfo[0][0].SetValue("{:.4f}".format(S1))
-                self.AmplitudeInfo[0][1].SetValue("{:.4f}".format(S2))
-            else:
-                self.AmplitudeInfo[0][0].SetValue("{:.4f}".format(0))
-                self.AmplitudeInfo[0][1].SetValue("{:.4f}".format(0))
+        self.AmplitudeInfo[0][0].SetValue("{:.4f}".format(0))
+        self.AmplitudeInfo[0][1].SetValue("{:.4f}".format(0))
+        for i in range(len(self.corr.traces)):
+            S = self.corr.traces[i].countrate
+            self.AmplitudeInfo[0][i].SetValue("{:.4f}".format(S))
+        if self.corr.is_cc:
+            self.AmplitudeInfo[0][1].Enable()
         else:
-            if self.traceavg is not None:
-                self.AmplitudeInfo[0][0].SetValue("{:.4f}".format(
-                                                self.traceavg))
-            else:
-                self.AmplitudeInfo[0][0].SetValue("{:.4f}".format(0))
-            self.AmplitudeInfo[0][1].SetValue("{:.4f}".format(0))
+            self.AmplitudeInfo[0][1].Disable()
         # Background
         ## self.parent.Background[self.bgselected][i]
         ## [0] average signal [kHz]
         ## [1] signal name (edited by user)
         ## [2] signal trace (tuple) ([ms], [kHz])
-        if self.bgselected is not None:
+        if len(self.corr.backgrounds) >= 1:
             self.AmplitudeInfo[1][0].SetValue(
-                        self.parent.Background[self.bgselected][0])
+                        self.corr.backgrounds[0].countrate)
         else:
             self.AmplitudeInfo[1][0].SetValue(0)
-        if self.bg2selected is not None and self.IsCrossCorrelation:
+            self.AmplitudeInfo[1][1].SetValue(0)
+        
+        if len(self.corr.backgrounds) == 2:
             self.AmplitudeInfo[1][1].SetValue(
-                        self.parent.Background[self.bg2selected][0])
+                        self.corr.backgrounds[1].countrate)
         else:
             self.AmplitudeInfo[1][1].SetValue(0)
         # Disable the second line in amplitude correction, if we have
         # autocorrelation only.
-        boolval = self.IsCrossCorrelation
+        boolval = self.corr.is_cc
         for item in self.WXAmplitudeCCOnlyStuff:
             item.Enable(boolval)
 
@@ -689,7 +657,7 @@ class FittingPanel(wx.Panel):
         #self.AmplitudeInfo = [ [intlabel1, intlabel2],
         #                       [bgspin1, bgspin2],
         #                       normtoNDropdown, textnor]
-        if self.IsCrossCorrelation:
+        if self.corr.is_cc:
             # update both self.bgselected and self.bg2selected
             bg = [self.AmplitudeInfo[1][0].GetValue(),
                   self.AmplitudeInfo[1][1].GetValue()]
@@ -700,12 +668,14 @@ class FittingPanel(wx.Panel):
             bg = self.AmplitudeInfo[1][0].GetValue()
             tools.background.ApplyAutomaticBackground(self, bg,
                                                       self.parent)
+        e.Skip()
 
     
     def OnTitleChanged(self, e):
+        modelid = self.corr.fit_model.id
         pid = self.parent.notebook.GetPageIndex(self)
         if self.tabtitle.GetValue() == "":
-            text = self.counter + mdls.modeldict[self.modelid][1]
+            text = self.counter + mdls.modeldict[modelid][1]
         else:
             # How many characters of the the page title should be displayed
             # in the tab? We choose 9: AC1-012 plus 2 whitespaces
@@ -719,6 +689,9 @@ class FittingPanel(wx.Panel):
             Parameter ranges are treated like parameters: They are saved in
             sessions and applied in batch mode.
         """
+        # TODO:
+        # - make range selector work with new class
+        
         # We write a separate tool for that.
         # This tool does not show up in the Tools menu.
         if self.parent.RangeSelector is None:
@@ -763,30 +736,21 @@ class FittingPanel(wx.Panel):
             # We use this to have the page plotted at least once before
             # readout of parameters (e.g. startcrop, endcrop)
             # This is a performance tweak.
-            self.crop_data()
-            if self.InitialPlot is True:
+            if self.InitialPlot:
                 return
             else:
                 self.InitialPlot = True
         ## Enable/Disable, set values frontend normalization
         self.OnAmplitudeCheck()
-        self.crop_data()
-        ## Calculate trace average
-        if self.trace is not None:
-            # Average of the current pages trace
-            self.traceavg = self.trace[:,1].mean()
-        # Perform Background correction
-        self.dataexp = self.CorrectDataexp(self.dataexp)
         ## Apply parameters
         self.apply_parameters()
         # Calculate correlation function from parameters
-        self.calculate_corr()
         ## Drawing of correlation plot
-        # Plots self.dataexp and the calcualted correlation function 
+        # Plots corr.correlation_fit and the calculated correlation function
         # self.datacorr into the upper canvas.
         # Create a line @ y=zero:
-        zerostart = self.tau[0]
-        zeroend = self.tau[-1]
+        zerostart = self.corr.lag_time_fit[0]
+        zeroend = self.corr.lag_time_fit[-1]
         datazero = [[zerostart, 0], [zeroend,0]]
         # Set plot colors
         width = 1   
@@ -796,64 +760,64 @@ class FittingPanel(wx.Panel):
         lines = list()
         linezero = plot.PolyLine(datazero, colour='orange', width=width)
         lines.append(linezero)
+        if self.corr.correlation is not None:
+            if self.corr.is_weighted_fit and \
+               self.parent.MenuShowWeights.IsChecked():
+                try:
+                    weights = self.corr.fit_results["fit weights"]
+                except:
+                    weights = self.corr.fit_weight_data
                 
-        if self.dataexp is not None:
-            if self.weighted_fit_was_performed == True and \
-               self.weights_used_for_fitting is not None and \
-               self.parent.MenuShowWeights.IsChecked() and \
-               self.data4weight is not None:
-                # Add the weights to the graph.
-                # This is done by drawing two lines.
-                w = 1*self.data4weight
-                w1 = 1*w
-                w2 = 1*w
-                w1[:, 1] = w[:, 1] + self.weights_used_for_fitting 
-                w2[:, 1] = w[:, 1] - self.weights_used_for_fitting 
-                wend = 1*self.weights_used_for_fitting 
-                # crop w1 and w2 if self.dataexp does not include all
-                # data points.
-                if np.all(w[:,0] == self.dataexp[:,0]):
-                    pass
-                else:
-                    start = np.min(self.dataexp[:,0])
-                    end = np.max(self.dataexp[:,0])
-                    idstart = np.argwhere(w[:,0]==start)
-                    idend = np.argwhere(w[:,0]==end)
-                    if len(idend) == 0:
-                        # dataexp is longer, do not change anything
-                        pass
-                    else:
-                        w1 = w1[:idend[0][0]+1]
-                        w2 = w2[:idend[0][0]+1]
-                        wend = wend[:idend[0][0]+1]
-                    if len(idstart) == 0:
-                        # dataexp starts earlier, do not change anything
+                if isinstance(weights, np.ndarray):
+                    # user might have selected a new weight type and
+                    # presses apply, do not try to display weights
+
+                    # If the weights come from an average or elsewhere,
+                    # make sure that the dimensions are correct.
+                    if weights.shape[0] == self.corr.correlation.shape[0]:
+                        weights = weights[self.corr.fit_ival[0]:self.corr.fit_ival[1]]
+                        
+                    if np.allclose(weights, np.ones_like(weights)):
+                        weights = 0
+                    elif weights.shape[0] != self.corr.modeled_fit.shape[0]:
+                        # non-matching weights
+                        warnings.warn("Mismatched weights found. Probably from a previous data set.")
+                        weights = 0
+
+                    # Add the weights to the graph.
+                    # This is done by drawing two lines.
+                    w = 1*self.corr.modeled_fit
+                    w1 = 1*w
+                    w2 = 1*w
+                    
+                    w1[:, 1] = w[:, 1] + weights
+                    w2[:, 1] = w[:, 1] - weights
+                    # crop w1 and w2 if corr.correlation_fit does not include all
+                    # data points.
+                    if np.all(w[:,0] == self.corr.correlation_fit[:,0]):
                         pass
                     else:
-                        w1 = w1[idstart[0][0]:]
-                        w2 = w2[idstart[0][0]:]
-                        wend = wend[idstart[0][0]:]
-                ## Normalization with self.normfactor
-                w1[:,1] *= self.normfactor
-                w2[:,1] *= self.normfactor
-                self.weights_used_for_plotting = wend
-                self.weights_plot_fill_area = [w1,w2]
-                lineweight1 = plot.PolyLine(w1, legend='',
-                                          colour=colweight, width=width)
-                lines.append(lineweight1)
-                lineweight2 = plot.PolyLine(w2, legend='',
-                                          colour=colweight, width=width)
-                lines.append(lineweight2)
+                        raise ValueError("This should not have happened: size of weights is wrong.")
+                    ## Normalization with self.normfactor
+                    w1[:,1] *= self.corr.normalize_factor
+                    w2[:,1] *= self.corr.normalize_factor
+                    self.weights_plot_fill_area = [w1,w2]
+                    lineweight1 = plot.PolyLine(w1, legend='',
+                                              colour=colweight, width=width)
+                    lines.append(lineweight1)
+                    lineweight2 = plot.PolyLine(w2, legend='',
+                                              colour=colweight, width=width)
+                    lines.append(lineweight2)
+            else:
+                self.weights_plot_fill_area = None
                 
             ## Plot Correlation curves
             # Plot both, experimental and calculated data
             # Normalization with self.normfactor, new feature in 0.7.8
-            datacorr_norm = 1*self.datacorr
-            datacorr_norm[:,1] *= self.normfactor
-            dataexp_norm = 1*self.dataexp
-            dataexp_norm[:,1] *= self.normfactor
+            datacorr_norm = self.corr.modeled_plot
             linecorr = plot.PolyLine(datacorr_norm, legend='', colour=colfit,
                                      width=width)
+            dataexp_norm = self.corr.correlation_plot
             lineexp = plot.PolyLine(dataexp_norm, legend='', colour=colexp,
                                     width=width)
             # Draw linezero first, so it is in the background
@@ -863,18 +827,12 @@ class FittingPanel(wx.Panel):
                                 xLabel=u'lag time τ [ms]', yLabel=u'G(τ)')
             self.canvascorr.Draw(PlotCorr)
             ## Calculate residuals
-            self.resid = np.zeros((len(self.tau), 2))
-            self.resid[:, 0] = self.tau
-            self.resid[:, 1] = self.dataexp[:, 1] - self.datacorr[:, 1]
-            # Plot residuals
-            # Normalization with self.normfactor, new feature in 0.7.8
-            resid_norm = np.zeros((len(self.tau), 2))
-            resid_norm[:, 0] = self.tau
-            resid_norm[:, 1] = dataexp_norm[:, 1] - datacorr_norm[:, 1]
+            resid_norm = self.corr.residuals_plot
             lineres = plot.PolyLine(resid_norm, legend='', colour=colfit,
                                     width=width)
+            
             # residuals or weighted residuals?
-            if self.weighted_fit_was_performed:
+            if self.corr.is_weighted_fit:
                 yLabelRes = "weighted \nresiduals"
             else:
                 yLabelRes = "residuals"
@@ -884,8 +842,7 @@ class FittingPanel(wx.Panel):
             self.canvaserr.Draw(PlotRes)
         else:
             # Amplitude normalization, new feature in 0.7.8
-            datacorr_norm = 1*self.datacorr
-            datacorr_norm[:,1] *= self.normfactor
+            datacorr_norm = self.corr.modeled_plot
             linecorr = plot.PolyLine(datacorr_norm, legend='', colour='blue',
                                      width=1)
             PlotCorr = plot.PlotGraphics([linezero, linecorr],
@@ -894,26 +851,17 @@ class FittingPanel(wx.Panel):
         self.parent.OnFNBPageChanged(trigger=trigger)
 
 
-    def SetCorrelationType(self, iscc, init=False):
-        """
-            The correlation type (AC or CC) of the page is set if data
-            is imported to the page (parent.ImportData).
-            In this case, init is `True`, else `False`.
-        """
-        if init:
-            self.IsCrossCorrelation = iscc
-
-
     def settings(self):
         """ Here we define, what should be displayed at the left side
             of the fitting page/tab.
             Parameters:
         """
+        modelid = self.corr.fit_model.id
         horizontalsize = self.sizepanelx-10
         # Title
         # Create empty tab title
-        mddat = mdls.modeldict[self.modelid]
-        modelshort = mdls.GetModelType(self.modelid)
+        mddat = mdls.modeldict[modelid]
+        modelshort = mdls.GetModelType(modelid)
         titlelabel = u"Data set ({} {})".format(modelshort, mddat[1])
         boxti = wx.StaticBox(self.panelsettings, label=titlelabel)
         sizerti = wx.StaticBoxSizer(boxti, wx.VERTICAL)
@@ -932,14 +880,13 @@ class FittingPanel(wx.Panel):
         # human readable - the way they are displayed 
         # in the Page info tool.
         # 
-        labels, parameters = mdls.GetHumanReadableParms(self.modelid,
+        labels, parameters = mdls.GetHumanReadableParms(modelid,
                                                 self.active_parms[1])
         parameterstofit = self.active_parms[2]
         # Set initial values given by user/programmer for Diffusion Model
         for i in np.arange(len(labels)):
             self.checkboxes[i].SetValue(parameterstofit[i]) 
             self.spincontrol[i].SetValue(parameters[i])
-            self.spincontrol[i].increment()
         # Put everything together
         self.panelsettings.sizer = wx.BoxSizer(wx.VERTICAL)
         self.panelsettings.sizer.Add(sizerti)
@@ -968,10 +915,9 @@ class FittingPanel(wx.Panel):
                     label="Background"))
         sizeint.Add(wx.StaticText(self.panelsettings, label="Ch1"))
         intlabel1 = wx.TextCtrl(self.panelsettings)
-        bgspin1 = floatspin.FloatSpin(self.panelsettings,
-                        increment=0.01, digits=4, min_val=0)
-        self.Bind(floatspin.EVT_FLOATSPIN, self.OnBGSpinChanged,
-                  bgspin1)
+        bgspin1 = PCFFloatTextCtrl(self.panelsettings)
+        bgspin1.Bind(wx.EVT_KILL_FOCUS, self.OnBGSpinChanged)
+        bgspin1.Bind(wx.EVT_TEXT_ENTER, self.OnBGSpinChanged)
         sizeint.Add(intlabel1)
         intlabel1.SetEditable(False)
         sizeint.Add(bgspin1)
@@ -979,10 +925,8 @@ class FittingPanel(wx.Panel):
         sizeint.Add(chtext2)
         intlabel2 = wx.TextCtrl(self.panelsettings)
         intlabel2.SetEditable(False)
-        bgspin2 = floatspin.FloatSpin(self.panelsettings,
-                        increment=0.01, digits=4, min_val=0)
-        self.Bind(floatspin.EVT_FLOATSPIN, self.OnBGSpinChanged,
-                  bgspin2)
+        bgspin2 = PCFFloatTextCtrl(self.panelsettings)
+        bgspin2.Bind(wx.EVT_KILL_FOCUS, self.OnBGSpinChanged)
         sizeint.Add(intlabel2)
         sizeint.Add(bgspin2)
         miscsizer.Add(sizeint)
@@ -1003,7 +947,7 @@ class FittingPanel(wx.Panel):
         fitsizer.SetMinSize((horizontalsize, -1))
         # Add a checkbox for weighted fitting
         weightedfitdrop = wx.ComboBox(self.panelsettings)
-        self.weightlist = ["No weights", "Spline (5 knots)", "Model function"]
+        self.weightlist = ["no weights", "spline (5 knots)", "model function"]
         weightedfitdrop.SetItems(self.weightlist)
         weightedfitdrop.SetSelection(0)
         fitsizer.Add(weightedfitdrop)
@@ -1031,16 +975,21 @@ class FittingPanel(wx.Panel):
         textalg = wx.StaticText(self.panelsettings, label="Algorithm")
         fitsizer.Add(textalg)
         self.AlgorithmDropdown = wx.ComboBox(self.panelsettings)
-        items = fit.GetAlgorithmStringList()[1]
+        items = pcfbase.GetAlgorithmStringList()[1]
         self.AlgorithmDropdown.SetItems(items)
         self.Bind(wx.EVT_COMBOBOX, self.apply_parameters,
                   self.AlgorithmDropdown)
         fitsizer.Add(self.AlgorithmDropdown)
         self.AlgorithmDropdown.SetMaxSize(weightedfitdrop.GetSize())
-        # Add button "Fit"
+        # Add button "Fit" and chi2 display
+        fitbuttonsizer = wx.BoxSizer(wx.HORIZONTAL)
         buttonfit = wx.Button(self.panelsettings, label="Fit")
         self.Bind(wx.EVT_BUTTON, self.Fit_function, buttonfit)
-        fitsizer.Add(buttonfit)
+        fitbuttonsizer.Add(buttonfit)
+        self.WXTextChi2 = wx.StaticText(self.panelsettings)
+        # this StaticText is updated by `self.updateChi2()`
+        fitbuttonsizer.Add(self.WXTextChi2, flag=wx.ALIGN_CENTER)
+        fitsizer.Add(fitbuttonsizer)
         self.panelsettings.sizer.Add(fitsizer)
         # Squeeze everything into the sizer
         self.panelsettings.SetSizer(self.panelsettings.sizer)
@@ -1057,3 +1006,23 @@ class FittingPanel(wx.Panel):
         self.panelsettings.sizer.Fit(self.panelsettings)
         self.parent.Layout()
 
+
+    def updateChi2(self):
+        """
+        Update the self.WXTextChi2 text control.
+        """
+        label = u""
+        if hasattr(self.corr, "fit_results"):
+            if "chi2" in self.corr.fit_results:
+                chi2 = self.corr.fit_results["chi2"]
+                chi2str = float2string_nsf(chi2, n=3)
+                chi2str = nice_string(chi2str)
+                label = u"  χ²={}".format(chi2str)
+        # This does not work with wxPython 2.8.12:
+        #self.WXTextChi2.SetLabelMarkup(u"<b>{}</b>".format(label))
+        self.WXTextChi2.SetLabel(u"{}".format(label))
+
+        
+        
+        
+        
\ No newline at end of file
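
For orientation, `updateChi2` only formats a value that the fit routine
has stored in `corr.fit_results["chi2"]`. A minimal sketch of how such a
figure is conventionally computed from residuals and weights
(illustrative only; `chi_squared` is a hypothetical helper, not
PyCorrFit's API):

    import numpy as np

    def chi_squared(data, model, weights=None):
        # Sum of squared residuals; divide by the weights first
        # if a weighted fit was performed.
        residuals = data - model
        if weights is not None:
            residuals = residuals / weights
        return np.sum(residuals**2)

    # e.g. chi2 = chi_squared(corr[:, 1], fit[:, 1], weights)
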
diff --git a/pycorrfit/plotting.py b/pycorrfit/plotting.py
index ebd92b7..ddbfe07 100644
--- a/pycorrfit/plotting.py
+++ b/pycorrfit/plotting.py
@@ -29,7 +29,8 @@ from . import models as mdls
 
 def greek2tex(char):
     """ Converts greek UTF-8 letters to latex """
-    decchar = codecs.decode(char, "UTF-8")
+    #decchar = codecs.decode(char, "UTF-8")
+    decchar = char
     repres = unicodedata.name(decchar).split(" ")
     # GREEK SMALL LETTER ALPHA
     if repres[0] == "GREEK" and len(repres) == 4:
@@ -43,7 +44,7 @@ def greek2tex(char):
 
 def escapechars(string):
     """ For latex output, some characters have to be escaped with a "\\" """
-    string = codecs.decode(string, "UTF-8")
+    #string = codecs.decode(string, "UTF-8")
     escapechars = ["#", "$", "%", "&", "~", "_", "\\", "{", "}"] 
     retstr = ur""
     for char in string:
@@ -65,7 +66,7 @@ def latexmath(string):
         return r"\mathrm{offset}"
     elif string == "SP":
         return r"\mathrm{SP}"
-    string = codecs.decode(string, "UTF-8")
+    #string = codecs.decode(string, "UTF-8")
     unicodechars = dict()
     #unicodechars[codecs.decode("τ", "UTF-8")] = r"\tau"
     #unicodechars[codecs.decode("µ", "UTF-8")] = r"\mu"
@@ -78,13 +79,24 @@ def latexmath(string):
     # We need lambda in here, because unicode names it lamda sometimes.
     unicodechars[codecs.decode("λ", "UTF-8")] = r"\lambda"
     #unicodechars[codecs.decode("η", "UTF-8")] = r'\eta'
+    unitchars = dict()
+    unitchars[codecs.decode("µ", "UTF-8")] = r"\micro "
     items = string.split(" ", 1)
     a = items[0]
     if len(items) > 1:
         b = items[1]
+        if b.count(u"µ"):
+            # Use siunitx with the upright µ
+            bnew = ur"[\SI{}{"
+            for char in b.strip("[]"):
+                if char in unitchars.keys():
+                    bnew += unitchars[char]
+                else:
+                    bnew += char
+            b = bnew+ur"}]"
     else:
         b = ""
-    anew = r""
+    anew = ur""
     for char in a:
         if char in unicodechars.keys():
             anew += unicodechars[char]
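
The conversion in `greek2tex` builds on `unicodedata.name`, which yields
e.g. "GREEK SMALL LETTER ALPHA" for "α" (and "LAMDA" for lambda, hence
the special-casing above). A standalone sketch of the same idea
(Python 2, matching the codebase; `greek2tex_sketch` is hypothetical):

    # -*- coding: utf-8 -*-
    import unicodedata

    def greek2tex_sketch(char):
        # "GREEK SMALL LETTER ALPHA" -> ["GREEK", "SMALL", "LETTER", "ALPHA"]
        parts = unicodedata.name(char).split(" ")
        if parts[0] == "GREEK" and len(parts) == 4:
            name = parts[3].lower()
            if parts[1] == "CAPITAL":
                name = name.capitalize()
            return "\\" + name
        return char

    # greek2tex_sketch(u"τ") -> "\\tau"
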
@@ -116,49 +128,29 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
         plt.close()
     except:
         pass
-    # As of version 0.7.8 the user may export data normalized to a certain
-    # parameter.
-    if Page.dataexp is not None:
-        dataexp = 1*Page.dataexp
-        resid = 1*Page.resid
-        dataexp[:,1] *= Page.normfactor
-        resid[:,1] *= Page.normfactor
-    else:
-        dataexp = Page.dataexp
-        resid = Page.resid
-    fit = 1*Page.datacorr
-    fit[:,1] *= Page.normfactor
+    # get data
+    corr = Page.corr
+    dataexp = corr.correlation_plot
+    resid = corr.residuals_plot
+    fit = corr.modeled_plot
+
     weights = Page.weights_plot_fill_area
     tabtitle = Page.tabtitle.GetValue()
     #fitlabel = ur"Fit model: "+str(mdls.modeldict[Page.modelid][0])
-    fitlabel = Page.modelname
+    fitlabel = Page.corr.fit_model.name
     labelweights = ur"Weights of fit"
     labels, parms = mdls.GetHumanReadableParms(Page.modelid,
-                                               Page.active_parms[1])
-    ## According to issue #54, we remove fitting errors from plots
-    ## Error parameters with nice look
-    #errparmsblank = Page.parmoptim_error
-    #if errparmsblank is None:
-    #    errparms = None
-    #else:
-    #    errparms = dict()
-    #    for key in errparmsblank.keys():
-    #        newkey, newparm = mdls.GetHumanReadableParameterDict(Page.modelid,
-    #                                                    key, errparmsblank[key])
-    #        errparms[newkey] = newparm
-    #parmids = np.where(Page.active_parms[2])[0]
-    #labels = np.array(labels)[parmids]
-    #parms = np.array(parms)[parmids]
+                                               corr.fit_parameters)
     if dataexp is None:
         if tabtitle.strip() == "":
-            fitlabel = Page.modelname
+            fitlabel = Page.corr.fit_model.name
         else:
             fitlabel = tabtitle
     else:
         if tabtitle.strip() == "":
             tabtitle = "page"+str(Page.counter).strip().strip(":")
-    if Page.normparm is not None:
-        fitlabel += ur", normalized to "+Page.active_parms[0][Page.normparm]
+    if Page.corr.normparm is not None:
+        fitlabel += ur", normalized to "+Page.corr.fit_model.parameters[0][Page.corr.normparm]
 
     ## Check if we can use latex for plotting:
     r1 = findprogram("latex")[0]
@@ -175,7 +167,8 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
         rcParams['font.family']='serif'
         rcParams['text.latex.preamble']=[r"""\usepackage{amsmath}
                                             \usepackage[utf8x]{inputenc}
-                                            \usepackage{amssymb}"""] 
+                                            \usepackage{amssymb}
+                                            \usepackage{siunitx}"""] 
         fitlabel = ur"{\normalsize "+escapechars(fitlabel)+r"}"
         tabtitle = ur"{\normalsize "+escapechars(tabtitle)+r"}"
         labelweights = ur"{\normalsize "+escapechars(labelweights)+r"}"
@@ -184,6 +177,7 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
     # create plot
     # plt.plot(x, y, '.', label = 'original data', markersize=5)
     fig=plt.figure()
+    fig.canvas.set_window_title("Correlation - "+Page.title)
     if resid is not None:
         gs = gridspec.GridSpec(2, 1, height_ratios=[5,1])
         ax = plt.subplot(gs[0])
@@ -191,18 +185,15 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
         ax = plt.subplot(111)
         #    ax = plt.axes()
     ax.semilogx()
+    # plot fit first
+    plt.plot(fit[:,0], fit[:,1], '-', label=fitlabel, lw=1.5,
+             color="blue")
     if dataexp is not None:
-        plt.plot(dataexp[:,0], dataexp[:,1], '-', color="darkgrey",
-                 label=tabtitle)
+        plt.plot(dataexp[:,0], dataexp[:,1], '-', color="black",
+                 alpha=.7, label=tabtitle, lw=1)
     else:
-        plt.xlabel(r'lag time $\tau$ [ms]')
-    # Plotting with error bars is very ugly if you have a lot of
-    # data points.
-    # We will use fill_between instead.
-    #plt.errorbar(fit[:,0], fit[:,1], yerr=weights, fmt='-',
-    #             label = fitlabel, lw=2.5, color="blue")
-    plt.plot(fit[:,0], fit[:,1], '-', label = fitlabel, lw=2.5,
-             color="blue")    
+        plt.xlabel(ur'lag time $\tau$ [ms]')
+    
     if weights is not None and show_weights is True:
         plt.fill_between(weights[0][:,0],weights[0][:,1],weights[1][:,1],
                          color='cyan')
@@ -230,7 +221,7 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
         text += r'\begin{split}' # ...but they are all concatenated
         #                          by the interpreter :-)
         for i in np.arange(len(parms)):
-            text += r' {} &= {:.3g} \\'.format(latexmath(labels[i]), parms[i])
+            text += ur' {} &= {:.3g} \\'.format(latexmath(labels[i]), parms[i])
         ## According to issue #54, we remove fitting errors from plots
         #if errparms is not None:
         #    keys = errparms.keys()
@@ -242,7 +233,7 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
     else:
         text = ur""
         for i in np.arange(len(parms)):
-            text += "{} = {:.3g}\n".format(labels[i], parms[i])
+            text += u"{} = {:.3g}\n".format(labels[i], parms[i])
         ## According to issue #54, we remove fitting errors from plots
         #if errparms is not None:
         #    keys = errparms.keys()
@@ -262,7 +253,7 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
         ax2 = plt.subplot(gs[1])
         #ax2 = plt.axes()
         ax2.semilogx()
-        if Page.weighted_fit_was_performed:
+        if Page.corr.is_weighted_fit:
             if uselatex == True:
                 lb = r"\newline \indent "
             else:
@@ -270,13 +261,16 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
             yLabelRes = "weighted "+ lb +"residuals"
         else:
             yLabelRes = "residuals"
-        plt.plot(resid[:,0], resid[:,1], '-', color="darkgrey", label=yLabelRes)
-        plt.xlabel(r'lag time $\tau$ [ms]')
-        plt.ylabel(yLabelRes, multialignment='center')
         minx = np.min(resid[:,0])
         maxx = np.max(resid[:,0])
         miny = np.min(resid[:,1])
         maxy = np.max(resid[:,1])
+        plt.hlines(0, minx, maxx, colors="orange")
+        plt.plot(resid[:,0], resid[:,1], '-', color="black",
+                 alpha=.85, label=yLabelRes, lw=1)
+        plt.xlabel(r'lag time $\tau$ [ms]')
+        plt.ylabel(yLabelRes, multialignment='center')
+
         ax2.set_xlim(minx, maxx)
         maxy = max(abs(maxy), abs(miny))
         ax2.set_ylim(-maxy, maxy)
@@ -287,16 +281,19 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
     fig.canvas.HACK_parent = parent
     fig.canvas.HACK_fig = fig
     fig.canvas.HACK_Page = Page
-    fig.canvas.HACK_append = ""
+    fig.canvas.HACK_append = ".png"
     
 
     # Legend outside of plot
     # Decrease size of plot to fit legend
     box = ax.get_position()
-    box2 = ax2.get_position()
+    
     ax.set_position([box.x0, box.y0 + box.height * 0.2,
                      box.width, box.height * 0.9])
-    ax2.set_position([box2.x0, box2.y0 + box.height * 0.2,
+    
+    if resid is not None:
+        box2 = ax2.get_position()
+        ax2.set_position([box2.x0, box2.y0 + box.height * 0.2,
                      box2.width, box2.height])
     
     ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.55),
@@ -341,17 +338,14 @@ def savePlotTrace(parent, dirname, Page, uselatex=False, verbose=False):
     if tabtitle.strip() == "":
         tabtitle = "page"+str(Page.counter).strip().strip(":")
     # Intensity trace in kHz may stay the same
-    if Page.trace is not None:
-        # Set trace
-        traces = [Page.trace]
-        labels = ["{} ({:.2f} kHz)".format(tabtitle, np.average(traces[0][:,1]))]
-    elif Page.tracecc is not None:
-        # We have some cross-correlation here. Two traces.
-        traces = Page.tracecc
-        labels = ["{} A ({:.4g} kHz)".format(tabtitle, np.average(traces[0][:,1])),
-                  "{} B ({:.4g} kHz)".format(tabtitle, np.average(traces[1][:,1]))]
-    else:
+    if len(Page.corr.traces) == 0:
         return
+    
+    traces = Page.corr.traces
+    labels = list()
+    for ii, tr in enumerate(traces):
+        labels.append("Channel {}: {}".format(ii+1, tr.name))
+
     ## Check if we can use latex for plotting:
     r1 = findprogram("latex")[0]
     r2 = findprogram("dvipng")[0]
@@ -372,7 +366,8 @@ def savePlotTrace(parent, dirname, Page, uselatex=False, verbose=False):
         rcParams['text.usetex']=False
     # create plot
     # plt.plot(x, y, '.', label = 'original data', markersize=5)
-    fig=plt.figure()
+    fig=plt.figure(figsize=(10,3))
+    fig.canvas.set_window_title("Trace - "+Page.title)
     ax = plt.subplot(111)
     for i in np.arange(len(traces)):
         # Columns
@@ -381,7 +376,14 @@ def savePlotTrace(parent, dirname, Page, uselatex=False, verbose=False):
         plt.plot(time, intensity, '-', 
                  label = labels[i],
                  lw=1)
-                 
+    # set plot boundaries
+    maxy = -np.infty
+    miny = np.infty
+    for tr in traces:
+        maxy = max(np.max(tr[:,1]), maxy)
+        miny = min(np.min(tr[:,1]), miny)
+    ax.set_ylim(miny, maxy)
+
     plt.ylabel('count rate [kHz]')
     plt.xlabel('time [s]')
     
@@ -391,15 +393,18 @@ def savePlotTrace(parent, dirname, Page, uselatex=False, verbose=False):
     ax.set_position([box.x0, box.y0 + box.height * 0.2,
                      box.width, box.height * 0.9])
     plt.legend(loc='upper center', 
-               bbox_to_anchor=(0.5, -0.15),
-               prop={'size':9})
+               bbox_to_anchor=(0.5, -0.35),
+               prop={'size':9},
+               )
     
     ## Hack
     # We need this for hacking. See edclasses.
     fig.canvas.HACK_parent = parent
     fig.canvas.HACK_fig = fig
     fig.canvas.HACK_Page = Page
-    fig.canvas.HACK_append = "_trace"
+    fig.canvas.HACK_append = "_trace.png"
+
+    plt.tight_layout(rect=(.001,.34,.999,1.0))
 
     if verbose == True:
         plt.show()
@@ -416,63 +421,5 @@ def savePlotTrace(parent, dirname, Page, uselatex=False, verbose=False):
         except:
             pass
 
-
-def savePlotSingle(name, x, dataexp, datafit, dirname = ".", uselatex=False):
-    """ CURRENTLY THIS FUNCTION IS NOT USED BY PYCORRFIT
-        Show log plot of correlation function without residuals. 
-        Parameters:
-        *name*      name of curve in legend
-        *x*         tau-values to plot
-        *dataexp*   correlation data to plot
-        *datafit*   fitted curve to correlation data
-        *dirname*   initial directory for dialog (not used here)
-        *uselatex*  use latex for plotting
-        This function uses a hack in misc.py to change the function
-        for saving the final figure. We wanted save in the same directory
-        as PyCorrFit was working and the filename should be the tabtitle.
-    """
-    # This is a dirty hack to make sure no plots are opened
-    try:
-        plt.close()
-    except:
-        pass
-    ## Check if we can use latex for plotting:
-    r1 = findprogram("latex")[0]
-    r2 = findprogram("dvipng")[0]
-    # Ghostscript
-    r31 = findprogram("gs")[0]
-    r32 = findprogram("mgs")[0] # from miktex
-    r3 = max(r31,r32)
-    if r1+r2+r3 < 3:
-        uselatex = False
-    if uselatex == True:
-        rcParams['text.usetex']=True
-        rcParams['text.latex.unicode']=True
-        rcParams['font.family']='serif'
-        rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"] 
-        name = ur"{\normalsize "+escapechars(name)+r"}"
-    else:
-        rcParams['text.usetex']=False
-    # create plot
-    # plt.plot(x, y, '.', label = 'original data', markersize=5)
-    plt.figure()
-    ax = plt.subplot(111)
-    #    ax = plt.axes()
-    ax.semilogx()
-    plt.plot(x, dataexp,'-', color="darkgrey")
-    plt.xlabel(r'lag time $\tau$ [ms]')
-    plt.plot(x, datafit, '-', label = name,
-             lw=2.5, color="blue")
-    plt.ylabel('correlation')
-    mind = np.min([ dataexp, datafit])
-    maxd = np.max([ dataexp, datafit])
-    ymin = mind - (maxd - mind)/20.
-    ymax = maxd + (maxd - mind)/20.
-    ax.set_ylim(bottom=ymin, top=ymax)
-    xmin = np.min(x)
-    xmax = np.max(x)
-    ax.set_xlim(xmin, xmax)
-    # Add some more stuff to the text and append data to a .txt file
-    #text = Auswert(parmname, parmoptim, text, savename)
-    plt.legend()
-    plt.show()
+# set dpi to 300
+matplotlib.rcParams['savefig.dpi'] = 300
\ No newline at end of file
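
The cyan weight band drawn by `savePlotCorrelation` consists of the
fitted curve shifted up and down by the fit weights (the two curves in
`Page.weights_plot_fill_area`), shaded with `fill_between`. A
self-contained sketch with synthetic data (assumed values, not
PyCorrFit internals):

    import matplotlib.pyplot as plt
    import numpy as np

    tau = np.logspace(-3, 2, 200)         # lag time [ms]
    fit = 1.0 / (1.0 + tau)               # some fitted curve
    weights = 0.05 * np.ones_like(tau)    # fit weights

    fig, ax = plt.subplots()
    ax.semilogx()
    # shade fit-weights ... fit+weights, as in savePlotCorrelation
    ax.fill_between(tau, fit - weights, fit + weights, color="cyan")
    ax.plot(tau, fit, "-", color="blue", lw=1.5)
    plt.show()
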
diff --git a/pycorrfit/readfiles/read_ASC_ALV.py b/pycorrfit/readfiles/read_ASC_ALV.py
index 6e206aa..c2fc80b 100644
--- a/pycorrfit/readfiles/read_ASC_ALV.py
+++ b/pycorrfit/readfiles/read_ASC_ALV.py
@@ -10,54 +10,71 @@ import numpy as np
 
 
 def openASC(dirname, filename):
+    """ 
+    Read data from an ALV .ASC file.
+    """
+    path = os.path.join(dirname, filename)
+    with open(path, 'r') as openfile:
+        Alldata = openfile.readlines()
+    
+    # Open special format?
+    filetype = Alldata[0].strip() 
+    if filetype in ["ALV-7004/USB"]:
+        return openASC_ALV_7004_USB(path)
+    else:
+        # last resort
+        return openASC_old(path)
+
+
+def openASC_old(path):
     """ Read data from a .ASC file, created by
         some ALV-6000 correlator.
 
             ALV-6000/E-WIN Data
-            Date :	"2/20/2012"
+            Date :    "2/20/2012"
             ...
             "Correlation"
-              1.25000E-004	  3.00195E-001
-              2.50000E-004	  1.13065E-001
-              3.75000E-004	  7.60367E-002
-              5.00000E-004	  6.29926E-002
-              6.25000E-004	  5.34678E-002
-              7.50000E-004	  4.11506E-002
-              8.75000E-004	  4.36752E-002
-              1.00000E-003	  4.63146E-002
-              1.12500E-003	  3.78226E-002
+              1.25000E-004      3.00195E-001
+              2.50000E-004      1.13065E-001
+              3.75000E-004      7.60367E-002
+              5.00000E-004      6.29926E-002
+              6.25000E-004      5.34678E-002
+              7.50000E-004      4.11506E-002
+              8.75000E-004      4.36752E-002
+              1.00000E-003      4.63146E-002
+              1.12500E-003      3.78226E-002
             ...
-              3.35544E+004	 -2.05799E-006
-              3.77487E+004	  4.09032E-006
-              4.19430E+004	  4.26295E-006
-              4.61373E+004	  1.40265E-005
-              5.03316E+004	  1.61766E-005
-              5.45259E+004	  2.19541E-005
-              5.87202E+004	  3.26527E-005
-              6.29145E+004	  2.72920E-005
+              3.35544E+004     -2.05799E-006
+              3.77487E+004      4.09032E-006
+              4.19430E+004      4.26295E-006
+              4.61373E+004      1.40265E-005
+              5.03316E+004      1.61766E-005
+              5.45259E+004      2.19541E-005
+              5.87202E+004      3.26527E-005
+              6.29145E+004      2.72920E-005
 
             "Count Rate"
-               1.17188	      26.77194
-               2.34375	      26.85045
-               3.51563	      27.06382
-               4.68750	      26.97932
-               5.85938	      26.73694
-               7.03125	      27.11332
-               8.20313	      26.81376
-               9.37500	      26.82741
-              10.54688	      26.88801
-              11.71875	      27.09710
-              12.89063	      27.13209
-              14.06250	      27.02200
-              15.23438	      26.95287
-              16.40625	      26.75657
-              17.57813	      26.43056
+               1.17188          26.77194
+               2.34375          26.85045
+               3.51563          27.06382
+               4.68750          26.97932
+               5.85938          26.73694
+               7.03125          27.11332
+               8.20313          26.81376
+               9.37500          26.82741
+              10.54688          26.88801
+              11.71875          27.09710
+              12.89063          27.13209
+              14.06250          27.02200
+              15.23438          26.95287
+              16.40625          26.75657
+              17.57813          26.43056
             ...
-             294.14063	      27.22597
-             295.31250	      26.40581
-             296.48438	      26.33497
-             297.65625	      25.96457
-             298.82813	      26.71902
+             294.14063          27.22597
+             295.31250          26.40581
+             296.48438          26.33497
+             297.65625          25.96457
+             298.82813          26.71902
 
         1. We are interested in the "Correlation" section,
         where the first column denotes tau in ms and the second column the
@@ -88,8 +105,9 @@ def openASC(dirname, filename):
          from the file. Elements can be names and must be convertible to
          strings.
     """
-    openfile = open(os.path.join(dirname, filename), 'r')
-    Alldata = openfile.readlines()
+    filename = os.path.basename(path)
+    with open(path, 'r') as openfile:
+        Alldata = openfile.readlines()
     # End of trace
     EndT = Alldata.__len__()
     ## Correlation function
@@ -97,9 +115,11 @@ def openASC(dirname, filename):
     for i in np.arange(len(Alldata)):
         if Alldata[i].startswith('Mode'):
             mode = Alldata[i][5:].strip(' ":').strip().strip('"')
+            single_strings = ["a-ch0", "a-ch1", "auto ch0", "auto ch1",
+                              "fast auto ch0", "fast auto ch1",
+                               ]
             if (mode.lower().count('single') or
-                mode.lower().strip() == "a-ch0" or 
-                mode.lower().strip() == "a-ch1"):
+                mode.lower().strip() in single_strings):
                 single = True
                 channel = mode.split(" ")[-1]
             else:
@@ -126,7 +146,7 @@ def openASC(dirname, filename):
             # There are several curves now.
             StartC = i+2
         if Alldata[i].replace(" ", "").lower().strip() == '"countrate"':
-            # takes cate of "Count Rate" and "Countrate"
+            # takes care of "Count Rate" and "Countrate"
             # End of correlation function
             EndC = i-1
             # Start of trace (goes until end of file)
@@ -190,8 +210,6 @@ def openASC(dirname, filename):
             for i in np.arange(len(curvelist)-1):
                 trace2.append(list())
                 trace2[i+1].append((np.float(row[0])*timefactor, 0))
-    # return as an array
-    openfile.close()
 
     # group the resulting curves
     corrlist = list()
@@ -314,6 +332,116 @@ def openASC(dirname, filename):
     for i in curvelist:
         filelist.append(filename)
     dictionary["Filename"] = filelist
+    
+    return dictionary
+
+
+def openASC_ALV_7004_USB(path):
+    """
+    Opens the ALV file format with the header "ALV-7004/USB".
+    
+    This is a single-run file format.
+    - data rows are identified by containing four tabs (4*"\t")
+    - the count rate section starts at the line "Count Rate" (or "countrate")
+    - all-zero correlation curves are removed
+
+    "Correlation"
+      2.50000E-005     -9.45478E-001     -1.00000E+000      5.22761E-002      3.05477E-002
+      5.00000E-005      6.73734E-001     -2.59938E-001      3.17894E-002      4.24466E-002
+      7.50000E-005      5.30716E-001      3.21605E-001      5.91051E-002      2.93061E-002
+      1.00000E-004      3.33292E-001      1.97860E-001      3.24102E-002      3.32379E-002
+      1.25000E-004      2.42538E-001      1.19988E-001      4.37917E-002      3.05477E-002
+      1.50000E-004      1.86396E-001      1.23318E-001      5.66218E-002      2.25806E-002
+      1.75000E-004      1.73836E-001      8.53991E-002      4.64819E-002      3.46865E-002
+      2.00000E-004      1.48080E-001      9.35377E-002      4.37917E-002      4.17223E-002
+    [...]
+      1.00663E+004      2.80967E-005     -2.23975E-005     -7.08272E-005      5.70470E-005
+      1.09052E+004      9.40185E-005      2.76261E-004      1.29745E-004      2.39958E-004
+      1.17441E+004     -2.82103E-004     -1.97386E-004     -2.88753E-004     -2.60987E-004
+      1.25829E+004      1.42069E-004      3.82018E-004      6.03932E-005      5.40363E-004
+    
+    "Count Rate"
+           0.11719         141.83165          81.54211         141.83165          81.54211
+           0.23438         133.70215          77.90344         133.70215          77.90344
+           0.35156         129.67148          74.58858         129.67148          74.58858
+           0.46875         134.57133          79.53957         134.57133          79.53957
+    [...]
+          29.29688         143.78307          79.06236         143.78307          79.06236
+          29.41406         154.80135          82.87147         154.80135          82.87147
+          29.53125         187.43013          89.61197         187.43013          89.61197
+          29.64844         137.82655          77.71597         137.82655          77.71597
+    [...]
+
+
+    """
+    filename = os.path.basename(path)
+    with open(path, 'r') as openfile:
+        Alldata = openfile.readlines()
+    
+    # Find the different arrays
+    # correlation array: "  "
+    # trace array: "       "
+    Allcorr = list()
+    Alltrac = list()
+    intrace = False
+    for item in Alldata:
+        if item.lower().strip().strip('"').replace(" ", "") == "countrate":
+            intrace = True
+        if item.count("\t") == 4: 
+            if intrace:
+                it = item.split("\t")
+                it = [ float(t.strip()) for t in it ]
+                Alltrac.append(it)
+            else:
+                ic = item.split("\t")
+                ic = [ float(c.strip()) for c in ic ]
+                Allcorr.append(ic)
+    Allcorr = np.array(Allcorr)
+    Alltrac = np.array(Alltrac)
+    
+    # Allcorr: lag time, ac1, ac2, cc12, cc21
+    # Alltrac: time, trace1, trace2, trace1, trace2
+    assert np.allclose(Alltrac[:,1], Alltrac[:,3], rtol=.01), "unknown ALV file format"
+    assert np.allclose(Alltrac[:,2], Alltrac[:,4], rtol=.01), "unknown ALV file format"
+    
+    guesstypelist = ["AC1", "AC2", "CC12", "CC21"]
+    typelist = list()
+    corrlist = list()
+    tracelist = list()
+    filelist = list()
+    
+    lagtime = Allcorr[:,0]
+    time = Alltrac[:,0]*1000
+    trace1 = np.dstack((time, Alltrac[:,1]))[0]
+    trace2 = np.dstack((time, Alltrac[:,2]))[0]
+    
+    for i, typ in enumerate(guesstypelist):
+        corr = np.dstack((lagtime, Allcorr[:,i+1]))[0]
+        
+        if not np.allclose(corr[:,1], np.zeros_like(lagtime)):
+            # type
+            typelist.append(typ)
+            # correlation
+            corrlist.append(corr)
+            # trace
+            if typ.count("CC"):
+                tracelist.append([trace1, trace2])
+            elif typ.count("AC1"):
+                tracelist.append([trace1])
+            elif typ.count("AC2"):
+                tracelist.append([trace2])
+            else:
+                raise ValueError("Unknown ALV file format")
+            # filename
+            filelist.append(filename)
+
+    dictionary = dict()
+    dictionary["Correlation"] = corrlist
+    dictionary["Trace"] = tracelist
+    dictionary["Type"] = typelist
+    dictionary["Filename"] = filelist
     return dictionary
 
 
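
All readers in this module hand back the same dictionary layout:
equal-length lists under "Correlation", "Trace", "Type" and "Filename".
A hedged usage sketch for the ALV-7004/USB reader (directory and file
name are hypothetical):

    data = openASC("/tmp", "measurement.ASC")
    for corr, traces, typ in zip(data["Correlation"],
                                 data["Trace"],
                                 data["Type"]):
        # corr:   2D array [lag time, correlation]
        # traces: one (AC) or two (CC) [time, count rate] arrays
        print("{}: {} lag times, {} trace(s)".format(
              typ, corr.shape[0], len(traces)))
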
@@ -323,7 +451,7 @@ def mysplit(a, n):
        The signal average is preserved, but the signal variance will
        decrease.
     """
-    if n == 1:
+    if n <= 1:
         return [np.array(a)]
     a = np.array(a)
     N = len(a)
@@ -349,6 +477,5 @@ def mysplit(a, n):
     data[:,0] = x + newstep
     # make sure that the average stays the same:
     data[:,1] = y - np.average(y) + np.average(yp)
-
     return np.split(data,n)
     
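
The docstring's claim for `mysplit` (signal average preserved, variance
reduced) can be checked on a synthetic trace. A sketch, assuming a
two-column [time, count rate] array whose length is divisible by n:

    import numpy as np

    t = np.linspace(0, 1000, 1000)              # time [ms]
    y = 30.0 + np.random.normal(0, 2, t.size)   # count rate [kHz]
    trace = np.dstack((t, y))[0]                # two-column trace

    parts = mysplit(trace, 4)
    # the overall average is preserved across the split traces:
    print(np.average(trace[:, 1]))
    print(np.average(np.concatenate(parts)[:, 1]))
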
diff --git a/pycorrfit/readfiles/read_CSV_PyCorrFit.py b/pycorrfit/readfiles/read_CSV_PyCorrFit.py
index ff4381f..4419bec 100644
--- a/pycorrfit/readfiles/read_CSV_PyCorrFit.py
+++ b/pycorrfit/readfiles/read_CSV_PyCorrFit.py
@@ -8,7 +8,8 @@ import numpy as np
 
 
 def openCSV(dirname, filename):
-    """ Read relevant data from a file looking like this:
+    """ 
+    Read relevant data from a file looking like this:
         [...]
         # Comment
         # Data type: Autocorrelation
@@ -31,26 +32,39 @@ def openCSV(dirname, filename):
         18.087936   31.21335
         [...]
 
-        Data type:
-        If Data type is "Cross-correlation", we will try to import
-        two traces after "# BEGIN SECOND TRACE"
+    The correlation part could also look like this:
+        # Channel (tau [s])    Experimental correlation    Fitted correlation    Residuals      Weights [model function]
+        2.0000000000e-07    1.5649271000e-01    1.5380094370e-01    2.6917663029e-03    7.3158300646e-03
+        4.0000000000e-07    1.4751239000e-01    1.5257959602e-01    -5.0672060199e-03    5.8123579098e-03
+        6.0000000000e-07    1.5145113000e-01    1.5137624642e-01    7.4883584881e-05    8.5622019656e-03
+        8.0000000000e-07    1.5661088000e-01    1.5019053433e-01    6.4203456659e-03    6.8098486549e-03
+        1.0000000000e-06    1.5456273000e-01    1.4902210818e-01    5.5406218229e-03    7.2476381023e-03
+        1.2000000000e-06    1.3293905000e-01    1.4787062503e-01    -1.4931575028e-02    6.9861494246e-03
+        1.4000000000e-06    1.4715790000e-01    1.4673575040e-01    4.2214960494e-04    6.9810206017e-03
+        1.6000000000e-06    1.5247520000e-01    1.4561715797e-01    6.8580420325e-03    6.6680066656e-03
+        1.8000000000e-06    1.4703974000e-01    1.4451452937e-01    2.5252106284e-03    6.3299717550e-03
+    In that case we are also importing the weights.
 
-        1st section:
-         First column denotes tau in seconds and the second row the
-         correlation signal.
-        2nd section:
-         First column denotes tau in seconds and the second row the
-         intensity trace in kHz.
+    Data type:
+    If Data type is "Cross-correlation", we will try to import
+    two traces after "# BEGIN SECOND TRACE"
 
+    1st section:
+     First column denotes tau in seconds and the second column the
+     correlation signal.
+    2nd section:
+     First column denotes tau in seconds and the second column the
+     intensity trace in kHz.
 
-        Returns:
-        1. A list with tuples containing two elements:
-           1st: tau in ms
-           2nd: corresponding correlation signal
-        2. None - usually is the trace, but the trace is not saved in
-                  the PyCorrFit .csv format.
-        3. A list with one element, indicating, that we are opening only
-           one correlation curve.
+
+    Returns:
+    1. A list with tuples containing two elements:
+       1st: tau in ms
+       2nd: corresponding correlation signal
+    2. None - usually the trace, but the trace is not saved in
+              the PyCorrFit .csv format.
+    3. A list with one element, indicating that we are opening only
+       one correlation curve.
     """
     # Check if the file is correlation data
     csvfile = open(os.path.join(dirname, filename), 'r')
@@ -65,10 +79,13 @@ def openCSV(dirname, filename):
     csvfile = open(os.path.join(dirname, filename), 'r')
     readdata = csv.reader(csvfile, delimiter=',')
     data = list()
+    weights = list()
+    weightname = "external"
     trace = None
     traceA = None
     DataType="AC" # May be changed
     numtraces = 0
+    prev_row = None
     for row in readdata:
         if len(row) == 0 or len(str(row[0]).strip()) == 0:
             # Do nothing with empty/whitespace lines
@@ -106,6 +123,15 @@ def openCSV(dirname, filename):
                 row = row[0].split()
             data.append((np.float(row[0].strip())*timefactor, 
                          np.float(row[1].strip())))
+            if len(row) == 5:
+                # this has to be correlation with weights
+                weights.append(np.float(row[4].strip()))
+                if weightname == "external":
+                    try:
+                        weightname = "ext. "+prev_row[0].split("Weights")[1].split("[")[1].split("]")[0]
+                    except:
+                        pass
+        prev_row = row
     # Collect the rest of the trace, if there is any:
     rest = np.array(data)
     if numtraces == 0:
@@ -142,4 +168,7 @@ def openCSV(dirname, filename):
     dictionary["Trace"] = Traces
     dictionary["Type"] = [DataType]
     dictionary["Filename"] = [filename]
+    if len(weights) != 0:
+        dictionary["Weight"] = [ np.array(weights)]
+        dictionary["Weight Name"] = [weightname]
     return dictionary
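
When the correlation section carries five columns, `openCSV` also
returns the weights read from the file. A hedged usage sketch (the
file name is hypothetical):

    data = openCSV("/tmp", "curve.csv")
    if "Weight" in data:
        # one weight array per correlation curve
        print("weights '{}': {} points".format(data["Weight Name"][0],
                                               len(data["Weight"][0])))
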
diff --git a/pycorrfit/tools/average.py b/pycorrfit/tools/average.py
index dcbba0e..4cbda67 100644
--- a/pycorrfit/tools/average.py
+++ b/pycorrfit/tools/average.py
@@ -131,7 +131,8 @@ class Average(wx.Frame):
             if Page.counter.strip(" :#") == str(PageNumbers[0]):
                 referencePage = Page
                 break
-        if referencePage is not None:
+
+        if referencePage is None:
             # If that did not work, we have to raise an error.
             raise IndexError("PyCorrFit could not find the first"+
 							 " page for averaging.")
@@ -139,21 +140,23 @@ class Average(wx.Frame):
         
         for i in np.arange(self.parent.notebook.GetPageCount()):
             Page = self.parent.notebook.GetPage(i)
+            corr = Page.corr
+            model = Page.corr.fit_model
             j = filter(lambda x: x.isdigit(), Page.counter)
             if int(j) in PageNumbers:
                 # Get all pages with the same model?
                 if self.WXCheckMono.GetValue() == True:
-                    if (Page.modelid == referencePage.modelid and
-                       Page.IsCrossCorrelation == referencePage.IsCrossCorrelation):
+                    if (model.id == referencePage.corr.fit_model.id and
+                       corr.is_cc == referencePage.corr.is_cc):
                         ## Check if the page has experimental data:
                         # If there is an empty page somewhere, don't bother
-                        if Page.dataexpfull is not None:
+                        if corr.correlation is not None:
                             pages.append(Page)
                             UsedPagenumbers.append(int(j))
                 else:
-                    if Page.IsCrossCorrelation == referencePage.IsCrossCorrelation:
+                    if corr.is_cc == referencePage.corr.is_cc:
                         # If there is an empty page somewhere, don't bother
-                        if Page.dataexpfull is not None:
+                        if corr.correlation is not None:
                             pages.append(Page)
                             UsedPagenumbers.append(int(j))
         # If there are no pages in the list, exit gracefully
@@ -175,21 +178,16 @@ class Average(wx.Frame):
         TraceNumber = 0
         TraceAvailable = False # turns True, if pages contain traces
         for page in pages:
+            corr = page.corr
             # experimental correlation curve
             # (at least 1d, because it might be None)
-            explist.append(np.atleast_1d(1*page.dataexpfull))
+            explist.append(np.atleast_1d(1*corr.correlation))
             # trace
             # We will put together a trace from all possible traces
             # Stitch together all the traces.
-            if page.IsCrossCorrelation is False:
-                trace = [page.trace]
-                # trace has one element
-                TraceNumber = 1
-            else:
-                trace = page.tracecc
-                # trace has two elements
-                TraceNumber = 2
-            if trace is not None and trace[0] is not None:
+            trace = corr.traces
+            TraceNumber = len(trace)
+            if TraceNumber > 0:
                 TraceAvailable = True
                 # Works with one or two traces. j = 0 or 1.
                 for j in np.arange(TraceNumber):
@@ -223,7 +221,7 @@ class Average(wx.Frame):
         # Now shorten the trace, because we want as little memory usage as
         # possible. I used this algorithm in read_FCS_Confocor3.py as well.
         newtraces = list()
-        if TraceAvailable is True:
+        if TraceAvailable:
             for j in np.arange(TraceNumber):
                 tracej = np.zeros((len(tracetime[j]),2))
                 tracej[:,0] = tracetime[j]
@@ -267,29 +265,28 @@ class Average(wx.Frame):
         # Set average data
         average[:,1] = averagedata
         # create new page
-        self.IsCrossCorrelation = self.Page.IsCrossCorrelation
-        interval = (self.Page.startcrop, self.Page.endcrop)
+        self.IsCrossCorrelation = self.Page.corr.is_cc
+        interval = self.Page.corr.fit_ival
         # Obtain the model ID from the dropdown selection.
         idsel = self.WXDropSelMod.GetSelection()
         modelid = self.DropdownIndex[idsel]
         self.AvgPage = self.parent.add_fitting_tab(modelid = modelid,
                                                    select = True)
-        (self.AvgPage.startcrop, self.AvgPage.endcrop) = interval
-        self.AvgPage.dataexpfull = average
-        self.AvgPage.IsCrossCorrelation = self.IsCrossCorrelation
+        self.AvgPage.corr.fit_ival = interval
+        self.AvgPage.corr.correlation = average
         if self.IsCrossCorrelation is False:
+            self.AvgPage.corr.corr_type = "AC average"
             newtrace = newtraces[0]
             if newtrace is not None and len(newtrace) != 0:
-                self.AvgPage.trace = newtrace
-                self.AvgPage.traceavg = newtrace[:,1].mean()
+                self.AvgPage.corr.traces = [newtrace]
             else:
-                self.AvgPage.trace = None
-                self.AvgPage.traceavg = None
+                self.AvgPage.corr.traces = []
         else:
+            self.AvgPage.corr.corr_type = "CC average"
             if newtraces[0] is not None and len(newtraces[0][0]) != 0:
-                self.AvgPage.tracecc = newtraces
+                self.AvgPage.corr.traces = newtraces
             else:
-                self.AvgPage.tracecc = None
+                self.AvgPage.corr.traces = []
         self.AvgPage.Fit_enable_fitting()
         if len(pages) == 1:
             # Use the same title as the first page
@@ -300,6 +297,7 @@ class Average(wx.Frame):
         self.AvgPage.tabtitle.SetValue(newtabti)
         # Set the addition information about the variance from averaging
         Listname = "Average"
+        listname = Listname.lower()
         standarddev = exparray.std(axis=0)[:,1]
         if np.sum(np.abs(standarddev)) == 0:
             # The average sd is zero. We probably made an average
@@ -307,11 +305,17 @@ class Average(wx.Frame):
             # average weighted fitting
             pass
         else:
-            self.AvgPage.external_std_weights[Listname] = standarddev
+            # TODO:
+            # kind of hackish to repeat this three times:
+            #   self.AvgPage.corr.set_weights(Listname,  standarddev)
+            self.AvgPage.corr.set_weights(listname,  standarddev)
             WeightKinds = self.AvgPage.Fitbox[1].GetItems()
             # Attention! Average weights and other external weights should
             # be sorted (for session saving).
-            extTypes = self.AvgPage.external_std_weights.keys()
+            extTypes = self.AvgPage.corr._fit_weight_memory.keys()
+            # TODO:
+            # find a cleaner solution
+            extTypes.remove("none")
             extTypes.sort() # sorting
             for key in extTypes:
                 try:
@@ -319,12 +323,15 @@ class Average(wx.Frame):
                 except:
                     pass
             LenInternal = len(WeightKinds)
-            IndexAverag = extTypes.index(Listname)
+            IndexAverag = extTypes.index(listname)
             IndexInList = LenInternal + IndexAverag
             for key in extTypes:
                 WeightKinds += [key]
             self.AvgPage.Fitbox[1].SetItems(WeightKinds)
             self.AvgPage.Fitbox[1].SetSelection(IndexInList)
+            self.AvgPage.corr.set_weights(listname,  standarddev)
+            self.AvgPage.apply_parameters()
+            self.AvgPage.corr.set_weights(listname,  standarddev)
         self.AvgPage.PlotAll()
         # Keep the average tool open.
         # self.OnClose()
@@ -344,7 +351,7 @@ class Average(wx.Frame):
         modelkeys = mdls.modeltypes.keys()
         modelkeys.sort()
         try:
-            current_model = self.parent.notebook.GetCurrentPage().modelid
+            current_model = self.parent.notebook.GetCurrentPage().corr.fit_model.id
         except:
             current_model = -1
         i = 0
diff --git a/pycorrfit/tools/background.py b/pycorrfit/tools/background.py
index 3374c6a..82cafbf 100644
--- a/pycorrfit/tools/background.py
+++ b/pycorrfit/tools/background.py
@@ -18,6 +18,7 @@ import wx.lib.plot as plot
 from .. import misc
 from .. import openfile as opf                  # How to treat an opened file
 from .. import readfiles
+from ..fcs_data_set import Trace
 
 # Menu entry name
 MENUINFO = ["&Background correction", "Open a file for background correction."]
@@ -179,6 +180,16 @@ class BackgroundCorrection(wx.Frame):
             wx.Frame.SetIcon(self, parent.MainIcon)
 
 
+    def Apply(self, Page, backgroundid):
+        """ Set the background `backgroundid` for `Page` (Ch1 or Ch2). """
+        if self.rbtnCh1.GetValue() == True:
+            Page.bgselected = backgroundid
+        else:
+            Page.bg2selected = backgroundid
+        if Page.IsCrossCorrelation is False:
+            # Autocorrelation only has one background!
+            Page.bg2selected = None
+
+
     def OnApply(self, event):
         strFull = self.WXTextPages.GetValue()
         PageNumbers = misc.parseString2Pagenum(self, strFull)
@@ -193,13 +204,7 @@ class BackgroundCorrection(wx.Frame):
             Page = self.parent.notebook.GetPage(i)
             j = filter(lambda x: x.isdigit(), Page.counter)
             if int(j) in PageNumbers:
-                if self.rbtnCh1.GetValue() == True:
-                    Page.bgselected = item
-                else:
-                    Page.bg2selected = item
-                if Page.IsCrossCorrelation is False:
-                    # Autocorrelation only has one background!
-                    Page.bg2selected = None
+                self.Apply(Page, item)
                 Page.OnAmplitudeCheck("init")
                 Page.PlotAll()
         # Clean up unused backgrounds
@@ -214,12 +219,8 @@ class BackgroundCorrection(wx.Frame):
         for i in np.arange(N):
             # Set Page 
             Page = self.parent.notebook.GetPage(i)
-            Page.bgselected = item
-            if Page.IsCrossCorrelation:
-                Page.bg2selected = item
-            else:
-                Page.bg2selected = None
             try:
+                self.Apply(Page, item)
                 Page.OnAmplitudeCheck("init")
                 Page.PlotAll()
             except OverflowError:
@@ -365,9 +366,7 @@ class BackgroundCorrection(wx.Frame):
             self.btnapply.Enable(True)
             self.btnapplyall.Enable(True)
             # Draw a trace from the list
-            self.activetrace = self.parent.Background[item-1][2]
-            #self.textafterdropdown.SetLabel(" Avg:  "+
-            #                    str(self.parent.Background[item-1][0]))
+            self.activetrace = self.parent.Background[item-1].trace
         # We want to have the trace in [s] here.
         trace = 1.*self.activetrace
         trace[:,0] = trace[:,0]/1000
@@ -378,8 +377,7 @@ class BackgroundCorrection(wx.Frame):
 
 
     def OnImport(self, event):
-        self.parent.Background.append([self.average, self.bgname.GetValue(), 
-                                      self.trace])
+        self.parent.Background.append(Trace(trace=self.trace, name=self.bgname.GetValue()))
         # Next two lines are taken care of by UpdateDropdown
         #name = "{} ({:.2f} kHz)".format(self.bgname.GetValue(), self.average)
         #self.BGlist.append(name)
@@ -526,8 +524,7 @@ class BackgroundCorrection(wx.Frame):
         self.BGlist = list()
         #self.BGlist.append("File/User")
         for item in self.parent.Background:
-            bgname = "{} ({:.2f} kHz)".format(item[1],item[0])
-            self.BGlist.append(bgname)
+            self.BGlist.append(item.name)
         self.dropdown.SetItems(self.BGlist)
         # Show the last item
         self.dropdown.SetSelection(len(self.BGlist)-1)
@@ -550,28 +547,28 @@ def ApplyAutomaticBackground(page, bg, parent):
     bglist = 1*np.atleast_1d(bg)
     # minus 1 to identify non-set background id
     bgid = np.zeros(bglist.shape, dtype=int) - 1
-    for b in xrange(len(bglist)):
+    for b in range(len(bglist)):
+        bgname = "AUTO: {:e} kHz \t".format(bglist[b])
         # Check if exists:
         for i in xrange(len(parent.Background)):
-            if parent.Background[i][0] == bglist[b]:
+            if (parent.Background[i].countrate == bglist[b] and 
+                parent.Background[i].name == bgname):
                 bgid[b] = i
         if bgid[b] == -1:
             # Add new background
-            bgname = "AUTO: {:e} kHz \t".format(bglist[b])
-            trace = np.array([[0,bglist[b]],[1,bglist[b]]])
-            parent.Background.append([bglist[b], bgname, trace])
+            parent.Background.append(Trace(countrate=bglist[b], name=bgname, duration=1))
             bgid[b] = len(parent.Background) - 1
+    
     # Apply background to page
     # Last item is id of background
+
     page.bgselected = bgid[0]
-    if page.IsCrossCorrelation:
-        if len(bgid) != 2:
-            raise NotImplementedError("Cross-correlation data needs"+
-                "exactly two signals for background-correction!")
-        # Apply second background
+    
+    if len(bgid) == 2:
         page.bg2selected = bgid[1]
     else:
         page.bg2selected = None
+
     CleanupAutomaticBackground(parent)
     page.OnAmplitudeCheck("init")
     page.PlotAll()
@@ -588,53 +585,32 @@ def CleanupAutomaticBackground(parent):
     # Create a dictionary with keys: indices of old background list -
     # and elements: list of pages having this background
     BGdict = dict()
-    BG2dict = dict() # cross-correlation
-    for i in xrange(len(parent.Background)):
+    for i in range(len(parent.Background)):
         BGdict[i] = list()
-        BG2dict[i] = list()
     # Append pages to the lists inside the dictionary
-    for i in xrange(parent.notebook.GetPageCount()):
+    for i in range(parent.notebook.GetPageCount()):
         Page = parent.notebook.GetPage(i)
         if Page.bgselected is not None:
-            BGdict[Page.bgselected].append(Page)
+            if not BGdict.has_key(Page.bgselected):
+                BGdict[Page.bgselected] = list()
+            BGdict[Page.bgselected].append([Page, 1])
         if Page.bg2selected is not None:
-            BG2dict[Page.bg2selected].append(Page)
-    # Sort the keys and create a new background list
-    NewBGlist = list()
-    keyID = 0
-    keys = BGdict.keys()
-    keys.sort()
-    for key in keys:
-        # Do not delete user-generated backgrounds
-        if len(BGdict[key]) == 0 and parent.Background[key][1][-1]=="\t":
-            # This discrads auto-generated backgrounds that have no
-            # pages assigned to them
-            pass
-        else:
-            for page in BGdict[key]:
-                page.bgselected = keyID
-            NewBGlist.append(parent.Background[key])
-            keyID += 1
-    # Same thing for cross-correlation (two bg signals)
-    #keyID = 0
-    keys = BG2dict.keys()
-    keys.sort()
-    for key in keys:
-        # Do not delete user-generated backgrounds
-        if len(BG2dict[key]) == 0 and parent.Background[key][1][-1]=="\t":
-            # This discrads auto-generated backgrounds that have no
-            # pages assigned to them
-            pass
-        elif parent.Background[key][1][-1]=="\t":
-            # We already added the user-defined backgrounds
-            # Therefore, we only check for aut-generated backgrounds
-            # ("\t")
-            for page in BG2dict[key]:
-                page.bg2selected = keyID
-            NewBGlist.append(parent.Background[key])
-            keyID += 1
-    # Finally, write back background list
-    parent.Background = NewBGlist
+            if not BGdict.has_key(Page.bg2selected):
+                BGdict[Page.bg2selected] = list()
+            BGdict[Page.bg2selected].append([Page, 2])
+    
+    oldBackground = parent.Background
+    parent.Background = list()
+    bgcounter = 0
+    for key in BGdict.keys():
+        if len(BGdict[key]) != 0 or not oldBackground[key].name.endswith("\t"):
+            parent.Background.append(oldBackground[key])
+            for Page, bgid in BGdict[key]:
+                if bgid == 1:
+                    Page.bgselected = bgcounter
+                else:
+                    Page.bg2selected = bgcounter
+            bgcounter += 1
     # If the background correction tool is open, update the list
     # of backgrounds.
     # (self.MyName="BACKGROUND")
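
A note on the background bookkeeping above: ApplyAutomaticBackground now
deduplicates auto-generated backgrounds by matching both countrate and the
generated name. A minimal, self-contained sketch of that idea (the Trace
class here is a stand-in for pycorrfit's own, assumed to expose .countrate
and .name):

    class Trace(object):
        # stand-in for pycorrfit's Trace; only the attributes used here
        def __init__(self, countrate, name):
            self.countrate = countrate
            self.name = name

    def add_auto_background(backgrounds, countrate):
        """Return the index of a matching background, appending if needed."""
        name = "AUTO: {:e} kHz \t".format(countrate)
        for i, bg in enumerate(backgrounds):
            if bg.countrate == countrate and bg.name == name:
                return i
        backgrounds.append(Trace(countrate=countrate, name=name))
        return len(backgrounds) - 1

    backgrounds = []
    print(add_auto_background(backgrounds, 12.5))  # 0 (appended)
    print(add_auto_background(backgrounds, 12.5))  # 0 (reused, not duplicated)
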
diff --git a/pycorrfit/tools/batchcontrol.py b/pycorrfit/tools/batchcontrol.py
index 8b4ffc3..9b89b4b 100644
--- a/pycorrfit/tools/batchcontrol.py
+++ b/pycorrfit/tools/batchcontrol.py
@@ -95,7 +95,7 @@ class BatchCtrl(wx.Frame):
         # Set all parameters for all pages
         for i in np.arange(self.parent.notebook.GetPageCount()):
             OtherPage = self.parent.notebook.GetPage(i)
-            if OtherPage.modelid == modelid and OtherPage.dataexp is not None:
+            if OtherPage.corr.fit_model.id == modelid and OtherPage.corr.correlation is not None:
                 self.parent.UnpackParameters(Parms, OtherPage)
                 OtherPage.PlotAll(trigger="parm_batch")
         # Update all other tools with the finalize trigger.
@@ -114,17 +114,17 @@ class BatchCtrl(wx.Frame):
             if item <= 0:
                 Page = self.parent.notebook.GetCurrentPage()
             else:
-                Page = self.parent.notebook.GetPage(item)
+                Page = self.parent.notebook.GetPage(item-1)
             # Get internal ID
-            modelid = Page.modelid
+            modelid = Page.corr.fit_model.id
         else:
             # Get external ID
             modelid = self.YamlParms[item][1]
         # Fit all pages with right modelid
         for i in np.arange(self.parent.notebook.GetPageCount()):
             OtherPage = self.parent.notebook.GetPage(i)
-            if (OtherPage.modelid == modelid and
-                OtherPage.dataexpfull is not None):
+            if (OtherPage.corr.fit_model.id == modelid and
+                OtherPage.corr.correlation is not None):
                 #Fit
                 OtherPage.Fit_function(noplots=True,trigger="fit_batch")
         # Update all other tools with the finalize trigger.
@@ -154,7 +154,7 @@ class BatchCtrl(wx.Frame):
             DDlist.append("Current page")
             for i in np.arange(self.parent.notebook.GetPageCount()):
                 aPage = self.parent.notebook.GetPage(i)
-                DDlist.append(aPage.counter+aPage.model)
+                DDlist.append(aPage.counter+aPage.tabtitle.GetValue())
             self.dropdown.SetItems(DDlist)
             self.dropdown.SetSelection(0)
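
The GetPage(item-1) fix above accounts for the extra "Current page" entry
at the top of the dropdown: notebook page i sits at dropdown index i+1. A
tiny sketch of that mapping (hypothetical helper name):

    def dropdown_to_page_index(item):
        # item 0 is "Current page"; real pages start at dropdown index 1
        return None if item <= 0 else item - 1

    assert dropdown_to_page_index(0) is None
    assert dropdown_to_page_index(3) == 2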
 
diff --git a/pycorrfit/tools/comment.py b/pycorrfit/tools/comment.py
index 1ee1e9d..5b6f707 100755
--- a/pycorrfit/tools/comment.py
+++ b/pycorrfit/tools/comment.py
@@ -5,8 +5,7 @@ PyCorrFit
 Module tools - comment
 Edit the session's comment.
 """
-
-
+from __future__ import print_function
 import wx
 
 
@@ -29,21 +28,17 @@ class EditComment(wx.Frame):
         self.panel = wx.Panel(self)
         self.control = wx.TextCtrl(self.panel, style=wx.TE_MULTILINE, 
                         size=initial_sizec, value=self.parent.SessionComment)
+        self.Bind(wx.EVT_TEXT, self.OnTextChanged, self.control)
         text = wx.StaticText(self.panel, 
                    label="Session comments will be saved in the  session file.")
         # buttons
-        btnclose = wx.Button(self.panel, wx.ID_ANY, 'Close')
-        btnokay = wx.Button(self.panel, wx.ID_ANY, 'OK')
-        self.Bind(wx.EVT_BUTTON, self.OnClose, btnclose)
-        self.Bind(wx.EVT_BUTTON, self.OnOkay, btnokay)
+        btnsave = wx.Button(self.panel, wx.ID_SAVE, 'Save Comment')
+        self.Bind(wx.EVT_BUTTON, self.OnSave, btnsave)
         #sizers
         self.topSizer = wx.BoxSizer(wx.VERTICAL)
-        buttonsizer = wx.BoxSizer(wx.HORIZONTAL)
-        buttonsizer.Add(btnclose, 1)
-        buttonsizer.Add(btnokay, 1)
         self.topSizer.Add(text)
-        self.topSizer.Add(buttonsizer)
         self.topSizer.Add(self.control)
+        self.topSizer.Add(btnsave, 1, wx.RIGHT | wx.EXPAND)
         self.panel.SetSizer(self.topSizer)
         self.topSizer.Fit(self)
         #Icon
@@ -51,6 +46,7 @@ class EditComment(wx.Frame):
             wx.Frame.SetIcon(self, parent.MainIcon)
         self.Show(True)
         wx.EVT_SIZE(self, self.OnSize)
+        self.text_changed = False
 
 
     def OnSize(self, event):
@@ -60,12 +56,24 @@ class EditComment(wx.Frame):
         self.control.SetSize(sizec)
 
 
-    def OnClose(self, event=None):
+    def OnClose(self, e=None):
         self.parent.filemenu.Check(self.parent.menuComm.GetId(), False)
+        if self.text_changed:
+            # ask the user to save or discard.
+            dlg = wx.MessageDialog(self, "Do you want to save the current changes?",
+                                   "Save comment?",
+                                   style=wx.YES_NO)
+            if dlg.ShowModal() == wx.ID_YES:
+                self.OnSave()
         self.Destroy()
 
+    def OnTextChanged(self, e=None):
+        """ When the user changes the text
+        """
+        self.text_changed = True
 
-    def OnOkay(self, event):
+    def OnSave(self, e=None):
         self.parent.SessionComment = self.control.GetValue()
+        self.text_changed = False
         self.OnClose()
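
The reworked comment dialog above replaces the OK/Close pair with a single
Save button plus a dirty flag, prompting on close only when unsaved edits
exist. A minimal sketch of that pattern without wx (all names hypothetical):

    class CommentEditor(object):
        def __init__(self, comment=""):
            self.comment = comment
            self.pending = comment
            self.text_changed = False

        def on_text_changed(self, new_text):
            self.pending = new_text
            self.text_changed = True

        def on_save(self):
            self.comment = self.pending
            self.text_changed = False

        def on_close(self, confirm_save):
            # prompt only if there are unsaved edits
            if self.text_changed and confirm_save():
                self.on_save()

    ed = CommentEditor("old")
    ed.on_text_changed("new")
    ed.on_close(lambda: True)
    print(ed.comment)  # new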
 
diff --git a/pycorrfit/tools/datarange.py b/pycorrfit/tools/datarange.py
index 36a9528..37c2468 100644
--- a/pycorrfit/tools/datarange.py
+++ b/pycorrfit/tools/datarange.py
@@ -111,20 +111,16 @@ class SelectChannels(wx.Frame):
             self.left = self.right = None
             self.panel.Disable()
         else:
-            self.left = self.Page.startcrop     # starting position
-            self.right = self.Page.endcrop      # ending position
-            if self.Page.dataexpfull is not None:
-                taufull = self.Page.dataexpfull[:,0]
-            else:
-                # then we only have tau
-                taufull = self.Page.taufull
+            self.left = self.Page.corr.fit_ival[0]     # starting position
+            self.right = self.Page.corr.fit_ival[1]      # ending position
+            taufull = self.Page.corr.lag_time
         self.lentau = len(taufull)
         self.start0 = 0                     # left border of interval
         # The interval starts at 0!
         self.end0 = self.lentau - 1         # right border of interval 
         if self.left is None or self.left > self.end0:
             # This means, that either left = right = None
-            # or the dataexp-array is too small
+            # or the correlation-array is too small
             self.left = self.start0
         if self.right is None:
             # set the maximum possible value
@@ -139,26 +135,12 @@ class SelectChannels(wx.Frame):
 
 
     def OnApplyAll(self, event=None):
-        start = self.spinstart.GetValue()
-        end = self.spinend.GetValue() + 1 # +1, [sic]
-        if start > end:
-            # swap the variables, we are not angry at the user
-            start, end = end, start
-        # Get all the Pages
         N = self.parent.notebook.GetPageCount()
         for i in np.arange(N):
             # Set Page 
             Page = self.parent.notebook.GetPage(i)
             # Find out maximal length
-            if Page.dataexpfull is not None:
-                maxlen = len(Page.dataexpfull[:,0])
-            else:
-                # then we only have tau
-                maxlen = len(Page.taufull)
-            # Use the smaller one of both, so we do not get an
-            # index out of bounds error
-            Page.endcrop = min(end, maxlen)
-            Page.startcrop = start*(start < maxlen - 1 )
+            self.SetValues(page=Page)
             Page.PlotAll()
         # Page.PlotAll() calls this function. This results in the wrong data
         # being displayed in an open "View Info" Window. We call it again.
@@ -179,15 +161,15 @@ class SelectChannels(wx.Frame):
         """
         if self.Page == None:
             return
-        N = len(self.Page.taufull)
+        N = len(self.Page.corr.lag_time)
         start = self.spinstart.Value
         end = self.spinend.Value
         # If the initial boundaries are outside of the experimental
         # data array of length N, change the start and end variables.
         start = start*(start < N-2)
         end = min(end, N-1)
-        t1 = 1.*self.Page.taufull[start]
-        t2 = 1.*self.Page.taufull[end]
+        t1 = 1.*self.Page.corr.lag_time[start]
+        t2 = 1.*self.Page.corr.lag_time[end]
         self.TextTimesStart.SetLabel("%.4e" % t1)
         self.TextTimesEnd.SetLabel("%.4e" % t2)
         self.OnCheckbox()
@@ -228,11 +210,10 @@ class SelectChannels(wx.Frame):
             self.panel.Enable()
             # There is a page. We may continue.
             state = self.fixcheck.GetValue()
-            if state == True:
+            if state:
                 # We do not need to run Calc_init
-                self.Page = page
-                self.SetValues()
-                self.Page.PlotAll(event="init")
+                self.SetValues(page=page)
+                page.PlotAll(event="init")
             else:
                 # We will run it
                 self.Calc_init(page)
@@ -245,11 +226,19 @@ class SelectChannels(wx.Frame):
 
 
 
-    def SetValues(self):
+    def SetValues(self, page=None):
+        if page is None:
+            page = self.Page
+        # Get interval
         start = self.spinstart.GetValue()
-        end = self.spinend.GetValue()
+        end = self.spinend.GetValue() + 1 # +1, [sic]
         if start > end:
             # swap the variables, we are not angry at the user
             start, end = end, start
-        self.Page.startcrop = start
-        self.Page.endcrop = end + 1 # +1, because arrays are accessed like this
+        # Find out maximal length
+        maxlen = len(page.corr.lag_time)
+        # Use the smaller one of both, so we do not get an
+        # index out of bounds error
+        page.corr.fit_ival = [ start*(start < maxlen - 1 ),
+                               min(end, maxlen)
+                             ]
\ No newline at end of file
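
SetValues above now owns the interval sanitation that OnApplyAll used to
duplicate: swap reversed boundaries, convert the inclusive spin value to a
half-open interval, and clamp to the data length. A standalone sketch
(hypothetical helper name):

    def clamp_interval(spinstart, spinend, maxlen):
        start = spinstart
        end = spinend + 1  # spin control is inclusive; fit_ival end is exclusive
        if start > end:
            # swap the variables, we are not angry at the user
            start, end = end, start
        return [start * (start < maxlen - 1), min(end, maxlen)]

    print(clamp_interval(0, 500, 100))  # [0, 100]
    print(clamp_interval(50, 10, 100))  # [11, 50]
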
diff --git a/pycorrfit/tools/globalfit.py b/pycorrfit/tools/globalfit.py
index c1d64bf..9ba9fe2 100644
--- a/pycorrfit/tools/globalfit.py
+++ b/pycorrfit/tools/globalfit.py
@@ -9,10 +9,9 @@ Perform global fitting on pages which share parameters.
 
 import wx
 import numpy as np
-from scipy import optimize as spopt
 
 from .. import misc
-from .. import models as mdls
+from ..fcs_data_set import Fit
 
 # Menu entry name
 MENUINFO = ["&Global fitting",
@@ -55,12 +54,6 @@ check parameters on each page and start 'Global fit'.
         valstring=misc.parsePagenum2String(pagenumlist)
         self.WXTextPages.SetValue(valstring)
         self.topSizer.Add(self.WXTextPages)
-        ## Weighted fitting
-        # The weighted fit of the current page will be applied to
-        # all other pages.
-        self.weightedfitdrop = wx.ComboBox(self.panel)
-        ## Bins from left and right: We also don't edit that.
-        self.topSizer.Add(self.weightedfitdrop)
         ## Button
         btnfit = wx.Button(self.panel, wx.ID_ANY, 'Global fit')
         # Binds the button to the function - close the tool
@@ -75,50 +68,6 @@ check parameters on each page and start 'Global fit'.
             wx.Frame.SetIcon(self, parent.MainIcon)
         self.Show(True)
 
-    
-    def fit_function(self, parms):
-        """
-            *parms*: Parameters to fit, array
-            needs: 
-             self.parmstofit - list (strings) of parameters to fit
-                               (corresponding to *parms*)
-             self.PageData (dict with dict item = self.PageData["PageNumber"]):
-                item["x"]
-                item["data"]
-                item["modelid"]
-                item["values"]
-        """
-        # The list containing arrays to be minimized
-        minimize = list()
-        for key in self.PageData.keys():
-            # Get the function
-            item = self.PageData[key]
-            modelid = item["modelid"]
-            function = mdls.modeldict[modelid][3]
-            values = self.PageData[key]["values"]
-            # Set parameters for each function (Page)
-            for i in np.arange(len(self.parmstofit)):
-                p = self.parmstofit[i]
-                labels = mdls.valuedict[modelid][0]
-                if p in labels:
-                    index = labels.index(p)
-                    values[index] = parms[i]
-            # Check parameters, if there is such a function
-            check_parms = mdls.verification[modelid]
-            values = check_parms(values)
-            # Write parameters back?
-            # self.PageData[key]["values"] = values
-            # Calculate resulting correlation function
-            # corr = function(item.values, item.x)
-            # Subtract data. This is the function we want to minimize
-            minimize.append(
-              (function(values, item["x"]) - item["data"]) / item["dataweights"]
-                           )
-
-        # Flatten the list and make an array out of it.
-        return np.array([it for sublist in minimize for it in sublist])
-
-
     def OnClose(self, event=None):
         # This is a necessary function for PyCorrFit.
         # Do not change it.
@@ -130,129 +79,44 @@ check parameters on each page and start 'Global fit'.
         # process a string like this: "1,2,4-9,10"
         strFull = self.WXTextPages.GetValue()
         PageNumbers = misc.parseString2Pagenum(self, strFull)
+        global_pages = list()
         if PageNumbers is None:
             # Something went wrong and parseString2Pagenum already displayed
             # an error message.
             return
-        ## Get the corresponding pages, if they exist:
-        self.PageData = dict()
-        self.parmstofit = list()
-        fitparms = list()
+        ## Get the correlations
+        corrs = list()
         for i in np.arange(self.parent.notebook.GetPageCount()):
             Page = self.parent.notebook.GetPage(i)
+            corr = Page.corr
             j = filter(lambda x: x.isdigit(), Page.counter)
             if int(j) in PageNumbers:
-                dataset = dict()
-                try:
-                    dataset["x"] = Page.dataexp[:,0]
-                    dataset["data"] = Page.dataexp[:,1]
-                except:
-                    print "No experimental data in page #"+j+"!"
-                else:
-                    dataset["modelid"] = Page.modelid
+                if corr.correlation is not None:
                     Page.apply_parameters()
-                    dataset["values"] = Page.active_parms[1]
-                    # Get weights
-                    weighttype = self.weightedfitdrop.GetSelection()
-                    Page.Fitbox[1].SetSelection(weighttype)
-                    weightname = self.weightedfitdrop.GetValue()
-                    setweightname = Page.Fitbox[1].GetValue()
-                    if setweightname.count(weightname) == 0:
-                        print "Page "+Page.counter+" has no fitting type '"+ \
-                              weightname+"'!"
-                    Page.Fit_WeightedFitCheck()
-                    Fitting = Page.Fit_create_instance(noplots=True)
-                    if Fitting.dataweights is None:
-                        dataset["dataweights"] = 1.
-                    else:
-                        dataset["dataweights"] = Fitting.dataweights
-                    self.PageData[int(j)] = dataset
-                    # Get the parameters to fit from that page
-                    labels = Page.active_parms[0]
-                    parms = 1*Page.active_parms[1]
-                    tofit = 1*Page.active_parms[2]
-                    for i in np.arange(len(labels)):
-                        if tofit[i]:
-                            if self.parmstofit.count(labels[i]) == 0:
-                                self.parmstofit.append(labels[i])
-                                fitparms.append(parms[i])
-        fitparms = np.array(fitparms)
-        # Now we can perform the least squares fit
-        if len(fitparms) == 0:
+                    corrs.append(corr)
+                    global_pages.append(int(j))
+                else:
+                    print "No experimental data in page #"+j+"!"
+
+        if len(corrs) == 0:
             return
-        res = spopt.leastsq(self.fit_function, fitparms[:], full_output=1)
-        pcov = res[1]
-        #self.parmoptim, self.mesg = spopt.leastsq(self.fit_function, 
-        #                                          fitparms[:])
-        self.parmoptim = res[0]
-        # So we have the optimal parameters.
-        # We would like to give each page a chi**2 and its parameters back:
-        # Create a clean list of PageNumbers
-        # UsedPages = dict.fromkeys(PageNumbers).keys()
-        UsedPages = self.PageData.keys()
-        UsedPages.sort()
-        for key in UsedPages:
-            # Get the Page:
-            for i in np.arange(self.parent.notebook.GetPageCount()):
-                aPage = self.parent.notebook.GetPage(i)
-                j = filter(lambda x: x.isdigit(), aPage.counter)
-                if int(j) == int(key):
-                    Page = aPage
-            Page.GlobalParameterShare = UsedPages
-            # Get the function
-            item = self.PageData[key]
-            modelid = item["modelid"]
-            #function = mdls.modeldict[modelid][3]
-            values = 1*Page.active_parms[1]
-            # Set parameters for each Page)
-            for i in np.arange(len(self.parmstofit)):
-                p = self.parmstofit[i]
-                labels = mdls.valuedict[modelid][0]
-                if p in labels:
-                    index = labels.index(p)
-                    values[index] = self.parmoptim[i]
-                    Page.active_parms[2][index] = True
-            # Check parameters, if there is such a function
-            check_parms = mdls.verification[modelid]
-            values = check_parms(values)
-            # Write parameters back?
-            Page.active_parms[1] = 1*values
-            # Calculate resulting correlation function
-            # corr = function(item.values, item.x)
-            # Subtract data. This is the function we want to minimize
-            #residual = function(values, item["x"]) - item["data"]
-            # Calculate chi**2
-            # Set the parameter error estimates for all pages
-            minimized = self.fit_function(self.parmoptim)
-            degrees_of_freedom = len(minimized) - len(self.parmoptim) - 1
-            self.chi = Page.chi2 = np.sum((minimized)**2) / degrees_of_freedom
-            try:
-                self.covar = pcov * self.chi
-            except:
-                self.parmoptim_error = None
-            else:
-                if self.covar is not None:
-                    self.parmoptim_error = np.diag(self.covar)
-            p_error = self.parmoptim_error
-            if p_error is None:
-                Page.parmoptim_error = None
-            else:
-                Page.parmoptim_error = dict()
-                for i in np.arange(len(p_error)):
-                    Page.parmoptim_error[self.parmstofit[i]] = p_error[i]
-            Page.apply_parameters_reverse()
-            # Because we are plotting the weights, we need to update
-            # the corresponfing info in each page:
-            weightid = self.weightedfitdrop.GetSelection()
-            if weightid != 0:
-                # We have weights.
-                # We need the following information for correct plotting.
-                Page.weighted_fit_was_performed = True
-                Page.weights_used_for_fitting = Fitting.dataweights
-                Page.calculate_corr()
-                Page.data4weight = 1.*Page.datacorr
-            Page.PlotAll()
+        
+        # Perform fit
+        fitter = Fit(corrs, global_fit=True)
+        fit_parm_names = [f.split()[0] for f in fitter.fit_parm_names]
 
+        # update fit results
+        for corr in corrs:
+            corr.fit_results["global parms"] = u", ".join(fit_parm_names)
+            corr.fit_results["global pages"] = u", ".join([str(g) for g in global_pages])
+        
+        # Plot results
+        for i in np.arange(self.parent.notebook.GetPageCount()):
+            Page = self.parent.notebook.GetPage(i)
+            j = filter(lambda x: x.isdigit(), Page.counter)
+            if int(j) in global_pages:
+                Page.apply_parameters_reverse()
+                Page.PlotAll()
 
     def OnPageChanged(self, page, trigger=None):
         """
@@ -273,28 +137,7 @@ check parameters on each page and start 'Global fit'.
             return
         self.panel.Enable()
         self.Page = page
-        if self.Page is not None:
-            weightlist = self.Page.Fitbox[1].GetItems()
-            # Do not display knot number for spline. May be different for each page.
-            # Remove everything after a "(" in the weightlist string.
-            # This way, e.g. the list does not show the knotnumber, which
-            # we don't use anyhow.
-            # We are doing this for all elements, because in the future, other (?)
-            # weighting methods might be implemented.
-            #for i in np.arange(len(weightlist)):
-            #    weightlist[i] = weightlist[i].split("(")[0].strip()
-            weightlist[1] = weightlist[1].split("(")[0].strip()
-            self.weightedfitdrop.SetItems(weightlist)
-            try:
-                # if there is no data, this could go wrong
-                self.Page.Fit_create_instance(noplots=True)
-                FitTypeSelection = self.Page.Fitbox[1].GetSelection()
-            except:
-                FitTypeSelection = 0
-            self.weightedfitdrop.SetSelection(FitTypeSelection)
-            ## Knotnumber: we don't want to interfere
-            # The user might want to edit the knotnumbers.
-            # self.FitKnots = Page.FitKnots   # 5 by default
+
 
     def SetPageNumbers(self, pagestring):
         self.WXTextPages.SetValue(pagestring)
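
The hand-rolled least-squares loop removed above concatenated per-page
residuals into one flat array before handing it to scipy; that job now
lives in fcs_data_set.Fit(corrs, global_fit=True). A minimal sketch of the
underlying idea, with a toy exponential model standing in for the FCS
models (all data here is made up):

    import numpy as np
    from scipy import optimize as spopt

    t = np.linspace(0, 5, 50)
    data1 = np.exp(-1.3 * t)
    data2 = np.exp(-1.3 * t) + 0.01

    def residuals(parms):
        k = parms[0]
        # flatten the per-dataset residuals into one array, as the old
        # fit_function did with its `minimize` list
        return np.concatenate([np.exp(-k * t) - data1,
                               np.exp(-k * t) - data2])

    k_opt, ier = spopt.leastsq(residuals, [1.0])
    print(k_opt)  # shared rate, close to 1.3
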
diff --git a/pycorrfit/tools/info.py b/pycorrfit/tools/info.py
index 22e95c5..119ccc5 100644
--- a/pycorrfit/tools/info.py
+++ b/pycorrfit/tools/info.py
@@ -10,7 +10,7 @@ Open a text window with lots of information.
 import wx
 import numpy as np
 
-from .. import fitting
+from .. import fcs_data_set
 from .. import models as mdls
 
 # Menu entry name
@@ -54,31 +54,31 @@ class InfoClass(object):
         """ Get a nice string representation of the Info """
         InfoDict = self.GetPageInfo(Page)
         # Version
-        Version = "PyCorrFit v."+InfoDict["version"][0]+"\n"
+        Version = u"PyCorrFit v."+InfoDict["version"][0]+"\n"
         # Title
-        Title = "\n"
+        Title = u"\n"
         for item in InfoDict["title"]:
             Title = Title + item[0]+"\t"+ item[1]+"\n"
         # Parameters
-        Parameters = "\nParameters:\n"
+        Parameters = u"\nParameters:\n"
         for item in InfoDict["parameters"]:
-            Parameters = Parameters + "  "+item[0]+"\t"+ str(item[1])+"\n"
+            Parameters = Parameters + u"  "+item[0]+"\t"+ str(item[1])+"\n"
         # Supplementary parameters
-        Supplement = "\nSupplementary parameters:\n"
+        Supplement = u"\nSupplementary parameters:\n"
         try:
             for item in InfoDict["supplement"]:
                 Supplement = Supplement + "  "+item[0]+"\t"+ str(item[1])+"\n"
         except KeyError:
             Supplement = ""
         # Fitting
-        Fitting = "\nFitting:\n"
+        Fitting = u"\nFitting:\n"
         try:
             for item in InfoDict["fitting"]:
                 Fitting = Fitting + "  "+item[0]+"\t"+unicode(item[1])+"\n"
         except KeyError:
             Fitting = ""
         # Background
-        Background = "\nBackground:\n"
+        Background = u"\nBackground:\n"
         try:
             for item in InfoDict["background"]:
                 Background = Background + "  "+item[0]+"\t"+str(item[1])+"\n"
@@ -86,12 +86,12 @@ class InfoClass(object):
             Background = ""
 
         # Function doc string
-        ModelDoc = "\n\nModel doc string:\n       " + InfoDict["modeldoc"][0]
+        ModelDoc = u"\n\nModel doc string:\n       " + InfoDict["modeldoc"][0]
         # Supplementary variables
         try:
-            SupDoc = "\n"+8*" "+InfoDict["modelsupdoc"][0]
+            SupDoc = u"\n"+8*" "+InfoDict["modelsupdoc"][0]
         except:
-            SupDoc = ""
+            SupDoc = u""
         PageInfo = Version+Title+Parameters+Supplement+Fitting+Background+\
                    ModelDoc+SupDoc
         return PageInfo
@@ -103,10 +103,13 @@ class InfoClass(object):
         # A dictionary with headings as keys and lists of singletts/tuples as 
         # values. If it is a tuple, it might be interesting for a table.
         InfoDict = dict()
+        # Get Correlation
+        corr = Page.corr
+        
         # Get model information
-        model = [Page.model, Page.tabtitle.GetValue(), Page.modelid]
-        parms = Page.active_parms[1]
-        fct = Page.active_fct.__name__
+        model = corr.fit_model
+        parms = corr.fit_parameters
+        fct = corr.fit_model.function.__name__
         InfoDict["version"] = [Page.parent.version]
         Title = list()
         # The tool statistics relies on the string "filename/title".
@@ -114,133 +117,129 @@ class InfoClass(object):
         if len(model[1]) == 0:
             # Prevent saving no title
             model[1] = "NoName"
-        Title.append(["filename/title", model[1] ]) 
-        Title.append(["Model ID", str(model[2]) ]) 
-        Title.append(["Model name", model[0] ]) 
-        Title.append(["Model function", fct ]) 
-        Title.append(["Page number", Page.counter[1:-2] ]) 
+        Title.append(["filename/title", Page.title])
+        Title.append(["Model species", model.components])
+        Title.append(["Model name", model.name])
+        Title.append(["Model ID", str(model.id)]) 
+        Title.append(["Model function", fct]) 
+        Title.append(["Page number", Page.counter[1:-2]]) 
         ## Parameters
         Parameters = list()
         # Use this function to determine human readable parameters, if possible
-        Units, Newparameters = mdls.GetHumanReadableParms(model[2], parms)
+        Units, Newparameters = mdls.GetHumanReadableParms(model.id, parms)
         # Add Parameters
         for i in np.arange(len(parms)):
             Parameters.append([ Units[i], Newparameters[i] ])
         InfoDict["parameters"] = Parameters
         # Add some more information if available
         # Info is a dictionary or None
-        MoreInfo = mdls.GetMoreInfo(model[2], Page)
+        MoreInfo = mdls.GetMoreInfo(model.id, Page)
         if MoreInfo is not None:
             InfoDict["supplement"] = MoreInfo
             # Try to get the dictionary entry of a model
             try:
                 # This function should return all important information
                 # that can be calculated from the given parameters.
-                func_info = mdls.supplement[model[2]]
+                func_info = mdls.supplement[model.id]
             except KeyError:
                 # No information available
                 pass
             else:
                 InfoDict["modelsupdoc"] = [func_info.func_doc]
         ## Fitting
-        alg = fitting.Algorithms[Page.fit_algorithm][1]
-        weightedfit = Page.weighted_fit_was_performed
-        weightedfit_type = Page.weighted_fittype
-        fittingbins = Page.weighted_nuvar  # from left and right
-        Fitting = list()
-        if Page.dataexp is not None:
-            # Mode AC vs CC
-            if Page.IsCrossCorrelation is True:
-                Title.append(["Type AC/CC", "Cross-correlation" ]) 
-            else:
-                Title.append(["Type AC/CC", "Autocorrelation" ]) 
-            Fitting.append([ u"χ²", Page.chi2 ])
-            if Page.weighted_fit_was_performed:
-                Chi2type = u"Weighted sum of squares"
-            else:
-                Chi2type = u"Sum of squares"
-            Fitting.append([ u"χ²-type", Chi2type ])
-            Fitting.append([ "Weighted fit", weightedfit_type ])
-            Fitting.append([ "Algorithm", alg ])
-            if len(Page.GlobalParameterShare) != 0:
-                shared = str(Page.GlobalParameterShare[0])
-                for item in Page.GlobalParameterShare[1:]:
-                    shared += ", "+str(item)
-                Fitting.append(["Shared parameters with Pages", shared])
-            if weightedfit is True:
-                Fitting.append([ "Std. channels", 2*fittingbins+1 ])
-            # Fitting range:
-            t1 = 1.*Page.taufull[Page.startcrop]
-            t2 = 1.*Page.taufull[Page.endcrop-1]
-            Fitting.append([ "Interval start [ms]", "%.4e" % t1 ])
-            Fitting.append([ "Interval end [ms]", "%.4e" % t2 ])
-            # Fittet parameters and errors
-            somuch = sum(Page.active_parms[2])
-            if somuch >= 1:
-                fitted = ""
-                for i in np.arange(len(Page.active_parms[2])):
-                    if np.bool(Page.active_parms[2][i]) is True:
-                        errorvar = Page.active_parms[0][i] # variable name
-                        fitted=fitted+errorvar+ ", "
-                fitted = fitted.strip().strip(",") # remove trailing comma
-                Fitting.append(["fit par.", fitted])
-                # Fitting error included in v.0.7.3
-                Errors_fit = Page.parmoptim_error
-                if Errors_fit is not None:
-                    errkeys = Errors_fit.keys()
-                    errkeys.sort()
-                    for key in errkeys:
-                        savekey, saveval = \
-                            mdls.GetHumanReadableParameterDict(model[2],
-                                                [key], [Errors_fit[key]])
-                        # The tool statistics relys on the string "Err ".
-                        # Do not change it!
-                        Fitting.append(["Err "+savekey[0], saveval[0]])
-            InfoDict["fitting"] = Fitting
-        ## Normalization
-        if Page.normparm is None:
+        
+        
+        if hasattr(corr, "fit_results"):
+            Fitting = list()
+            weightedfit = corr.fit_results["weighted fit"]
+            if corr.correlation is not None:
+                # Mode AC vs CC
+                if corr.is_cc:
+                    Title.append(["Type AC/CC", "Cross-correlation" ]) 
+                else:
+                    Title.append(["Type AC/CC", "Autocorrelation" ]) 
+                Fitting.append([ u"χ²", corr.fit_results["chi2"]])
+                if weightedfit:
+                    try:
+                        Fitting.append(["Weighted fit", corr.fit_results["weighted fit type"]])
+                    except KeyError:
+                        Fitting.append(["Weighted fit", u""+Page.Fitbox[1].GetValue()])
+                if corr.fit_results.has_key("chi2 type"):
+                    ChiSqType = corr.fit_results["chi2 type"]
+                else:
+                    ChiSqType = "unknown"
+                Fitting.append([ u"χ²-type", ChiSqType])
+                Fitting.append([ "Algorithm", fcs_data_set.Algorithms[corr.fit_algorithm][1]])
+                if len(Page.GlobalParameterShare) != 0:
+                    shared = str(Page.GlobalParameterShare[0])
+                    for item in Page.GlobalParameterShare[1:]:
+                        shared += ", "+str(item)
+                    Fitting.append(["Shared parameters with Pages", shared])
+                if corr.fit_results.has_key("weighted fit bins"):
+                    Fitting.append(["Std. channels", 2*corr.fit_results["weighted fit bins"]+1])
+                # Fitting range:
+                t1 = 1.*corr.lag_time[corr.fit_ival[0]]
+                t2 = 1.*corr.lag_time[corr.fit_ival[1]-1]
+                Fitting.append([ "Ival start [ms]", "%.4e" % t1 ])
+                Fitting.append([ "Ival end [ms]", "%.4e" % t2 ])
+                # Fitted parameters
+                try:
+                    fitparmsid = corr.fit_results["fit parameters"]
+                except:
+                    fitparmsid = corr.fit_parameters_variable
+                fitparms = np.array(corr.fit_model.parameters[0])[fitparmsid]
+                fitparms_short = [ f.split()[0] for f in fitparms ]
+                fitparms_short = u", ".join(fitparms_short)
+                Fitting.append(["Fit parm.", fitparms_short])
+                # global fitting
+                for key in corr.fit_results.keys():
+                    if key.startswith("global"):
+                        Fitting.append([key.capitalize(), corr.fit_results[key]])
+                # Fit errors
+                if corr.fit_results.has_key("fit error estimation"):
+                    errors = corr.fit_results["fit error estimation"]
+                    for err, par in zip(errors, fitparms):
+                        nam, val = mdls.GetHumanReadableParameterDict( 
+                                                model.id, [par], [err])
+                        Fitting.append(["Err "+nam[0], val[0]])
+
+                InfoDict["fitting"] = Fitting
+
+        ## Normalization parameter id to name
+        if corr.normalize_parm is None:
             normparmtext = "None"
-        elif Page.normparm < len(Page.active_parms[0]):
-            normparmtext = Page.active_parms[0][Page.normparm]
+        elif Page.normparm < len(corr.fit_parameters):
+            normparmtext = corr.fit_model.parameters[0][corr.normalize_parm] 
         else:
             # supplementary parameters
-            supnum = Page.normparm - len(Page.active_parms[1])
-            normparmtext =  MoreInfo[supnum][0]
-        Title.append(["Normalization", normparmtext ]) 
+            supnum = corr.normalize_parm - len(corr.fit_parameters)
+            normparmtext = MoreInfo[supnum][0]
+        Title.append(["Normalization", normparmtext]) 
+        
         ## Background
         Background = list()
-        if Page.IsCrossCorrelation:
-            if ( Page.bgselected is not None and
-                 Page.bg2selected is not None     ):
+        if corr.is_cc:
+            if len(corr.backgrounds) == 2:
                 # Channel 1
-                bgname = Page.parent.Background[Page.bgselected][1]
-                if len(bgname) == 0:
-                    # Prevent saving no name
-                    bgname = "NoName"
-                Background.append([ "bg name Ch1", bgname])
+                Background.append([ "bg name Ch1", 
+                                    corr.backgrounds[0].name])
                 Background.append([ "bg rate Ch1 [kHz]", 
-                           Page.parent.Background[Page.bgselected][0] ])
+                                    corr.backgrounds[0].countrate])
                 # Channel 2
-                bg2name = Page.parent.Background[Page.bg2selected][1]
-                if len(bg2name) == 0:
-                    # Prevent saving no name
-                    bg2name = "NoName"
-                Background.append([ "bg name Ch2", bg2name])
+                Background.append([ "bg name Ch2", 
+                                    corr.backgrounds[1].name])
                 Background.append([ "bg rate Ch2 [kHz]", 
-                          Page.parent.Background[Page.bg2selected][0] ])
+                                    corr.backgrounds[1].countrate])
                 InfoDict["background"] = Background
         else:
-            if Page.bgselected is not None:
-                bgname = Page.parent.Background[Page.bgselected][1]
-                if len(bgname) == 0:
-                    # Prevent saving no name
-                    bgname = "NoName"
-                bgrate = Page.parent.Background[Page.bgselected][0]
-                Background.append([ "bg name", bgname ])
-                Background.append([ "bg rate [kHz]", bgrate ])
+            if len(corr.backgrounds) == 1:
+                Background.append([ "bg name", 
+                                    corr.backgrounds[0].name])
+                Background.append([ "bg rate [kHz]", 
+                                    corr.backgrounds[0].countrate])
                 InfoDict["background"] = Background
         ## Function doc string
-        InfoDict["modeldoc"] = [Page.active_fct.func_doc]
+        InfoDict["modeldoc"] = [corr.fit_model.description_long]
         InfoDict["title"] = Title
 
         return InfoDict
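
GetInfo above builds its report from per-section lists of [label, value]
pairs before joining everything into one unicode string. A toy sketch of
that formatting step (hypothetical helper, ASCII labels for brevity):

    def format_section(header, items):
        lines = [header]
        lines += ["  {}\t{}".format(k, v) for (k, v) in items]
        return "\n".join(lines)

    print(format_section("Fitting:",
                         [("Chi2", 1.23), ("Algorithm", "Lev-Mar")]))
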
diff --git a/pycorrfit/tools/overlaycurves.py b/pycorrfit/tools/overlaycurves.py
index cb165e6..a520a7d 100644
--- a/pycorrfit/tools/overlaycurves.py
+++ b/pycorrfit/tools/overlaycurves.py
@@ -97,9 +97,8 @@ class Wrapper_Tools(object):
         for i in np.arange(N):
             Page = self.parent.notebook.GetPage(i)
             key = Page.counter
-            if Page.dataexp is not None:
-                curve = 1*Page.dataexp
-                curve[:,1] *= Page.normfactor
+            if Page.corr.correlation is not None:
+                curve = 1*Page.corr.correlation_plot
                 curvedict[key] = curve
                 labels[key] = Page.tabtitle.GetValue()
         return curvedict, labels
@@ -177,8 +176,9 @@ class Wrapper_Tools(object):
             for Page in pagerem:
                 j = self.parent.notebook.GetPageIndex(Page)
                 self.parent.notebook.DeletePage(j)
+            self.OnPageChanged()
         dlg.Destroy()
-        self.OnPageChanged()
+        
 
 
     def OnSelectionChanged(self, keylist, trigger=None):
@@ -240,7 +240,7 @@ class UserSelectCurves(wx.Frame):
         # Get the window positioning correctly
         pos = self.parent.GetPosition()
         pos = (pos[0]+100, pos[1]+100)
-        wx.Frame.__init__(self, parent=self.parent, title="Curve selection",
+        wx.Frame.__init__(self, parent=self.parent, title="Overlay curves",
                  pos=pos, style=wx.DEFAULT_FRAME_STYLE|wx.FRAME_FLOAT_ON_PARENT,
                  size=(800,500))
         ## Pre-process
@@ -255,8 +255,7 @@ class UserSelectCurves(wx.Frame):
             ctrl = "Apple"
         else:
             ctrl = "Ctrl"
-        text = "Select the curves to keep. \n" +\
-               "By holding down the '"+ctrl+"' key, single curves can be \n" +\
+        text = "By holding down the '"+ctrl+"' key, single curves can be \n" +\
                "selected or deselected. The 'Shift' key can be used \n" +\
                "to select groups."
         self.upperSizer.Add(wx.StaticText(panel_top, label=text))
@@ -279,15 +278,23 @@ class UserSelectCurves(wx.Frame):
                 if self.selkeys.count(self.curvekeys[i]) == 0:
                     self.SelectBox.Deselect(i)
         self.Bind(wx.EVT_LISTBOX, self.OnUpdatePlot, self.SelectBox)
-        self.boxSizer.Add(self.SelectBox)
-        # Button APPLY
-        btnok = wx.Button(panel_bottom, wx.ID_ANY, 'Apply')
-        self.Bind(wx.EVT_BUTTON, self.OnPushResults, btnok)
-        self.boxSizer.Add(btnok)
+        self.boxSizer.Add(self.SelectBox, 1, wx.EXPAND)
+        minsx = self.boxSizer.GetMinSize()[0]
+        # Button REMOVE
+        btnrem = wx.Button(panel_bottom, wx.ID_ANY, 'Remove selected')
+        self.Bind(wx.EVT_BUTTON, self.OnPushResultsRemove, btnrem)
+        btnrem.SetMinSize((minsx, -1))
+        self.boxSizer.Add(btnrem)
+        # Button KEEP
+        btnkep = wx.Button(panel_bottom, wx.ID_ANY, 'Keep selected')
+        self.Bind(wx.EVT_BUTTON, self.OnPushResultsKeep, btnkep)
+        self.boxSizer.Add(btnkep)
+        btnkep.SetMinSize((minsx, -1))
         # Button CANCEL
         btncancel = wx.Button(panel_bottom, wx.ID_ANY, 'Cancel')
         self.Bind(wx.EVT_BUTTON, self.OnCancel, btncancel)
         self.boxSizer.Add(btncancel)
+        btncancel.SetMinSize((minsx, -1))
         # Finish off sizers
         panel_top.SetSizer(self.upperSizer)
         panel_bottom.SetSizer(self.boxSizer)
@@ -314,6 +321,17 @@ class UserSelectCurves(wx.Frame):
         self.Show(True)
 
     
+    def GetSelection(self):
+        keyssel = list()
+        for i in self.SelectBox.GetSelections():
+            keyssel.append(self.curvekeys[i])
+        keysnosel = list()
+        for key in self.curvekeys:
+            if keyssel.count(key) == 0:
+                keysnosel.append(key)
+        return keyssel, keysnosel
+    
+    
     def ProcessDict(self, e=None):
         # Define the order of keys used.
         # We want to sort the keys, such that #10: is not before #1:
@@ -342,15 +360,15 @@ class UserSelectCurves(wx.Frame):
         self.wrapper.OnClose()
         
 
-    def OnPushResults(self, e=None):
+    def OnPushResultsRemove(self, e=None):
         # Get keys from selection
-        keyskeep = list()
-        for i in self.SelectBox.GetSelections():
-            keyskeep.append(self.curvekeys[i])
-        keysrem = list()
-        for key in self.curvekeys:
-            if keyskeep.count(key) == 0:
-                keysrem.append(key)
+        keysrem, keyskeep = self.GetSelection()
+        self.wrapper.OnResults(keyskeep, keysrem)
+
+
+    def OnPushResultsKeep(self, e=None):
+        # Get keys from selection
+        keyskeep, keysrem = self.GetSelection()
         self.wrapper.OnResults(keyskeep, keysrem)
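
The new GetSelection helper above lets "Keep selected" and "Remove
selected" share one code path: it returns the selected keys together with
their complement, and the two buttons just swap the argument order when
calling OnResults. A standalone sketch (hypothetical helper name):

    def split_selection(all_keys, selected_indices):
        selected = [all_keys[i] for i in selected_indices]
        rest = [k for k in all_keys if k not in selected]
        return selected, rest

    keep, rem = split_selection(["#1:", "#2:", "#3:"], [0, 2])
    print(keep, rem)  # ['#1:', '#3:'] ['#2:']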
 
 
diff --git a/pycorrfit/tools/parmrange.py b/pycorrfit/tools/parmrange.py
index bba050f..c1d45d3 100644
--- a/pycorrfit/tools/parmrange.py
+++ b/pycorrfit/tools/parmrange.py
@@ -42,12 +42,12 @@ class RangeSelector(wx.Frame):
     def FillPanel(self):
         """ Fill the panel with parameters from the page
         """
-        
-        self.parameter_range = np.zeros(self.Page.parameter_range.shape)
-        labels, parmleft = mdls.GetHumanReadableParms(self.Page.modelid,  # @UnusedVariable
-                                                 self.Page.parameter_range[:,0])
-        labels, parmright = mdls.GetHumanReadableParms(self.Page.modelid,
-                                                 self.Page.parameter_range[:,1])
+        corr = self.Page.corr
+        self.parameter_range = np.zeros_like(corr.fit_parameters_range)
+        labels, parmleft = mdls.GetHumanReadableParms(corr.fit_model.id,  # @UnusedVariable
+                                                 corr.fit_parameters_range[:,0])
+        labels, parmright = mdls.GetHumanReadableParms(corr.fit_model.id,
+                                                 corr.fit_parameters_range[:,1])
         self.parameter_range[:,0] = np.array(parmleft)
         self.parameter_range[:,1] = np.array(parmright)
         # create line
@@ -126,6 +126,7 @@ class RangeSelector(wx.Frame):
         """ Called whenever something is edited in this frame.
             Writes back parameter ranges to the page
         """
+        corr = self.Page.corr
         # Read out parameters from all controls
         for i in range(len(self.WXparmlist)):
             self.parameter_range[i][0] = self.WXparmlist[i][0].GetValue()
@@ -134,12 +135,11 @@ class RangeSelector(wx.Frame):
                 self.parameter_range[i][1] = 1.01*np.abs(self.parameter_range[i][0])
                 self.WXparmlist[i][2].SetValue(self.parameter_range[i][1])
         # Set parameters
-        parm0 = mdls.GetInternalFromHumanReadableParm(self.Page.modelid,
+        parm0 = mdls.GetInternalFromHumanReadableParm(corr.fit_model.id,
                                                      self.parameter_range[:,0])[1]
-        parm1 = mdls.GetInternalFromHumanReadableParm(self.Page.modelid,
+        parm1 = mdls.GetInternalFromHumanReadableParm(corr.fit_model.id,
                                                      self.parameter_range[:,1])[1]
-        self.Page.parameter_range[:,0] = np.array(parm0)
-        self.Page.parameter_range[:,1] = np.array(parm1)
+        corr.fit_parameters_range = np.dstack((parm0, parm1))[0]
         #self.Page.PlotAll()
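
The write-back above packs the two converted bound arrays into a single
(N, 2) range array with np.dstack; a quick demonstration of that shape
manipulation:

    import numpy as np

    parm0 = [0.0, 1.0, 2.0]     # lower bounds
    parm1 = [10.0, 11.0, 12.0]  # upper bounds
    ranges = np.dstack((parm0, parm1))[0]
    print(ranges.shape)  # (3, 2)
    print(ranges[1])     # [  1.  11.]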
         
 
diff --git a/pycorrfit/tools/statistics.py b/pycorrfit/tools/statistics.py
index 8c0e8d4..668fab6 100644
--- a/pycorrfit/tools/statistics.py
+++ b/pycorrfit/tools/statistics.py
@@ -8,13 +8,17 @@ Values are sorted according to the page number.
 """
 from __future__ import division
 
+import codecs
 import wx
 import wx.lib.plot as plot              # Plotting in wxPython
 import wx.lib.scrolledpanel as scrolled
 import numpy as np
+import re
 
 from .info import InfoClass
 from .. import misc
+from .. import models as mdls
+
 
 # Menu entry name
 MENUINFO = ["&Statistics view", "Show some session statistics."]
@@ -149,6 +153,7 @@ class Stat(wx.Frame):
         
         ## Plotting panel
         self.canvas = plot.PlotCanvas(self.sp)
+        self.canvas.SetEnableZoom(True)
         self.sp.SplitVertically(self.panel, self.canvas, px+5)
         ## Icon
         if parent.MainIcon is not None:
@@ -173,58 +178,33 @@ class Stat(wx.Frame):
         # values in the statistics window afterwards.
         # new iteration
         keys = Infodict.keys()
-        body = list()
-        tail = list()
+        parms = list()
+        errparms = list()
 
         for key in keys:
-            # "title" - filename/title first
-            if key == "title":
-                for item in Infodict[key]:
-                    if len(item) == 2:
-                        if item[0] == "filename/title":
-                            headtitle = [item]
-                        else:
-                            tail.append(item)
-            # "title" - filename/title first
-            elif key == "parameters":
-                headparm = list()
-                bodyparm = list()
-                for parm in Infodict[key]:
-                    headparm.append(parm)
-                    try:
-                        for fitp in Infodict["fitting"]:
-                            parmname = parm[0]
-                            errname = "Err "+parmname
-                            if fitp[0] == errname:
-                                headparm.append(fitp)
-                    except:
-                        # There was not fit, the fit with "Lev-Mar"
-                        # was not good, or another fit algorithm was
-                        # used.
-                        pass
-            elif key == "fitting":
-                for fitp in Infodict[key]:
-                    # We added the error data before in the parm section
-                    if unicode(fitp[0])[0:4] != u"Err ":
-                        tail.append(fitp)
-            elif key == "supplement":
-                body += Infodict[key]
-            # Append all other items
-            elif key == "background":
-                body += Infodict[key]
-            else:
-                for item in Infodict[key]:
-                    if item is not None and len(item) == 2:
-                        tail.append(item)
-        # Bring lists together
-        head = headtitle + headparm
-        body = bodyparm + body
+            for item in Infodict[key]:
+                if item is not None:
+                    if key == "fitting" and item[0].startswith("Err "):
+                        errparms.append(item)
+                    elif len(item) == 2:
+                        parms.append(item)
+
+        # Separate checkbox for fit errors
+        if len(errparms) > 0:
+            parms.append(("Fit errors", errparms))
         
-        Info = head + body + tail
+        Info = Stat.SortParameters(parms)
 
         # List of default checked parameters:
         checked = np.zeros(len(Info), dtype=np.bool)
-        checked[:len(head)] = True
+        # Fit parameters
+        pbool = page.corr.fit_parameters_variable
+        model = mdls.modeldict[page.corr.fit_model.id]
+        pname = mdls.GetHumanReadableParms(model.id, model.parameters[1])[0]
+        checkadd = np.array(pname)[pbool]
+        for ii, p in enumerate(Info):
+            if p[0] in checkadd:
+                checked[ii] = True
         # A list with additional strings that should be default checked
         # if found somewhere in the data.
         checklist = ["cpp", "duration", "bg rate", "avg.", "Model name"]
@@ -233,13 +213,15 @@ class Stat(wx.Frame):
             for checkitem in checklist:
                 if item[0].count(checkitem):
                     checked[i] = True
-        # Alist with strings that should not be checked:
-        checklist = ["Err "]
+        # A list with strings that should not be checked:
+        nochecklist = []
         for i in range(len(Info)):
             item = Info[i]
-            for checkitem in checklist:
+            for checkitem in nochecklist:
                 if item[0].count(checkitem):
                     checked[i] = False
+        
+        
         if return_std_checked:
             return Info, checked
         else:
@@ -248,18 +230,15 @@ class Stat(wx.Frame):
         
     def GetListOfPlottableParms(self, e=None, return_values=False,
                                 page=None):
-        """ Returns sorted list of parameters that can be plotted.
-            (This means that the values are convertable to floats)
+        """ Returns list of parameters that can be plotted.
+            (This means that the values are convertible to floats)
             If return_values is True, then a second list with
             the corresponding values is returned.
         """
         if page is None:
             page = self.Page
         if self.parent.notebook.GetPageCount() != 0:
-            #Info = self.InfoClass.GetPageInfo(self.Page)
             Info = self.GetListOfAllParameters(page=page)
-            #keys = Info.keys()
-            #keys.sort()
             parmlist = list()
             parmvals = list()
             for item in Info:
@@ -282,6 +261,10 @@ class Stat(wx.Frame):
 
 
     def GetWantedParameters(self):
+        """
+        Updates self.SaveInfo with all the information that will be
+        saved to the table.
+        """
         strFull = self.WXTextPages.GetValue()
         PageNumbers = misc.parseString2Pagenum(self, strFull)
         # Get the wanted parameters from the selection.
@@ -293,7 +276,7 @@ class Stat(wx.Frame):
         pages = list()
         for i in np.arange(self.parent.notebook.GetPageCount()):
             Page = self.parent.notebook.GetPage(i)
-            if Page.modelid == self.Page.modelid:
+            if Page.modelid == self.Page.corr.fit_model.id:
                 # Only pages with same modelid
                 if int(Page.counter.strip("#: ")) in PageNumbers:
                     # Only pages selected in self.WXTextPages
@@ -323,16 +306,19 @@ class Stat(wx.Frame):
         # covers missing values. This means checking for
         #    "label == subitem[0]"
         # and iteration over AllInfo with that condition.
-        for Info in pagekeys:
+        for ii in pagekeys:
             pageinfo = list()
             for label in checked:
                 label_in_there = False
-                for item in AllInfo[Info]:
-                    for subitem in AllInfo[Info][item]:
+                for item in AllInfo[ii]:
+                    for subitem in AllInfo[ii][item]:
                         if subitem is not None and len(subitem) == 2:
                             if label == subitem[0]:
                                 label_in_there = True
                                 pageinfo.append(subitem)
+                            elif label == "Fit errors" and subitem[0].startswith("Err "):
+                                label_in_there = True
+                                pageinfo.append(subitem)
                 if label_in_there == False:
                     # No data available
                     pageinfo.append([label, "NaN"])
@@ -436,7 +422,7 @@ class Stat(wx.Frame):
         pages = list()
         for i in np.arange(self.parent.notebook.GetPageCount()):
             Page = self.parent.notebook.GetPage(i)
-            if Page.modelid == self.Page.modelid:
+            if Page.corr.fit_model.id == self.Page.corr.fit_model.id:
                 # Only pages with same modelid
                 if int(Page.counter.strip("#: ")) in PageNumbers:
                     # Only pages selected in self.WXTextPages
@@ -516,7 +502,7 @@ class Stat(wx.Frame):
             return
         elif trigger in ["tab_init"] and page is not None:
             # Check if we have to replot for a new model
-            if self.Page.modelid == page.modelid:
+            if self.Page.corr.fit_model.id == page.corr.fit_model.id:
                 return
         if (trigger in ["page_add_finalize"] and 
             self.WXTextPages.GetValue() == "1"):
@@ -591,23 +577,23 @@ class Stat(wx.Frame):
             if filename.lower().endswith(".txt") is not True:
                 filename = filename+".txt"
             dirname = dlg.GetDirectory()
-            openedfile = open(filename, 'wb')
+            openedfile = codecs.open(filename, 'w', encoding="utf-8")
             # Get Parameterlist of all Pages with same model id as
             # Self.Page
             # This creates self.SaveInfo:
             self.GetWantedParameters()
             # Write header
-            linestring = ""
+            linestring = u""
             for atuple in self.SaveInfo[0]:
-                linestring += str(atuple[0])+"\t"
+                linestring += u"{}\t".format(atuple[0])
             # remove trailing "\t"
-            openedfile.write(linestring.strip()+"\r\n")
+            openedfile.write(u"{}\r\n".format(linestring.strip()))
             # Write data         
             for item in self.SaveInfo:
-                linestring = ""
+                linestring = u""
                 for btuple in item:
-                    linestring += str(btuple[1])+"\t"
-                openedfile.write(linestring.strip()+"\r\n")
+                    linestring += u"{}\t".format(btuple[1])
+                openedfile.write(linestring.strip()+u"\r\n")
             openedfile.close()
         else:
             dirname = dlg.GetDirectory()
@@ -618,3 +604,144 @@ class Stat(wx.Frame):
     def SetPageNumbers(self, pagestring):
         self.WXTextPages.SetValue(pagestring)
 
+    @staticmethod
+    def SortParameters(parms):
+        u"""
+        Sort a list of tuples according to the first item.
+        The sorting convention was agreed upon in issue #113:
+        
+        - at the beginning: avg. countrates and particle numbers
+        - fast components go before slow components:
+          e.g. [τ_trip, τ_diff]
+        - model parameters are sorted logically according to their origin:
+          e.g. [T1, τ_trip1], or [F1, τ_diff1], or [T2, τ_trip2]
+        - if the parameter ends with a number, then we sort it to the
+          logical blocks - includes n1, n2, etc.
+        - at end: other fitting parameters and mode information
+
+        fitting parameters
+        
+            intensities
+            n
+            tautrip1
+            T1
+            tautrip2
+            T2
+            tau1
+            F1
+            tau2
+            F2
+            tau3
+            F3
+            alpha
+            SP
+        
+        non-fitting parameters
+        
+            model name
+            chisquare
+            weighted fit
+            interval
+            measurement time
+            ...
+            model id
+        """
+        
+        startswith_sort = [
+                           u"avg. signal",
+                           u"n",
+                           u"T",
+                           u"τ_trip",
+                           u"F",
+                           u"C",
+                           u"D",
+                           u"τ",
+                           u"τ_diff",
+                           u"alpha",
+                           u"SP",
+                           ]
+
+        otherparms = list()
+
+        # collect parameter labels from all models that are not covered by startswith_sort
+        for m in mdls.models:
+            for p in mdls.GetHumanReadableParms(m.id, m.parameters[1])[0]:
+                exists = False
+                for sw in startswith_sort+otherparms:
+                    if p.startswith(sw):
+                        exists = True
+                if not exists:
+                    otherparms.append(p)
+        
+        # sort the other parameters by name
+        otherparms.sort()
+        # special offsets to push labels such as "Type" and "Fit ..."
+        # to the end (and to distinguish e.g. "T" from "Type"):
+        special_off_start = ["Type", "Fit"]
+        
+        def rate_tuple(item):
+            x = item[0]
+            return rate(x)
+        
+        def rate(x):
+            """
+            rate a parameter for sorting.
+            lower values are at the beginning of the list.
+            """
+            x = x.split("[")[0]
+            # start at the top
+            r = 0
+            
+            # BLOCK OFFSET
+            try:
+                intx = int(x[-1])
+            except (ValueError, IndexError):
+                pass
+            else:
+                # penalty: belongs to a numbered block
+                r += 3 + intx
+            
+            # STARTSWITH PENALTY
+            for p in startswith_sort:
+                if x.startswith(p):
+                    r += 1 + 3*(startswith_sort.index(p))
+                    break
+        
+            # Block offset integer
+            non_decimal = re.compile(r'[^\d]+')
+            pnum = non_decimal.sub("", x)
+            if len(pnum) > 0:
+                r += int(pnum)
+                
+            if x.count("3D"):
+                r -= 3
+            if x.count("2D"):
+                r -= 0
+        
+            # Other Parameters
+            for p in otherparms:
+                if p.startswith(x):
+                    r += (otherparms.index(p)) + 3*len(startswith_sort)
+
+            # Special offsets
+            for p in special_off_start:
+                if x.startswith(p):
+                    r += 300
+
+            if r == 0:
+                r = 10000  # unmatched labels are sorted to the very end
+
+            return r
+
+
+        def compare(x, y):
+            """
+            Compare two tuples by their rating; returns a negative,
+            zero, or positive number, as required by `cmp`-sorting.
+            """
+            rx = rate_tuple(x)
+            ry = rate_tuple(y)
+            
+            return rx-ry
+
+        return sorted(parms, cmp=compare)
\ No newline at end of file
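
The net effect of SortParameters is easiest to see in isolation. Below is a
minimal standalone sketch of the prefix-rating idea (simplified: the
numbered-block offsets, the 3D/2D bonus, and the special "Type"/"Fit"
offsets are omitted, and Python's key= is used instead of the cmp=
comparator above); the names prefix_order and rate are illustrative only:

    # -*- coding: utf-8 -*-
    # Simplified illustration of the prefix rating used by SortParameters.
    prefix_order = [u"avg. signal", u"n", u"T", u"τ_trip", u"F", u"τ"]

    def rate(label):
        name = label.split("[")[0].strip()  # strip a unit such as "[ms]"
        for i, prefix in enumerate(prefix_order):
            if name.startswith(prefix):
                return i
        return len(prefix_order)  # unknown labels go to the end

    parms = [(u"τ_diff [ms]", 10.0), (u"n", 2.4), (u"T", 0.2)]
    print([p[0] for p in sorted(parms, key=lambda item: rate(item[0]))])
    # resulting order: n, T, τ_diff [ms]
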
diff --git a/pycorrfit/tools/trace.py b/pycorrfit/tools/trace.py
index 387019a..21ca6f3 100644
--- a/pycorrfit/tools/trace.py
+++ b/pycorrfit/tools/trace.py
@@ -36,7 +36,7 @@ class ShowTrace(wx.Frame):
             pass
         else:
             self.OnDraw()
-        initial_size = (350,150)
+        initial_size = (780,250)
         self.SetSize(initial_size)
         self.SetMinSize(initial_size)
         ## Icon
@@ -52,39 +52,48 @@ class ShowTrace(wx.Frame):
 
 
     def OnDraw(self):
-        if self.Page.trace is not None:
-            self.trace = 1*self.Page.trace
+        traces = self.Page.corr.traces
+        if len(traces) == 1:
+            self.trace = 1*traces[0].trace
             # We want to have the trace in [s] here.
             self.trace[:,0] = self.trace[:,0]/1000
-            line = plot.PolyLine(self.trace, legend='', colour='blue',
-                                 width=1)
+            line = plot.PolyLine(self.trace,
+                    legend='{:.2f}kHz'.format(traces[0].countrate),
+                    colour='blue', width=1)
             lines = [line]
             self.canvas.SetEnableLegend(True)
             xmax = np.max(self.trace[:,0])
             xmin = np.min(self.trace[:,0])
-        elif self.Page.tracecc is not None:
+            ymax = np.max(self.trace[:,1])
+            ymin = np.min(self.trace[:,1])
+        elif len(traces) == 2:
             # This means that we have two (CC) traces to plot
-            self.tracea = 1*self.Page.tracecc[0]
+            self.tracea = 1*traces[0].trace
             self.tracea[:,0] = self.tracea[:,0]/1000
-            self.traceb = 1*self.Page.tracecc[1]
+            self.traceb = 1*traces[1].trace
             self.traceb[:,0] = self.traceb[:,0]/1000
-            linea = plot.PolyLine(self.tracea, legend='channel 1', 
-                                  colour='blue', width=1)
-            lineb = plot.PolyLine(self.traceb, legend='channel 2', 
-                                  colour='red', width=1)
+            linea = plot.PolyLine(self.tracea,
+                    legend='channel 1\n{:.2f}kHz'.format(traces[0].countrate), 
+                    colour='blue', width=1)
+            lineb = plot.PolyLine(self.traceb, 
+                    legend='channel 2\n{:.2f}kHz'.format(traces[1].countrate), 
+                    colour='red', width=1)
             lines = [linea, lineb]
             self.canvas.SetEnableLegend(True)
             xmax = max(np.max(self.tracea[:,0]), np.max(self.traceb[:,0]))
             xmin = min(np.min(self.tracea[:,0]), np.min(self.traceb[:,0]))
+            ymax = max(np.max(self.tracea[:,1]), np.max(self.traceb[:,1]))
+            ymin = min(np.min(self.tracea[:,1]), np.min(self.traceb[:,1]))
+
         else: 
             self.canvas.Clear()
             return
         # Plot lines
-        
         self.canvas.Draw(plot.PlotGraphics(lines, 
                                            xLabel='time [s]', 
                                            yLabel='count rate [kHz]'),
-                                           xAxis=(xmin,xmax))
+                                           xAxis=(xmin,xmax),
+                                           yAxis=(ymin,ymax))
 
 
     def OnPageChanged(self, page=None, trigger=None):
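
For context on the trace.py hunk: trace arrays are Nx2 columns of
[time in ms, count rate in kHz]; the tool converts time to seconds for
display and now derives common x/y bounds across both channels. A tiny
sketch with made-up numbers:

    import numpy as np

    # two hypothetical traces: columns are [time in ms, count rate in kHz]
    tracea = np.array([[0., 12.1], [500., 11.8], [1000., 12.4]])
    traceb = np.array([[0., 8.9], [500., 9.3], [1000., 9.1]])
    for t in (tracea, traceb):
        t[:, 0] /= 1000  # ms -> s

    xmin = min(tracea[:, 0].min(), traceb[:, 0].min())  # 0.0
    xmax = max(tracea[:, 0].max(), traceb[:, 0].max())  # 1.0
    ymin = min(tracea[:, 1].min(), traceb[:, 1].min())  # 8.9
    ymax = max(tracea[:, 1].max(), traceb[:, 1].max())  # 12.4
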
diff --git a/pycorrfit/usermodel.py b/pycorrfit/usermodel.py
index 1087cb3..af69dcc 100644
--- a/pycorrfit/usermodel.py
+++ b/pycorrfit/usermodel.py
@@ -10,7 +10,7 @@ We only parse the function with sympy and test it once during
 import. After that, the function is evaluated using eval()!
 """
 
-
+import codecs
 import numpy as np
 import scipy.special as sps
 import sys
@@ -170,12 +170,12 @@ class UserModel(object):
                     # Add whitespaces in model string (looks nicer)
                     for olin in code[1:]:
                         doc = doc + "\n       "+olin.strip()
-                    func.func_doc = doc
+                    func.func_doc = codecs.decode(doc, "utf-8")
                 elif var[0] == "g":
                     substitutes[var] = val.strip()
                 else:
                     # Add value and variable to our lists
-                    labels.append(var)
+                    labels.append(codecs.decode(var, "utf-8"))
                     values.append(float(val))
         # Active Parameters we are using for the fitting
         # [0] labels
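
The usermodel change matters because, under Python 2, model files are read
as byte strings; codecs.decode turns them into unicode objects so that
non-ASCII parameter labels survive later display and export. A minimal
demonstration (Python 2; the byte string here is illustrative):

    import codecs

    raw = "\xcf\x84_diff"               # UTF-8 bytes for u"τ_diff"
    label = codecs.decode(raw, "utf-8")
    print(repr(label))                  # u'\u03c4_diff'
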
diff --git a/setup.py b/setup.py
index 988dfd8..9afb8d8 100644
--- a/setup.py
+++ b/setup.py
@@ -8,8 +8,9 @@
 #  pip install wheel twine
 #  python setup.py bdist_wheel
 from __future__ import print_function
-from setuptools import setup, Extension
+from setuptools import setup, Extension, Command
 import sys
+import subprocess
 
 from os.path import join, dirname, realpath, exists
 from warnings import warn
@@ -39,6 +40,21 @@ else:
                         )
               ]
 
+
+class PyTest(Command):
+    """ Perform pytests
+    """
+    user_options = []
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        errno = subprocess.call([sys.executable, 'tests/runtests.py'])
+        raise SystemExit(errno)
+
 # Download documentation if it was not compiled
 Documentation = join(dirname(realpath(__file__)), "doc/PyCorrFit_doc.pdf")
 webdoc = "https://github.com/paulmueller/PyCorrFit/wiki/PyCorrFit_doc.pdf"
@@ -101,7 +117,9 @@ setup(
                  },
     # cython
     ext_modules=EXTENSIONS,
-    cmdclass={"build_ext": build_ext},
+    cmdclass={'build_ext': build_ext,
+              'test': PyTest,
+             },
     # requirements
     extras_require = {
         # If you need the GUI of this project in your project, add
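
With the cmdclass entry above, the test suite can be launched through
setuptools (assuming tests/runtests.py is present, as the command expects):

    python setup.py test

Raising SystemExit(errno) propagates the exit status of the test run, so a
failing suite also fails the setup command.
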
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..64694ba
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,16 @@
+### Test Scripts
+
+This will run all tests:
+
+    python runtests.py
+
+Pretty HTML output is possible on Unix (requires the package `aha`):
+
+    ./runtests_html.sh
+
+
+### Running single tests
+
+Execute a test script directly, e.g.
+
+    python test_simple.py
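
Although this diff only wires up runtests.py, the test functions follow the
test_* naming convention, so they should also be collectible with pytest
(not declared as a dependency here), e.g.:

    python -m pytest tests/
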
diff --git a/tests/test_fit_models.py b/tests/test_fit_models.py
new file mode 100644
index 0000000..6acb059
--- /dev/null
+++ b/tests/test_fit_models.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Go through each model, vary one parameter and fit it back to the
+default value of that model.
+"""
+from __future__ import division, print_function
+import sys
+from os.path import abspath, dirname
+
+import matplotlib.pylab as plt  # @UnusedImport
+import numpy as np
+
+# Add parent directory to beginning of path variable
+sys.path.insert(0, dirname(dirname(abspath(__file__))))
+
+import pycorrfit
+from pycorrfit.fcs_data_set import Correlation, Fit
+
+
+# GLOBAL PARAMETERS FOR THIS TEST:
+TAUMIN = 1e-3
+TAUMAX = 1e6
+TAULEN = 100
+FITALG = "Lev-Mar"
+
+
+def fit_single_parameter(modelid, fullparms, parmid, parmval, noise=False):
+    """
+    Use the full parameter set `fullparms` and leave a single parameter
+    `parmid` variable during the fit.
+    Returns the fitted value of the parameter with index `parmid`
+    """
+    corr = Correlation(fit_model=modelid, fit_algorithm=FITALG, verbose=0)
+    tau = np.exp(np.linspace(np.log(TAUMIN),np.log(TAUMAX), TAULEN))
+    # Create artificial data by using the current fit_model
+    data = corr.fit_model(fullparms, tau)
+    if noise:
+        if noise is True:
+            deltanoise = (np.max(data)-np.min(data))/20
+        else:
+            deltanoise = (np.max(data)-np.min(data))*noise
+        anoise = (np.random.random(data.shape[0])-.5)*deltanoise
+        data += anoise
+    # Add artificial data to data set
+    corr.correlation = np.dstack((tau, data))[0]
+    # Set variable parameters
+    fit_bool = np.zeros(fullparms.shape[0], dtype=bool)
+    fit_bool[parmid] = True
+    corr.fit_parameters_variable = fit_bool
+    fullparms_edit = fullparms.copy()
+    fullparms_edit[parmid] = parmval
+    corr.fit_parameters = fullparms_edit
+    Fit(corr)
+    return corr.fit_parameters[parmid]
+
+
+def deviate_parameter(model, parmid):
+    """
+    Returns a deviated version of the parameter with id `parmid`:
+    zero defaults are shifted by +0.1, all others are scaled by 0.9.
+    """
+    val = model.default_values[parmid]
+    if val == 0:
+        val += .1
+    else:
+        val *= .9
+    return val
+
+
+def test_fit_single_parameter():
+    """
+    Deviate a single parameter and fit it back.
+    """
+    faillist=list()
+    for model in pycorrfit.models.models:
+        fullparms = model.default_values
+        for ii, val in enumerate(fullparms):
+            newval = deviate_parameter(model, ii)
+            fitval = fit_single_parameter(model.id, fullparms, ii, newval, noise=False)
+            if not np.allclose([val], [fitval]):
+                faillist.append([model.id, model.parameters[0][ii], val, fitval])
+    if len(faillist) != 0:
+        raise ValueError("Model tests failed for:\n", faillist)
+
+
+def fit_single_parameter_with_noise(noise=0.005):
+    """
+    Deviate a single parameter and fit it back with artificial noise added.
+    """
+    faillist=list()
+    for model in pycorrfit.models.models:
+        fullparms = model.default_values
+        for ii, val in enumerate(fullparms):
+            newval = deviate_parameter(model, ii)
+            fitval = fit_single_parameter(model.id, fullparms, ii, newval, noise=noise)
+            if not np.allclose([val], [fitval], atol=.1, rtol=.1):
+                faillist.append([model.id, model.parameters[0][ii], val, fitval])
+    return faillist
+
+
+def test_fit_single_parameter_with_noise_one_permille():
+    faillist = fit_single_parameter_with_noise(noise=0.001)
+    if len(faillist) > 1:
+        raise ValueError("Model tests failed for:\n", faillist)
+
+def test_fit_single_parameter_with_noise_two_percent():
+    faillist = fit_single_parameter_with_noise(noise=0.02)
+    if len(faillist) > 5:
+        raise ValueError("Model tests failed for:\n", faillist)
+
+def test_fit_single_parameter_with_noise_five_percent():
+    faillist = fit_single_parameter_with_noise(noise=0.05)
+    if len(faillist) > 10:
+        raise ValueError("Model tests failed for:\n", faillist)
+
+
+if __name__ == "__main__":
+    # Run all tests
+    loc = locals()
+    for key in list(loc.keys()):
+        if key.startswith("test_") and hasattr(loc[key], "__call__"):
+            loc[key]()
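
As a quick interactive check, the helpers above can also be used directly
(run from within tests/ so that the import resolves; model index 0 is an
arbitrary choice):

    import numpy as np
    import pycorrfit
    from test_fit_models import deviate_parameter, fit_single_parameter

    model = pycorrfit.models.models[0]
    defaults = model.default_values
    start = deviate_parameter(model, 0)   # nudge the first parameter
    fitted = fit_single_parameter(model.id, defaults, 0, start, noise=False)
    print(np.allclose([defaults[0]], [fitted]))  # expected: True
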
diff --git a/tests/test_global_fit.py b/tests/test_global_fit.py
new file mode 100644
index 0000000..bde5b81
--- /dev/null
+++ b/tests/test_global_fit.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import sys
+from os.path import abspath, dirname
+import numpy as np
+
+# Add parent directory to beginning of path variable
+sys.path.insert(0, dirname(dirname(abspath(__file__))))
+
+import pycorrfit  # @UnusedImport
+from pycorrfit.fcs_data_set import Correlation, Fit
+
+
+def create_corr():
+    n = 2.4
+    taud = 10
+    SP = 3.3
+    tau = np.exp(np.linspace(np.log(1e-3),np.log(1e6), 10))
+    corr1 = Correlation(fit_model=6002)
+    corr1.lag_time = tau
+    # 0: n
+    # 1: τ_diff [ms]
+    p1a = corr1.fit_parameters.copy()
+    p1b = p1a.copy()
+    p1b[0] = n
+    p1b[1] = taud
+    # write values and return to original
+    corr1.fit_parameters = p1b 
+    corr1.correlation = corr1.modeled_fit.copy()
+    corr1.fit_parameters = p1a
+    corr1.fit_parameters_variable = [True, True, False, False, False]
+    
+    corr2 = Correlation(fit_model=6011)
+    corr2.lag_time = tau
+    # 0: n
+    # 3: τ_diff [ms]
+    # 4: SP
+    p2a = corr2.fit_parameters.copy()
+    p2b = p2a.copy()
+    p2b[0] = n
+    p2b[3] = taud
+    p2b[4] = SP
+    # write values and return to original
+    corr2.fit_parameters = p2b 
+    corr2.correlation = corr2.modeled_fit.copy()
+    corr2.fit_parameters = p2a
+    corr2.fit_parameters_variable = [True, False, False, True, True, False]
+    
+    corrs = [corr1, corr2]
+    initparms = np.array([n, taud, SP])
+    
+    return corrs, initparms
+    
+def test_globalfit():
+    corrs, initparms = create_corr()
+    # commence global fit
+    globalfit = Fit(correlations=corrs, global_fit=True)
+    assert np.allclose(globalfit.fit_parm, initparms), "Global fit failed"
+    
+
+if __name__ == "__main__":
+    test_globalfit()
+    
\ No newline at end of file
diff --git a/tests/test_simple.py b/tests/test_simple.py
new file mode 100644
index 0000000..fe0a869
--- /dev/null
+++ b/tests/test_simple.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import sys
+from os.path import abspath, dirname
+
+import matplotlib.pylab as plt
+import numpy as np
+
+# Add parent directory to beginning of path variable
+sys.path.insert(0, dirname(dirname(abspath(__file__))))
+
+import pycorrfit  # @UnusedImport
+from pycorrfit.fcs_data_set import Correlation, Fit
+
+
+def create_corr():
+    corr = Correlation()
+
+    tau = np.exp(np.linspace(np.log(1e-3),np.log(1e6), 10))
+    data = corr.fit_model(corr.fit_parameters, tau)
+    noise = (np.random.random(data.shape[0])-.5)*.0005
+    data += noise
+
+    corr.correlation = np.dstack((tau, data))[0]
+    return corr
+
+
+def test_simple_corr():
+    corr = create_corr()
+    oldparms = corr.fit_parameters.copy()
+    temp = corr.fit_parameters  # deviate the parameters in place
+    temp[0] *= 2
+    temp[-1] *= .1
+    
+    Fit(corr)
+    
+    res = oldparms - corr.fit_parameters
+
+    assert np.allclose(res, np.zeros_like(res), atol=0.009)
+    
+
+if __name__ == "__main__":
+    corr = create_corr()
+
+    fig, (ax1, ax2) = plt.subplots(2,1)
+    ax1.set_xscale("log")
+    ax2.set_xscale("log")
+
+    print(corr.fit_parameters)
+    temp = corr.fit_parameters
+    temp[0] *= 2
+    temp[-1] *= .1
+    ax1.plot(corr.correlation_fit[:,0], corr.correlation_fit[:,1])
+    ax1.plot(corr.modeled_fit[:,0], corr.modeled_fit[:,1])
+    print(corr.fit_parameters)
+
+    Fit(corr)
+
+    print(corr.fit_parameters)
+
+    ax2.plot(corr.correlation_fit[:,0], corr.correlation_fit[:,1])
+    ax2.plot(corr.modeled_fit[:,0], corr.modeled_fit[:,1])
+
+    plt.show()

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/pycorrfit.git



More information about the debian-med-commit mailing list