[med-svn] [pycorrfit] 02/18: Imported Upstream version 0.9.8+dfsg
Alex Mestiashvili
malex-guest at moszumanska.debian.org
Fri Jul 22 14:23:42 UTC 2016
This is an automated email from the git hooks/post-receive script.
malex-guest pushed a commit to branch master
in repository pycorrfit.
commit 3be6b5d3c602b04fa80d887db4f45f20f6fb0bda
Author: Alexandre Mestiashvili <alex at biotec.tu-dresden.de>
Date: Fri Jun 24 13:18:36 2016 +0200
Imported Upstream version 0.9.8+dfsg
---
ChangeLog.txt | 29 +
MANIFEST.in | 3 +-
PKG-INFO | 33 -
README.md | 28 -
README.rst | 99 +++
Readme.txt | 28 -
doc/PyCorrFit_doc_content.tex | 3 +-
.../CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id | 1 -
pycorrfit/__init__.py | 12 +-
pycorrfit/__main__.py | 8 +-
pycorrfit/correlation.py | 550 +++++++++++++
pycorrfit/{fcs_data_set.py => fit.py} | 915 +++++----------------
pycorrfit/gui/__init__.py | 0
pycorrfit/{ => gui}/doc.py | 48 +-
pycorrfit/{ => gui}/edclasses.py | 0
pycorrfit/{ => gui}/frontend.py | 57 +-
pycorrfit/{ => gui}/icon.py | 0
pycorrfit/{ => gui}/main.py | 0
pycorrfit/{ => gui}/misc.py | 19 -
pycorrfit/{ => gui}/page.py | 27 +-
pycorrfit/{ => gui}/plotting.py | 29 +-
pycorrfit/{ => gui}/tools/__init__.py | 0
pycorrfit/{ => gui}/tools/average.py | 4 +-
pycorrfit/{ => gui}/tools/background.py | 7 +-
pycorrfit/{ => gui}/tools/batchcontrol.py | 24 +-
pycorrfit/{ => gui}/tools/chooseimport.py | 2 +-
pycorrfit/{ => gui}/tools/comment.py | 0
pycorrfit/{ => gui}/tools/datarange.py | 0
pycorrfit/{ => gui}/tools/example.py | 0
pycorrfit/{ => gui}/tools/globalfit.py | 3 +-
pycorrfit/{ => gui}/tools/info.py | 14 +-
pycorrfit/{ => gui}/tools/overlaycurves.py | 0
pycorrfit/{ => gui}/tools/parmrange.py | 19 +-
pycorrfit/{ => gui}/tools/plotexport.py | 0
pycorrfit/{ => gui}/tools/simulation.py | 3 +-
pycorrfit/{ => gui}/tools/statistics.py | 3 +-
pycorrfit/{ => gui}/tools/trace.py | 0
pycorrfit/{ => gui}/usermodel.py | 5 +-
pycorrfit/{ => gui}/wxutils.py | 0
pycorrfit/meta.py | 74 ++
pycorrfit/models/MODEL_classic_gaussian_2D.py | 288 -------
pycorrfit/models/MODEL_classic_gaussian_3D.py | 302 -------
pycorrfit/models/MODEL_classic_gaussian_3D2D.py | 156 ----
pycorrfit/models/MODEL_classic_gaussian_TT3D3D.py | 178 ----
pycorrfit/models/__init__.py | 228 +----
pycorrfit/models/classes.py | 155 ++++
pycorrfit/models/control.py | 224 +++++
pycorrfit/models/cp_confocal.py | 15 +
pycorrfit/models/cp_mix.py | 92 +++
pycorrfit/models/cp_triplet.py | 15 +
pycorrfit/models/model_confocal_2d.py | 67 ++
pycorrfit/models/model_confocal_2d_2d.py | 116 +++
pycorrfit/models/model_confocal_3d.py | 77 ++
pycorrfit/models/model_confocal_3d_2d.py | 119 +++
pycorrfit/models/model_confocal_3d_3d.py | 116 +++
pycorrfit/models/model_confocal_t_2d.py | 95 +++
pycorrfit/models/model_confocal_t_2d_2d.py | 151 ++++
pycorrfit/models/model_confocal_t_3d.py | 97 +++
pycorrfit/models/model_confocal_t_3d_2d.py | 159 ++++
pycorrfit/models/model_confocal_t_3d_3d.py | 155 ++++
pycorrfit/models/model_confocal_t_3d_3d_2d.py | 189 +++++
pycorrfit/models/model_confocal_t_3d_3d_3d.py | 190 +++++
pycorrfit/models/model_confocal_tt_2d_2d.py | 167 ++++
pycorrfit/models/model_confocal_tt_3d_2d.py | 177 ++++
pycorrfit/models/model_confocal_tt_3d_3d.py | 176 ++++
pycorrfit/openfile.py | 64 +-
pycorrfit/readfiles/__init__.py | 35 +-
pycorrfit/trace.py | 109 +++
setup.cfg | 3 +
setup.py | 33 +-
tests/README.md | 8 +-
tests/data_file_dl.py | 175 ++++
tests/test_constraints.py | 98 +++
tests/test_file_formats.py | 41 +
tests/test_fit_model_gaussian.py | 276 ++++++-
tests/test_fit_models.py | 29 +-
tests/test_global_fit.py | 4 +-
tests/test_simple.py | 3 +-
78 files changed, 4429 insertions(+), 2200 deletions(-)
diff --git a/ChangeLog.txt b/ChangeLog.txt
index e2cd638..68700b0 100644
--- a/ChangeLog.txt
+++ b/ChangeLog.txt
@@ -1,3 +1,32 @@
+0.9.8
+- Bugfixes:
+ - Indexing error when saving sessions (#154)
+ - Page number truncated in csv export (#159)
+ - Export of csv files used incorrect normalization (#153)
+ - Normalization parameter was not displayed in the 'Info' tool
+0.9.7
+- Second triplet time is now larger than first triplet time
+ by default
+- Remove a hack that causes a run through all pages
+ e.g. when an average is created
+- Bugfixes:
+ - Opening sessions with user-defined models
+ - Saving sessions with comments containing non-ASCII characters
+ - Windows build: Graphical plot export was misconfigured
+ -> added matplotlibrc patch in .spec file
+0.9.6
+- Bugfixes:
+ - Fixed minor wx sizer problems for the tools
+ - Fixed `AttributeError` in page.py if no weights are present
+- New confocal fitting models (#111):
+ - 3D+3D, 2D+2D, 3D+2D; no triplet
+ - T+T+2D+2D, T+T+3D+2D; double triplet
+ - T+3D+3D+3D, T+3D+3D+2D (#40, #59)
+- Under the hood:
+ - Separation of core and GUI modules
+ - Include tests in distributions for PyPI
+ - Improve automated testing on Windows and Mac OS
+ - More constraint options for fitting
0.9.5
- Bugfixes
- Closing the batch control window causes segfault bug (#142)
diff --git a/MANIFEST.in b/MANIFEST.in
index d3c7d05..d00b2e8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -4,8 +4,9 @@ include doc/*.pdf
include doc/*.md
include doc/Images/*
include examples/external_model_functions/*.txt
-include Readme.txt
+include README.md
include ChangeLog.txt
include pycorrfit/readfiles/read_pt3_scripts/*.py
include pycorrfit/readfiles/read_pt3_scripts/*.pyx
include pycorrfit/readfiles/read_pt3_scripts/LICENSE
+include tests/*.py
\ No newline at end of file
diff --git a/PKG-INFO b/PKG-INFO
deleted file mode 100644
index 5396602..0000000
--- a/PKG-INFO
+++ /dev/null
@@ -1,33 +0,0 @@
-Metadata-Version: 1.0
-Name: pycorrfit
-Version: 0.8.0
-Summary: UNKNOWN
-Home-page: https://github.com/FCS-analysis/PyCorrFit
-Author: Paul Mueller
-Author-email: paul.mueller at biotec.tu-dresden.de
-License: GPL v2
-Description: ![PyCorrFit](https://raw.github.com/FCS-analysis/PyCorrFit/master/doc-src/Images/PyCorrFit_logo_dark.png)
- =========
-
- This repository contains the source code of PyCorrFit - a scientific tool for fitting
- correlation curves on a logarithmic plot.
-
- In current biomedical research, fluorescence correlation spectroscopy (FCS) is applied
- to characterize molecular dynamic processes in vitro and in living cells. Commercial
- FCS setups only permit data analysis that is limited to a specific instrument by
- the use of in-house file formats or a finite number of implemented correlation
- model functions. PyCorrFit is a general-purpose FCS evaluation software that,
- amongst other formats, supports the established Zeiss ConfoCor3 ~.fcs file format.
- PyCorrFit comes with several built-in model functions, covering a wide range of
- applications in standard confocal FCS. In addition, it contains equations dealing
- with different excitation geometries like total internal reflection (TIR). For more
- information, visit the official homepage at http://pycorrfit.craban.de.
-
-
- - [Download the latest version](https://github.com/FCS-analysis/PyCorrFit/releases)
- - [Documentation](https://github.com/FCS-analysis/PyCorrFit/raw/master/PyCorrFit_doc.pdf)
- - [Run PyCorrFit from source](https://github.com/FCS-analysis/PyCorrFit/wiki/Running-PyCorrFit-from-source)
- - [Write model functions](https://github.com/FCS-analysis/PyCorrFit/wiki/Writing-model-functions)
- - [Need help?](https://github.com/FCS-analysis/PyCorrFit/wiki/Creating-a-new-issue)
-
-Platform: UNKNOWN
diff --git a/README.md b/README.md
deleted file mode 100644
index cb20f3b..0000000
--- a/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-![PyCorrFit](https://raw.github.com/FCS-analysis/PyCorrFit/master/doc/Images/PyCorrFit_logo_dark.png)
-=========
-[![PyPI](http://img.shields.io/pypi/v/PyCorrFit.svg)](https://pypi.python.org/pypi/pycorrfit)
-[![Build Win](https://img.shields.io/appveyor/ci/paulmueller/PyCorrFit/master.svg?label=build_win)](https://ci.appveyor.com/project/paulmueller/pycorrfit)
-[![Build Mac](https://img.shields.io/travis/FCS-analysis/PyCorrFit/master.svg?label=build_mac)](https://travis-ci.org/FCS-analysis/PyCorrFit)
-
-
-
-This repository contains the source code of PyCorrFit - a scientific tool for fitting
-correlation curves on a logarithmic plot.
-
-In current biomedical research, fluorescence correlation spectroscopy (FCS) is applied
-to characterize molecular dynamic processes in vitro and in living cells. Commercial
-FCS setups only permit data analysis that is limited to a specific instrument by
-the use of in-house file formats or a finite number of implemented correlation
-model functions. PyCorrFit is a general-purpose FCS evaluation software that,
-amongst other formats, supports the established Zeiss ConfoCor3 ~.fcs file format.
-PyCorrFit comes with several built-in model functions, covering a wide range of
-applications in standard confocal FCS. In addition, it contains equations dealing
-with different excitation geometries like total internal reflection (TIR). For more
-information, visit the official homepage at http://pycorrfit.craban.de.
-
-
-- [Download the latest version](https://github.com/FCS-analysis/PyCorrFit/releases)
-- [Documentation](https://github.com/FCS-analysis/PyCorrFit/wiki/PyCorrFit_doc.pdf)
-- [Run PyCorrFit from source](https://github.com/FCS-analysis/PyCorrFit/wiki/Running-from-source)
-- [Write your own model functions](https://github.com/FCS-analysis/PyCorrFit/wiki/Writing-model-functions)
-- [Need help?](https://github.com/FCS-analysis/PyCorrFit/wiki/Creating-a-new-issue)
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..24189ba
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,99 @@
+|PyCorrFit|
+===========
+
+|PyPI Version| |Build Status Win| |Build Status Mac|
+
+A graphical fitting tool for fluorescence correlation spectroscopy (FCS) that supports several file formats, can be applied to a large variety of problems, and aims to be as user-friendly as possible. Some of its features:
+
+- Averaging of curves
+- Background correction
+- Batch processing
+- Overlay tool to identify outliers
+- Fast simulation of model parameter behavior
+- Session management
+- User-defined model functions
+- High quality plot export using LaTeX (bitmap or vector graphics)
+
+
+Getting started
+===============
+
+Installation
+------------
+Installers for PyCorrFit are available at the `release page <https://github.com/FCS-analysis/PyCorrFit/releases>`__.
+
+Documentation
+-------------
+Detailed documentation, including an explanation of the graphical user interface and the available model
+functions, is available as a `PDF file <https://github.com/FCS-analysis/PyCorrFit/wiki/PyCorrFit_doc.pdf>`__.
+
+Wiki
+----
+If you are interested in a specific topic or wish to contribute your own HowTo, have a look at the
+`PyCorrFit wiki <https://github.com/FCS-analysis/PyCorrFit/wiki/>`__. There you will also find information
+on `how to write your own model functions <https://github.com/FCS-analysis/PyCorrFit/wiki/Writing-model-functions>`__.
+
+Problems
+--------
+If you find a bug or need help with a specific topic, do not hesitate to ask a question
+at the `issues page <https://github.com/FCS-analysis/PyCorrFit/wiki/Creating-a-new-issue>`__.
+
+
+Advanced usage
+--------------
+If you have Python installed, you can install PyCorrFit, including its scripting functionality, from the Python Package Index:
+
+::
+
+ pip install pycorrfit[GUI]
+
+More information is available in the `PyCorrFit wiki <https://github.com/FCS-analysis/PyCorrFit/wiki/Running-from-source>`__.
+
+
+Information for developers
+==========================
+
+Running from source
+-------------------
+The easiest way to run PyCorrFit from source is to use
+`Anaconda <http://continuum.io/downloads>`__. PyCorrFit requires wxPython, which is not
+available from the Python Package Index. Make sure you install a unicode build of wxPython.
+Detailed installation instructions are `here <https://github.com/FCS-analysis/PyCorrFit/wiki/Running-from-source>`__.
+
+
+Contributing
+------------
+The main branch for developing PyCorrFit is *develop*. Small changes that do not
+break anything can be submitted to this branch.
+If you want to make larger changes, please (fork PyCorrFit and) create a separate branch,
+e.g. ``my_new_feature_dev``, and create a pull-request to *develop* once you are done making
+your changes.
+Please make sure to also update the
+`changelog <https://github.com/FCS-analysis/PyCorrFit/blob/develop/ChangeLog.txt>`__.
+
+Tests
+-----
+PyCorrFit is tested using pytest. If you have the time, please write test
+methods for your code and put them in the ``tests`` directory. You may
+run the tests manually by issuing:
+
+::
+
+ python setup.py test
+
+
+Windows test binaries
+---------------------
+After each commit to the PyCorrFit repository, a binary installer is created
+by `Appveyor <https://ci.appveyor.com/project/paulmueller/PyCorrFit>`__. Click
+on a build and navigate to ``ARTIFACTS`` (upper-right corner, just below
+the running time of the build). From there you can download the Windows installer for that commit.
+
+
+.. |PyCorrFit| image:: https://raw.github.com/FCS-analysis/PyCorrFit/master/doc/Images/PyCorrFit_logo_dark.png
+.. |PyPI Version| image:: http://img.shields.io/pypi/v/PyCorrFit.svg
+ :target: https://pypi.python.org/pypi/pycorrfit
+.. |Build Status Win| image:: https://img.shields.io/appveyor/ci/paulmueller/PyCorrFit/master.svg?label=build_win
+ :target: https://ci.appveyor.com/project/paulmueller/pycorrfit
+.. |Build Status Mac| image:: https://img.shields.io/travis/FCS-analysis/PyCorrFit/master.svg?label=build_mac
+ :target: https://travis-ci.org/FCS-analysis/PyCorrFit
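
For reference, a minimal scripting sketch against the core classes this version introduces (``Correlation`` and ``Fit``, re-exported in ``pycorrfit/__init__.py`` below). The synthetic curve, the use of the default model identifier 6000 from ``correlation.py``, and the assumption that ``Fit`` runs the minimization on construction are illustrative and not part of this diff:

    import numpy as np
    from pycorrfit import Correlation, Fit

    # Synthetic correlation curve: lag times [s] and G(tau) values.
    tau = 10 ** np.linspace(-6, 0, 200)
    g = 1.0 / (1 + tau / 1e-3)
    corr = Correlation(correlation=np.column_stack((tau, g)),
                       corr_type="AC", title="synthetic AC")

    Fit(corr)                    # fit with the default model (6000)
    print(corr.fit_parameters)   # fitted values, clamped by check_parms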
diff --git a/Readme.txt b/Readme.txt
deleted file mode 100644
index e48aa40..0000000
--- a/Readme.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-PyCorrFit can be used for fitting any data on a semi-log plot. The program focusses on
-Fluorescence Correlation Spectroscopy (FCS) and comes with a couple of features that are
-crucial for FCS data analysis:
-
-- Averaging of curves
-- Background correction
-- Batch processing
-- Overlay tool to identify outliers
-- Fast simulation of model parameter behavior
-- Session management
-- User-defined model functions
-- High quality plot export using LaTeX (bitmap or vector graphics)
-
-For a full list of features and supported file formats visit http://pycorrfit.craban.de.
-There are also precompiled binaries for various systems.
-
-This package provides the Python module `pycorrfit` and its graphical user interface. The
-graphical user interface is written with wxPython. A HowTo for the installation of the
-latest version of PyCorrFit using pip can be found there:
-
-https://github.com/FCS-analysis/PyCorrFit/wiki/Installation_pip
-
-Further reading:
-
-- Latest downloads: https://github.com/FCS-analysis/PyCorrFit/releases
-- Documentation: https://github.com/FCS-analysis/PyCorrFit/wiki/PyCorrFit_doc.pdf
-- Write model functions: https://github.com/FCS-analysis/PyCorrFit/wiki/Writing-model-functions
-- Need help? https://github.com/FCS-analysis/PyCorrFit/wiki/Creating-a-new-issue
diff --git a/doc/PyCorrFit_doc_content.tex b/doc/PyCorrFit_doc_content.tex
index 5ec10d5..6adcbad 100755
--- a/doc/PyCorrFit_doc_content.tex
+++ b/doc/PyCorrFit_doc_content.tex
@@ -11,6 +11,7 @@ under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 2 of the License,
or (at your option) any later version\footnote{\url{http://www.gnu.org/licenses/gpl.html}}.
+
\subsubsection*{What \textit{PyCorrFit} can do}
\begin{itemize}
\item Load correlation curves from numerous correlators
@@ -571,7 +572,7 @@ with $g_{ij}(\tau)$ as the pairwise correlation function of identical ($i = j$)
g_{ij}(\tau) = \langle s(t) \cdot s(t + \tau) \rangle = \frac{q_iq_j}{V} \int \int W(\vec{r}) P_{\mathrm{d},ij} \left( \vec{r} \,' | \vec{r},\tau \right) W(\vec{r}\,') dVdV'
\end{equation}
Note that the diffusion propagator $P_{\mathrm{d},ij}$ is now indexed, since the movement of some particle pairs may depend on each other and therefore show correlations. If particle $i$ and particle $j$ move independently, the mixed terms cancel $g_{ij}(\tau) = 0$.
-Due to the sums in \hyref{Equation}{eq12}, adding up individual contributions of sub-ensembles is allowed. A frequently used expression to cover free diffusion of similarly labelled, differently sized particles is simply the sum of correlation functions, weighted with their relative fractions $F_k = nk/n$ to the overall amplitude $G(0) = 1/n$:
+Due to the sums in \hyref{Equation}{eq12}, adding up individual contributions of sub-ensembles is allowed. A frequently used expression to cover free diffusion of similarly labelled, differently sized particles is simply the sum of correlation functions, weighted with their relative fractions $F_k = n_k/n$ to the overall amplitude $G(0) = 1/n$:
\begin{equation}
\label{eq14}
G^{\rm D}(\tau) = \sum_{k=1}^m F_k G^{\rm D}(\tau) = \frac{1}{n} \sum_{k=1}^m F_k \left(1+\frac{\tau}{\tau_{{\rm diff},k}} \right) ^{-1} \left(1+\frac{\tau}{\textit{SP}^2 \, \tau_{{\rm diff},k}} \right)
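
As a worked illustration of this weighted sum (assuming the axial term carries the usual $-1/2$ exponent of the 3D confocal model, which the diff context cuts off), the two-component case $m=2$ with $F_1+F_2=1$ reads

    G^{\rm D}(\tau) = \frac{1}{n} \left[ F_1 \left(1+\frac{\tau}{\tau_{{\rm diff},1}}\right)^{-1} \left(1+\frac{\tau}{\textit{SP}^2\,\tau_{{\rm diff},1}}\right)^{-1/2}
                    + F_2 \left(1+\frac{\tau}{\tau_{{\rm diff},2}}\right)^{-1} \left(1+\frac{\tau}{\textit{SP}^2\,\tau_{{\rm diff},2}}\right)^{-1/2} \right],

so that $G^{\rm D}(0) = 1/n$, consistent with the overall amplitude stated above.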
diff --git a/examples/sample_sessions/CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id b/examples/sample_sessions/CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id
deleted file mode 100644
index 9a57f40..0000000
--- a/examples/sample_sessions/CSFCS_DiO-in-DOPC.pcfs.REMOVED.git-id
+++ /dev/null
@@ -1 +0,0 @@
-2a7849e9f8ef288a92575788e773684d7db1d8e9
\ No newline at end of file
diff --git a/pycorrfit/__init__.py b/pycorrfit/__init__.py
index 8116907..a66e7c0 100644
--- a/pycorrfit/__init__.py
+++ b/pycorrfit/__init__.py
@@ -4,14 +4,18 @@
PyCorrFit is a tool to fit correlation curves on a logarithmic scale.
"""
-from . import doc
+from . import meta
from . import models
from . import openfile
from . import readfiles
-from . import fcs_data_set
-from .main import Main
+from .correlation import Correlation
+from .fit import Fit
+from .trace import Trace
-__version__ = doc.__version__
+__version__ = meta.get_version()
__author__ = u"Paul Müller"
__license__ = "GPL v2"
+
+# Import the GUI in the end, because it needs `__version__`.
+from .gui.main import Main
\ No newline at end of file
diff --git a/pycorrfit/__main__.py b/pycorrfit/__main__.py
index 6887328..8fc4715 100644
--- a/pycorrfit/__main__.py
+++ b/pycorrfit/__main__.py
@@ -3,11 +3,5 @@
Runs PyCorrFit
"""
-from . import doc
-from . import main
-
-## VERSION
-version = doc.__version__
-__version__ = version
-
+from .gui import main
main.Main()
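
With this change the GUI starts via the module switch, i.e. ``python -m pycorrfit``; the two lines above are all that runs. The explicit equivalent:

    # same effect as `python -m pycorrfit`:
    from pycorrfit.gui import main
    main.Main()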
diff --git a/pycorrfit/correlation.py b/pycorrfit/correlation.py
new file mode 100644
index 0000000..6e83e40
--- /dev/null
+++ b/pycorrfit/correlation.py
@@ -0,0 +1,550 @@
+# -*- coding: utf-8 -*-
+""" PyCorrFit data set
+
+Classes for FCS data evaluation.
+"""
+from __future__ import print_function, division
+
+import hashlib
+import numpy as np
+import warnings
+
+from . import models as mdls
+from . import fit
+from .trace import Trace
+
+
+
+class Correlation(object):
+ """ unifies correlation curve handling
+ """
+ def __init__(self, backgrounds=[], correlation=None, corr_type="AC",
+ filename=None, fit_algorithm="Lev-Mar",
+ fit_model=6000, fit_ival=(0,0),
+ fit_weight_data=None, fit_weight_type="none",
+ normparm=None, title=None, traces=[], verbose=1):
+ """
+ Parameters
+ ----------
+ backgrounds: list of instances of Trace
+ background traces
+ correlation: ndarray of shape (N,2)
+ correlation data (time [s], correlation)
+ corr_type: str
+ type of correlation, e.g. "AC", "AC1", "cc12"
+ filename: str
+ path to filename of correlation
+ fit_algorithm: str
+ valid fit algorithm identifier (Algorithms.keys())
+ fit_ival:
+ fitting interval of lag times in indices
+ fit_model: instance of FitModel
+ the model used for fitting
+ fit_weight_data: any
+ data for the chosen fit_weight_type
+ fit_weight_type: str
+ Reserved keywords or user-defined strings:
+ - "none" : no weights are used
+ - "splineX" : compute weights from spline with X knots
+ and a spread of `fit_weight_data` bins.
+ - "model function" : compute weights from difference
+ to model function
+ - user-defined : other weights (e.g. previously computed
+ averages given in fit_weight_data)
+ normparm: int
+ identifier of normalization parameter
+ title: str
+ user-editable title of this correlation
+ traces: list of instances of Trace
+ traces of the current correlation
+ verbose : int
+ increment to increase verbosity
+ """
+ # must be created before setting properties
+ self._backgrounds = []
+ self._correlation = None
+ self._fit_algorithm = None
+ self._fit_model = None
+ self._fit_parameters = None
+ self._fit_parameters_range = None
+ self._fit_parameters_variable = None
+ self._fit_weight_memory = dict()
+ self._lag_time = None
+ self._model_memory = dict()
+ self._traces = []
+ self._uid = None
+
+ self.verbose = verbose
+
+ self.backgrounds = backgrounds
+ self.bg_correction_enabled = True
+ self.correlation = correlation
+ self.corr_type = corr_type
+ self.filename = filename
+
+ self.fit_algorithm = fit_algorithm
+ self.fit_ival = fit_ival
+ self.fit_model = fit_model
+ # Do not change order:
+ self.fit_weight_type = fit_weight_type
+ self.fit_weight_parameters = fit_weight_data
+
+ self.normparm = normparm
+ self.title = title
+ self.traces = traces
+
+ def __repr__(self):
+ if self.is_ac:
+ c = "AC"
+ else:
+ c = "CC"
+ text = "{} correlation '{}' with {} traces".format(
+ c, self.title, len(self._traces))
+ return text
+
+
+ def background_replace(self, channel, background):
+ """
+ Replace a background.
+ Channel must be 0 or 1.
+ background must be instance of `Trace`
+ """
+ assert channel in [0, 1]
+ assert isinstance(background, Trace)
+
+ if self.is_ac:
+ if channel == 1:
+ raise ValueError("Cannot set second background for AC.")
+ self._backgrounds = [background]
+ else:
+ if len(self._backgrounds) == 0:
+ self._backgrounds = [Trace(countrate=0, duration=0), Trace(countrate=0, duration=0)]
+ elif len(self._backgrounds) == 1:
+ self._backgrounds.append(Trace(countrate=0, duration=0))
+ self._backgrounds[channel] = background
+
+ @property
+ def backgrounds(self):
+ """
+ The background trace(s) of this correlation in a list.
+ """
+ return self._backgrounds
+
+ @backgrounds.setter
+ def backgrounds(self, value):
+ """
+ Set the backgrounds. The value can either be a list of traces or
+ instances of traces or a single trace in an array.
+ """
+ backgrounds = []
+ if not isinstance(value, list):
+ value = [value]
+ assert len(value) in [0,1,2], "Backgrounds must be list with up to two elements."
+ for v in value:
+ if isinstance(v, np.ndarray):
+ backgrounds.append(Trace(trace=v))
+ elif isinstance(v, Trace):
+ backgrounds.append(v)
+ else:
+ raise ValueError("Each background must be instance of Trace or ndarray")
+ self._backgrounds = backgrounds
+
+
+ @property
+ def bg_correction_factor(self):
+ """
+ Returns background correction factor for
+ self._correlation
+
+ Notes
+ -----
+ Thompson, N. Lakowicz, J.;
+ Geddes, C. D. & Lakowicz, J. R. (ed.)
+ Fluorescence Correlation Spectroscopy
+ Topics in Fluorescence Spectroscopy,
+ Springer US, 2002, 1, 337-378
+ """
+ if not self.bg_correction_enabled:
+ # bg correction disabled
+ return 1
+
+ if self.is_ac:
+ # Autocorrelation
+ if len(self.traces) == 1 and len(self.backgrounds) == 1:
+ S = self.traces[0].countrate
+ B = self.backgrounds[0].countrate
+ bgfactor = (S/(S-B))**2
+ else:
+ if self.verbose >= 1:
+ warnings.warn("Correlation {}: no bg-correction".
+ format(self.uid))
+ bgfactor = 1
+ else:
+ # Crosscorrelation
+ if len(self.traces) == 2 and len(self.backgrounds) == 2:
+ S = self.traces[0].countrate
+ S2 = self.traces[1].countrate
+ B = self.backgrounds[0].countrate
+ B2 = self.backgrounds[1].countrate
+ bgfactor = (S/(S-B)) * (S2/(S2-B2))
+ else:
+ warnings.warn("Correlation {}: no bg-correction".
+ format(self))
+ bgfactor = 1
+ return bgfactor
+
+ def check_parms(self, parms):
+ """ Check parameters using self.fit_model.func_verification and the user defined
+ boundaries self.fit_parameters_range for each parameter.
+ """
+ p = 1.*np.array(parms)
+ r = self.fit_parameters_range
+ for i in range(len(p)):
+ if r[i][0] == r[i][1]:
+ pass
+ elif r[i][0] is None:
+ if p[i] > r[i][1]:
+ p[i] = r[i][1]
+ elif r[i][1] is None:
+ if p[i] < r[i][0]:
+ p[i] = r[i][0]
+ elif p[i] < r[i][0]:
+ p[i] = r[i][0]
+ elif p[i] > r[i][1]:
+ p[i] = r[i][1]
+ return p
+
+ @property
+ def correlation(self):
+ """the correlation data, shape (N,2) with (time, correlation) """
+ if self._correlation is not None:
+ corr = self._correlation.copy()
+ return corr
+
+ @correlation.setter
+ def correlation(self, value):
+ if value is None:
+ warnings.warn("Setting correlation to `None`.")
+ elif not isinstance(value, np.ndarray):
+ raise ValueError("Correlation must be 2d array!")
+ elif not value.shape[1] == 2:
+ raise ValueError("Correlation array must have shape (N,2)!")
+ self._correlation = value
+
+ @property
+ def correlation_fit(self):
+ """ returns correlation data for fitting (fit_ivald)
+ - background correction
+ - fitting interval cropping
+ """
+ corr = self.correlation
+ if corr is not None:
+ # perform background correction
+ corr[:,1] *= self.bg_correction_factor
+ # crop to the fitting interval
+ return corr[self.fit_ival[0]:self.fit_ival[1],:]
+
+
+ @property
+ def correlation_plot(self):
+ """ returns correlation data for plotting (normalized, fit_ivald)
+ - background correction
+ - fitting interval cropping
+ - parameter normalization
+ """
+ corr = self.correlation_fit
+ if corr is not None:
+ # perform parameter normalization
+ corr[:,1] *= self.normalize_factor
+ return corr
+
+ @property
+ def is_ac(self):
+ """True if instance contains autocorrelation"""
+ return self.corr_type.lower().count("ac") > 0
+
+ @property
+ def is_cc(self):
+ """True if instance contains crosscorrelation"""
+ return not self.is_ac
+
+ @property
+ def is_weighted_fit(self):
+ """True if a weighted fit was performed"""
+ return self.fit_weight_type != "none"
+
+ @property
+ def fit_algorithm(self):
+ """The string representing the fitting algorithm"""
+ return self._fit_algorithm
+
+ @fit_algorithm.setter
+ def fit_algorithm(self, value):
+ # TODO:
+ # - allow lower-case fitting algorithm
+ assert value in list(fit.Algorithms.keys()), "Invalid fit algorithm: "+value
+ self._fit_algorithm = value
+
+ @property
+ def fit_ival(self):
+ """lag time interval for fitting"""
+ lag = self.lag_time
+ if lag is not None:
+ if self._fit_ival[1] <= 0 or self._fit_ival[1] > lag.shape[0]:
+ self._fit_ival[1] = lag.shape[0]
+ return self._fit_ival
+
+ @fit_ival.setter
+ def fit_ival(self, value):
+ value = list(value)
+ if value[1] <= 0:
+ if self.lag_time is not None:
+ value[1] = self.lag_time.shape[0]
+ else:
+ # just to be sure
+ warnings.warn("No data available.")
+ value[1] = 10000000000000000
+ self._fit_ival = value
+
+ @property
+ def fit_model(self):
+ """instance of a fit model"""
+ return self._fit_model
+
+ @fit_model.setter
+ def fit_model(self, value):
+ """set the fit model
+ """
+ if isinstance(value, (int, long)):
+ newmodel = mdls.modeldict[value]
+ elif isinstance(value, mdls.Model):
+ newmodel = value
+ else:
+ raise NotImplementedError("Unknown model identifier")
+
+ if newmodel != self._fit_model :
+ self._fit_model = newmodel
+ # overwrite fitting parameters
+ self._fit_parameters = self._fit_model.default_values
+ self._fit_parameters_variable = self._fit_model.default_variables
+ self._fit_parameters_range = np.zeros((len(self._fit_parameters), 2))
+ self.normparm = None
+
+ @property
+ def fit_weight_data(self):
+ """data of weighted fitting"""
+ try:
+ data = self._fit_weight_memory[self.fit_weight_type]
+ except KeyError:
+ # Standard variables for weights
+ if self.fit_weight_type.count("spline"):
+ # Default area for weighting with spline fit
+ data = 3
+ else:
+ data = None
+ return data
+
+ @fit_weight_data.setter
+ def fit_weight_data(self, value):
+ self._fit_weight_memory[self.fit_weight_type] = value
+
+ @property
+ def fit_parameters(self):
+ """parameters that were fitted/will be used for fitting"""
+ # Do not return `self._fit_parameters.copy()`, because
+ # some methods of PyCorrFit depend on the array being
+ # accessible and changeable with indices.
+ return self._fit_parameters
+
+ @fit_parameters.setter
+ def fit_parameters(self, value):
+ # must unlock parameters, if change is required
+ value = np.array(value)
+ self._fit_parameters = self.check_parms(value)
+
+ @property
+ def fit_parameters_range(self):
+ """valid fitting ranges for fit parameters"""
+ model = self.fit_model.boundaries
+ mine = self._fit_parameters_range
+ new = []
+ for a, b in zip(model, mine):
+ c = [-np.inf, np.inf]
+ if a[0] != a[1]:
+ c[0] = a[0]
+ c[1] = a[1]
+ # user overrides model
+ if b[0] != b[1]:
+ c[0] = b[0]
+ c[1] = b[1]
+ if c[0] is not None and np.isnan(c[0]):
+ c[0] = -np.inf
+ if c[1] is not None and np.isnan(c[1]):
+ c[1] = np.inf
+
+ new.append(c)
+ return np.array(new)
+
+ @fit_parameters_range.setter
+ def fit_parameters_range(self, value):
+ value = np.array(value)
+ assert value.shape[1] == 2
+ assert value.shape[0] == self.fit_parameters.shape[0]
+ self._fit_parameters_range = value
+
+ @property
+ def fit_parameters_variable(self):
+ """which parameters are variable during fitting"""
+ if self._fit_parameters_variable is None:
+ self._fit_parameters_variable = np.array(self.fit_model.default_variables, dtype=bool)
+ return self._fit_parameters_variable
+
+ @fit_parameters_variable.setter
+ def fit_parameters_variable(self, value):
+ value = np.array(value, dtype=bool)
+ assert value.shape[0] == self.fit_parameters.shape[0]
+ self._fit_parameters_variable = value
+
+ @property
+ def lag_time(self):
+ """logarithmic lag time axis"""
+ if self.correlation is not None:
+ return self._correlation[:,0].copy()
+ elif self._lag_time is not None:
+ return self._lag_time
+ else:
+ # some default lag time
+ return 10**np.linspace(-6,8,1001)
+
+ @lag_time.setter
+ def lag_time(self, value):
+ if self.correlation is not None:
+ warnings.warn("Setting lag time not possible, because of existing correlation")
+ else:
+ self._lag_time = value
+
+ @property
+ def lag_time_fit(self):
+ """lag time as used for fitting"""
+ return self.lag_time[self.fit_ival[0]:self.fit_ival[1]]
+
+ @property
+ def modeled(self):
+ """fitted data values, same shape as self.correlation"""
+ # evaluate the model on the full lag-time axis
+ lag = self.lag_time
+ modeled = np.zeros((lag.shape[0], 2))
+ modeled[:,0] = lag
+ modeled[:,1] = self.fit_model(self.fit_parameters, lag)
+ return modeled.copy()
+
+ @property
+ def modeled_fit(self):
+ """fitted data values, same shape as self.correlation_fit"""
+ toplot = self.modeled[self.fit_ival[0]:self.fit_ival[1], :]
+ return toplot
+
+ @property
+ def modeled_plot(self):
+ """fitted data values, same shape as self.correlation_fit"""
+ toplot = self.modeled_fit
+ toplot[:,1] *= self.normalize_factor
+ return toplot
+
+ @property
+ def normalize_factor(self):
+ """plot normalization according to self.normparm"""
+ if self.normparm is None:
+ # nothing to do
+ return 1
+ if self.normparm < self.fit_parameters.shape[0]:
+ nfactor = self.fit_parameters[self.normparm]
+ else:
+ # get supplementary parameters
+ alt = self.fit_model.get_supplementary_values(self.fit_parameters)
+ nfactor = alt[self.normparm - self.fit_parameters.shape[0]]
+
+ return nfactor
+
+ @property
+ def residuals(self):
+ """fit residuals, same shape as self.correlation"""
+ if self.correlation is None:
+ raise ValueError("Cannot compute residuals; No correlation given!")
+ residuals = self.correlation.copy()
+ residuals[:,1] -= self.modeled[:,1]
+ return residuals
+
+ @property
+ def residuals_fit(self):
+ """fit residuals, same shape as self.correlation_fit"""
+ residuals_fit = self.correlation_fit.copy()
+ residuals_fit[:,1] -= self.modeled_fit[:,1]
+ return residuals_fit
+
+ @property
+ def residuals_plot(self):
+ """fit residuals, same shape as self.correlation_fit"""
+ cp = self.correlation_plot
+ if cp is not None:
+ residuals_plot = self.correlation_plot.copy()
+ residuals_plot[:,1] -= self.modeled_plot[:,1]
+ return residuals_plot
+
+ def set_weights(self, type_name, data):
+ """
+ Add weights for fitting.
+ example:
+ type_name : "Average"
+ data : 1d ndarray with length self.lag_time
+ """
+ if data is not None:
+ self._fit_weight_memory[type_name] = data
+
+ @property
+ def traces(self):
+ """
+ The trace(s) of this correlation in a list.
+ """
+ return self._traces
+
+ @traces.setter
+ def traces(self, value):
+ """
+ Set the traces. The value can either be a list of traces or
+ instances of traces or a single trace in an array.
+ """
+ traces = []
+ if not isinstance(value, list):
+ value = [value]
+ assert len(value) in [0,1,2], "Traces must be list with up to two elements."
+ for v in value:
+ if isinstance(v, np.ndarray):
+ traces.append(Trace(trace=v))
+ elif isinstance(v, Trace):
+ traces.append(v)
+ else:
+ raise ValueError("Each trace must be instance of Trace or ndarray")
+ self._traces = traces
+
+ if len(self._traces) == 2:
+ if self._traces[0].duration != self._traces[1].duration:
+ warnings.warn("Unequal lenght of traces: {} and {}".format(
+ self._traces[0].duration,
+ self._traces[1].duration))
+
+ @property
+ def uid(self):
+ """
+ unique identifier of this instance
+ This might change when title or filename
+ are updated.
+ """
+ if self._uid is None:
+ hasher = hashlib.sha256()
+ hasher.update(str(np.random.random()))
+ hasher.update(str(self._correlation))
+ hasher.update(str(self.filename))
+ hasher.update(str(self.title))
+ self._uid = hasher.hexdigest()
+ return self._uid
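
A short sketch of the property plumbing defined above; the numbers are made up. For an autocorrelation with one trace and one background, ``bg_correction_factor`` follows the cited Thompson formula ``(S/(S-B))**2``:

    import numpy as np
    from pycorrfit import Correlation, Trace

    tau = 10 ** np.linspace(-6, 0, 100)
    g = 1.0 / (1 + tau / 1e-3)
    corr = Correlation(correlation=np.column_stack((tau, g)),
                       corr_type="AC",
                       traces=Trace(countrate=25.0, duration=30000),      # 25 kHz, 30 s
                       backgrounds=Trace(countrate=5.0, duration=30000))

    print(corr.bg_correction_factor)   # (25/(25-5))**2 == 1.5625
    print(corr.correlation_fit[:3])    # bg-corrected, cropped to fit_ival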
diff --git a/pycorrfit/fcs_data_set.py b/pycorrfit/fit.py
similarity index 52%
rename from pycorrfit/fcs_data_set.py
rename to pycorrfit/fit.py
index 8a21b07..27c9e1e 100644
--- a/pycorrfit/fcs_data_set.py
+++ b/pycorrfit/fit.py
@@ -6,648 +6,205 @@ Classes for FCS data evaluation.
from __future__ import print_function, division
import copy
-import hashlib
import lmfit
import numpy as np
-import scipy.integrate as spintg
import scipy.interpolate as spintp
import warnings
-from . import models as mdls
-class Trace(object):
- """ unifies trace handling
+class Constraint(object):
+ """ Class to translate fit constraints to lmfit syntax.
"""
- def __init__(self, trace=None, countrate=None, duration=None,
- name=None):
- """ Load trace data
-
+ def __init__(self, constraint, fit_bool, fit_bounds, fit_values):
+ """
Parameters
----------
- trace : ndarray of shape (N, 2)
- The array contains time [ms] and count rate [kHz].
- coutrate : float
- Average count rate [kHz].
- Mandatory if `trace` is None.
- duration : float
- Duration of measurement in milliseconds.
- Mandatory if `trace` is None.
- name : str
- The name of the trace.
- """
- self._countrate = None
- self._duration = None
- self._trace = None
- self._uid = None
-
- if trace is None:
- self.countrate = countrate
- self.duration = duration
- else:
- self.trace = trace
+ constraint : list of strings and ints
+ The abstract constraint (e.g. [1, 0, "<", "2.3"]) as
+ used in the model definitions.
+ fit_bool : list of boolean
+ A list of bools indicating which parameters are varied
+ during fitting.
+ fit_bounds : list of lists of two floats
+ The parameter boundaries for fitting.
+ fit_values : list of floats
+ The initial fitting values.
- if name is None:
- name = "{:.2f}kHz, {:.0f}s".format(self.countrate,
- self.duration/1000)
- self.name = name
-
- def __getitem__(self, idx):
- return self.trace[idx]
-
- def __repr__(self):
- text = "Trace of length {:.3f}s and countrate {:.3f}kHz".format(
- self.duration/1000, self.countrate)
- return text
-
- @property
- def countrate(self):
- if self._countrate is None:
- #self._countrate = np.average(self._trace[:,1])
- # Take into account traces that have arbitrary sampling
- self._countrate = spintg.simps(self._trace[:,1], self._trace[:,0]) / self.duration
- return self._countrate
-
- @countrate.setter
- def countrate(self, value):
- assert value is not None, "Setting value with None forbidden!"
- assert self._trace is None, "Setting value impossible, "+\
- "if `self.trace` is set."
- self._countrate = value
-
- @property
- def duration(self):
- if not hasattr(self, "_duration") or self._duration is None:
- self._duration = self._trace[-1,0] - self._trace[0,0]
- return self._duration
-
- @duration.setter
- def duration(self, value):
- assert value is not None, "Setting value with None forbidden!"
- assert self._trace is None, "Setting value impossible, "+\
- "if `self.trace` is set."
- self._duration = value
-
- @property
- def uid(self):
- if self._uid is None:
- hasher = hashlib.sha256()
- hasher.update(str(np.random.random()))
- hasher.update(str(self.trace))
- hasher.update(self.name)
- self._uid = hasher.hexdigest()
- return self._uid
+ Notes
+ -----
+ - the first item in constraints must be an integer indexing a parameter
+ - the second/third item must be an integer as well or an operator (">", "<")
+ - if the fourth item is omitted, it is assumed to be "0"
+ - the first integer must be larger than the second integer
+ """
+ if len(constraint) == 3:
+ constraint.append("0")
+
+ self.constraint = constraint
+ self.fit_bool = fit_bool
+ self.fit_bounds = fit_bounds
+ self.fit_values = fit_values
@property
- def trace(self):
- if self._trace is None:
- self._trace = np.array([ [0, self.countrate],
- [self.duration, self.countrate]
- ])
- return self._trace
-
- @trace.setter
- def trace(self, value):
- assert value is not None, "Setting value with None forbidden!"
- assert isinstance(value, np.ndarray), "value must be array!"
- assert value.shape[1] == 2, "shape of array must be (N,2)!"
- self._trace = value
- # self.countrate is set automagically
-
-
-class Correlation(object):
- """ unifies correlation curve handling
- """
- def __init__(self, backgrounds=[], correlation=None, corr_type="AC",
- filename=None, fit_algorithm="Lev-Mar",
- fit_model=6000, fit_ival=(0,0),
- fit_weight_data=None, fit_weight_type="none",
- normparm=None, title=None, traces=[], verbose=1):
+ def parameters(self):
"""
- Parameters
- ----------
- backgrounds: list of instances of Trace
- background traces
- correlation: ndarray of shape (N,2)
- correlation data (time [s], correlation)
- corr_type: str
- type of correlation, e.g. "AC", "AC1", "cc12"
- filename: str
- path to filename of correlation
- fit_algorithm: str
- valid fit algorithm identifier (Algorithms.keys())
- fit_ival:
- fitting interval of lag times in indices
- fit_model: instance of FitModel
- the model used for fitting
- fit_weight_data: any
- data for the certain fit_weight_type
- fit_weight_type: str
- Reserved keywords or user-defined strings:
- - "none" : no weights are used
- - "splineX" : compute weights from spline with X knots
- and a spread of `fit_weight_data` bins.
- - "model function" : compute weights from difference
- to model function
- - user-defined : other weights (e.g. previously computed
- averages given in fit_weight_data)
- normparm: int
- identifier of normalization parameter
- title: str
- user-editable title of this correlation
- traces: list of instances of Trace
- traces of the current correlation
- verbose : int
- increment to increase verbosity
+ Returns list of dict for each parameter.
"""
- # must be created before setting properties
- self._backgrounds = []
- self._correlation = None
- self._fit_algorithm = None
- self._fit_model = None
- self._fit_parameters = None
- self._fit_parameters_range = None
- self._fit_parameters_variable = None
- self._fit_weight_memory = dict()
- self._lag_time = None
- self._model_memory = dict()
- self._traces = []
- self._uid = None
-
- self.verbose = verbose
-
- self.backgrounds = backgrounds
- self.bg_correction_enabled = True
- self.correlation = correlation
- self.corr_type = corr_type
- self.filename = filename
+ parms = [ it for it in self.constraint if isinstance(it, (int, long))]
+ id2 = self.constraint.index(parms[1])
- self.fit_algorithm = fit_algorithm
- self.fit_ival = fit_ival
- self.fit_model = fit_model
- # Do not change order:
- self.fit_weight_type = fit_weight_type
- self.fit_weight_parameters = fit_weight_data
-
- self.normparm = normparm
- self.title = title
- self.traces = traces
-
- def __repr__(self):
- if self.is_ac:
- c = "AC"
- else:
- c = "CC"
- text = "{} correlation '{}' with {} traces".format(
- c, self.title, len(self._traces))
- return text
-
-
- def background_replace(self, channel, background):
- """
- Replace a background.
- Channel must be 0 or 1.
- background must be instance of `Trace`
- """
- assert channel in [0, 1]
- assert isinstance(background, Trace)
+ p1 = {"id": parms[0],
+ "bool": self.fit_bool[parms[0]],
+ "sign": +1,
+ "value": self.fit_values[parms[0]]
+ }
- if self.is_ac:
- if channel == 1:
- raise ValueError("Cannot set second background for AC.")
- self._backgrounds = [background]
- else:
- if len(self._backgrounds) == 0:
- self._backgrounds = [Trace(countrate=0, duration=0), Trace(countrate=0, duration=0)]
- elif len(self._backgrounds) == 1:
- self._backgrounds.append(Trace(countrate=0, duration=0))
- self._backgrounds[channel] = background
+ p2 = {"id": parms[1],
+ "bool": self.fit_bool[parms[1]],
+ "sign": ( +1 if id2==2 else -1),
+ "value": self.fit_values[parms[1]]
+ }
+
+ return p1, p2
@property
- def backgrounds(self):
- """
- The background trace(s) of this correlation in a list.
- """
- return self._backgrounds
-
- @backgrounds.setter
- def backgrounds(self, value):
- """
- Set the backgrounds. The value can either be a list of traces or
- instances of traces or a single trace in an array.
- """
- backgrounds = []
- if not isinstance(value, list):
- value = [value]
- assert len(value) in [0,1,2], "Backgrounds must be list with up to two elements."
- for v in value:
- if isinstance(v, np.ndarray):
- backgrounds.append(Trace(trace=v))
- elif isinstance(v, Trace):
- backgrounds.append(v)
- else:
- raise ValueError("Each background must be instance of Trace or ndarray")
- self._backgrounds = backgrounds
-
+ def operator(self):
+ strval = [ it for it in self.constraint if not isinstance(it, (int, long))]
+ return strval[0]
@property
- def bg_correction_factor(self):
+ def offset(self):
+ return float(self.constraint[-1])
+
+ def update_fit_bounds(self):
"""
- Returns background correction factor for
- self._correlation
+ Update the bounds with the given constraint. This only applies
+ if one of the parameters is not varied during fitting.
Notes
-----
- Thompson, N. Lakowicz, J.;
- Geddes, C. D. & Lakowicz, J. R. (ed.)
- Fluorescence Correlation Spectroscopy
- Topics in Fluorescence Spectroscopy,
- Springer US, 2002, 1, 337-378
- """
- if not self.bg_correction_enabled:
- # bg correction disabled
- return 1
-
- if self.is_ac:
- # Autocorrelation
- if len(self.traces) == 1 and len(self.backgrounds) == 1:
- S = self.traces[0].countrate
- B = self.backgrounds[0].countrate
- bgfactor = (S/(S-B))**2
- else:
- if self.verbose >= 1:
- warnings.warn("Correlation {}: no bg-correction".
- format(self.uid))
- bgfactor = 1
- else:
- # Crosscorrelation
- if len(self.traces) == 2 and len(self.backgrounds) == 2:
- S = self.traces[0].countrate
- S2 = self.traces[1].countrate
- B = self.backgrounds[0].countrate
- B2 = self.backgrounds[1].countrate
- bgfactor = (S/(S-B)) * (S2/(S2-B2))
- else:
- warnings.warn("Correlation {}: no bg-correction".
- format(self))
- bgfactor = 1
- return bgfactor
-
- def check_parms(self, parms):
- """ Check parameters using self.fit_model.func_verification and the user defined
- boundaries self.fit_parameters_range for each parameter.
- """
- p = 1.*np.array(parms)
- r = self.fit_parameters_range
- for i in range(len(p)):
- if r[i][0] == r[i][1]:
- pass
- elif r[i][0] is None:
- if p[i] > r[i][1]:
- p[i] = r[i][1]
- elif r[i][1] is None:
- if p[i] < r[i][0]:
- p[i] = r[i][1]
- elif p[i] < r[i][0]:
- p[i] = r[i][0]
- elif p[i] > r[i][1]:
- p[i] = r[i][1]
- return p
-
- @property
- def correlation(self):
- """the correlation data, shape (N,2) with (time, correlation) """
- if self._correlation is not None:
- corr = self._correlation.copy()
- return corr
-
- @correlation.setter
- def correlation(self, value):
- if value is None:
- warnings.warn("Setting correlation to `None`.")
- elif not isinstance(value, np.ndarray):
- raise ValueError("Correlation must be 2d array!")
- elif not value.shape[1] == 2:
- raise ValueError("Correlation array must have shape (N,2)!")
- self._correlation = value
-
- @property
- def correlation_fit(self):
- """ returns correlation data for fitting (fit_ivald)
- - background correction
- - fitting interval cropping
+ The fitting boundaries are updated in-place (`fit_bounds` variable
+ from `__init__`).
"""
- corr = self.correlation
- if corr is not None:
- # perform background correction
- corr[:,1] *= self.bg_correction_factor
- # perform parameter normalization
- return corr[self.fit_ival[0]:self.fit_ival[1],:]
+ p1, p2 = self.parameters
+ op = self.operator
+ os = self.offset
+ assert op in ["<", ">"], "Constraint operator not supported"
-
- @property
- def correlation_plot(self):
- """ returns correlation data for plotting (normalized, fit_ivald)
- - background correction
- - fitting interval cropping
- - parameter normalization
- """
- corr = self.correlation_fit
- if corr is not None:
- # perform parameter normalization
- corr[:,1] *= self.normalize_factor
- return corr
-
- @property
- def is_ac(self):
- """True if instance contains autocorrelation"""
- return self.corr_type.lower().count("ac") > 0
-
- @property
- def is_cc(self):
- """True if instance contains crosscorrelation"""
- return not self.is_ac
-
- @property
- def is_weighted_fit(self):
- """True if a weighted fit was performed"""
- return self.fit_weight_type != "none"
-
- @property
- def fit_algorithm(self):
- """The string representing the fitting algorithm"""
- return self._fit_algorithm
-
- @fit_algorithm.setter
- def fit_algorithm(self, value):
- # TODO:
- # - allow lower-case fitting algorithm
- assert value in list(Algorithms.keys()), "Invalid fit algorithm: "+value
- self._fit_algorithm = value
-
- @property
- def fit_model(self):
- """instance of a fit model"""
- return self._fit_model
-
- @fit_model.setter
- def fit_model(self, value):
- """set the fit model
- """
- if isinstance(value, (int, long)):
- newmodel = mdls.modeldict[value]
- elif isinstance(value, mdls.Model):
- newmodel = value
- else:
- raise NotImplementedError("Unknown model identifier")
- if newmodel != self._fit_model :
- self._fit_model = newmodel
- # overwrite fitting parameters
- self._fit_parameters = self._fit_model.default_values
- self._fit_parameters_variables = self._fit_model.default_variables
- self._fit_parameters_range = np.zeros((len(self._fit_parameters), 2))
- self.normalize_parm = None
-
- @property
- def fit_ival(self):
- """lag time interval for fitting"""
- lag = self.lag_time
- if lag is not None:
- if self._fit_ival[1] <= 0 or self._fit_ival[1] > lag.shape[0]:
- self._fit_ival[1] = lag.shape[0]
- return self._fit_ival
-
- @fit_ival.setter
- def fit_ival(self, value):
- value = list(value)
- if value[1] <= 0:
- if self.lag_time is not None:
- value[1] = self.lag_time.shape[0]
+ if p1["bool"] and p2["bool"]:
+ # do nothing, this case is handled in `get_lmfit_parameter_kwargs`
+ pass
+ elif p1["bool"]:
+ # only parameter 1 is varied
+ if op == "<":
+ # [3, "<", 1, "0"] -> p1 < p2
+ # [3, 1, "<", "0"] -> p1 < -p2
+ # [3, 1, "<", "1.2] -> p1 < -p2 + 1.2
+ # [3, "<", 1, "1.2] -> p1 < p2 + 1.2
+ bnd = [-np.inf, p2["sign"]*p2["value"] + os]
else:
- # just to be sure
- warnings.warn("No data available.")
- value[1] = 10000000000000000
- self._fit_ival = value
-
- @property
- def fit_weight_data(self):
- """data of weighted fitting"""
- try:
- data = self._fit_weight_memory[self.fit_weight_type]
- except KeyError:
- # Standard variables for weights
- if self.fit_weight_type.count("spline"):
- # Default area for weighting with spline fit
- data = 3
+ # [3, ">", 1, "0"] -> p1 > p2
+ # [3, 1, ">", "0"] -> p1 > -p2
+ # [3, 1, ">", "1.2] -> p1 > -p2 + 1.2
+ # [3, ">", 1, "1.2] -> p1 > p2 + 1.2
+ bnd = [p2["sign"]*p2["value"] + os, np.inf]
+ bound = [max(self.fit_bounds[p1["id"]][0], bnd[0]),
+ min(self.fit_bounds[p1["id"]][1], bnd[1]),
+ ]
+ self.fit_bounds[p1["id"]] = bound
+ elif p2["bool"]:
+ # only parameter2 is varied
+ if op == "<":
+ # [3, "<", 1, "0"] -> p2 > p1
+ # [3, 1, "<", "0"] -> (-)p2 > p1 -> p2 < -p1
+ # [3, 1, "<", "1.2] -> p2 < -p1 + 1.2 = -(p1-1.2)
+ # [3, "<", 1, "1.2] -> p2 > p1 - 1.2 = +(p1-1.2)
+ if p2["sign"] == -1:
+ bnd = [-np.inf, -(p1["value"] - os)]
+ else:
+ bnd = [(p1["value"] - os), np.inf]
else:
- data = None
- return data
-
- @fit_weight_data.setter
- def fit_weight_data(self, value):
- self._fit_weight_memory[self.fit_weight_type] = value
-
- @property
- def fit_parameters(self):
- """parameters that were fitted/will be used for fitting"""
- # Do not return `self._fit_parameters.copy()`, because
- # some methods of PyCorrFit depende on the array being
- # accessible and changeable with indices.
- return self._fit_parameters
-
- @fit_parameters.setter
- def fit_parameters(self, value):
- # must unlock parameters, if change is required
- value = np.array(value)
- self._fit_parameters = self.check_parms(value)
-
- @property
- def fit_parameters_range(self):
- """valid fitting ranges for fit parameters"""
- model = self.fit_model.boundaries
- mine = self._fit_parameters_range
- new = []
- for a, b in zip(model, mine):
- c = [-np.inf, np.inf]
- if a[0] != a[1]:
- c[0] = a[0]
- c[1] = a[1]
- # user overrides model
- if b[0] != b[1]:
- c[0] = b[0]
- c[1] = b[1]
- if c[0] is not None and np.isnan(c[0]):
- c[0] = -np.inf
- if c[1] is not None and np.isnan(c[1]):
- c[1] = np.inf
-
- new.append(c)
- return np.array(new)
-
- @fit_parameters_range.setter
- def fit_parameters_range(self, value):
- value = np.array(value)
- assert value.shape[1] == 2
- assert value.shape[0] == self.fit_parameters.shape[0]
- self._fit_parameters_range = value
-
- @property
- def fit_parameters_variable(self):
- """which parameters are variable during fitting"""
- if self._fit_parameters_variable is None:
- self._fit_parameters_variable = np.array(self.fit_model.default_variables, dtype=bool)
- return self._fit_parameters_variable
-
- @fit_parameters_variable.setter
- def fit_parameters_variable(self, value):
- value = np.array(value, dtype=bool)
- assert value.shape[0] == self.fit_parameters.shape[0]
- self._fit_parameters_variable = value
-
- @property
- def lag_time(self):
- """logarithmic lag time axis"""
- if self.correlation is not None:
- return self._correlation[:,0].copy()
- elif self._lag_time is not None:
- return self._lag_time
- else:
- # some default lag time
- return 10**np.linspace(-6,8,1001)
-
- @lag_time.setter
- def lag_time(self, value):
- if self.correlation is not None:
- warnings.warn("Setting lag time not possible, because of existing correlation")
+ # [3, ">", 1, "0"] -> p2 < p1
+ # [3, 1, ">", "0"] -> p2 > -p1
+ # [3, 1, ">", "1.2] -> p2 > -(p1 - 1.2)
+ # [3, ">", 1, "1.2] -> p2 < p1 - 1.2
+ if p2["sign"] == -1:
+ bnd = [-(p1["value"] - os), np.inf]
+ else:
+ bnd = [-np.inf, p1["value"] - os]
+ bound = [max(self.fit_bounds[p2["id"]][0], bnd[0]),
+ min(self.fit_bounds[p2["id"]][1], bnd[1]),
+ ]
+ self.fit_bounds[p2["id"]] = bound
else:
- self._lag_time = value
-
- @property
- def lag_time_fit(self):
- """lag time as used for fitting"""
- return self.lag_time[self.fit_ival[0]:self.fit_ival[1]]
-
- @property
- def modeled(self):
- """fitted data values, same shape as self.correlation"""
- # perform parameter normalization
- lag = self.lag_time
- modeled = np.zeros((lag.shape[0], 2))
- modeled[:,0] = lag
- modeled[:,1] = self.fit_model(self.fit_parameters, lag)
- return modeled.copy()
-
- @property
- def modeled_fit(self):
- """fitted data values, same shape as self.correlation_fit"""
- toplot = self.modeled[self.fit_ival[0]:self.fit_ival[1], :]
- return toplot
-
- @property
- def modeled_plot(self):
- """fitted data values, same shape as self.correlation_fit"""
- toplot = self.modeled_fit
- toplot[:,1] *= self.normalize_factor
- return toplot
-
- @property
- def normalize_factor(self):
- """plot normalization according to self.normparm"""
- if self.normparm is None:
- # nothing to do
- return 1
- if self.normparm < self.fit_parameters.shape[0]:
- nfactor = self.fit_parameters[self.normparm]
+ # neither is varied.
+ # Do nothing.
+ pass
+ return self.fit_bounds
+
+ def get_lmfit_parameter_kwargs(self):
+ """
+ Using the given constraint, update the list of lmfit
+ parameters.
+ """
+ p1, p2 = self.parameters
+ op = self.operator
+ ofs = self.offset
+ assert op in ["<", ">"], "Constraint operator not supported"
+
+
+ if p1["bool"] and p2["bool"]:
+ if op == "<":
+ #p1 < (-)p2 + 1.2
+ #-> p1 = (-)p2 - d12 + 1.2
+ #-> d12 = (-)p2 - p1 + 1.2
+ #-> d12 > 0
+ deltaname = "delta_{}_{}".format(p1["id"], p2["id"])
+ kwdelt = {}
+ kwdelt["name"] = deltaname
+ kwdelt["value"] = p2["bool"]*self.fit_values[p2["id"]] - self.fit_values[p1["id"]]
+ kwdelt["vary"] = True
+ kwdelt["min"] = 0 # note: enforces "<=" (not "<")
+ kwdelt["max"] = np.inf
+
+ kwp1 = {}
+ kwp1["name"] = "parm{:04d}".format(p1["id"])
+ # this condition deals with negative numbers
+ kwp1["expr"] = "{MIN} if {COMP} < {MIN} else {MAX} if {COMP} > {MAX} else {COMP}".format(
+ COMP="{}*parm{:04d}-{}+{:.14f}".format(p2["sign"], p2["id"], deltaname, ofs),
+ MIN=self.fit_bounds[p1["id"]][0],
+ MAX=self.fit_bounds[p1["id"]][1])
+ kwargs = [kwdelt, kwp1]
+ elif op == ">":
+ #p1 > (-)p2 + 1.2
+ #-> p1 = (-)p2 + d12 + 1.2
+ #-> d12 = p1 - (-)p2 - 1.2
+ #-> d12 > 0
+ deltaname = "delta_{}_{}".format(p1["id"], p2["id"])
+ kwdelt = {}
+ kwdelt["name"] = deltaname
+ kwdelt["value"] = self.fit_values[p1["id"]] - p2["bool"]*self.fit_values[p2["id"]]
+ kwdelt["vary"] = True
+ kwdelt["min"] = 0 # note: enforces ">=" (not ">")
+ kwdelt["max"] = np.inf #self.fit_bounds[p1["id"]][1] + max(-p2["sign"]*self.fit_bounds[p2["id"]]) - ofs
+
+ kwp1 = {}
+ kwp1["name"] = "parm{:04d}".format(p1["id"])
+ # this condition deals with negative numbers
+ kwp1["expr"] = "{MIN} if {COMP} < {MIN} else {MAX} if {COMP} > {MAX} else {COMP}".format(
+ COMP="{}*parm{:04d}+{}+{:.14f}".format(p2["sign"], p2["id"], deltaname, ofs),
+ MIN=self.fit_bounds[p1["id"]][0],
+ MAX=self.fit_bounds[p1["id"]][1])
+
+ kwargs = [kwdelt, kwp1]
+
else:
- # get supplementary parameters
- alt = self.fit_model.get_supplementary_values(self.fit_parameters)
- nfactor = alt[self.normparm - self.fit_parameters.shape[0]]
+ kwargs = None
- return nfactor
-
- @property
- def residuals(self):
- """fit residuals, same shape as self.correlation"""
- if self.correlation is None:
- raise ValueError("Cannot compute residuals; No correlation given!")
- residuals = self.correlation.copy()
- residuals[:,1] -= self.modeled[:,1]
- return residuals
-
- @property
- def residuals_fit(self):
- """fit residuals, same shape as self.correlation_fit"""
- residuals_fit = self.correlation_fit.copy()
- residuals_fit[:,1] -= self.modeled_fit[:,1]
- return residuals_fit
-
- @property
- def residuals_plot(self):
- """fit residuals, same shape as self.correlation_fit"""
- cp = self.correlation_plot
- if cp is not None:
- residuals_plot = self.correlation_plot.copy()
- residuals_plot[:,1] -= self.modeled_plot[:,1]
- return residuals_plot
-
- def set_weights(self, type_name, data):
- """
- Add weights for fitting.
- example:
- type_name : "Average"
- data : 1d ndarray with length self.lag_time
- """
- if data is not None:
- self._fit_weight_memory[type_name] = data
-
- @property
- def traces(self):
- """
- The trace(s) of this correlation in a list.
- """
- return self._traces
-
- @traces.setter
- def traces(self, value):
- """
- Set the traces. The value can either be a list of traces or
- instances of traces or a single trace in an array.
- """
- traces = []
- if not isinstance(value, list):
- value = [value]
- assert len(value) in [0,1,2], "Traces must be list with up to two elements."
- for v in value:
- if isinstance(v, np.ndarray):
- traces.append(Trace(trace=v))
- elif isinstance(v, Trace):
- traces.append(v)
- else:
- raise ValueError("Each trace must be instance of Trace or ndarray")
- self._traces = traces
-
- if len(self._traces) == 2:
- if self._traces[0].duration != self._traces[1].duration:
- warnings.warn("Unequal lenght of traces: {} and {}".format(
- self._traces[0].duration,
- self._traces[1].duration))
-
- @property
- def uid(self):
- """
- unique identifier of this instance
- This might change when title or filename
- are updated.
- """
- if self._uid is None:
- hasher = hashlib.sha256()
- hasher.update(str(np.random.random()))
- hasher.update(str(self._correlation))
- hasher.update(str(self.filename))
- hasher.update(str(self.title))
- self._uid = hasher.hexdigest()
- return self._uid
-
+ return kwargs
+
+
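
Note on the two branches above: both implement the same reparametrization
trick. An inequality constraint p1 < p2 (or p1 > p2) is rewritten as an
equality p1 = p2 -/+ d with a non-negative slack parameter d that lmfit is
free to vary. A minimal sketch of the idea, with hypothetical parameter
names and values:

    import lmfit

    params = lmfit.Parameters()
    params.add("parm0001", value=5.0, min=0)   # the free parameter p2
    params.add("delta_0_1", value=2.0, min=0)  # slack d = p1 - p2 >= 0
    # p1 is tied to p2 plus the slack, so p1 >= p2 holds during the fit
    params.add("parm0000", expr="parm0001 + delta_0_1")

Because the slack has min=0, the constraint is effectively ">=" rather than
a strict ">", as the comment on kwdelt["min"] above already notes.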
class Fit(object):
""" Used for fitting FCS data to models.
@@ -660,7 +217,7 @@ class Fit(object):
Parameters
----------
- correlations: list of instances of Correlation
+ correlations: list of instances of `pycorrfit.Correlation`
Correlations to fit.
global fit : bool
Perform global fit. The default behavior is
@@ -680,7 +237,7 @@ class Fit(object):
"""
assert len(global_fit_variables)==0, "not implemented"
- if isinstance(correlations, Correlation):
+ if not isinstance(correlations, list):
correlations = [correlations]
self.correlations = correlations
@@ -852,9 +409,9 @@ class Fit(object):
"chi2 type" : self.chi_squared_type,
"weighted fit" : c.is_weighted_fit,
"fit algorithm" : c.fit_algorithm,
- "fit result" : c.fit_parameters.copy(),
- "fit parameters" : np.where(c.fit_parameters_variable)[0],
- "fit weights" : self.compute_weights(c)
+ "fit result" : 1*c.fit_parameters,
+ "fit parameters" : 1*np.where(c.fit_parameters_variable)[0],
+ "fit weights" : 1*self.compute_weights(c)
}
@@ -1179,104 +736,40 @@ class Fit(object):
# must be defined in such a way, that a parameter with a larger
# index number is dependent on only one parameter with a lower
# index number, e.g. parm1>parm0, parm3<parm1, etc..
- cstrnew = {}
- bound = np.array(self.fit_bound).copy()
- for cc in self.constraints:
- if self.fit_bool[cc[0]] and self.fit_bool[cc[2]]:
- # Both cc[0] and cc[2] are varied.
- # Everything will work fine, independent of
- # whether cc[2] is varied or not.
- cstrnew[cc[0]] = [cc[1], cc[2]]
- elif self.fit_bool[cc[0]]:
- # Only cc[0] is varied, create boundary
- if cc[1] == "<":
- # maximum
- bnd = [-np.inf, self.fit_parm[cc[2]]]
- elif cc[1] == ">":
- # minimum
- bnd = [self.fit_parm[cc[2]], np.inf]
- # update boundaries if necessary
- bound[cc[0]] = [max(bound[cc[0]][0], bnd[0]),
- min(bound[cc[0]][1], bnd[1])]
- elif self.fit_bool[cc[2]]:
- # Only cc[2] is varied, create boundary
- if cc[1] == "<":
- # minimum boundary
- bnd = [self.fit_parm[cc[0]], np.inf]
- elif cc[1] == ">":
- # maximum boundary
- bnd = [-np.inf, self.fit_parm[cc[0]]]
- # update boundaries if necessary
- bound[cc[2]] = [max(bound[cc[2]][0], bnd[0]),
- min(bound[cc[2]][1], bnd[1])]
- else:
- # Neither cc[0] nor cc[2] are varied.
- # Do nothing.
- pass
-
- # Third, setup all variable parameters with the necessary constraints.
+ #
+ # Constraints have the three-element form:
+ # [1, "<", 0] -> parm1 < parm0
+ # [3, ">", 1] -> parm3 > parm1
+ # or the four-element form:
+ # [1, "<", 0, "2.3"]] -> parm1 < parm0 + 2.3
+ # [1, 0, "<", "2.3"]] -> parm1 + parm0 < 2.3
for pp in range(len(self.fit_parm)):
if self.fit_bool[pp]:
- # analyze constraints using lmfit:
- if pp in cstrnew:
- # constrained parameters
- ppref = cstrnew[pp][1]
- rel = cstrnew[pp][0]
- #TODO:
- # - combine the two following cases for better readability
- if rel == "<":
- #p2 < p1
- #-> p2 = p1 - d21
- #-> d21 = p1 - p2
- #-> d21 > 0
- deltaname="delta_{}_{}".format(pp, ppref)
- params.add(lmfit.Parameter(name=deltaname,
- value=self.fit_parm[ppref]-self.fit_parm[pp],
- vary=self.fit_bool[pp],
- min=0,
- max=np.inf,
- ))
- ppcomp = "parm{:04d}-{}".format(ppref, deltaname)
- params.add(lmfit.Parameter(name="parm{:04d}".format(pp),
- # this condition deals with negative numbers
- expr="{MIN} if {COMP} < {MIN} else {MAX} if {COMP} > {MAX} else {COMP}".format(
- COMP=ppcomp,
- MIN=bound[pp][0],
- MAX=bound[pp][1])
- ))
- elif rel == ">":
- # The opposite of the above case
- #p2 > p1
- #-> p2 = p1 + d21
- #-> d21 = p2 - p1
- #-> d21 > 0
- deltaname="delta_{}_{}".format(pp, ppref)
- params.add(lmfit.Parameter(name=deltaname,
- value=self.fit_parm[pp]-self.fit_parm[ppref],
- vary=self.fit_bool[pp],
- min=0,
- max=np.inf,
- ))
- ppcomp = "parm{:04d}+{}".format(ppref, deltaname)
- params.add(lmfit.Parameter(name="parm{:04d}".format(pp),
- # this condition deals with negative numbers
- expr="{MIN} if {COMP} < {MIN} else {MAX} if {COMP} > {MAX} else {COMP}".format(
- COMP=ppcomp,
- MIN=bound[pp][0],
- MAX=bound[pp][1])
- ))
- else:
- raise NotImplementedError("Only '<' and '>' are allowed constraints!")
-
- else:
- ## normal parameter
- params.add(lmfit.Parameter(name="parm{:04d}".format(pp),
- value=self.fit_parm[pp],
- vary=self.fit_bool[pp],
- min=bound[pp][0],
- max=bound[pp][1],
- )
- )
+ inconstr = len([ cc for cc in self.constraints if pp in cc])
+ kwarglist = []
+
+ if inconstr:
+ for cc in self.constraints:
+ con = Constraint(constraint=cc,
+ fit_bool=self.fit_bool,
+ fit_bounds=self.fit_bound,
+ fit_values=self.fit_parm)
+ self.fit_bound = con.update_fit_bounds()
+ if con.parameters[0]["id"] == pp:
+ kws = con.get_lmfit_parameter_kwargs()
+ if kws is not None:
+ kwarglist += kws
+ if len(kwarglist) == 0:
+ # normal parameter
+ kwarglist += [{"name": "parm{:04d}".format(pp),
+ "value": self.fit_parm[pp],
+ "vary": True,
+ "min": self.fit_bound[pp][0],
+ "max": self.fit_bound[pp][1],
+ }]
+ for kw in kwarglist:
+ params.add(lmfit.Parameter(**kw))
+
return params
@staticmethod
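
The constraint lists referenced here are index-based: a three-element
constraint [i, op, j] reads "parm_i op parm_j". A small self-contained
illustration with assumed parameter values:

    # hypothetical parameters [n, tau_diff, tau_trip] and the model
    # constraint [2, "<", 1] (triplet time below diffusion time)
    fit_parm = [4.0, 0.4, 0.001]
    idx1, op, idx2 = [2, "<", 1]
    assert fit_parm[idx1] < fit_parm[idx2]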
diff --git a/pycorrfit/gui/__init__.py b/pycorrfit/gui/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pycorrfit/doc.py b/pycorrfit/gui/doc.py
similarity index 77%
rename from pycorrfit/doc.py
rename to pycorrfit/gui/doc.py
index 7eedd12..37016b2 100755
--- a/pycorrfit/doc.py
+++ b/pycorrfit/gui/doc.py
@@ -41,47 +41,19 @@ except ImportError:
import wx
import yaml
-import readfiles
-from readfiles import read_pt3_scripts
-
-
-def GetLocationOfFile(filename):
- dirname = os.path.dirname(os.path.abspath(__file__))
- locations = [
- os.path.realpath(dirname+"/../"),
- os.path.realpath(dirname+"/../pycorrfit_doc/"),
- os.path.realpath(dirname+"/../doc/"),
- ]
-
- for i in range(len(locations)):
- # check /usr/lib64/32 -> /usr/lib
- for larch in ["lib32", "lib64"]:
- if dirname.count(larch):
- locations.append(locations[i].replace(larch, "lib", 1))
-
- ## frozen binaries:
- if hasattr(sys, 'frozen'):
- try:
- adir = sys._MEIPASS + "/doc/" # @UndefinedVariable
- except:
- adir = "./"
- locations.append(os.path.realpath(adir))
- for loc in locations:
- thechl = os.path.join(loc,filename)
- if os.path.exists(thechl):
- return thechl
- break
- # if this does not work:
- return None
+import pycorrfit
+from pycorrfit import readfiles, meta
+from pycorrfit.readfiles import read_pt3_scripts
+__version__ = pycorrfit.__version__
def GetLocationOfChangeLog(filename = "ChangeLog.txt"):
- return GetLocationOfFile(filename)
+ return meta.get_file_location(filename)
def GetLocationOfDocumentation(filename = "PyCorrFit_doc.pdf"):
""" Returns the location of the documentation if there is any."""
- return GetLocationOfFile(filename)
+ return meta.get_file_location(filename)
def info(version):
@@ -175,14 +147,6 @@ HomePage = "http://pycorrfit.craban.de/"
ChangeLog = "ChangeLog.txt"
StaticChangeLog = GetLocationOfChangeLog(ChangeLog)
-# Check if we can extract the version
-try:
- clfile = open(StaticChangeLog, 'r')
- __version__ = clfile.readline().strip()
- clfile.close()
-except:
- __version__ = "0.0.0-unknown"
-
# Github homepage
GitChLog = "https://raw.github.com/FCS-analysis/PyCorrFit/master/ChangeLog.txt"
diff --git a/pycorrfit/edclasses.py b/pycorrfit/gui/edclasses.py
similarity index 100%
rename from pycorrfit/edclasses.py
rename to pycorrfit/gui/edclasses.py
diff --git a/pycorrfit/frontend.py b/pycorrfit/gui/frontend.py
similarity index 98%
rename from pycorrfit/frontend.py
rename to pycorrfit/gui/frontend.py
index 402c745..144b5cf 100644
--- a/pycorrfit/frontend.py
+++ b/pycorrfit/gui/frontend.py
@@ -9,6 +9,7 @@ functions and modules are called from here.
from distutils.version import LooseVersion # For version checking
import os
import webbrowser
+import wx
import wx.lib.agw.flatnotebook as fnb # Flatnotebook (Tabs)
import wx.py.shell
import numpy as np # NumPy
@@ -28,16 +29,21 @@ except ImportError:
from . import doc # Documentation/some texts
from . import edclasses
-from . import models as mdls
-from . import openfile as opf # How to treat an opened file
-from . import page
try:
from . import plotting
except ImportError:
warnings.warn("Submodule `pycorrfit.plotting` will not be "+\
"available. Reason: {}.".format(sys.exc_info()[1].message))
-from . import readfiles
-from . import tools # Some tools
+
+
+from pycorrfit import models as mdls
+from pycorrfit import openfile as opf
+from pycorrfit import readfiles
+from pycorrfit import meta
+
+
+from . import page
+from . import tools
from . import usermodel
@@ -51,7 +57,7 @@ class ExceptionDialog(wx.MessageDialog):
########################################################################
-class FlatNotebookDemo(fnb.FlatNotebook):
+class FlatNotebook(fnb.FlatNotebook):
"""
Flatnotebook class
"""
@@ -156,7 +162,7 @@ class MyFrame(wx.Frame):
panel = wx.Panel(self)
self.panel = panel
- self.notebook = FlatNotebookDemo(panel)
+ self.notebook = FlatNotebook(panel)
self.notebook.SetRightClickMenu(self.curmenu)
#self.notebook.SetAGWWindowStyleFlag(FNB_X_ON_TAB)
@@ -228,8 +234,7 @@ class MyFrame(wx.Frame):
if select:
# A hack to have the last page displayed in the tab menu:
Npag = self.notebook.GetPageCount()
- for i in range(int(Npag)):
- self.notebook.SetSelection(i)
+ self.notebook.SetSelection(Npag-1)
#self.Thaw()
self.tabcounter = self.tabcounter + 1
@@ -940,11 +945,11 @@ class MyFrame(wx.Frame):
# do nothing
return
## Check if we can use latex for plotting:
- r1 = misc.findprogram("latex")[0]
- r2 = misc.findprogram("dvipng")[0]
+ r1 = meta.find_program("latex")[0]
+ r2 = meta.find_program("dvipng")[0]
# Ghostscript
- r31 = misc.findprogram("gs")[0]
- r32 = misc.findprogram("mgs")[0] # from miktex
+ r31 = meta.find_program("gs")[0]
+ r32 = meta.find_program("mgs")[0] # from miktex
r3 = max(r31,r32)
if r1+r2+r3 < 3:
# Warn the user
@@ -1338,7 +1343,7 @@ class MyFrame(wx.Frame):
counter=counter)
# Add experimental Data
# Import dataexp:
- number = counter.strip().strip(":").strip("#")
+ number = counter.strip(":# ")
pageid = int(number)
dataexp = Infodict["Correlations"][pageid][1]
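
The single strip(":# ") call works because str.strip() with an argument
removes any leading and trailing characters from the given set; for the
counter strings used here it is equivalent to the chained calls it replaces:

    >>> " #0003: ".strip(":# ")
    '0003'
    >>> int(" #0003: ".strip(":# "))
    3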
@@ -1459,7 +1464,8 @@ class MyFrame(wx.Frame):
# What Data do we wish to save?
Page = self.notebook.GetCurrentPage()
# Export CSV data
- filename = Page.tabtitle.GetValue().strip()+Page.counter[:2]+".csv"
+ filename = "#{:04d}_{}".format(int(Page.counter.strip(":# ")),
+ Page.title.strip())
dlg = wx.FileDialog(self, "Save curve", self.dirname, filename,
"Correlation with trace (*.csv)|*.csv;*.*"+\
"|Correlation only (*.csv)|*.csv;*.*",
@@ -1476,7 +1482,10 @@ class MyFrame(wx.Frame):
savetrace = True
else:
savetrace = False
- opf.ExportCorrelation(path, Page, tools.info,
+ # Collect info on page
+ InfoMan = tools.info.InfoClass(CurPage=Page)
+ PageInfo = InfoMan.GetCurFancyInfo()
+ opf.ExportCorrelation(path, Page.corr, PageInfo,
savetrace=savetrace)
dlg.Destroy()
@@ -1551,7 +1560,7 @@ class MyFrame(wx.Frame):
for i in np.arange(N):
# Set Page
Page = self.notebook.GetPage(i)
- counter = int(Page.counter.strip().strip(":").strip("#"))
+ counter = int(Page.counter.strip(":# "))
# Apply currently set parameters
Page.apply_parameters()
# Set parameters
@@ -1571,11 +1580,17 @@ class MyFrame(wx.Frame):
# optimization error
Alist = list()
- if (corr.fit_results.has_key("fit error estimation") and
- len(corr.fit_results["fit error estimation"]) != 0):
+ if (# there is an error key
+ corr.fit_results.has_key("fit error estimation") and
+ # the errors were computed
+ len(corr.fit_results["fit error estimation"]) != 0 and
+ # len(errors) matches len(fit parameters)
+ len(corr.fit_results["fit error estimation"]) == len(corr.fit_results["fit parameters"])
+ ):
for ii, fitpid in enumerate(corr.fit_results["fit parameters"]):
- Alist.append([ int(fitpid),
- float(corr.fit_results["fit error estimation"][ii]) ])
+ Alist.append([int(fitpid),
+ float(corr.fit_results["fit error estimation"][ii])
+ ])
Infodict["Supplements"][counter]["FitErr"] = Alist
# Set exp data
diff --git a/pycorrfit/icon.py b/pycorrfit/gui/icon.py
similarity index 100%
rename from pycorrfit/icon.py
rename to pycorrfit/gui/icon.py
diff --git a/pycorrfit/main.py b/pycorrfit/gui/main.py
similarity index 100%
rename from pycorrfit/main.py
rename to pycorrfit/gui/main.py
diff --git a/pycorrfit/misc.py b/pycorrfit/gui/misc.py
similarity index 93%
rename from pycorrfit/misc.py
rename to pycorrfit/gui/misc.py
index 43ac168..f4e7c3c 100644
--- a/pycorrfit/misc.py
+++ b/pycorrfit/gui/misc.py
@@ -10,7 +10,6 @@ import codecs
from distutils.version import LooseVersion # For version checking
import numpy as np
import os
-import sys
import tempfile
import urllib2
import webbrowser
@@ -173,24 +172,6 @@ def getMainIcon(pxlength=32):
return iconICO
-def findprogram(program):
- """ Uses the systems PATH variable find executables"""
- path = os.environ['PATH']
- paths = path.split(os.pathsep)
- for d in paths:
- if os.path.isdir(d):
- fullpath = os.path.join(d, program)
- if sys.platform[:3] == 'win':
- for ext in '.exe', '.bat':
- program_path = fullpath + ext
- if os.path.isfile(fullpath + ext):
- return (1, program_path)
- else:
- if os.path.isfile(fullpath):
- return (1, fullpath)
- return (0, None)
-
-
def Update(parent):
""" This is a thread for _Update """
parent.StatusBar.SetStatusText("Connecting to server...")
diff --git a/pycorrfit/page.py b/pycorrfit/gui/page.py
similarity index 98%
rename from pycorrfit/page.py
rename to pycorrfit/gui/page.py
index 0334eab..92a3b69 100644
--- a/pycorrfit/page.py
+++ b/pycorrfit/gui/page.py
@@ -7,19 +7,18 @@ The frontend displays the GUI (Graphic User Interface).
All functions and modules are called from here.
"""
import numpy as np # NumPy
-import re
-import string
import warnings
import wx # GUI interface wxPython
-from wx.lib.agw import floatspin # Float numbers in spin fields
import wx.lib.plot as plot # Plotting in wxPython
import wx.lib.scrolledpanel as scrolled
-from . import models as mdls
+from pycorrfit import models as mdls
+from pycorrfit import fit
+from pycorrfit import Correlation, Fit
+
+
from . import tools
-from . import fcs_data_set as pcfbase
-from .fcs_data_set import Correlation, Fit
from . import wxutils
@@ -121,8 +120,8 @@ class FittingPanel(wx.Panel):
def active_parms(self):
names = self.corr.fit_model.parameters[0]
parms = self.corr.fit_parameters
- bool = self.corr.fit_parameters_variable
- return [names, parms, bool]
+ bools = self.corr.fit_parameters_variable
+ return [names, parms, bools]
@property
def IsCrossCorrelation(self):
@@ -249,7 +248,7 @@ class FittingPanel(wx.Panel):
fit_weight_data = self.corr.fit_weight_data
# Fitting algorithm
- keys = pcfbase.GetAlgorithmStringList()[0]
+ keys = fit.GetAlgorithmStringList()[0]
idalg = self.AlgorithmDropdown.GetSelection()
self.corr.fit_algorithm = keys[idalg]
@@ -294,7 +293,7 @@ class FittingPanel(wx.Panel):
normsel = self.corr.normparm + 1
self.AmplitudeInfo[2].SetSelection(normsel)
# Fitting algorithm
- keys = pcfbase.GetAlgorithmStringList()[0]
+ keys = fit.GetAlgorithmStringList()[0]
idalg = keys.index(self.corr.fit_algorithm)
self.AlgorithmDropdown.SetSelection(idalg)
self.updateChi2()
@@ -651,9 +650,10 @@ class FittingPanel(wx.Panel):
if weights.shape[0] == self.corr.correlation.shape[0]:
weights = weights[self.corr.fit_ival[0]:self.corr.fit_ival[1]]
+ # perform some checks
if np.allclose(weights, np.ones_like(weights)):
weights = 0
- if weights.shape[0] != self.corr.modeled_fit.shape[0]:
+ elif weights.shape[0] != self.corr.modeled_fit.shape[0]:
 # non-matching weights
 warnings.warn("Non-matching weights found. Probably from a previous data set.")
weights = 0
@@ -737,7 +737,8 @@ class FittingPanel(wx.Panel):
# Create empty tab title
mddat = mdls.modeldict[modelid]
modelshort = mdls.GetModelType(modelid)
- titlelabel = u"Data set ({} {})".format(modelshort, mddat[1])
+ titlelabel = u"Data set {} ({} {})".format(
+ self.counter.strip(" :"), modelshort, mddat[1])
boxti = wx.StaticBox(self.panelsettings, label=titlelabel)
sizerti = wx.StaticBoxSizer(boxti, wx.VERTICAL)
sizerti.SetMinSize((horizontalsize, -1))
@@ -855,7 +856,7 @@ class FittingPanel(wx.Panel):
textalg = wx.StaticText(self.panelsettings, label="Algorithm")
fitsizer.Add(textalg)
self.AlgorithmDropdown = wx.ComboBox(self.panelsettings)
- items = pcfbase.GetAlgorithmStringList()[1]
+ items = fit.GetAlgorithmStringList()[1]
self.AlgorithmDropdown.SetItems(items)
self.Bind(wx.EVT_COMBOBOX, self.apply_parameters,
self.AlgorithmDropdown)
diff --git a/pycorrfit/plotting.py b/pycorrfit/gui/plotting.py
similarity index 94%
rename from pycorrfit/plotting.py
rename to pycorrfit/gui/plotting.py
index ddbfe07..decf507 100644
--- a/pycorrfit/plotting.py
+++ b/pycorrfit/gui/plotting.py
@@ -8,6 +8,7 @@ Be sure to install texlive-science and texlive-math-extra
"""
+import wx  # imported explicitly because of pyinstaller
import codecs
import numpy as np
import matplotlib
@@ -23,8 +24,8 @@ from matplotlib import rcParams
import unicodedata
# For finding latex tools
-from .misc import findprogram
-from . import models as mdls
+from pycorrfit.meta import find_program
+from pycorrfit import models as mdls
def greek2tex(char):
@@ -153,11 +154,11 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
fitlabel += ur", normalized to "+Page.corr.fit_model.parameters[0][Page.corr.normparm]
## Check if we can use latex for plotting:
- r1 = findprogram("latex")[0]
- r2 = findprogram("dvipng")[0]
+ r1 = find_program("latex")[0]
+ r2 = find_program("dvipng")[0]
# Ghostscript
- r31 = findprogram("gs")[0]
- r32 = findprogram("mgs")[0] # from miktex
+ r31 = find_program("gs")[0]
+ r32 = find_program("mgs")[0] # from miktex
r3 = max(r31,r32)
if r1+r2+r3 < 3:
uselatex = False
@@ -177,7 +178,9 @@ def savePlotCorrelation(parent, dirname, Page, uselatex=False,
# create plot
# plt.plot(x, y, '.', label = 'original data', markersize=5)
fig=plt.figure()
- fig.canvas.set_window_title("Correlation - "+Page.title)
+ wtit = "Correlation #{:04d}_{}".format(int(Page.counter.strip(":# ")),
+ Page.title.strip())
+ fig.canvas.set_window_title(wtit)
if resid is not None:
gs = gridspec.GridSpec(2, 1, height_ratios=[5,1])
ax = plt.subplot(gs[0])
@@ -347,11 +350,11 @@ def savePlotTrace(parent, dirname, Page, uselatex=False, verbose=False):
labels.append("Channel {}: {}".format(ii+1, tr.name))
## Check if we can use latex for plotting:
- r1 = findprogram("latex")[0]
- r2 = findprogram("dvipng")[0]
+ r1 = find_program("latex")[0]
+ r2 = find_program("dvipng")[0]
# Ghostscript
- r31 = findprogram("gs")[0]
- r32 = findprogram("mgs")[0]
+ r31 = find_program("gs")[0]
+ r32 = find_program("mgs")[0]
r3 = max(r31,r32)
if r1+r2+r3 < 3:
uselatex = False
@@ -367,7 +370,9 @@ def savePlotTrace(parent, dirname, Page, uselatex=False, verbose=False):
# create plot
# plt.plot(x, y, '.', label = 'original data', markersize=5)
fig=plt.figure(figsize=(10,3))
- fig.canvas.set_window_title("Trace - "+Page.title)
+ wtit = "Trace #{:04d}_{}".format(int(Page.counter.strip(":# ")),
+ Page.title.strip())
+ fig.canvas.set_window_title(wtit)
ax = plt.subplot(111)
for i in np.arange(len(traces)):
# Columns
diff --git a/pycorrfit/tools/__init__.py b/pycorrfit/gui/tools/__init__.py
similarity index 100%
rename from pycorrfit/tools/__init__.py
rename to pycorrfit/gui/tools/__init__.py
diff --git a/pycorrfit/tools/average.py b/pycorrfit/gui/tools/average.py
similarity index 99%
rename from pycorrfit/tools/average.py
rename to pycorrfit/gui/tools/average.py
index 4cbda67..581d709 100644
--- a/pycorrfit/tools/average.py
+++ b/pycorrfit/gui/tools/average.py
@@ -10,8 +10,10 @@ Creates an average of curves.
import numpy as np
import wx
+from pycorrfit import models as mdls
+
from .. import misc
-from .. import models as mdls
+
# Menu entry name
MENUINFO = ["&Average data", "Create an average curve from whole session."]
diff --git a/pycorrfit/tools/background.py b/pycorrfit/gui/tools/background.py
similarity index 99%
rename from pycorrfit/tools/background.py
rename to pycorrfit/gui/tools/background.py
index 82cafbf..5900291 100644
--- a/pycorrfit/tools/background.py
+++ b/pycorrfit/gui/tools/background.py
@@ -15,10 +15,11 @@ import wx
from wx.lib.agw import floatspin # Float numbers in spin fields
import wx.lib.plot as plot
+from pycorrfit import openfile as opf # How to treat an opened file
+from pycorrfit import readfiles
+from pycorrfit import Trace
+
from .. import misc
-from .. import openfile as opf # How to treat an opened file
-from .. import readfiles
-from ..fcs_data_set import Trace
# Menu entry name
MENUINFO = ["&Background correction", "Open a file for background correction."]
diff --git a/pycorrfit/tools/batchcontrol.py b/pycorrfit/gui/tools/batchcontrol.py
similarity index 95%
rename from pycorrfit/tools/batchcontrol.py
rename to pycorrfit/gui/tools/batchcontrol.py
index 5a41488..3e976c2 100644
--- a/pycorrfit/tools/batchcontrol.py
+++ b/pycorrfit/gui/tools/batchcontrol.py
@@ -11,8 +11,8 @@ import numpy as np
import os
import wx
-from .. import openfile as opf # How to treat an opened file
-from .. import models as mdls
+from pycorrfit import openfile as opf # How to treat an opened file
+from pycorrfit import models as mdls
# Menu entry name
@@ -150,15 +150,14 @@ class BatchCtrl(wx.Frame):
oldpage = self.curpage
self.curpage = self.parent.notebook.GetCurrentPage()
- if Page is not None:
- # redraw this tool if necessary
- if oldpage is not None and not isinstance(oldpage, wx._core._wxPyDeadObject):
- oldmodelid = self.curpage.modelid
- else:
- oldmodelid = 0
- newmodelid = self.curpage.modelid
- if oldmodelid != newmodelid:
- self.RedrawParameterBox()
+ # redraw this tool if necessary
+ if oldpage is not None and not isinstance(oldpage, wx._core._wxPyDeadObject):
+ oldmodelid = oldpage.modelid
+ else:
+ oldmodelid = 0
+ newmodelid = self.curpage.modelid
+ if oldmodelid != newmodelid:
+ self.RedrawParameterBox()
# We need to update the list of Pages in self.dropdown
if self.rbtnhere.Value == True:
@@ -279,6 +278,8 @@ check box.""")
panel.Layout()
sizer_bag.Fit(self)
self.mastersizer = sizer_bag
+ self.mastersizer.Fit(self)
+
def RedrawParameterBox(self, e=None):
@@ -320,5 +321,6 @@ for batch modification.""")
self.mastersizer.Fit(panel)
panel.Layout()
self.SetSize(panel.GetSize())
+ self.mastersizer.Fit(self)
except:
pass
\ No newline at end of file
diff --git a/pycorrfit/tools/chooseimport.py b/pycorrfit/gui/tools/chooseimport.py
similarity index 99%
rename from pycorrfit/tools/chooseimport.py
rename to pycorrfit/gui/tools/chooseimport.py
index 35da061..ac28349 100644
--- a/pycorrfit/tools/chooseimport.py
+++ b/pycorrfit/gui/tools/chooseimport.py
@@ -11,7 +11,7 @@ of data (AC1, AC2, CC12, CC21) he wants to import.
import numpy as np
import wx
-from .. import models as mdls
+from pycorrfit import models as mdls
from . import overlaycurves
diff --git a/pycorrfit/tools/comment.py b/pycorrfit/gui/tools/comment.py
similarity index 100%
rename from pycorrfit/tools/comment.py
rename to pycorrfit/gui/tools/comment.py
diff --git a/pycorrfit/tools/datarange.py b/pycorrfit/gui/tools/datarange.py
similarity index 100%
rename from pycorrfit/tools/datarange.py
rename to pycorrfit/gui/tools/datarange.py
diff --git a/pycorrfit/tools/example.py b/pycorrfit/gui/tools/example.py
similarity index 100%
rename from pycorrfit/tools/example.py
rename to pycorrfit/gui/tools/example.py
diff --git a/pycorrfit/tools/globalfit.py b/pycorrfit/gui/tools/globalfit.py
similarity index 99%
rename from pycorrfit/tools/globalfit.py
rename to pycorrfit/gui/tools/globalfit.py
index 9ba9fe2..40e04c8 100644
--- a/pycorrfit/tools/globalfit.py
+++ b/pycorrfit/gui/tools/globalfit.py
@@ -10,8 +10,9 @@ Perform global fitting on pages which share parameters.
import wx
import numpy as np
+from pycorrfit import Fit
from .. import misc
-from ..fcs_data_set import Fit
+
# Menu entry name
MENUINFO = ["&Global fitting",
diff --git a/pycorrfit/tools/info.py b/pycorrfit/gui/tools/info.py
similarity index 97%
rename from pycorrfit/tools/info.py
rename to pycorrfit/gui/tools/info.py
index 86068fb..18b142c 100644
--- a/pycorrfit/tools/info.py
+++ b/pycorrfit/gui/tools/info.py
@@ -10,8 +10,8 @@ Open a text window with lots of information.
import wx
import numpy as np
-from .. import fcs_data_set
-from .. import models as mdls
+from pycorrfit import fit
+from pycorrfit import models as mdls
# Menu entry name
MENUINFO = ["Page &info",
@@ -170,7 +170,7 @@ class InfoClass(object):
else:
ChiSqType = "unknown"
Fitting.append([ u"χ²-type", ChiSqType])
- Fitting.append([ "Algorithm", fcs_data_set.Algorithms[corr.fit_algorithm][1]])
+ Fitting.append([ "Algorithm", fit.Algorithms[corr.fit_algorithm][1]])
if len(Page.GlobalParameterShare) != 0:
shared = str(Page.GlobalParameterShare[0])
for item in Page.GlobalParameterShare[1:]:
@@ -207,13 +207,13 @@ class InfoClass(object):
InfoDict["fitting"] = Fitting
## Normalization parameter id to name
- if corr.normalize_parm is None:
+ if corr.normparm is None:
normparmtext = "None"
- elif Page.normparm < len(corr.fit_parameters):
- normparmtext = corr.fit_model.parameters[0][corr.normalize_parm]
+ elif corr.normparm < len(corr.fit_parameters):
+ normparmtext = corr.fit_model.parameters[0][corr.normparm]
else:
# supplementary parameters
- supnum = corr.normalize_parm - len(corr.fit_parameters)
+ supnum = corr.normparm - len(corr.fit_parameters)
normparmtext = MoreInfo[supnum][0]
Title.append(["Normalization", normparmtext])
diff --git a/pycorrfit/tools/overlaycurves.py b/pycorrfit/gui/tools/overlaycurves.py
similarity index 100%
rename from pycorrfit/tools/overlaycurves.py
rename to pycorrfit/gui/tools/overlaycurves.py
diff --git a/pycorrfit/tools/parmrange.py b/pycorrfit/gui/tools/parmrange.py
similarity index 98%
rename from pycorrfit/tools/parmrange.py
rename to pycorrfit/gui/tools/parmrange.py
index d4037ff..fafd6c5 100644
--- a/pycorrfit/tools/parmrange.py
+++ b/pycorrfit/gui/tools/parmrange.py
@@ -8,12 +8,10 @@ Select the range in which the parameter should reside for fitting.
import wx
-from wx.lib.agw import floatspin
import numpy as np
-from .. import edclasses # edited floatspin
-from .. import models as mdls
+from pycorrfit import models as mdls
from .. import wxutils
@@ -84,12 +82,18 @@ class RangeSelector(wx.Frame):
self.WXboxsizer.Add(text2)
self.WXboxsizer.Add(right)
self.WXparmlist.append([left, [text, text2], right])
-
+ self.WXboxsizer.Layout()
+
self.topSizer.Add(self.WXboxsizer)
self.btnapply = wx.Button(self.panel, wx.ID_ANY, 'Apply')
self.Bind(wx.EVT_BUTTON, self.OnSetParmRange, self.btnapply)
self.topSizer.Add(self.btnapply)
-
+
+ self.topSizer.Layout()
+ self.panel.SetSizer(self.topSizer)
+ self.topSizer.Fit(self.panel)
+ self.SetMinSize(self.topSizer.GetMinSizeTuple())
+ self.topSizer.Fit(self)
def OnClose(self, event=None):
# This is a necessary function for PyCorrFit.
@@ -132,10 +136,7 @@ class RangeSelector(wx.Frame):
self.WXboxsizerlist = list()
self.WXparmlist = list()
self.FillPanel()
- self.WXboxsizer.Layout()
- self.topSizer.Layout()
- self.SetMinSize(self.topSizer.GetMinSizeTuple())
- self.topSizer.Fit(self)
+
def OnSetParmRange(self, e):
diff --git a/pycorrfit/tools/plotexport.py b/pycorrfit/gui/tools/plotexport.py
similarity index 100%
rename from pycorrfit/tools/plotexport.py
rename to pycorrfit/gui/tools/plotexport.py
diff --git a/pycorrfit/tools/simulation.py b/pycorrfit/gui/tools/simulation.py
similarity index 99%
rename from pycorrfit/tools/simulation.py
rename to pycorrfit/gui/tools/simulation.py
index b6c37ca..7e2becc 100644
--- a/pycorrfit/tools/simulation.py
+++ b/pycorrfit/gui/tools/simulation.py
@@ -12,7 +12,7 @@ import wx
from wx.lib.agw import floatspin
import numpy as np
-from .. import models as mdls
+from pycorrfit import models as mdls
# Menu entry name
MENUINFO = ["S&lider simulation",
@@ -148,7 +148,6 @@ class Slide(wx.Frame):
self.topSizer.Add(slidesizer)
self.panel.SetSizer(self.topSizer)
self.topSizer.Fit(self)
- #self.SetMinSize(self.topSizer.GetMinSizeTuple())
self.OnRadio()
self.OnPageChanged(self.Page, init=True)
#Icon
diff --git a/pycorrfit/tools/statistics.py b/pycorrfit/gui/tools/statistics.py
similarity index 99%
rename from pycorrfit/tools/statistics.py
rename to pycorrfit/gui/tools/statistics.py
index 668fab6..15ee1be 100644
--- a/pycorrfit/tools/statistics.py
+++ b/pycorrfit/gui/tools/statistics.py
@@ -15,9 +15,10 @@ import wx.lib.scrolledpanel as scrolled
import numpy as np
import re
+from pycorrfit import models as mdls
+
from .info import InfoClass
from .. import misc
-from .. import models as mdls
# Menu entry name
diff --git a/pycorrfit/tools/trace.py b/pycorrfit/gui/tools/trace.py
similarity index 100%
rename from pycorrfit/tools/trace.py
rename to pycorrfit/gui/tools/trace.py
diff --git a/pycorrfit/usermodel.py b/pycorrfit/gui/usermodel.py
similarity index 98%
rename from pycorrfit/usermodel.py
rename to pycorrfit/gui/usermodel.py
index af69dcc..f8c1de7 100644
--- a/pycorrfit/usermodel.py
+++ b/pycorrfit/gui/usermodel.py
@@ -29,7 +29,8 @@ except ImportError:
Function = object
import wx
-from . import models as mdls
+from pycorrfit import models as mdls
+from pycorrfit.models.control import append_model
class CorrFunc(object):
@@ -204,7 +205,7 @@ class UserModel(object):
self.SetCurrentID()
self.modelarray[i]["Definitions"][0] = self.CurrentID
# We assume that the models have the correct ID for now
- mdls.AppendNewModel(self.modelarray)
+ append_model(self.modelarray)
# Set variables and models
# Is this still necessary? - We are doing this for compatibility!
self.parent.value_set = mdls.values
diff --git a/pycorrfit/wxutils.py b/pycorrfit/gui/wxutils.py
similarity index 100%
rename from pycorrfit/wxutils.py
rename to pycorrfit/gui/wxutils.py
diff --git a/pycorrfit/meta.py b/pycorrfit/meta.py
new file mode 100644
index 0000000..210455a
--- /dev/null
+++ b/pycorrfit/meta.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+meta data and methods for PyCorrFit
+"""
+
+import os
+import sys
+
+
+def find_program(program):
+ """ Uses the systems PATH variable find executables"""
+ path = os.environ['PATH']
+ paths = path.split(os.pathsep)
+ for d in paths:
+ if os.path.isdir(d):
+ fullpath = os.path.join(d, program)
+ if sys.platform[:3] == 'win':
+ for ext in '.exe', '.bat':
+ program_path = fullpath + ext
+ if os.path.isfile(fullpath + ext):
+ return (1, program_path)
+ else:
+ if os.path.isfile(fullpath):
+ return (1, fullpath)
+ return (0, None)
+
+
+def get_file_location(filename):
+ """
+ Locate non-Python files that are part of PyCorrFit.
+ """
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ locations = ["/./", "/pycorrfit_doc/", "/doc/"]
+ locations += [ "/.."+l for l in locations]
+ locations = [ os.path.realpath(dirname+l) for l in locations]
+
+ for i in range(len(locations)):
+ # check /usr/lib64/32 -> /usr/lib
+ for larch in ["lib32", "lib64"]:
+ if dirname.count(larch):
+ locations.append(locations[i].replace(larch, "lib", 1))
+
+ ## frozen binaries:
+ if hasattr(sys, 'frozen'):
+ try:
+ adir = sys._MEIPASS + "/doc/" # @UndefinedVariable
+ except:
+ adir = "./"
+ locations.append(os.path.realpath(adir))
+ for loc in locations:
+ thechl = os.path.join(loc,filename)
+ if os.path.exists(thechl):
+ return thechl
+ # if this does not work:
+ return None
+
+
+def get_version():
+ """
+ Get the version.
+ """
+ StaticChangeLog = get_file_location("ChangeLog.txt")
+
+ # Check if we can extract the version
+ try:
+ clfile = open(StaticChangeLog, 'r')
+ version = clfile.readline().strip()
+ clfile.close()
+ except:
+ version = "0.0.0-unknown"
+
+ return version
\ No newline at end of file
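
A short usage sketch for the new helpers (the concrete return values are
examples, not guarantees):

    from pycorrfit import meta

    found, latex = meta.find_program("latex")  # (1, "/usr/bin/latex") or (0, None)
    changelog = meta.get_file_location("ChangeLog.txt")  # absolute path or None
    version = meta.get_version()  # first line of ChangeLog.txt, e.g. "0.9.8"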
diff --git a/pycorrfit/models/MODEL_classic_gaussian_2D.py b/pycorrfit/models/MODEL_classic_gaussian_2D.py
deleted file mode 100755
index 04c9b37..0000000
--- a/pycorrfit/models/MODEL_classic_gaussian_2D.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This file contains simple 2D models for confocal FCS.
-"""
-from __future__ import division
-
-import numpy as np
-
-
-# 2D simple gauss
-def CF_Gxy_gauss(parms, tau):
- u""" Two-dimensional diffusion with a Gaussian laser profile.
-
- G(τ) = offset + 1/( n * (1+τ/τ_diff) )
-
- Calculation of diffusion coefficient and concentration
- from the effective radius of the detection profile (r₀ = 2*σ):
- D = r₀²/(4*τ_diff)
- Conc = n/(π*r₀²)
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal area
- [1] τ_diff Characteristic residence time in confocal area
- [2] offset
- *tau* - lag time
- """
- n = parms[0]
- taudiff = parms[1]
- dc = parms[2]
-
- BB = 1 / ( (1.+tau/taudiff) )
- G = dc + 1/n * BB
- return G
-
-
-def get_boundaries_xy_gauss(parms):
- # strictly positive
- boundaries = [[0, np.inf]]*len(parms)
- boundaries[-1] = [-np.inf, np.inf]
- return boundaries
-
-
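
The docstring above contains everything needed for a quick sanity check.
A minimal sketch with assumed values (r0 is not a model parameter and is
chosen here only for illustration):

    import numpy as np

    n, tau_diff, offset = 4.0, 0.4, 0.0        # parms of CF_Gxy_gauss
    tau = np.logspace(-3, 2, 50)               # lag times [ms]
    G = offset + 1.0/(n*(1.0 + tau/tau_diff))  # same curve as CF_Gxy_gauss

    r0 = 0.25                  # lateral detection radius [um], assumed
    D = r0**2/(4*tau_diff)     # ~0.039 um^2/ms
    conc = n/(np.pi*r0**2)     # ~20.4 particles per um^2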
-# 2D simple gauss
-def CF_Gxy_T_gauss(parms, tau):
- u""" Two-dimensional diffusion with a Gaussian laser profile,
- including a triplet component.
- The triplet factor takes into account a blinking term.
- Set *T* or *τ_trip* to 0, if no triplet component is wanted.
-
- triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
-
- G(τ) = offset + 1/( n * (1+τ/τ_diff) )*triplet
-
- Calculation of diffusion coefficient and concentration
- from the effective radius of the detection profile (r₀ = 2*σ):
- D = r₀²/(4*τ_diff)
- Conc = n/(π*r₀²)
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal area
- [1] τ_diff Characteristic residence time in confocal area
- [2] τ_trip Characteristic residence time in triplet state
- [3] T Fraction of particles in triplet (non-fluorescent) state
- 0 <= T < 1
- [4] offset
- *tau* - lag time
- """
- n = parms[0]
- taudiff = parms[1]
- tautrip = parms[2]
- T = parms[3]
- dc = parms[4]
-
- if tautrip == 0 or T == 0:
- triplet = 1
- else:
- triplet = 1 + T/(1-T) * np.exp(-tau/tautrip)
-
- BB = 1 / ( (1.+tau/taudiff) )
- G = dc + 1/n * BB * triplet
- return G
-
-
-def get_boundaries_xy_T_gauss(parms):
- # strictly positive
- boundaries = [[0, np.inf]]*len(parms)
- # F
- boundaries[3] = [0,.9999999999999]
- boundaries[-1] = [-np.inf, np.inf]
- return boundaries
-
-
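
The triplet term in CF_Gxy_T_gauss above is a purely multiplicative blink
correction. With assumed values T = 0.2 and tau_trip = 1 us (0.001 ms) it
starts at 1 + T/(1-T) = 1.25 for tau -> 0 and decays to 1 once
tau >> tau_trip:

    import numpy as np

    T, tautrip = 0.2, 0.001                       # fraction, [ms]
    tau = np.array([0.0, 0.001, 0.01])            # [ms]
    triplet = 1 + T/(1 - T)*np.exp(-tau/tautrip)  # -> [1.25, 1.092, 1.000]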
-# 2D + 2D + Triplet Gauß
-# Model 6031
-def CF_Gxyz_gauss_2D2DT(parms, tau):
- u""" Two-component, two-dimensional diffusion with a Gaussian laser
- profile, including a triplet component.
- The triplet factor takes into account blinking according to triplet
- states of excited molecules.
- Set *T* or *τ_trip* to 0, if no triplet component is wanted.
-
- particle1 = F₁/(1+τ/τ₁)
- particle2 = α*(1-F₁)/(1+τ/τ₂)
- triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
- norm = (F₁ + α*(1-F₁))²
- G = 1/n*(particle1 + particle2)*triplet/norm + offset
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal area
- (n = n₁+n₂)
- [1] τ₁ Diffusion time of particle species 1
- [2] τ₂ Diffusion time of particle species 2
- [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
- 0 <= F₁ <= 1
- [4] α Relative molecular brightness of particle 2
- compared to particle 1 (α = q₂/q₁)
- [5] τ_trip Characteristic residence time in triplet state
- [6] T Fraction of particles in triplet (non-fluorescent)
- state 0 <= T < 1
- [7] offset
- *tau* - lag time
- """
- n=parms[0]
- taud1=parms[1]
- taud2=parms[2]
- F=parms[3]
- alpha=parms[4]
- tautrip=parms[5]
- T=parms[6]
- off=parms[7]
-
- particle1 = F/( 1+tau/taud1 )
- particle2 = alpha**2*(1-F)/( 1+tau/taud2 )
- # If the fraction of dark molecules is zero, we put the
- # whole triplet fraction to death.
- triplet = 1 + T/(1-T)*np.exp(-tau/tautrip)
- # For alpha == 1, *norm* becomes one
- norm = (F + alpha*(1-F))**2
-
- G = 1/n*(particle1 + particle2)*triplet/norm + off
- return G
-
-def get_boundaries_6031(parms):
- # strictly positive
- boundaries = [[0, np.inf]]*len(parms)
- # F
- boundaries[3] = [0,.9999999999999]
- # T
- boundaries[6] = [0,.9999999999999]
- boundaries[-1] = [-np.inf, np.inf]
- return boundaries
-
-
-def MoreInfo_6001(parms, countrate=None):
- # We can only give you the effective particle number
- n = parms[0]
- Info = list()
- if countrate is not None:
- # CPP
- cpp = countrate/n
- Info.append(["cpp [kHz]", cpp])
- return Info
-
-
-def MoreInfo_6031(parms, countrate=None):
- u"""Supplementary parameters:
- [8] n₁ = n*F₁ Particle number of species 1
- [9] n₂ = n*(1-F₁) Particle number of species 2
- """
- # We can only give you the effective particle number
- n = parms[0]
- F1 = parms[3]
- Info = list()
- # The enumeration of these parameters is very important for
- # plotting the normalized curve. Countrate must come out last!
- Info.append([u"n\u2081", n*F1])
- Info.append([u"n\u2082", n*(1.-F1)])
- if countrate is not None:
- # CPP
- cpp = countrate/n
- Info.append(["cpp [kHz]", cpp])
- return Info
-
-
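
With the default values defined further below (values_6031) and an assumed
count rate of 100 kHz, the supplementary-parameter helper above splits n
into per-species particle numbers:

    MoreInfo_6031([25, 5, 1000, 0.5, 1.0, 0.001, 0.01, 0.0], countrate=100.0)
    # -> [[u"n\u2081", 12.5], [u"n\u2082", 12.5], ["cpp [kHz]", 4.0]]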
-# 2D Model Gauss
-m_twodga6001 = [6001, u"2D", u"2D confocal diffusion", CF_Gxy_gauss]
-labels_6001 = [u"n",
- u"τ_diff [ms]",
- u"offset"]
-values_6001 = [4.0, 0.4, 0.0]
-valuestofit_6001 = [True, True, False]
-parms_6001 = [labels_6001, values_6001, valuestofit_6001]
-
-
-# 2D Model Gauss with Triplet
-m_twodga6002 = [6002, u"T+2D", u"2D confocal diffusion with triplet",
- CF_Gxy_T_gauss]
-labels_6002 = [u"n",
- u"τ_diff [ms]",
- u"τ_trip [ms]",
- u"T",
- u"offset"]
-values_6002 = [4.0, 0.4, 0.001, 0.01, 0.0]
-labels_hr_6002 = [u"n",
- u"τ_diff [ms]",
- u"τ_trip [µs]",
- u"T",
- u"offset"]
-factors_hr_6002 = [1., 1., 1000., 1., 1.]
-valuestofit_6002 = [True, True, True, True, False]
-parms_6002 = [labels_6002, values_6002, valuestofit_6002,
- labels_hr_6002, factors_hr_6002]
-
-
-# 2D + 2D + T model gauss
-m_gauss_2d_2d_t_mix_6031 = [6031, u"T+2D+2D",
- u"Separate 2D diffusion + triplet, Gauß",
- CF_Gxyz_gauss_2D2DT]
-labels_6031 = ["n",
- u"τ"+u"\u2081"+u" [ms]",
- u"τ"+u"\u2082"+u" [ms]",
- u"F"+u"\u2081",
- u"\u03b1"+u" (q"+u"\u2082"+"/q"+u"\u2081"+")",
- u"τ_trip [ms]",
- u"T",
- u"offset"
- ]
-values_6031 = [
- 25, # n
- 5, # taud1
- 1000, # taud2
- 0.5, # F
- 1.0, # alpha
- 0.001, # tautrip
- 0.01, # T
- 0.0 # offset
- ]
-# For user comfort we add values that are human-readable.
-# These will be used for output that only humans can read.
-labels_human_readable_6031 = [
- u"n",
- u"τ"+u"\u2081"+u" [ms]",
- u"τ"+u"\u2082"+u" [ms]",
- u"F"+u"\u2081",
- u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
- u"τ_trip [µs]",
- u"T",
- u"offset"
- ]
-values_factor_human_readable_6031 = [
- 1., # "n",
- 1., # "τ"+u"\u2081"+" [ms]",
- 1., # "τ"+u"\u2082"+" [ms]",
- 1., # "F"+u"\u2081",
- 1., # u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
- 1000., # "τ_trip [µs]",
- 1., # "T",
- 1. # "offset"
- ]
-valuestofit_6031 = [True, True, True, True, False, False, False, False]
-parms_6031 = [labels_6031, values_6031, valuestofit_6031,
- labels_human_readable_6031, values_factor_human_readable_6031]
-
-
-model1 = dict()
-model1["Parameters"] = parms_6001
-model1["Definitions"] = m_twodga6001
-model1["Supplements"] = MoreInfo_6001
-model1["Boundaries"] = get_boundaries_xy_gauss(values_6001)
-
-model2 = dict()
-model2["Parameters"] = parms_6002
-model2["Definitions"] = m_twodga6002
-model2["Supplements"] = MoreInfo_6001
-model2["Boundaries"] = get_boundaries_xy_T_gauss(values_6002)
-model2["Constraints"] = [[2, "<", 1]] # triplet time < diffusion time
-
-model3 = dict()
-model3["Parameters"] = parms_6031
-model3["Definitions"] = m_gauss_2d_2d_t_mix_6031
-model3["Supplements"] = MoreInfo_6031
-model3["Boundaries"] = get_boundaries_6031(values_6031)
-model3["Constraints"] = [[2, ">", 1], [5, "<", 1]] # triplet time < diffusion time
-
-Modelarray = [model1, model2, model3]
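
Each model dictionary above wires a plain Python function into the GUI.
The curve itself can be evaluated directly; a sketch for model1, reusing
the names defined in this file:

    import numpy as np

    func = model1["Definitions"][3]  # CF_Gxy_gauss
    tau = np.logspace(-3, 2, 50)     # lag times [ms]
    G = func(values_6001, tau)       # correlation curve for the defaults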
diff --git a/pycorrfit/models/MODEL_classic_gaussian_3D.py b/pycorrfit/models/MODEL_classic_gaussian_3D.py
deleted file mode 100755
index c66d9d2..0000000
--- a/pycorrfit/models/MODEL_classic_gaussian_3D.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This file contains 3D models for confocal FCS.
-"""
-from __future__ import division
-
-import numpy as np
-
-# 3D simple gauss
-def CF_Gxyz_gauss(parms, tau):
- # Model 6012
- u""" Three-dimanesional free diffusion with a Gaussian laser profile
- (eliptical).
-
- G(τ) = offset + 1/( n*(1+τ/τ_diff) * sqrt(1 + τ/(SP²*τ_diff)) )
-
- Calculation of diffusion coefficient and concentration
- from the effective radius of the detection profile (r₀ = 2*σ):
- D = r₀²/(4*τ_diff)
- Conc = n/( sqrt(π³)*r₀²*z₀ )
-
- r₀ lateral detection radius (waist of lateral gaussian)
- z₀ axial detection length (waist of axial gaussian)
- D Diffusion coefficient
- Conc Concentration of dye
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal volume
- [1] τ_diff Characteristic residence time in confocal volume
- [2] SP SP=z₀/r₀ Structural parameter,
- describes the axis ratio of the confocal volume
- [3] offset
- *tau* - lag time
- """
- n = parms[0]
- taudiff = parms[1]
- SP = parms[2]
- off = parms[3]
-
- BB = 1 / ( (1.+tau/taudiff) * np.sqrt(1.+tau/(SP**2*taudiff)) )
- G = off + 1/n * BB
- return G
-
-
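
As in the 2D case, the derived quantities follow directly from the
docstring. A sketch with assumed optics (r0 is not a fit parameter):

    import numpy as np

    n, tau_diff, SP = 4.0, 0.4, 5.0        # parms of CF_Gxyz_gauss
    r0 = 0.25                              # lateral radius [um], assumed
    z0 = SP*r0                             # axial length, from SP = z0/r0
    D = r0**2/(4*tau_diff)                 # ~0.039 um^2/ms
    conc = n/(np.sqrt(np.pi**3)*r0**2*z0)  # ~9.2 particles per um^3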
-# 3D blinking gauss
-# Model 6011
-def CF_Gxyz_blink(parms, tau):
- u""" Three-dimanesional free diffusion with a Gaussian laser profile
- (eliptical), including a triplet component.
- The triplet factor takes into account a blinking term.
- Set *T* or *τ_trip* to 0, if no triplet component is wanted.
-
- G(τ) = offset + 1/( n*(1+τ/τ_diff) * sqrt(1 + τ/(SP²*τ_diff)) )
- * ( 1+T/(1.-T)*exp(-τ/τ_trip) )
-
- Calculation of diffusion coefficient and concentration
- from the effective radius of the detection profile (r₀ = 2*σ):
- D = r₀²/(4*τ_diff)
- Conc = n/( sqrt(π³)*r₀²*z₀ )
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal volume
- [1] T Fraction of particles in triplet (non-fluorescent) state
- 0 <= T < 1
- [2] τ_trip Characteristic residence time in triplet state
- [3] τ_diff Characteristic residence time in confocal volume
- [4] SP SP=z₀/r₀ Structural parameter,
- describes the axis ratio of the confocal volume
- [5] offset
- *tau* - lag time
- """
- n = parms[0]
- T = parms[1]
- tautrip = parms[2]
- taudiff = parms[3]
- SP = parms[4]
- off = parms[5]
-
- if tautrip == 0 or T == 0:
- AA = 1
- else:
- AA = 1 + T/(1-T) * np.exp(-tau/tautrip)
-
- BB = 1 / ( (1+tau/taudiff) * np.sqrt(1+tau/(SP**2*taudiff)) )
- G = off + 1/n * AA * BB
- return G
-
-
-def get_boundaries_6011(parms):
- # strictly positive
- boundaries = [[0, np.inf]]*len(parms)
- # T
- boundaries[1] = [0,.9999999999999]
- boundaries[-1] = [-np.inf, np.inf]
- return boundaries
-
-
-# 3D + 3D + Triplet Gauß
- # Model 6030
-def CF_Gxyz_gauss_3D3DT(parms, tau):
- u""" Two-component three-dimensional free diffusion
- with a Gaussian laser profile, including a triplet component.
- The triplet factor takes into account a blinking term.
- Set *T* or *τ_trip* to 0, if no triplet component is wanted.
-
- particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
- particle2 = α*(1-F₁)/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
- triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
- norm = (F₁ + α*(1-F₁))²
- G = 1/n*(particle1 + particle2)*triplet/norm + offset
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal volume
- (n = n₁+n₂)
- [1] τ₁ Diffusion time of particle species 1
- [2] τ₂ Diffusion time of particle species 2
- [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
- 0 <= F₁ <= 1
- [4] SP SP=z₀/r₀, Structural parameter,
- describes elongation of the confocal volume
- [5] α Relative molecular brightness of particle
- 2 compared to particle 1 (α = q₂/q₁)
- [6] τ_trip Characteristic residence time in triplet state
- [7] T Fraction of particles in triplet (non-fluorescent) state
- 0 <= T < 1
- [8] offset
- *tau* - lag time
- """
- n=parms[0]
- taud1=parms[1]
- taud2=parms[2]
- F=parms[3]
- SP=parms[4]
- alpha=parms[5]
- tautrip=parms[6]
- T=parms[7]
- off=parms[8]
-
- particle1 = F/( (1+tau/taud1) * np.sqrt(1+tau/(taud1*SP**2)))
- particle2 = alpha**2*(1-F)/( (1+tau/taud2) * np.sqrt(1+tau/(taud2*SP**2)))
- # If the fraction of dark molecules is zero.
- if tautrip == 0 or T == 0:
- triplet = 1
- else:
- triplet = 1 + T/(1-T) * np.exp(-tau/tautrip)
- # For alpha == 1, *norm* becomes one
- norm = (F + alpha*(1-F))**2
-
- G = 1/n*(particle1 + particle2)*triplet/norm + off
- return G
-
-def get_boundaries_3D3DT(parms):
- # strictly positive
- boundaries = [[0, np.inf]]*len(parms)
- # F
- boundaries[3] = [0,.9999999999999]
- # T
- boundaries[7] = [0,.9999999999999]
- boundaries[-1] = [-np.inf, np.inf]
- return boundaries
-
-
-def MoreInfo_1C(parms, countrate=None):
- # We can only give you the effective particle number
- n = parms[0]
- Info = list()
- if countrate is not None:
- # CPP
- cpp = countrate/n
- Info.append(["cpp [kHz]", cpp])
- return Info
-
-
-def MoreInfo_6030(parms, countrate=None):
- u"""Supplementary parameters:
- [9] n₁ = n*F₁ Particle number of species 1
- [10] n₂ = n*(1-F₁) Particle number of species 2
- """
- # We can only give you the effective particle number
- n = parms[0]
- F1 = parms[3]
- Info = list()
- # The enumeration of these parameters is very important for
- # plotting the normalized curve. Countrate must come out last!
- Info.append([u"n\u2081", n*F1])
- Info.append([u"n\u2082", n*(1.-F1)])
- if countrate is not None:
- # CPP
- cpp = countrate/n
- Info.append(["cpp [kHz]", cpp])
- return Info
-
-
-# 3D Model blink gauss
-m_3dblink6011 = [6011, "T+3D","3D confocal diffusion with triplet",
- CF_Gxyz_blink]
-labels_6011 = [u"n",
- u"T",
- u"τ_trip [ms]",
- u"τ_diff [ms]",
- u"SP",
- u"offset"]
-values_6011 = [4.0, 0.2, 0.001, 0.4, 5.0, 0.0]
-labels_hr_6011 = [u"n",
- u"T",
- u"τ_trip [µs]",
- u"τ_diff [ms]",
- u"SP",
- u"offset"]
-factors_hr_6011 = [1., 1., 1000., 1., 1., 1.]
-valuestofit_6011 = [True, True, True, True, False, False]
-parms_6011 = [labels_6011, values_6011, valuestofit_6011,
- labels_hr_6011, factors_hr_6011]
-
-# 3D Model gauss
-m_3dgauss6012 = [6012, "3D","3D confocal diffusion", CF_Gxyz_gauss]
-labels_6012 = [u"n",
- u"τ_diff [ms]",
- u"SP",
- u"offset"]
-values_6012 = [4.0, 0.4, 5.0, 0.0]
-valuestofit_6012 = [True, True, False, False]
-parms_6012 = [labels_6012, values_6012, valuestofit_6012]
-
-# 3D + 3D + T model gauss
-m_gauss_3d_3d_t_mix_6030 = [6030, "T+3D+3D",
- u"Separate 3D diffusion + triplet, Gauß",
- CF_Gxyz_gauss_3D3DT]
-labels_6030 = [u"n",
- u"τ"+u"\u2081"+" [ms]",
- u"τ"+u"\u2082"+" [ms]",
- u"F"+u"\u2081",
- u"SP",
- u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
- u"τ_trip [ms]",
- u"T",
- u"offset"
- ]
-values_6030 = [
- 25, # n
- 5, # taud1
- 1000, # taud2
- 0.5, # F
- 5, # SP
- 1.0, # alpha
- 0.001, # tautrip
- 0.01, # T
- 0.0 # offset
- ]
-# For user comfort we add values that are human-readable.
-# These will be used for output that only humans can read.
-labels_human_readable_6030 = [
- u"n",
- u"τ"+u"\u2081"+" [ms]",
- u"τ"+u"\u2082"+" [ms]",
- u"F"+u"\u2081",
- u"SP",
- u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
- u"τ_trip [µs]",
- u"T",
- u"offset"
- ]
-values_factor_human_readable_6030 = [
- 1., # n
- 1., # taud1
- 1., # taud2
- 1., # F
- 1., # SP
- 1., # alpha
- 1000., # tautrip [µs]
- 1., # T
- 1. # offset
- ]
-valuestofit_6030 = [True, True, True, True, False, False, False, False, False]
-parms_6030 = [labels_6030, values_6030, valuestofit_6030,
- labels_human_readable_6030, values_factor_human_readable_6030]
-
-
-# Pack the models
-model1 = dict()
-model1["Parameters"] = parms_6011
-model1["Definitions"] = m_3dblink6011
-model1["Supplements"] = MoreInfo_1C
-model1["Boundaries"] = get_boundaries_6011(values_6011)
-model1["Constraints"] = [[3, ">", 2]] # triplet time < diffusion time
-
-model2 = dict()
-model2["Parameters"] = parms_6012
-model2["Definitions"] = m_3dgauss6012
-model2["Supplements"] = MoreInfo_1C
-
-model3 = dict()
-model3["Parameters"] = parms_6030
-model3["Definitions"] = m_gauss_3d_3d_t_mix_6030
-model3["Supplements"] = MoreInfo_6030
-model3["Boundaries"] = get_boundaries_3D3DT(values_6030)
-model3["Constraints"] = [[2, ">", 1], [6, "<", 1]]
-
-Modelarray = [model1, model2, model3]
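
Reading the constraint lists in this file with the convention
[i, op, j] -> "parm_i op parm_j" and the parameter order of model 6030
(n, tau1, tau2, F1, SP, alpha, tau_trip, T, offset):

    [[2, ">", 1],  # tau2 > tau1: species 2 diffuses more slowly
     [6, "<", 1]]  # tau_trip < tau1: triplet time below the faster diffusion time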
diff --git a/pycorrfit/models/MODEL_classic_gaussian_3D2D.py b/pycorrfit/models/MODEL_classic_gaussian_3D2D.py
deleted file mode 100755
index 4dda648..0000000
--- a/pycorrfit/models/MODEL_classic_gaussian_3D2D.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This file contains a T+3D+2D model for confocal FCS.
-"""
-from __future__ import division
-
-import numpy as np
-
-# 3D + 2D + T
-def CF_Gxyz_3d2dT_gauss(parms, tau):
- u""" Two-component, two- and three-dimensional diffusion
- with a Gaussian laser profile, including a triplet component.
- The triplet factor takes into account blinking according to triplet
- states of excited molecules.
- Set *T* or *τ_trip* to 0, if no triplet component is wanted.
-
- particle2D = (1-F)/ (1+τ/τ_2D)
- particle3D = α*F/( (1+τ/τ_3D) * sqrt(1+τ/(τ_3D*SP²)))
- triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
- norm = (1-F + α*F)²
- G = 1/n*(particle2D + particle3D)*triplet/norm + offset
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal volume
- (n = n2D+n3D)
- [1] τ_2D Diffusion time of surface-bound particles
- [2] τ_3D Diffusion time of freely diffusing particles
- [3] F Fraction of molecules of the freely diffusing species
- (n3D = n*F), 0 <= F <= 1
- [4] SP SP=z₀/r₀ Structural parameter,
- describes elongation of the confocal volume
- [5] α Relative molecular brightness of particle
- 3D compared to particle 2D (α = q3D/q2D)
- [6] τ_trip Characteristic residence time in triplet state
- [7] T Fraction of particles in triplet (non-fluorescent) state
- 0 <= T < 1
- [8] offset
- *tau* - lag time
- """
- n=parms[0]
- taud2D=parms[1]
- taud3D=parms[2]
- F=parms[3]
- SP=parms[4]
- alpha=parms[5]
- tautrip=parms[6]
- T=parms[7]
- off=parms[8]
-
-
- particle2D = (1-F)/ (1+tau/taud2D)
- particle3D = alpha**2*F/( (1+tau/taud3D) * np.sqrt(1+tau/(taud3D*SP**2)))
- if tautrip == 0 or T == 0:
- triplet = 1
- else:
- triplet = 1 + T/(1-T) * np.exp(-tau/tautrip)
- norm = (1-F + alpha*F)**2
- G = 1/n*(particle2D + particle3D)*triplet/norm
-
- return G + off
-
-def get_boundaries(parms):
- # strictly positive
- boundaries = [[0, np.inf]]*len(parms)
- # F
- boundaries[3] = [0,.9999999999999]
- # T
- boundaries[7] = [0,.9999999999999]
- boundaries[-1] = [-np.inf, np.inf]
- return boundaries
-
-
-def MoreInfo(parms, countrate=None):
- u"""Supplementary parameters:
- Effective number of freely diffusing particles in 3D solution:
- [9] n3D = n*F
- Effective number particles diffusing on 2D surface:
- [10] n2D = n*(1-F)
- """
- # We can only give you the effective particle number
- n = parms[0]
- F3d = parms[3]
- Info = list()
- # The enumeration of these parameters is very important for
- # plotting the normalized curve. Countrate must come out last!
- Info.append([u"n3D", n*F3d])
- Info.append([u"n2D", n*(1.-F3d)])
- if countrate is not None:
- # CPP
- cpp = countrate/n
- Info.append([u"cpp [kHz]", cpp])
- return Info
-
-
-# 3D + 3D + T model gauss
-m_gauss_3d_2d_t = [6032, u"T+3D+2D",
- u"Separate 3D and 2D diffusion + triplet, Gauß",
- CF_Gxyz_3d2dT_gauss]
-labels = [ u"n",
- u"τ_2D [ms]",
- u"τ_3D [ms]",
- u"F_3D",
- u"SP",
- u"\u03b1"+" (q_3D/q_2D)",
- u"τ_trip [ms]",
- u"T",
- u"offset"
- ]
-values = [
- 25, # n
- 240, # taud2D
- 0.1, # taud3D
- 0.5, # F3D
- 7, # SP
- 1.0, # alpha
- 0.001, # tautrip
- 0.01, # T
- 0.0 # offset
- ]
-# For user comfort we add values that are human-readable.
-# These will be used for output that only humans can read.
-labels_human_readable = [ u"n",
- u"τ_2D [ms]",
- u"τ_3D [ms]",
- u"F_3D",
- u"SP",
- u"\u03b1"+" (q_3D/q_2D)",
- u"τ_trip [µs]",
- u"T",
- u"offset"
- ]
-values_factor_human_readable = [
- 1., # "n",
- 1., # "τ_2D [ms]",
- 1., # "τ_3D [ms]",
- 1., # "F_3D",
- 1., # "SP",
- 1., # u"\u03b1"+" (q_3D/q_2D)",
- 1000., # "τ_trip [µs]",
- 1., # "T",
- 1. # "offset"
- ]
-valuestofit = [True, True, True, True, False, False, False, False, False]
-parms = [labels, values, valuestofit,
- labels_human_readable, values_factor_human_readable]
-
-
-model1 = dict()
-model1["Parameters"] = parms
-model1["Definitions"] = m_gauss_3d_2d_t
-model1["Boundaries"] = get_boundaries(values)
-model1["Supplements"] = MoreInfo
-model1["Constraints"] = [[2, "<", 1], [6, "<", 2]]
-
-Modelarray = [model1]
diff --git a/pycorrfit/models/MODEL_classic_gaussian_TT3D3D.py b/pycorrfit/models/MODEL_classic_gaussian_TT3D3D.py
deleted file mode 100644
index 3211c19..0000000
--- a/pycorrfit/models/MODEL_classic_gaussian_TT3D3D.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This file contains a T+T+3D+3D model for confocal FCS.
-"""
-from __future__ import division
-
-import numpy as np
-
-# 3D + 3D + Triplet Gauß
-# Model 6043
-def CF_Gxyz_gauss_3D3DTT(parms, tau):
- u""" Two-component three-dimensional free diffusion
- with a Gaussian laser profile, including two triplet components.
- The triplet factor takes into account a blinking term.
- Set *T* or *τ_trip* to 0, if no triplet component is wanted.
-
- particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
- particle2 = α*(1-F₁)/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
- triplet1 = 1 + T₁/(1-T₁)*exp(-τ/τ_trip₁)
- triplet2 = 1 + T₂/(1-T₂)*exp(-τ/τ_trip₂)
- norm = (F₁ + α*(1-F₁))²
- G = 1/n*(particle1 + particle2)*triplet1*triplet2/norm + offset
-
- *parms* - a list of parameters.
- Parameters (parms[i]):
- [0] n Effective number of particles in confocal volume
- (n = n₁+n₂)
- [1] τ₁ Diffusion time of particle species 1
- [2] τ₂ Diffusion time of particle species 2
- [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
- 0 <= F₁ <= 1
- [4] SP SP=z₀/r₀, Structural parameter,
- describes elongation of the confocal volume
- [5] α Relative molecular brightness of particle
- 2 compared to particle 1 (α = q₂/q₁)
- [6] τ_trip₁ Characteristic residence time in triplet state
- [7] T₁ Fraction of particles in triplet (non-fluorescent) state
- 0 <= T < 1
- [8] τ_trip₂ Characteristic residence time in triplet state
- [9] T₂ Fraction of particles in triplet (non-fluorescent) state
- 0 <= T < 1
- [10] offset
- *tau* - lag time
- """
- n=parms[0]
- taud1=parms[1]
- taud2=parms[2]
- F=parms[3]
- SP=parms[4]
- alpha=parms[5]
- tautrip1=parms[6]
- T1=parms[7]
- tautrip2=parms[8]
- T2=parms[9]
- off=parms[10]
-
- particle1 = F/( (1+tau/taud1) * np.sqrt(1+tau/(taud1*SP**2)))
- particle2 = alpha**2*(1-F)/( (1+tau/taud2) * np.sqrt(1+tau/(taud2*SP**2)))
- # If the fraction of dark molecules is zero.
- if tautrip1 == 0 or T1 == 0:
- triplet1 = 1
- else:
- triplet1 = 1 + T1/(1-T1) * np.exp(-tau/tautrip1)
- if tautrip2 == 0 or T2 == 0:
- triplet2 = 1
- else:
- triplet2 = 1 + T2/(1-T2) * np.exp(-tau/tautrip2)
- # For alpha == 1, *norm* becomes one
- norm = (F + alpha*(1-F))**2
-
- G = 1/n*(particle1 + particle2)*triplet1*triplet2/norm + off
- return G
-
-def get_boundaries_3D3DTT(parms):
- # strictly positive
- boundaries = [[0, np.inf]]*len(parms)
- # F
- boundaries[3] = [0,.9999999999999]
- # T
- boundaries[7] = [0,.9999999999999]
- boundaries[9] = [0,.9999999999999]
- boundaries[-1] = [-np.inf, np.inf]
- return boundaries
-
-
-m_gauss_3d_3d_t_t_mix_6043 = [6043, "T+T+3D+3D",
- u"Separate 3D diffusion + two triplet, Gauß",
- CF_Gxyz_gauss_3D3DTT]
-
-labels_6043 = [u"n",
- u"τ"+u"\u2081"+" [ms]",
- u"τ"+u"\u2082"+" [ms]",
- u"F"+u"\u2081",
- u"SP",
- u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
- u"τ_trip₁ [ms]",
- u"T₁",
- u"τ_trip₂ [ms]",
- u"T₂",
- u"offset"
- ]
-
-labels_human_readable_6043 = [
- u"n",
- u"τ₁ [ms]",
- u"τ₂ [ms]",
- u"F₁",
- u"SP",
- u"\u03b1"+u" (q₂/q₁)",
- u"τ_trip₁ [µs]",
- u"T₁",
- u"τ_trip₂ [µs]",
- u"T₂",
- u"offset"
- ]
-
-values_6043 = [
- 25, # n
- 5, # taud1
- 1000, # taud2
- 0.5, # F
- 5, # SP
- 1.0, # alpha
- 0.002, # tautrip1
- 0.01, # T1
- 0.001, # tautrip2
- 0.01, # T2
- 0.0 # offset
- ]
-
-values_factor_human_readable_6043 = [
- 1., # n
- 1., # taud1
- 1., # taud2
- 1., # F
- 1., # SP
- 1., # alpha
- 1000., # tautrip1 [µs]
- 1., # T1
- 1000., # tautrip2 [µs]
- 1., # T2
- 1. # offset
- ]
-
-
-valuestofit_6043 = [True, True, True, True, False, False, False, False, False, False, False]
-parms_6043 = [labels_6043, values_6043, valuestofit_6043,
- labels_human_readable_6043, values_factor_human_readable_6043]
-
-
-def MoreInfo_6043(parms, countrate=None):
- u"""Supplementary parameters:
- [11] n₁ = n*F₁ Particle number of species 1
- [12] n₂ = n*(1-F₁) Particle number of species 2
- """
- # We can only give you the effective particle number
- n = parms[0]
- F1 = parms[3]
- Info = list()
- # The enumeration of these parameters is very important for
- # plotting the normalized curve. Countrate must come out last!
- Info.append([u"n\u2081", n*F1])
- Info.append([u"n\u2082", n*(1.-F1)])
- if countrate is not None:
- # CPP
- cpp = countrate/n
- Info.append(["cpp [kHz]", cpp])
- return Info
-
-
-model = dict()
-model["Parameters"] = parms_6043
-model["Definitions"] = m_gauss_3d_3d_t_t_mix_6043
-model["Supplements"] = MoreInfo_6043
-model["Boundaries"] = get_boundaries_3D3DTT(values_6043)
-model["Constraints"] = [[2, ">", 1], [6, "<", 1], [8, "<", 6]]
-
-Modelarray = [model]
\ No newline at end of file
diff --git a/pycorrfit/models/__init__.py b/pycorrfit/models/__init__.py
index 916f52d..28a4f25 100644
--- a/pycorrfit/models/__init__.py
+++ b/pycorrfit/models/__init__.py
@@ -28,193 +28,8 @@ import numpy as np
import sys
import warnings
-## Models
-from . import MODEL_classic_gaussian_2D
-from . import MODEL_classic_gaussian_3D
-from . import MODEL_classic_gaussian_3D2D
-from . import MODEL_classic_gaussian_TT3D3D
-from . import MODEL_TIRF_gaussian_1C
-from . import MODEL_TIRF_gaussian_3D2D
-from . import MODEL_TIRF_gaussian_3D3D
-from . import MODEL_TIRF_1C
-from . import MODEL_TIRF_2D2D
-from . import MODEL_TIRF_3D2D
-from . import MODEL_TIRF_3D3D
-from . import MODEL_TIRF_3D2Dkin_Ries
-
-
-class Model(object):
- """General class for handling FCS fitting models"""
- def __init__(self, datadict):
- """datadict is an item in Modelarray"""
- self._parameters = datadict["Parameters"]
- self._definitions = datadict["Definitions"]
-
- if "Supplements" in list(datadict.keys()):
- self._supplements = datadict["Supplements"]
- else:
- self._supplements = lambda x, y: []
-
- if "Boundaries" in list(datadict.keys()):
- self._boundaries = datadict["Boundaries"]
- else:
- # dummy verification function
- self._boundaries = [[None,None]]*len(self._parameters[1])
-
- if "Constraints" in list(datadict.keys()):
- # sort constraints such that the first value is always
- # larger than the last.
- newcc = []
- for cc in datadict["Constraints"]:
- if cc[0] < cc[2]:
- if cc[1] == ">":
- cc = [cc[2], "<", cc[0]]
- elif cc[1] == "<":
- cc = [cc[2], ">", cc[0]]
- newcc.append(cc)
- self._constraints = newcc
- else:
- self._constraints = []
-
- def __call__(self, parameters, tau):
- return self.function(parameters, tau)
-
- def __getitem__(self, key):
- """Emulate old list behavior of models"""
- return self._definitions[key]
-
- def __repr__(self):
- text = "Model {} - {}".format(
- self.id,
- self.description_short)
- return text
-
- def apply(self, parameters, tau):
- """
- Apply the model with `parameters` and lag
- times `tau`
- """
- return self.function(parameters, tau)
-
- @property
- def constraints(self):
- """ fitting constraints """
- return copy.copy(self._constraints)
-
- @property
- def components(self):
- """how many components does this model have"""
- return self._definitions[1]
-
- @property
- def default_values(self):
- """default fitting values"""
- return np.array(self._parameters[1]).copy()
-
- @property
- def default_variables(self):
- """indexes default variable fitting (bool)"""
- return np.array(self._parameters[2]).copy()
-
- @property
- def description_long(self):
- """long description"""
- return self._definitions[3].__doc__
-
- @property
- def description_short(self):
- """short description"""
- return self._definitions[2]
-
- @property
- def id(self):
- return self._definitions[0]
-
- @property
- def function(self):
- return self._definitions[3]
-
- @property
- def func_supplements(self):
- return self._supplements
-
- @property
- def func_verification(self):
- warnings.warn("`func_verification is deprecated: please do not use it!")
- return lambda x: x
-
- def get_supplementary_parameters(self, values, countrate=None):
- """
- Compute additional information for the model
-
- Parameters
- ----------
- values: list-like of same length as `self.default_values`
- parameters for the model
- countrate: float
- countrate in kHz
- """
- return self.func_supplements(values, countrate)
-
- def get_supplementary_values(self, values, countrate=None):
- """
- Returns only the values of
- self.get_supplementary_parameters
-
- Parameters
- ----------
- values: list-like of same length as `self.default_values`
- parameters for the model
- countrate: float
- count rate in Hz
- """
- out = list()
- for item in self.get_supplementary_parameters(values, countrate):
- out.append(item[1])
- return out
-
- @property
- def name(self):
- return self.description_short
-
- @property
- def parameters(self):
- return self._parameters
-
- @property
- def boundaries(self):
- return self._boundaries
-
-
-def AppendNewModel(Modelarray):
- """ Append a new model from a modelarray. *Modelarray* has to be a list
- whose elements have two items:
- [0] parameters
- [1] some info about the model
- See separate models for more information
- """
- global values
- global valuedict
- global models
- global modeldict
- global supplement
- global boundaries
-
- for datadict in Modelarray:
- # We can have many models in one model array
- amod = Model(datadict)
-
- models.append(amod)
- modeldict[amod.id] = amod
-
- values.append(amod.parameters)
- valuedict[amod.id] = amod.parameters
-
- # Supplementary Data might be there
- supplement[amod.id] = amod.func_supplements
-
- # Check functions - check for correct values
- boundaries[amod.id] = amod.boundaries
+from .classes import Model
+from .control import values, valuedict, models, modeldict, modeltypes, supplement, boundaries, shorttype
def GetHumanReadableParms(model, parameters):
@@ -315,18 +130,14 @@ def GetModelType(modelid):
if modelid >= 7000:
return u"User"
else:
- shorttype = dict()
- shorttype[u"Confocal (Gaussian)"] = u"Confocal"
- shorttype[u"TIR (Gaussian/Exp.)"] = u"TIR Conf."
- shorttype[u"TIR (□xσ/Exp.)"] = u"TIR □xσ"
for key in modeltypes.keys():
mlist = modeltypes[key]
if mlist.count(modelid) == 1:
- return shorttype[key]
try:
return shorttype[key]
except:
- return ""
+ warnings.warn("No shorttype defined for `{}`.".format(key))
+ return key
def GetModelFunctionFromId(modelid):
return modeldict[modelid][3]
@@ -436,34 +247,3 @@ def GetPositionOfParameter(model, name):
return int(i)
-# Pack all variables
-values = list()
-# Also create a dictionary, key is modelid
-valuedict = dict()
-# Pack all models
-models = list()
-# Also create a dictinary
-modeldict = dict()
-# A dictionary for supplementary data:
-supplement = dict()
-# A dictionary containing model boundaries
-boundaries = dict()
-
-
-# Load all models from the imported "MODEL_*" submodules
-for g in list(globals().keys()):
- if g.startswith("MODEL_") and hasattr(globals()[g], "Modelarray"):
- AppendNewModel(globals()[g].Modelarray)
-
-# Create a list for the differentiation between the models
-# This should make everything look a little cleaner
-modeltypes = dict()
-#modeltypes[u"Confocal (Gaussian)"] = [6001, 6002, 6012, 6011, 6031, 6032, 6030]
-#modeltypes[u"TIR (Gaussian/Exp.)"] = [6013, 6033, 6034]
-#modeltypes[u"TIR (□xσ/Exp.)"] = [6000, 6010, 6022, 6020, 6023, 6021]
-
-modeltypes[u"Confocal (Gaussian)"] = [6011, 6030, 6002, 6031, 6032, 6043]
-modeltypes[u"TIR (Gaussian/Exp.)"] = [6014, 6034, 6033]
-modeltypes[u"TIR (□xσ/Exp.)"] = [6010, 6023, 6000, 6022, 6020, 6021]
-modeltypes[u"User"] = list()
-
diff --git a/pycorrfit/models/classes.py b/pycorrfit/models/classes.py
new file mode 100644
index 0000000..b1088ee
--- /dev/null
+++ b/pycorrfit/models/classes.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+from __future__ import division, print_function
+
+import copy
+import numpy as np
+import warnings
+
+class Model(object):
+ """General class for handling FCS fitting models"""
+ def __init__(self, datadict):
+ """datadict is an item in Modelarray"""
+ self._parameters = datadict["Parameters"]
+ self._definitions = datadict["Definitions"]
+
+ if "Supplements" in list(datadict.keys()):
+ self._supplements = datadict["Supplements"]
+ else:
+ self._supplements = lambda x, y: []
+
+ if "Boundaries" in list(datadict.keys()):
+ self._boundaries = datadict["Boundaries"]
+ else:
+ # dummy verification function
+ self._boundaries = [[-np.inf, np.inf]]*len(self._parameters[1])
+
+ if "Constraints" in list(datadict.keys()):
+ # sort constraints such that the first value is always
+ # larger than the last.
+ newcc = []
+ for cc in datadict["Constraints"]:
+ if cc[0] < cc[2]:
+ if cc[1] == ">":
+ cc = [cc[2], "<", cc[0]]
+ elif cc[1] == "<":
+ cc = [cc[2], ">", cc[0]]
+ newcc.append(cc)
+ self._constraints = newcc
+ else:
+ self._constraints = []
+
+ def __call__(self, parameters, tau):
+ return self.function(parameters, tau)
+
+ def __getitem__(self, key):
+ """Emulate old list behavior of models"""
+ return self._definitions[key]
+
+ def __repr__(self):
+ text = "Model {} - {}".format(
+ self.id,
+ self.description_short)
+ return text
+
+ def apply(self, parameters, tau):
+ """
+ Apply the model with `parameters` and lag
+ times `tau`
+ """
+ return self.function(parameters, tau)
+
+ @property
+ def boundaries(self):
+ return self._boundaries
+
+ @property
+ def constraints(self):
+ """ fitting constraints """
+ return copy.copy(self._constraints)
+
+ @property
+ def components(self):
+ """how many components does this model have"""
+ return self._definitions[1]
+
+ @property
+ def default_values(self):
+ """default fitting values"""
+ return np.array(self._parameters[1]).copy()
+
+ @property
+ def default_variables(self):
+ """indexes default variable fitting (bool)"""
+ return np.array(self._parameters[2]).copy()
+
+ @property
+ def description_long(self):
+ """long description"""
+ return self._definitions[3].__doc__
+
+ @property
+ def description_short(self):
+ """short description"""
+ return self._definitions[2]
+
+ @property
+ def function(self):
+ return self._definitions[3]
+
+ @property
+ def func_supplements(self):
+ return self._supplements
+
+ @property
+ def func_verification(self):
+        warnings.warn("`func_verification` is deprecated: please do not use it!")
+ return lambda x: x
+
+ def get_supplementary_parameters(self, values, countrate=None):
+ """
+ Compute additional information for the model
+
+ Parameters
+ ----------
+ values: list-like of same length as `self.default_values`
+ parameters for the model
+ countrate: float
+ countrate in kHz
+ """
+ return self.func_supplements(values, countrate)
+
+ def get_supplementary_values(self, values, countrate=None):
+ """
+ Returns only the values of
+ self.get_supplementary_parameters
+
+ Parameters
+ ----------
+ values: list-like of same length as `self.default_values`
+ parameters for the model
+ countrate: float
+            count rate in kHz
+ """
+ out = list()
+ for item in self.get_supplementary_parameters(values, countrate):
+ out.append(item[1])
+ return out
+
+ @property
+ def id(self):
+ return self._definitions[0]
+
+ @property
+ def name(self):
+ return self.description_short
+
+ @property
+ def parameters(self):
+ return self._parameters
+
+ @property
+ def type(self):
+ if len(self._definitions) < 5:
+ return None
+ else:
+ return self._definitions[4]
\ No newline at end of file
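
To illustrate the new class (a minimal sketch, not part of this patch; the model id 9001 and the toy function are hypothetical), a hand-made datadict can be wrapped in `Model` directly. Note how `__init__` reorders the constraint so the larger index comes first:

    import numpy as np
    from pycorrfit.models.classes import Model

    def g_toy(parms, tau):
        """Toy exponential correlation, for illustration only."""
        return parms[0] * np.exp(-tau / parms[1])

    datadict = {
        "Parameters": [[u"amplitude", u"tau_c [ms]"],   # labels
                       [1.0, 0.5],                      # default values
                       [True, True],                    # vary flags
                       [u"amplitude", u"tau_c [ms]"],   # human-readable labels
                       [1.0, 1.0]],                     # display factors
        "Definitions": [9001, "1C", "Toy exponential model", g_toy],
        "Constraints": [[0, "<", 1]],
    }

    m = Model(datadict)
    print(m)              # Model 9001 - Toy exponential model
    print(m.constraints)  # [[1, '>', 0]] -- reordered by __init__
    print(m(m.default_values, np.array([0.1, 1.0])))
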
diff --git a/pycorrfit/models/control.py b/pycorrfit/models/control.py
new file mode 100644
index 0000000..9b091fe
--- /dev/null
+++ b/pycorrfit/models/control.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+""" pycorrfit.models.control
+
+Controls which fitting models are imported and in which order.
+"""
+from __future__ import division
+import numpy as np
+
+from .classes import Model
+
+def append_model(modelarray):
+ """ Append a new model from a modelarray. *Modelarray* has to be a list
+ whose elements have two items:
+ [0] parameters
+ [1] some info about the model
+ See separate models for more information
+ """
+ global values
+ global valuedict
+ global models
+ global modeldict
+ global supplement
+ global boundaries
+ global modeltypes
+
+ if not isinstance(modelarray, list):
+ modelarray = [modelarray]
+
+ for datadict in modelarray:
+ # We can have many models in one model array
+ amod = Model(datadict)
+
+ models.append(amod)
+ if amod.id in modeldict:
+            raise ValueError("Model with the same id already exists:\n"
+                             " {} vs. {}".format(amod, modeldict[amod.id]))
+ modeldict[amod.id] = amod
+
+ values.append(amod.parameters)
+ valuedict[amod.id] = amod.parameters
+
+ # Supplementary Data might be there
+ supplement[amod.id] = amod.func_supplements
+
+ # Check functions - check for correct values
+ boundaries[amod.id] = amod.boundaries
+
+ # Add model type to internal type list.
+ if amod.type is not None:
+ if not amod.type in modeltypes:
+ modeltypes[amod.type] = []
+ modeltypes[amod.type].append(amod.id)
+
+
+def model_setup(modelid, name, comp, mtype, fctn, par_labels, par_values,
+ par_vary=None, par_boundaries=None, par_constraints=None,
+ par_hr_labels=None, par_hr_factors=None,
+ supplementary_method=None,
+ ):
+ u"""
+ This helper method does everything that is required to make a model
+ available for PyCorrFit. The idea is that this method can be called from
+ anywhere and thus we do not need to do the tedious work of adding models
+ in the __init__.py file.
+
+ Parameters
+ ----------
+ modelid : int
+ Model identifier.
+ name : str
+ Name of the Model.
+ comp : str
+ Description of components of the model, e.g. "T+3D+2D"
+ mtype : str
+ Type of model, e.g. "Confocal (Gaussian)"
+ fctn : callable
+        The method that computes the model function. It must take
+        two arguments: the first is a parameter list of the same shape
+        as `par_values`, the second is the array of lag times.
+ par_labels : list-like, strings
+ The labels of each parameter in PyCorrFit dimensionless
+ representation, i.e.
+
+ unit of time : 1 ms
+ unit of inverse time: 1000 /s
+ unit of distance : 100 nm
+ unit of Diff.coeff : 10 µm²/s
+ unit of inverse area: 100 /µm²
+ unit of inv. volume : 1000 /µm³
+ par_values : list-like, floats
+ The parameter values in PyCorrFit dimensionless units.
+ par_vary : list-like, bools or None
+ A list describing which parameters should be varied during
+ fitting. If not given, only the first element is set to `True`.
+ par_boundaries : list-like, floats
+ The parameter boundaries - two values for each parameter.
+ Examples: [[0, np.inf], [0,1]]
+ par_constraints : list of lists
+ Constraints between parameters. For example, make sure parameter
+ 2 is always larger than parameter 1 and parameter 5 is always
+ smaller than parameter 1: [[2, ">", 1], [5, "<", 1]]
+ Parameter count starts at 0.
+ par_hr_labels : list-like, strings
+ User-defined human readable labels of the parameters. If this is
+ set, `par_hr_factors` is also required.
+ par_hr_factors : list-like, floats
+ The multiplicative factors to get from `par_labels` to
+ `par_hr_labels`.
+ supplementary_method : callable
+        A method that takes the parameter values `par_values` and the
+        count rate of the experiment as arguments and returns a list of
+        supplementary information.
+ """
+ # Checks
+ assert len(par_labels) == len(par_values)
+ for p in [par_vary,
+ par_boundaries,
+ par_hr_labels,
+ par_hr_factors,
+ ]:
+ if p is not None:
+ assert len(p) == len(par_values), "Number of parameters must match!"
+
+ if par_hr_factors is None or par_hr_labels is None:
+        assert par_hr_factors is None, "requires both `par_hr_labels` and `par_hr_factors`"
+        assert par_hr_labels is None, "requires both `par_hr_labels` and `par_hr_factors`"
+
+ if par_vary is None:
+ # Set par_vary
+ par_vary = np.zeros(len(par_values), dtype=bool)
+ par_vary[0] = True
+
+ if par_hr_factors is None:
+ # Set equal to labels
+ par_hr_labels = par_labels
+ par_hr_factors = np.ones_like(par_values)
+
+    model = {}
+
+ model["Parameters"] = [par_labels, par_values, par_vary,
+ par_hr_labels, par_hr_factors]
+
+ model["Definitions"] = [modelid, comp, name, fctn, mtype]
+
+ if supplementary_method is not None:
+ model["Supplements"] = supplementary_method
+
+ if par_boundaries is not None:
+ model["Boundaries"] = par_boundaries
+
+ if par_constraints is not None:
+ model["Constraints"] = par_constraints
+
+ append_model(model)
+
+
+# Pack all variables
+values = list()
+# Also create a dictionary, key is modelid
+valuedict = dict()
+# Pack all models
+models = list()
+# Also create a dictionary
+modeldict = dict()
+# A dictionary for supplementary data:
+supplement = dict()
+# A dictionary containing model boundaries
+boundaries = dict()
+
+# shorttypes are used by the GUI to abbreviate the model type
+shorttype = dict()
+shorttype[u"Confocal (Gaussian)"] = u"CFoc"
+shorttype[u"Confocal (Gaussian) and triplet"] = u"CFoc"
+shorttype[u"Confocal (Gaussian) with double triplet"] = u"CFoc"
+shorttype[u"TIR (Gaussian/Exp.)"] = u"TIR CFoc"
+shorttype[u"TIR (□xσ/Exp.)"] = u"TIR □xσ"
+
+# Create a list for the differentiation between the models
+# This should make everything look a little cleaner
+modeltypes = {}
+modeltypes[u"User"] = []
+
+# The import order matters!
+# These models register themselves using the `model_setup` method.
+from . import model_confocal_3d
+from . import model_confocal_3d_3d
+from . import model_confocal_2d
+from . import model_confocal_2d_2d
+from . import model_confocal_3d_2d
+
+from . import model_confocal_t_3d
+from . import model_confocal_t_3d_3d
+from . import model_confocal_t_2d
+from . import model_confocal_t_2d_2d
+from . import model_confocal_t_3d_2d
+from . import model_confocal_t_3d_3d_3d
+from . import model_confocal_t_3d_3d_2d
+
+from . import model_confocal_tt_3d_3d
+from . import model_confocal_tt_2d_2d
+from . import model_confocal_tt_3d_2d
+
+
+# These lines can be removed once all models are converted
+# from `MODEL_*` to `model_` syntax.
+modeltypes[u"TIR (Gaussian/Exp.)"] = [6014, 6034, 6033]
+modeltypes[u"TIR (□xσ/Exp.)"] = [6010, 6023, 6000, 6022, 6020, 6021]
+
+
+## Models
+from . import MODEL_TIRF_gaussian_1C
+from . import MODEL_TIRF_gaussian_3D2D
+from . import MODEL_TIRF_gaussian_3D3D
+from . import MODEL_TIRF_1C
+from . import MODEL_TIRF_2D2D
+from . import MODEL_TIRF_3D2D
+from . import MODEL_TIRF_3D3D
+from . import MODEL_TIRF_3D2Dkin_Ries
+
+# Load all models from the imported "MODEL_*" submodules.
+# These models have not yet been converted to the `model_setup` mechanism.
+for g in list(globals().keys()):
+ if g.startswith("MODEL_") and hasattr(globals()[g], "Modelarray"):
+ append_model(globals()[g].Modelarray)
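
For reference, registering a new model through this machinery takes a single call (a sketch, not part of the patch; the id 9002 and the toy function are made up):

    import numpy as np
    from pycorrfit.models import control

    def g_exp(parms, tau):
        u"""Toy correlation: 1/n * exp(-tau/tau_c) + offset."""
        return 1 / parms[0] * np.exp(-tau / parms[1]) + parms[2]

    control.model_setup(
        modelid=9002,
        name="Toy exponential (example)",
        comp="1C",
        mtype="User",
        fctn=g_exp,
        par_labels=[u"n", u"tau_c [ms]", u"offset"],
        par_values=[10.0, 0.5, 0.0],
        par_vary=[True, True, False],
        par_boundaries=[[0, np.inf], [0, np.inf], [-np.inf, np.inf]],
        )

    # The model is now available in the module-level registries:
    assert 9002 in control.modeldict
    assert 9002 in control.modeltypes["User"]
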
diff --git a/pycorrfit/models/cp_confocal.py b/pycorrfit/models/cp_confocal.py
new file mode 100644
index 0000000..694a119
--- /dev/null
+++ b/pycorrfit/models/cp_confocal.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+"""
+Confocal fitting model components.
+"""
+
+from __future__ import division
+import numpy as np
+
+
+def threed(tau, taudiff, SP):
+ return 1/((1 + tau/taudiff) * np.sqrt(1+tau/(taudiff*SP**2)))
+
+
+def twod(tau, taudiff):
+    return 1/(1 + tau/taudiff)
\ No newline at end of file
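
As a quick sanity check (a sketch, not part of the patch), both components evaluate to 1 at τ = 0 and decay monotonically with lag time:

    import numpy as np
    from pycorrfit.models.cp_confocal import twod, threed

    tau = np.array([0.0, 0.1, 1.0, 10.0])
    print(twod(tau, taudiff=1.0))            # 1, 1/1.1, 1/2, 1/11
    print(threed(tau, taudiff=1.0, SP=5.0))  # same, damped by the axial sqrt term
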
diff --git a/pycorrfit/models/cp_mix.py b/pycorrfit/models/cp_mix.py
new file mode 100644
index 0000000..1d78d85
--- /dev/null
+++ b/pycorrfit/models/cp_mix.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+"""
+Mixed components for fitting models.
+"""
+
+from __future__ import division
+
+
+def double_pnum(n,
+ F1,
+ alpha,
+ comp1,
+ kwargs1,
+ comp2,
+ kwargs2,
+ ):
+ u"""
+    Two-component models where the particle number is given in
+    the model, i.e. for confocal diffusion models.
+
+ Parameters
+ ----------
+ n : float
+ Total particle number
+ F1 : float
+ Fraction of particle species 1
+ alpha : float
+ Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ comp1, comp2 : callables
+ The model functions for each of the components.
+ kwargs1, kwargs2 : dicts
+ The keyword arguments for `comp1` and `comp2`
+ """
+ norm = (F1 + alpha*(1-F1))**2
+
+ g1 = F1 * comp1(**kwargs1)
+ g2 = alpha**2 * (1-F1) * comp2(**kwargs2)
+
+ G = 1/n * (g1 + g2) / norm
+
+ return G
+
+
+def triple_pnum(n,
+ F1,
+ F2,
+ alpha21,
+ alpha31,
+ comp1,
+ kwargs1,
+ comp2,
+ kwargs2,
+ comp3,
+ kwargs3
+ ):
+ u"""
+    Three-component models where the particle number is given in
+    the model, i.e. for confocal diffusion models.
+
+ Parameters
+ ----------
+ n : float
+ Total particle number
+ F1, F2 : float
+ Fraction of particle species 1 and 2.
+        This implies F3 = 1 - F1 - F2
+ alpha21 : float
+ Relative molecular brightness of particle
+ 2 compared to particle 1 (α₂₁ = q₂/q₁)
+ alpha31 : float
+ Relative molecular brightness of particle
+ 3 compared to particle 1
+ comp1, comp2, comp3 : callables
+ The model functions for each of the components.
+ kwargs1, kwargs2, kwargs3 : dicts
+ The keyword arguments for `comp1`, `comp2`, and `comp3`.
+ """
+ alpha11 = 1
+ F3 = 1 - F1 - F2
+ if F3 < 0:
+ F3 = 0
+
+ norm = (F1*alpha11 + F2*alpha21 + F3*alpha31)**2
+
+ g1 = alpha11**2 * F1 * comp1(**kwargs1)
+ g2 = alpha21**2 * F2 * comp2(**kwargs2)
+ g3 = alpha31**2 * F3 * comp3(**kwargs3)
+
+ G = 1/n * (g1 + g2 + g3) / norm
+
+ return G
\ No newline at end of file
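
A useful consistency check on `double_pnum` (a sketch, not part of the patch): with identical components and equal brightness (α = 1), the normalization cancels and the mixture collapses to the single-species curve g(τ)/n regardless of F₁:

    import numpy as np
    from pycorrfit.models.cp_mix import double_pnum
    from pycorrfit.models.cp_confocal import twod

    tau = np.array([0.01, 0.1, 1.0])
    kw = {"tau": tau, "taudiff": 1.0}
    g_mix = double_pnum(n=10, F1=0.3, alpha=1.0,
                        comp1=twod, kwargs1=kw,
                        comp2=twod, kwargs2=kw)
    assert np.allclose(g_mix, twod(**kw) / 10)
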
diff --git a/pycorrfit/models/cp_triplet.py b/pycorrfit/models/cp_triplet.py
new file mode 100644
index 0000000..7028368
--- /dev/null
+++ b/pycorrfit/models/cp_triplet.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+"""
+Triplet components.
+"""
+
+from __future__ import division
+import numpy as np
+
+def trip(tau, tautrip, T):
+ if tautrip == 0 or T == 0:
+ AA = 1
+ else:
+ AA = 1 + T/(1-T) * np.exp(-tau/tautrip)
+
+ return AA
\ No newline at end of file
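
The limiting behavior of the triplet factor is easy to verify (a sketch, not part of the patch): at τ = 0 it equals 1/(1-T), it decays to 1 for τ ≫ τ_trip, and T = 0 disables it entirely:

    import numpy as np
    from pycorrfit.models.cp_triplet import trip

    assert np.isclose(trip(tau=0.0, tautrip=0.002, T=0.2), 1 / (1 - 0.2))
    assert np.isclose(trip(tau=1.0, tautrip=0.002, T=0.2), 1.0)
    assert trip(tau=0.5, tautrip=0.002, T=0.0) == 1
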
diff --git a/pycorrfit/models/model_confocal_2d.py b/pycorrfit/models/model_confocal_2d.py
new file mode 100644
index 0000000..a526ef8
--- /dev/null
+++ b/pycorrfit/models/model_confocal_2d.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod
+
+# 2D simple gauss
+def CF_Gxy_gauss(parms, tau):
+ u""" Two-dimensional diffusion with a Gaussian laser profile.
+
+ G(τ) = offset + 1/( n * (1+τ/τ_diff) )
+
+ Calculation of diffusion coefficient and concentration
+ from the effective radius of the detection profile (r₀ = 2*σ):
+ D = r₀²/(4*τ_diff)
+ Conc = n/(π*r₀²)
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal area
+ [1] τ_diff Characteristic residence time in confocal area
+        [2] offset
+ *tau* - lag time
+ """
+ n = parms[0]
+ taudiff = parms[1]
+ dc = parms[2]
+
+ BB = twod(tau=tau, taudiff=taudiff)
+
+ G = dc + 1/n * BB
+ return G
+
+
+def supplements(parms, countrate=None):
+ # We can only give you the effective particle number
+ n = parms[0]
+ Info = list()
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [4.0, 0.4, 0.0]
+
+## boundaries
+boundaries = [[0, np.inf]]*len(parms)
+boundaries[-1] = [-np.inf, np.inf]
+
+model_setup(
+ modelid=6001,
+ name="2D diffusion (confocal)",
+ comp="2D",
+ mtype="Confocal (Gaussian)",
+ fctn=CF_Gxy_gauss,
+ par_labels=[ u"n",
+ u"τ_diff [ms]",
+ u"offset"],
+ par_values=parms,
+ par_vary=[True, True, False],
+ par_boundaries=boundaries,
+ supplementary_method=supplements
+ )
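
The conversion formulas from the docstring can be applied directly to a fit result (a sketch, not part of the patch; the beam waist r₀ = 250 nm is an assumed calibration value):

    # D = r0**2/(4*taudiff), Conc = n/(pi*r0**2), per the docstring above
    import numpy as np
    r0 = 0.25e-6                # m, assumed lateral detection radius
    taudiff = 0.4e-3            # s, the default fit value of 0.4 ms
    n = 4.0
    D = r0**2 / (4 * taudiff)   # ~3.9e-11 m^2/s = 39 um^2/s
    conc = n / (np.pi * r0**2)  # particles per m^2 (2D model)
    print(D, conc)
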
diff --git a/pycorrfit/models/model_confocal_2d_2d.py b/pycorrfit/models/model_confocal_2d_2d.py
new file mode 100644
index 0000000..c790c60
--- /dev/null
+++ b/pycorrfit/models/model_confocal_2d_2d.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod
+from .cp_mix import double_pnum
+
+
+# 2D + 2D Gauß
+# Model 6037
+def CF_Gxyz_gauss_2D2D(parms, tau):
+ u""" Two-component, two-dimensional diffusion with a Gaussian laser
+ profile, including a triplet component.
+ The triplet factor takes into account blinking according to triplet
+ states of excited molecules.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ particle1 = F₁/(1+τ/τ₁)
+ particle2 = α²*(1-F₁)/(1+τ/τ₂)
+ norm = (F₁ + α*(1-F₁))²
+ G = 1/n*(particle1 + particle2)/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal area
+ (n = n₁+n₂)
+ [1] τ₁ Diffusion time of particle species 1
+ [2] τ₂ Diffusion time of particle species 2
+ [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [4] α Relative molecular brightness of particle 2
+ compared to particle 1 (α = q₂/q₁)
+ [5] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ F=parms[3]
+ alpha=parms[4]
+ off=parms[5]
+
+ g = double_pnum(n=n,
+ F1=F,
+ alpha=alpha,
+ comp1=twod,
+ comp2=twod,
+ kwargs1={"tau":tau,
+ "taudiff":taud1},
+ kwargs2={"tau":tau,
+ "taudiff":taud2},
+ )
+
+ G = off + g
+ return G
+
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [6] n₁ = n*F₁ Particle number of species 1
+ [7] n₂ = n*(1-F₁) Particle number of species 2
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*(1.-F1)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [
+ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 0.5, # F
+ 1.0, # alpha
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6037,
+ name="Separate 2D diffusion (confocal)",
+ comp="2D+2D",
+ mtype="Confocal (Gaussian)",
+ fctn=CF_Gxyz_gauss_2D2D,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+u" [ms]",
+ u"τ"+u"\u2082"+u" [ms]",
+ u"F"+u"\u2081",
+ u"\u03b1"+u" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1]],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_3d.py b/pycorrfit/models/model_confocal_3d.py
new file mode 100644
index 0000000..7278d19
--- /dev/null
+++ b/pycorrfit/models/model_confocal_3d.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import threed
+
+# 3D simple gauss
+def CF_Gxyz_gauss(parms, tau):
+ # Model 6012
+ u""" Three-dimanesional free diffusion with a Gaussian laser profile
+ (eliptical).
+
+ G(τ) = offset + 1/( n*(1+τ/τ_diff) * sqrt(1 + τ/(SP²*τ_diff)) )
+
+ Calculation of diffusion coefficient and concentration
+ from the effective radius of the detection profile (r₀ = 2*σ):
+ D = r₀²/(4*τ_diff)
+ Conc = n/( sqrt(π³)*r₀²*z₀ )
+
+ r₀ lateral detection radius (waist of lateral gaussian)
+ z₀ axial detection length (waist of axial gaussian)
+ D Diffusion coefficient
+ Conc Concentration of dye
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ [1] τ_diff Characteristic residence time in confocal volume
+ [2] SP SP=z₀/r₀ Structural parameter,
+ describes the axis ratio of the confocal volume
+ [3] offset
+ *tau* - lag time
+ """
+ n = parms[0]
+ taudiff = parms[1]
+ SP = parms[2]
+ off = parms[3]
+
+ BB = threed(tau, taudiff, SP)
+
+ G = off + 1/n * BB
+ return G
+
+
+def supplements(parms, countrate=None):
+ # We can only give you the effective particle number
+ n = parms[0]
+ Info = list()
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [4.0, 0.4, 5.0, 0.0]
+boundaries = [[0, np.inf]]*len(parms)
+boundaries[-1] = [-np.inf, np.inf]
+
+model_setup(
+ modelid=6012,
+ name="3D diffusion (confocal)",
+ comp="3D",
+ mtype="Confocal (Gaussian)",
+ fctn=CF_Gxyz_gauss,
+ par_labels=[
+ u"n",
+ u"τ_diff [ms]",
+ u"SP",
+ u"offset"],
+ par_values=parms,
+ par_vary=[True, True, False, False],
+ par_boundaries=boundaries,
+ supplementary_method=supplements
+ )
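
Analogously for the 3D model (a sketch with an assumed calibration r₀ = 250 nm; not part of the patch), the fitted particle number converts to a concentration via the docstring formula:

    import numpy as np
    n, r0, SP = 4.0, 0.25e-6, 5.0
    z0 = SP * r0                            # axial waist from SP = z0/r0
    v_eff = np.sqrt(np.pi**3) * r0**2 * z0  # effective volume, ~4.4e-19 m^3
    conc = n / v_eff                        # particles per m^3
    print(conc / 6.022e23 / 1e3)            # ~1.5e-8 mol/l, i.e. ~15 nM
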
diff --git a/pycorrfit/models/model_confocal_3d_2d.py b/pycorrfit/models/model_confocal_3d_2d.py
new file mode 100644
index 0000000..c041989
--- /dev/null
+++ b/pycorrfit/models/model_confocal_3d_2d.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod, threed
+from .cp_mix import double_pnum
+
+
+# 3D + 2D
+def CF_Gxyz_3d2d_gauss(parms, tau):
+ u""" Two-component, two- and three-dimensional diffusion
+ with a Gaussian laser profile.
+
+ particle2D = (1-F)/ (1+τ/τ_2D)
+ particle3D = α²*F/( (1+τ/τ_3D) * sqrt(1+τ/(τ_3D*SP²)))
+ norm = (1-F + α*F)²
+    G = 1/n*(particle2D + particle3D)/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n2D+n3D)
+        [1] τ_2D    Diffusion time of surface-bound particles
+ [2] τ_3D Diffusion time of freely diffusing particles
+ [3] F Fraction of molecules of the freely diffusing species
+ (n3D = n*F), 0 <= F <= 1
+ [4] SP SP=z₀/r₀ Structural parameter,
+ describes elongation of the confocal volume
+ [5] α Relative molecular brightness of particle
+ 3D compared to particle 2D (α = q3D/q2D)
+ [6] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud2D=parms[1]
+ taud3D=parms[2]
+ F=parms[3]
+ SP=parms[4]
+ alpha=parms[5]
+ off=parms[6]
+
+ g = double_pnum(n=n,
+ F1=1-F,
+ alpha=alpha,
+ comp1=twod,
+ comp2=threed,
+ kwargs1={"tau":tau,
+ "taudiff":taud2D},
+ kwargs2={"tau":tau,
+ "taudiff":taud3D,
+ "SP":SP},
+ )
+
+ G = off + g
+ return G
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ Effective number of freely diffusing particles in 3D solution:
+ [7] n3D = n*F
+    Effective number of particles diffusing on the 2D surface:
+ [9] n2D = n*(1-F)
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F3d = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n3D", n*F3d])
+ Info.append([u"n2D", n*(1.-F3d)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append([u"cpp [kHz]", cpp])
+ return Info
+
+
+parms = [
+ 25, # n
+ 240, # taud2D
+ 0.1, # taud3D
+ 0.5, # F3D
+ 7, # SP
+ 1.0, # alpha
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6036,
+ name="Separate 3D and 2D diffusion (confocal)",
+ comp="3D+2D",
+ mtype="Confocal (Gaussian)",
+ fctn=CF_Gxyz_3d2d_gauss,
+ par_labels=[
+ u"n",
+ u"τ_2D [ms]",
+ u"τ_3D [ms]",
+ u"F_3D",
+ u"SP",
+ u"\u03b1"+" (q_3D/q_2D)",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, "<", 1]],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_3d_3d.py b/pycorrfit/models/model_confocal_3d_3d.py
new file mode 100644
index 0000000..fc8fdc8
--- /dev/null
+++ b/pycorrfit/models/model_confocal_3d_3d.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import threed
+from .cp_mix import double_pnum
+
+
+def CF_Gxyz_gauss_3D3D(parms, tau):
+ u""" Two-component three-dimensional free diffusion
+ with a Gaussian laser profile.
+
+ particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
+ particle2 = α²*(1-F₁)/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
+ norm = (F₁ + α*(1-F₁))²
+ G = 1/n*(particle1 + particle2)/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n₁+n₂)
+ [1] τ₁ Diffusion time of particle species 1
+ [2] τ₂ Diffusion time of particle species 2
+ [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [4] SP SP=z₀/r₀, Structural parameter,
+ describes elongation of the confocal volume
+ [5] α Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ [6] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ F=parms[3]
+ SP=parms[4]
+ alpha=parms[5]
+ off=parms[6]
+
+ g = double_pnum(n=n,
+ F1=F,
+ alpha=alpha,
+ comp1=threed,
+ comp2=threed,
+ kwargs1={"tau":tau,
+ "taudiff":taud1,
+ "SP":SP},
+ kwargs2={"tau":tau,
+ "taudiff":taud2,
+ "SP":SP},
+ )
+
+ G = off + g
+ return G
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [7] n₁ = n*F₁ Particle number of species 1
+ [8] n₂ = n*(1-F₁) Particle number of species 2
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*(1.-F1)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 0.5, # F
+ 5, # SP
+ 1.0, # alpha
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6035,
+ name="Separate 3D diffusion (confocal)",
+ comp="3D+3D",
+ mtype="Confocal (Gaussian)",
+ fctn=CF_Gxyz_gauss_3D3D,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"F"+u"\u2081",
+ u"SP",
+ u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1]],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_t_2d.py b/pycorrfit/models/model_confocal_t_2d.py
new file mode 100644
index 0000000..b0c833e
--- /dev/null
+++ b/pycorrfit/models/model_confocal_t_2d.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod
+from .cp_triplet import trip
+
+
+# 2D simple gauss
+def CF_Gxy_T_gauss(parms, tau):
+ u""" Two-dimensional diffusion with a Gaussian laser profile,
+ including a triplet component.
+ The triplet factor takes into account a blinking term.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
+
+ G(τ) = offset + 1/( n * (1+τ/τ_diff) )*triplet
+
+ Calculation of diffusion coefficient and concentration
+ from the effective radius of the detection profile (r₀ = 2*σ):
+ D = r₀²/(4*τ_diff)
+ Conc = n/(π*r₀²)
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal area
+ [1] τ_diff Characteristic residence time in confocal area
+ [2] τ_trip Characteristic residence time in triplet state
+ [3] T Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [4] offset
+ *tau* - lag time
+ """
+ n = parms[0]
+ taudiff = parms[1]
+ tautrip = parms[2]
+ T = parms[3]
+ dc = parms[4]
+
+ triplet = trip(tau=tau, tautrip=tautrip, T=T)
+
+ BB = twod(tau=tau, taudiff=taudiff)
+
+ G = dc + 1/n * BB * triplet
+ return G
+
+
+def supplements(parms, countrate=None):
+ # We can only give you the effective particle number
+ n = parms[0]
+ Info = list()
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [4.0, 0.4, 0.001, 0.01, 0.0]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+model_setup(
+ modelid=6002,
+ name="2D diffusion with triplet (confocal)",
+ comp="T+2D",
+ mtype="Confocal (Gaussian) and triplet",
+ fctn=CF_Gxy_T_gauss,
+ par_labels=[
+ u"n",
+ u"τ_diff [ms]",
+ u"τ_trip [ms]",
+ u"T",
+ u"offset"],
+ par_values=parms,
+ par_vary=[True, True, True, True, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, "<", 1]],
+ par_hr_labels=[
+ u"n",
+ u"τ_diff [ms]",
+ u"τ_trip [µs]",
+ u"T",
+ u"offset"],
+ par_hr_factors=[1., 1., 1000., 1., 1.],
+ supplementary_method=supplements
+ )
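
The human-readable factors are purely multiplicative display conversions (a sketch, not part of the patch): internally τ_trip stays in ms, and the factor of 1000 yields the µs value shown to the user:

    par_values = [4.0, 0.4, 0.001, 0.01, 0.0]   # internal units (ms)
    par_hr_factors = [1., 1., 1000., 1., 1.]
    hr_values = [v * f for v, f in zip(par_values, par_hr_factors)]
    print(hr_values[2])                         # 1.0 -> displayed as 1.0 us
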
diff --git a/pycorrfit/models/model_confocal_t_2d_2d.py b/pycorrfit/models/model_confocal_t_2d_2d.py
new file mode 100644
index 0000000..f4f777c
--- /dev/null
+++ b/pycorrfit/models/model_confocal_t_2d_2d.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod
+from .cp_triplet import trip
+from .cp_mix import double_pnum
+
+
+# 2D + 2D + Triplet Gauß
+# Model 6031
+def CF_Gxyz_gauss_2D2DT(parms, tau):
+ u""" Two-component, two-dimensional diffusion with a Gaussian laser
+ profile, including a triplet component.
+ The triplet factor takes into account blinking according to triplet
+ states of excited molecules.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ particle1 = F₁/(1+τ/τ₁)
+ particle2 = α²*(1-F₁)/(1+τ/τ₂)
+ triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
+ norm = (F₁ + α*(1-F₁))²
+ G = 1/n*(particle1 + particle2)*triplet/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal area
+ (n = n₁+n₂)
+ [1] τ₁ Diffusion time of particle species 1
+ [2] τ₂ Diffusion time of particle species 2
+ [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [4] α Relative molecular brightness of particle 2
+ compared to particle 1 (α = q₂/q₁)
+ [5] τ_trip Characteristic residence time in triplet state
+ [6] T Fraction of particles in triplet (non-fluorescent)
+ state 0 <= T < 1
+ [7] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ F=parms[3]
+ alpha=parms[4]
+ tautrip=parms[5]
+ T=parms[6]
+ off=parms[7]
+
+ g = double_pnum(n=n,
+ F1=F,
+ alpha=alpha,
+ comp1=twod,
+ comp2=twod,
+ kwargs1={"tau":tau,
+ "taudiff":taud1},
+ kwargs2={"tau":tau,
+ "taudiff":taud2},
+ )
+
+ tr = trip(tau=tau, T=T, tautrip=tautrip)
+
+ G = off + g*tr
+ return G
+
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [8] n₁ = n*F₁ Particle number of species 1
+ [9] n₂ = n*(1-F₁) Particle number of species 2
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*(1.-F1)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [
+ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 0.5, # F
+ 1.0, # alpha
+ 0.001, # tautrip
+ 0.01, # T
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+# T
+boundaries[6] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6031,
+ name="Separate 2D diffusion with triplet (confocal)",
+ comp="T+2D+2D",
+ mtype="Confocal (Gaussian) and triplet",
+ fctn=CF_Gxyz_gauss_2D2DT,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+u" [ms]",
+ u"τ"+u"\u2082"+u" [ms]",
+ u"F"+u"\u2081",
+ u"\u03b1"+u" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"τ_trip [ms]",
+ u"T",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1], [5, "<", 1]],
+ par_hr_labels=[
+ u"n",
+ u"τ"+u"\u2081"+u" [ms]",
+ u"τ"+u"\u2082"+u" [ms]",
+ u"F"+u"\u2081",
+ u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"τ_trip [µs]",
+ u"T",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # "n",
+ 1., # "τ"+u"\u2081"+" [ms]",
+ 1., # "τ"+u"\u2082"+" [ms]",
+ 1., # "F"+u"\u2081",
+ 1., # u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ 1000., # "τ_trip [µs]",
+ 1., # "T",
+ 1. # "offset"
+ ],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_t_3d.py b/pycorrfit/models/model_confocal_t_3d.py
new file mode 100644
index 0000000..10016f1
--- /dev/null
+++ b/pycorrfit/models/model_confocal_t_3d.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import threed
+from .cp_triplet import trip
+
+
+def CF_Gxyz_blink(parms, tau):
+ u""" Three-dimanesional free diffusion with a Gaussian laser profile
+ (eliptical), including a triplet component.
+ The triplet factor takes into account a blinking term.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ G(τ) = offset + 1/( n*(1+τ/τ_diff) * sqrt(1 + τ/(SP²*τ_diff)) )
+ * ( 1+T/(1.-T)*exp(-τ/τ_trip) )
+
+ Calculation of diffusion coefficient and concentration
+ from the effective radius of the detection profile (r₀ = 2*σ):
+ D = r₀²/(4*τ_diff)
+ Conc = n/( sqrt(π³)*r₀²*z₀ )
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ [1] T Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [2] τ_trip Characteristic residence time in triplet state
+ [3] τ_diff Characteristic residence time in confocal volume
+ [4] SP SP=z₀/r₀ Structural parameter,
+ describes the axis ratio of the confocal volume
+ [5] offset
+ *tau* - lag time
+ """
+ n = parms[0]
+ T = parms[1]
+ tautrip = parms[2]
+ taudiff = parms[3]
+ SP = parms[4]
+ off = parms[5]
+
+ AA = trip(tau, tautrip, T)
+ BB = threed(tau, taudiff, SP)
+
+ G = off + 1/n * AA * BB
+ return G
+
+
+def supplements(parms, countrate=None):
+ # We can only give you the effective particle number
+ n = parms[0]
+ Info = list()
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [4.0, 0.2, 0.001, 0.4, 5.0, 0.0]
+
+## Boundaries
+boundaries = [[0, np.inf]]*len(parms)
+# T
+boundaries[1] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6011,
+ name="3D diffusion with triplet (confocal)",
+ comp="T+3D",
+ mtype="Confocal (Gaussian) and triplet",
+ fctn=CF_Gxyz_blink,
+ par_labels=[
+ u"n",
+ u"T",
+ u"τ_trip [ms]",
+ u"τ_diff [ms]",
+ u"SP",
+ u"offset"],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[3, ">", 2]],
+ par_hr_labels=[
+ u"n",
+ u"T",
+ u"τ_trip [µs]",
+ u"τ_diff [ms]",
+ u"SP",
+ u"offset"],
+ par_hr_factors=[1., 1., 1000., 1., 1., 1.],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_t_3d_2d.py b/pycorrfit/models/model_confocal_t_3d_2d.py
new file mode 100644
index 0000000..90131a5
--- /dev/null
+++ b/pycorrfit/models/model_confocal_t_3d_2d.py
@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod, threed
+from .cp_triplet import trip
+from .cp_mix import double_pnum
+
+
+# 3D + 2D + T
+def CF_Gxyz_3d2dT_gauss(parms, tau):
+ u""" Two-component, two- and three-dimensional diffusion
+ with a Gaussian laser profile, including a triplet component.
+ The triplet factor takes into account blinking according to triplet
+ states of excited molecules.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ particle2D = (1-F)/ (1+τ/τ_2D)
+ particle3D = α²*F/( (1+τ/τ_3D) * sqrt(1+τ/(τ_3D*SP²)))
+ triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
+ norm = (1-F + α*F)²
+    G = 1/n*(particle2D + particle3D)*triplet/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n2D+n3D)
+        [1] τ_2D    Diffusion time of surface-bound particles
+ [2] τ_3D Diffusion time of freely diffusing particles
+ [3] F Fraction of molecules of the freely diffusing species
+ (n3D = n*F), 0 <= F <= 1
+ [4] SP SP=z₀/r₀ Structural parameter,
+ describes elongation of the confocal volume
+ [5] α Relative molecular brightness of particle
+ 3D compared to particle 2D (α = q3D/q2D)
+ [6] τ_trip Characteristic residence time in triplet state
+ [7] T Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [8] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud2D=parms[1]
+ taud3D=parms[2]
+ F=parms[3]
+ SP=parms[4]
+ alpha=parms[5]
+ tautrip=parms[6]
+ T=parms[7]
+ off=parms[8]
+
+ g = double_pnum(n=n,
+ F1=1-F,
+ alpha=alpha,
+ comp1=twod,
+ comp2=threed,
+ kwargs1={"tau":tau,
+ "taudiff":taud2D},
+ kwargs2={"tau":tau,
+ "taudiff":taud3D,
+ "SP":SP},
+ )
+
+ tr = trip(tau=tau, T=T, tautrip=tautrip)
+
+ G = off + g*tr
+ return G
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ Effective number of freely diffusing particles in 3D solution:
+ [9] n3D = n*F
+    Effective number of particles diffusing on the 2D surface:
+ [10] n2D = n*(1-F)
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F3d = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n3D", n*F3d])
+ Info.append([u"n2D", n*(1.-F3d)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append([u"cpp [kHz]", cpp])
+ return Info
+
+
+parms = [
+ 25, # n
+ 240, # taud2D
+ 0.1, # taud3D
+ 0.5, # F3D
+ 7, # SP
+ 1.0, # alpha
+ 0.001, # tautrip
+ 0.01, # T
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+# T
+boundaries[7] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6032,
+ name="Separate 3D and 2D diffusion with triplet (confocal)",
+ comp="T+3D+2D",
+ mtype="Confocal (Gaussian) and triplet",
+ fctn=CF_Gxyz_3d2dT_gauss,
+ par_labels=[
+ u"n",
+ u"τ_2D [ms]",
+ u"τ_3D [ms]",
+ u"F_3D",
+ u"SP",
+ u"\u03b1"+" (q_3D/q_2D)",
+ u"τ_trip [ms]",
+ u"T",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, "<", 1], [6, "<", 2]],
+ par_hr_labels=[
+ u"n",
+ u"τ_2D [ms]",
+ u"τ_3D [ms]",
+ u"F_3D",
+ u"SP",
+ u"\u03b1"+" (q_3D/q_2D)",
+ u"τ_trip [µs]",
+ u"T",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # "n",
+ 1., # "τ_2D [ms]",
+ 1., # "τ_3D [ms]",
+ 1., # "F_3D",
+ 1., # "SP",
+ 1., # u"\u03b1"+" (q_3D/q_2D)",
+ 1000., # "τ_trip [µs]",
+ 1., # "T",
+ 1. # "offset"
+ ],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_t_3d_3d.py b/pycorrfit/models/model_confocal_t_3d_3d.py
new file mode 100644
index 0000000..8e4f5bb
--- /dev/null
+++ b/pycorrfit/models/model_confocal_t_3d_3d.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import threed
+from .cp_triplet import trip
+from .cp_mix import double_pnum
+
+
+def CF_Gxyz_gauss_3D3DT(parms, tau):
+ u""" Two-component three-dimensional free diffusion
+ with a Gaussian laser profile, including a triplet component.
+ The triplet factor takes into account a blinking term.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
+ particle2 = α²*(1-F₁)/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
+ triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
+ norm = (F₁ + α*(1-F₁))²
+ G = 1/n*(particle1 + particle2)*triplet/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n₁+n₂)
+ [1] τ₁ Diffusion time of particle species 1
+ [2] τ₂ Diffusion time of particle species 2
+ [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [4] SP SP=z₀/r₀, Structural parameter,
+ describes elongation of the confocal volume
+ [5] α Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ [6] τ_trip Characteristic residence time in triplet state
+ [7] T Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [8] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ F=parms[3]
+ SP=parms[4]
+ alpha=parms[5]
+ tautrip=parms[6]
+ T=parms[7]
+ off=parms[8]
+
+ g = double_pnum(n=n,
+ F1=F,
+ alpha=alpha,
+ comp1=threed,
+ comp2=threed,
+ kwargs1={"tau":tau,
+ "taudiff":taud1,
+ "SP":SP},
+ kwargs2={"tau":tau,
+ "taudiff":taud2,
+ "SP":SP},
+ )
+
+ tr = trip(tau=tau, T=T, tautrip=tautrip)
+
+ G = off + g*tr
+ return G
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [9] n₁ = n*F₁ Particle number of species 1
+ [10] n₂ = n*(1-F₁) Particle number of species 2
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*(1.-F1)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 0.5, # F
+ 5, # SP
+ 1.0, # alpha
+ 0.001, # tautrip
+ 0.01, # T
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+# T
+boundaries[7] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6030,
+ name="Separate 3D diffusion with triplet (confocal)",
+ comp="T+3D+3D",
+ mtype="Confocal (Gaussian) and triplet",
+ fctn=CF_Gxyz_gauss_3D3DT,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"F"+u"\u2081",
+ u"SP",
+ u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"τ_trip [ms]",
+ u"T",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1], [6, "<", 1]],
+ par_hr_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"F"+u"\u2081",
+ u"SP",
+ u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"τ_trip [µs]",
+ u"T",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # n
+ 1., # taud1
+ 1., # taud2
+ 1., # F
+ 1., # SP
+ 1., # alpha
+ 1000., # tautrip [µs]
+ 1., # T
+ 1. # offset
+ ],
+ supplementary_method=supplements
+ )
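
Once imported, a registered model can be fetched and evaluated by id (a sketch, not part of the patch; the count rate of 100 kHz is an assumed example value):

    import numpy as np
    from pycorrfit.models import control

    m = control.modeldict[6030]
    print(m)                      # Model 6030 - Separate 3D diffusion with triplet (confocal)
    tau = np.logspace(-3, 2, 50)  # lag times in ms
    g = m(m.default_values, tau)
    print(m.get_supplementary_parameters(m.default_values, countrate=100.0))
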
diff --git a/pycorrfit/models/model_confocal_t_3d_3d_2d.py b/pycorrfit/models/model_confocal_t_3d_3d_2d.py
new file mode 100644
index 0000000..ef20fc7
--- /dev/null
+++ b/pycorrfit/models/model_confocal_t_3d_3d_2d.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod, threed
+from .cp_triplet import trip
+from .cp_mix import triple_pnum
+
+
+def CF_Gxyz_gauss_3D3D2DT(parms, tau):
+ u""" Two three-dimensional and one two-dimensional free diffusion
+ with a Gaussian laser profile, including a triplet component.
+ The triplet factor takes into account a blinking term.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ F₃ = 1-F₁-F₂
+ particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
+ particle2 = α₂₁² * F₂/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
+ particle3 = α₃₁² * F₃/( (1+τ/τ₃))
+ triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
+ norm = (F₁ + α₂₁*F₂ + α₃₁*F₃)²
+ G = 1/n*(particle1 + particle2 + particle3)*triplet/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n₁+n₂+n₃)
+ [1] τ₁ Diffusion time of particle species 1 (3D)
+ [2] τ₂ Diffusion time of particle species 2 (3D)
+ [3] τ₃ Diffusion time of particle species 3 (2D)
+ [4] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [5] F₂ Fraction of molecules of species 2 (n₂ = n*F₂)
+ 0 <= F₂ <= 1
+ [6] SP SP=z₀/r₀, Structural parameter,
+ describes elongation of the confocal volume
+ [7] α₂₁ Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ [8] α₃₁ Relative molecular brightness of particle
+ 3 compared to particle 1 (α = q₃/q₁)
+ [9] τ_trip Characteristic residence time in triplet state
+ [10] T Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [11] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ taud3=parms[3]
+ F1=parms[4]
+ F2=parms[5]
+ SP=parms[6]
+ alpha21=parms[7]
+ alpha31=parms[8]
+ tautrip=parms[9]
+ T=parms[10]
+ off=parms[11]
+
+ g = triple_pnum(n=n,
+ F1=F1,
+ F2=F2,
+ alpha21=alpha21,
+ alpha31=alpha31,
+ comp1=threed,
+ comp2=threed,
+ comp3=twod,
+ kwargs1={"tau":tau,
+ "taudiff":taud1,
+ "SP":SP},
+ kwargs2={"tau":tau,
+ "taudiff":taud2,
+ "SP":SP},
+ kwargs3={"tau":tau,
+ "taudiff":taud3},
+ )
+
+ tr = trip(tau=tau, T=T, tautrip=tautrip)
+
+ G = off + g*tr
+ return G
+
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [12] n₁ = n*F₁ Particle number of species 1 (3D)
+ [13] n₂ = n*F₂ Particle number of species 2 (3D)
+ [14] n₃ = n*F₃ Particle number of species 3 (2D; F₃ = 1-F₁-F₂)
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[4]
+ F2 = parms[5]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*F2])
+ Info.append([u"n\u2083", n*(1-F1-F2)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 11000, # taud3
+ 0.5, # F1
+ 0.01, # F2
+ 5, # SP
+ 1.0, # alpha21
+ 1.0, # alpha31
+ 0.001, # tautrip
+ 0.01, # T
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[4] = [0,.9999999999999]
+boundaries[5] = [0,.9999999999999]
+# T
+boundaries[10] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6082,
+ name="Twofold 3D and one 2D diffusion with triplet (confocal)",
+ comp="T+3D+3D+2D",
+ mtype="Confocal (Gaussian) and triplet",
+ fctn=CF_Gxyz_gauss_3D3D2DT,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"τ"+u"\u2083"+" [ms]",
+ u"F"+u"\u2081",
+ u"F"+u"\u2082",
+ u"SP",
+ u"\u03b1\u2082\u2081",
+ u"\u03b1\u2083\u2081",
+ u"τ_trip [ms]",
+ u"T",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, False, True,
+ False, True, False, False,
+ False, False, True, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1], [3, ">", 2], [9, "<", 1], [5, 4, "<", "1"]],
+ par_hr_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"τ"+u"\u2083"+" [ms]",
+ u"F"+u"\u2081",
+ u"F"+u"\u2082",
+ u"SP",
+ u"\u03b1\u2082\u2081",
+ u"\u03b1\u2083\u2081",
+ u"τ_trip [µs]",
+ u"T",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # n
+ 1., # taud1
+ 1., # taud2
+ 1., # taud3
+ 1., # F1
+ 1., # F2
+ 1., # SP
+ 1., # alpha21
+ 1., # alpha31
+ 1000., # tautrip [µs]
+ 1., # T
+ 1. # offset
+ ],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_t_3d_3d_3d.py b/pycorrfit/models/model_confocal_t_3d_3d_3d.py
new file mode 100644
index 0000000..2893bf8
--- /dev/null
+++ b/pycorrfit/models/model_confocal_t_3d_3d_3d.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import threed
+from .cp_triplet import trip
+from .cp_mix import triple_pnum
+
+
+def CF_Gxyz_gauss_3D3D3DT(parms, tau):
+ u""" Three-component three-dimensional free diffusion
+ with a Gaussian laser profile, including a triplet component.
+ The triplet factor takes into account a blinking term.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ F₃ = 1-F₁-F₂
+ particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
+ particle2 = α₂₁² * F₂/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
+ particle3 = α₃₁² * F₃/( (1+τ/τ₃) * sqrt(1+τ/(τ₃*SP²)))
+ triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
+ norm = (F₁ + α₂₁*F₂ + α₃₁*F₃)²
+ G = 1/n*(particle1 + particle2 + particle3)*triplet/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n₁+n₂+n₃)
+ [1] τ₁ Diffusion time of particle species 1
+ [2] τ₂ Diffusion time of particle species 2
+ [3] τ₃ Diffusion time of particle species 3
+ [4] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [5] F₂ Fraction of molecules of species 2 (n₂ = n*F₂)
+ 0 <= F₂ <= 1
+ [6] SP SP=z₀/r₀, Structural parameter,
+ describes elongation of the confocal volume
+ [7] α₂₁ Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ [8] α₃₁ Relative molecular brightness of particle
+ 3 compared to particle 1 (α = q₃/q₁)
+ [9] τ_trip Characteristic residence time in triplet state
+ [10] T Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [11] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ taud3=parms[3]
+ F1=parms[4]
+ F2=parms[5]
+ SP=parms[6]
+ alpha21=parms[7]
+ alpha31=parms[8]
+ tautrip=parms[9]
+ T=parms[10]
+ off=parms[11]
+
+ g = triple_pnum(n=n,
+ F1=F1,
+ F2=F2,
+ alpha21=alpha21,
+ alpha31=alpha31,
+ comp1=threed,
+ comp2=threed,
+ comp3=threed,
+ kwargs1={"tau":tau,
+ "taudiff":taud1,
+ "SP":SP},
+ kwargs2={"tau":tau,
+ "taudiff":taud2,
+ "SP":SP},
+ kwargs3={"tau":tau,
+ "taudiff":taud3,
+ "SP":SP},
+ )
+
+ tr = trip(tau=tau, T=T, tautrip=tautrip)
+
+ G = off + g*tr
+ return G
+
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [12] n₁ = n*F₁ Particle number of species 1
+ [13] n₂ = n*F₂ Particle number of species 2
+ [14] n₃ = n*F₃ Particle number of species 3 (F₃ = 1-F₁-F₂)
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[4]
+ F2 = parms[5]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*F2])
+ Info.append([u"n\u2083", n*(1-F1-F2)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 11000, # taud3
+ 0.5, # F1
+ 0.01, # F2
+ 5, # SP
+ 1.0, # alpha21
+ 1.0, # alpha31
+ 0.001, # tautrip
+ 0.01, # T
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[4] = [0,.9999999999999]
+boundaries[5] = [0,.9999999999999]
+# T
+boundaries[10] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6081,
+ name="Threefold 3D diffusion with triplet (confocal)",
+ comp="T+3D+3D+3D",
+ mtype="Confocal (Gaussian) and triplet",
+ fctn=CF_Gxyz_gauss_3D3D3DT,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"τ"+u"\u2083"+" [ms]",
+ u"F"+u"\u2081",
+ u"F"+u"\u2082",
+ u"SP",
+ u"\u03b1\u2082\u2081",
+ u"\u03b1\u2083\u2081",
+ u"τ_trip [ms]",
+ u"T",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, False, True,
+ False, True, False, False,
+ False, False, True, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1], [3, ">", 2], [9, "<", 1], [5, 4, "<", "1"]],
+ par_hr_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"τ"+u"\u2083"+" [ms]",
+ u"F"+u"\u2081",
+ u"F"+u"\u2082",
+ u"SP",
+ u"\u03b1\u2082\u2081",
+ u"\u03b1\u2083\u2081",
+ u"τ_trip [µs]",
+ u"T",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # n
+ 1., # taud1
+ 1., # taud2
+ 1., # taud3
+ 1., # F1
+ 1., # F2
+ 1., # SP
+ 1., # alpha21
+ 1., # alpha31
+ 1000., # tautrip [µs]
+ 1., # T
+ 1. # offset
+ ],
+ supplementary_method=supplements
+ )
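+
+
+# Usage sketch: once registered via model_setup, the model is callable
+# through the model dictionary (cf. tests/test_fit_model_gaussian.py):
+#
+# from pycorrfit import models as mdls
+# g = mdls.modeldict[6081](parms, tau=1e-3)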
diff --git a/pycorrfit/models/model_confocal_tt_2d_2d.py b/pycorrfit/models/model_confocal_tt_2d_2d.py
new file mode 100644
index 0000000..623550b
--- /dev/null
+++ b/pycorrfit/models/model_confocal_tt_2d_2d.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod
+from .cp_triplet import trip
+from .cp_mix import double_pnum
+
+
+# 2D + 2D + TT Gauß
+# Model 6044
+def CF_Gxyz_gauss_2D2DTT(parms, tau):
+ u""" Two-component three-dimensional free diffusion
+ with a Gaussian laser profile, including two triplet components.
+ The triplet factor takes into account a blinking term.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ particle1 = F₁/(1+τ/τ₁)
+ particle2 = α²*(1-F₁)/(1+τ/τ₂)
+ triplet1 = 1 + T₁/(1-T₁)*exp(-τ/τ_trip₁)
+ triplet2 = 1 + T₂/(1-T₂)*exp(-τ/τ_trip₂)
+ norm = (F₁ + α*(1-F₁))²
+ G = 1/n*(particle1 + particle2)*triplet1*triplet2/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n₁+n₂)
+ [1] τ₁ Diffusion time of particle species 1
+ [2] τ₂ Diffusion time of particle species 2
+ [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [4] α Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ [5] τ_trip₁ Characteristic residence time in triplet state
+ [6] T₁ Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [7] τ_trip₂ Characteristic residence time in triplet state
+ [8] T₂ Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [9] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ F=parms[3]
+ alpha=parms[4]
+ tautrip1=parms[5]
+ T1=parms[6]
+ tautrip2=parms[7]
+ T2=parms[8]
+ off=parms[9]
+
+ g = double_pnum(n=n,
+ F1=F,
+ alpha=alpha,
+ comp1=twod,
+ comp2=twod,
+ kwargs1={"tau":tau,
+ "taudiff":taud1},
+ kwargs2={"tau":tau,
+ "taudiff":taud2},
+ )
+
+ tr1 = trip(tau=tau, T=T1, tautrip=tautrip1)
+ tr2 = trip(tau=tau, T=T2, tautrip=tautrip2)
+
+ G = off + g * tr1 * tr2
+
+ return G
+
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [10] n₁ = n*F₁ Particle number of species 1
+ [11] n₂ = n*(1-F₁) Particle number of species 2
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*(1.-F1)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [
+ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 0.5, # F
+ 1.0, # alpha
+ 0.001, # tautrip1
+ 0.01, # T1
+ 0.002, # tautrip2
+ 0.01, # T2
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+# T
+boundaries[6] = [0,.9999999999999]
+boundaries[8] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6044,
+ name="Separate 2D diffusion with double triplet (confocal)",
+ comp="T+T+2D+2D",
+ mtype="Confocal (Gaussian) with double triplet",
+ fctn=CF_Gxyz_gauss_2D2DTT,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"F"+u"\u2081",
+ u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"τ_trip₁ [ms]",
+ u"T₁",
+ u"τ_trip₂ [ms]",
+ u"T₂",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1], [5, "<", 1], [7, ">", 5]],
+ par_hr_labels=[
+ u"n",
+ u"τ₁ [ms]",
+ u"τ₂ [ms]",
+ u"F₁",
+ u"\u03b1"+u" (q₂/q₁)",
+ u"τ_trip₁ [µs]",
+ u"T₁",
+ u"τ_trip₂ [µs]",
+ u"T₂",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # n
+ 1., # taud1
+ 1., # taud2
+ 1., # F
+ 1., # alpha
+ 1000., # tautrip1 [µs]
+ 1., # T1
+ 1000., # tautrip2 [µs]
+ 1., # T2
+ 1. # offset
+ ],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_tt_3d_2d.py b/pycorrfit/models/model_confocal_tt_3d_2d.py
new file mode 100644
index 0000000..07ab120
--- /dev/null
+++ b/pycorrfit/models/model_confocal_tt_3d_2d.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import twod, threed
+from .cp_triplet import trip
+from .cp_mix import double_pnum
+
+
+# 3D + 2D + TT Gauß
+# Model 6045
+def CF_Gxyz_gauss_3D2DTT(parms, tau):
+ u""" Two-component three-dimensional free diffusion
+ with a Gaussian laser profile, including two triplet components.
+ The triplet factor takes into account a blinking term.
+ Set *T* or *τ_trip* to 0, if no triplet component is wanted.
+
+ particle2D = (1-F_3D)/(1+τ/τ_2D)
+ particle3D = α²*F_3D/( (1+τ/τ_3D) * sqrt(1+τ/(τ_3D*SP²)))
+ triplet1 = 1 + T₁/(1-T₁)*exp(-τ/τ_trip₁)
+ triplet2 = 1 + T₂/(1-T₂)*exp(-τ/τ_trip₂)
+ norm = ((1-F_3D) + α*F_3D)²
+ G = 1/n*(particle2D + particle3D)*triplet1*triplet2/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n2D+n3D)
+ [1] τ_2D Diffusion time of surface-bound particles
+ [2] τ_3D Diffusion time of freely diffusing particles
+ [3] F_3D Fraction of freely diffusing (3D) molecules (n3D = n*F_3D)
+ 0 <= F_3D <= 1
+ [4] SP SP=z₀/r₀, Structural parameter,
+ describes elongation of the confocal volume
+ [5] α Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ [6] τ_trip₁ Characteristic residence time in triplet state
+ [7] T₁ Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [8] τ_trip₂ Characteristic residence time in triplet state
+ [9] T₂ Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [10] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud2D=parms[1]
+ taud3D=parms[2]
+ F=parms[3]
+ SP=parms[4]
+ alpha=parms[5]
+ tautrip1=parms[6]
+ T1=parms[7]
+ tautrip2=parms[8]
+ T2=parms[9]
+ off=parms[10]
+
+ g = double_pnum(n=n,
+ F1=1-F,
+ alpha=alpha,
+ comp1=twod,
+ comp2=threed,
+ kwargs1={"tau":tau,
+ "taudiff":taud2D},
+ kwargs2={"tau":tau,
+ "taudiff":taud3D,
+ "SP":SP},
+ )
+
+ tr1 = trip(tau=tau, T=T1, tautrip=tautrip1)
+ tr2 = trip(tau=tau, T=T2, tautrip=tautrip2)
+
+ G = off + g * tr1 * tr2
+
+ return G
+
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ Effective number of freely diffusing particles in 3D solution:
+ [11] n3D = n*F
+ Effective number of particles diffusing on the 2D surface:
+ [12] n2D = n*(1-F)
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F3d = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n3D", n*F3d])
+ Info.append([u"n2D", n*(1.-F3d)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append([u"cpp [kHz]", cpp])
+ return Info
+
+
+parms = [
+ 25, # n
+ 240, # taud2D
+ 0.1, # taud3D
+ 0.5, # F3D
+ 5, # SP
+ 1.0, # alpha
+ 0.001, # tautrip1
+ 0.01, # T1
+ 0.002, # tautrip2
+ 0.01, # T2
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+# T
+boundaries[7] = [0,.9999999999999]
+boundaries[9] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6045,
+ name="Separate 3D and 2D diffusion with double triplet (confocal)",
+ comp="T+T+3D+2D",
+ mtype="Confocal (Gaussian) with double triplet",
+ fctn=CF_Gxyz_gauss_3D2DTT,
+ par_labels=[
+ u"n",
+ u"τ_2D [ms]",
+ u"τ_3D [ms]",
+ u"F_3D",
+ u"SP",
+ u"\u03b1"+" (q_3D/q_2D)",
+ u"τ_trip₁ [ms]",
+ u"T₁",
+ u"τ_trip₂ [ms]",
+ u"T₂",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False, False, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, "<", 1], [6, "<", 2], [8, ">", 6]],
+ par_hr_labels=[
+ u"n",
+ u"τ_2D [ms]",
+ u"τ_3D [ms]",
+ u"F_3D",
+ u"SP",
+ u"\u03b1"+" (q_3D/q_2D)",
+ u"τ_trip₁ [µs]",
+ u"T₁",
+ u"τ_trip₂ [µs]",
+ u"T₂",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # n
+ 1., # "τ_2D [ms]",
+ 1., # "τ_3D [ms]",
+ 1., # "F_3D",
+ 1., # "SP",
+ 1., # u"\u03b1"+" (q_3D/q_2D)",
+ 1000., # tautrip1 [µs]
+ 1., # T1
+ 1000., # tautrip2 [µs]
+ 1., # T2
+ 1. # offset
+ ],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/models/model_confocal_tt_3d_3d.py b/pycorrfit/models/model_confocal_tt_3d_3d.py
new file mode 100644
index 0000000..01b1e19
--- /dev/null
+++ b/pycorrfit/models/model_confocal_tt_3d_3d.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+from __future__ import division
+
+import numpy as np
+
+from .control import model_setup
+from .cp_confocal import threed
+from .cp_triplet import trip
+from .cp_mix import double_pnum
+
+
+# 3D + 3D + TT Gauß
+# Model 6043
+def CF_Gxyz_gauss_3D3DTT(parms, tau):
+ u""" Two-component three-dimensional free diffusion
+ with a Gaussian laser profile, including two triplet components.
+ The triplet factors account for blinking (non-fluorescent states).
+ Set a *T* or *τ_trip* to 0 if the corresponding triplet component is not wanted.
+
+ particle1 = F₁/( (1+τ/τ₁) * sqrt(1+τ/(τ₁*SP²)))
+ particle2 = α²*(1-F₁)/( (1+τ/τ₂) * sqrt(1+τ/(τ₂*SP²)))
+ triplet1 = 1 + T₁/(1-T₁)*exp(-τ/τ_trip₁)
+ triplet2 = 1 + T₂/(1-T₂)*exp(-τ/τ_trip₂)
+ norm = (F₁ + α*(1-F₁))²
+ G = 1/n*(particle1 + particle2)*triplet1*triplet2/norm + offset
+
+ *parms* - a list of parameters.
+ Parameters (parms[i]):
+ [0] n Effective number of particles in confocal volume
+ (n = n₁+n₂)
+ [1] τ₁ Diffusion time of particle species 1
+ [2] τ₂ Diffusion time of particle species 2
+ [3] F₁ Fraction of molecules of species 1 (n₁ = n*F₁)
+ 0 <= F₁ <= 1
+ [4] SP SP=z₀/r₀, Structural parameter,
+ describes elongation of the confocal volume
+ [5] α Relative molecular brightness of particle
+ 2 compared to particle 1 (α = q₂/q₁)
+ [6] τ_trip₁ Characteristic residence time in triplet state
+ [7] T₁ Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [8] τ_trip₂ Characteristic residence time in triplet state
+ [9] T₂ Fraction of particles in triplet (non-fluorescent) state
+ 0 <= T < 1
+ [10] offset
+ *tau* - lag time
+ """
+ n=parms[0]
+ taud1=parms[1]
+ taud2=parms[2]
+ F=parms[3]
+ SP=parms[4]
+ alpha=parms[5]
+ tautrip1=parms[6]
+ T1=parms[7]
+ tautrip2=parms[8]
+ T2=parms[9]
+ off=parms[10]
+
+ g = double_pnum(n=n,
+ F1=F,
+ alpha=alpha,
+ comp1=threed,
+ comp2=threed,
+ kwargs1={"tau":tau,
+ "taudiff":taud1,
+ "SP":SP},
+ kwargs2={"tau":tau,
+ "taudiff":taud2,
+ "SP":SP},
+ )
+
+ tr1 = trip(tau=tau, T=T1, tautrip=tautrip1)
+ tr2 = trip(tau=tau, T=T2, tautrip=tautrip2)
+
+ G = off + g * tr1 * tr2
+
+ return G
+
+
+def supplements(parms, countrate=None):
+ u"""Supplementary parameters:
+ [11] n₁ = n*F₁ Particle number of species 1
+ [12] n₂ = n*(1-F₁) Particle number of species 2
+ """
+ # We can only give you the effective particle number
+ n = parms[0]
+ F1 = parms[3]
+ Info = list()
+ # The enumeration of these parameters is very important for
+ # plotting the normalized curve. Countrate must come out last!
+ Info.append([u"n\u2081", n*F1])
+ Info.append([u"n\u2082", n*(1.-F1)])
+ if countrate is not None:
+ # CPP
+ cpp = countrate/n
+ Info.append(["cpp [kHz]", cpp])
+ return Info
+
+
+parms = [
+ 25, # n
+ 5, # taud1
+ 1000, # taud2
+ 0.5, # F
+ 5, # SP
+ 1.0, # alpha
+ 0.001, # tautrip1
+ 0.01, # T1
+ 0.002, # tautrip2
+ 0.01, # T2
+ 0.0 # offset
+ ]
+
+## Boundaries
+# strictly positive
+boundaries = [[0, np.inf]]*len(parms)
+# F
+boundaries[3] = [0,.9999999999999]
+# T
+boundaries[7] = [0,.9999999999999]
+boundaries[9] = [0,.9999999999999]
+boundaries[-1] = [-np.inf, np.inf]
+
+
+model_setup(
+ modelid=6043,
+ name="Separate 3D diffusion with double triplet (confocal)",
+ comp="T+T+3D+3D",
+ mtype="Confocal (Gaussian) with double triplet",
+ fctn=CF_Gxyz_gauss_3D3DTT,
+ par_labels=[
+ u"n",
+ u"τ"+u"\u2081"+" [ms]",
+ u"τ"+u"\u2082"+" [ms]",
+ u"F"+u"\u2081",
+ u"SP",
+ u"\u03b1"+" (q"+u"\u2082"+"/q"+u"\u2081"+")",
+ u"τ_trip₁ [ms]",
+ u"T₁",
+ u"τ_trip₂ [ms]",
+ u"T₂",
+ u"offset"
+ ],
+ par_values=parms,
+ par_vary=[True, True, True, True, False, False, False, False, False, False, False],
+ par_boundaries=boundaries,
+ par_constraints=[[2, ">", 1], [6, "<", 1], [8, ">", 6]],
+ par_hr_labels=[
+ u"n",
+ u"τ₁ [ms]",
+ u"τ₂ [ms]",
+ u"F₁",
+ u"SP",
+ u"\u03b1"+u" (q₂/q₁)",
+ u"τ_trip₁ [µs]",
+ u"T₁",
+ u"τ_trip₂ [µs]",
+ u"T₂",
+ u"offset"
+ ],
+ par_hr_factors=[
+ 1., # n
+ 1., # taud1
+ 1., # taud2
+ 1., # F
+ 1., # SP
+ 1., # alpha
+ 1000., # tautrip1 [µs]
+ 1., # T1
+ 1000., # tautrip2 [µs]
+ 1., # T2
+ 1. # offset
+ ],
+ supplementary_method=supplements
+ )
diff --git a/pycorrfit/openfile.py b/pycorrfit/openfile.py
index abe2239..47033b4 100644
--- a/pycorrfit/openfile.py
+++ b/pycorrfit/openfile.py
@@ -16,12 +16,13 @@ import yaml
import zipfile
import warnings
-from . import doc
-
# These imports are required for loading data
from .readfiles import Filetypes # @UnusedImport
from .readfiles import BGFiletypes # @UnusedImport
-from .fcs_data_set import Trace
+from .trace import Trace
+from . import meta
+
+__version__ = meta.get_version()
def LoadSessionData(sessionfile, parameters_only=False):
@@ -476,7 +477,7 @@ def SaveSessionData(sessionfile, Infodict):
os.remove(os.path.join(tempdir, tracefilename))
# Save comments into txt file
commentfilename = "comments.txt"
- commentfile = open(commentfilename, 'wb')
+ commentfile = codecs.open(commentfilename, 'w', encoding="utf-8")
# Comments[-1] is comment on whole Session
Ckeys = Infodict["Comments"].keys()
Ckeys.sort()
@@ -545,7 +546,7 @@ def SaveSessionData(sessionfile, Infodict):
os.remove(os.path.join(tempdir, WeightFilename))
## Preferences
preferencesname = "preferences.cfg"
- with open(preferencesname, 'w') as fd:
+ with codecs.open(preferencesname, 'w', encoding="utf-8") as fd:
for key in Infodict["Preferences"]:
value = Infodict["Preferences"][key]
if isinstance(value, list):
@@ -557,7 +558,7 @@ def SaveSessionData(sessionfile, Infodict):
os.remove(os.path.join(tempdir, preferencesname))
## Readme
rmfilename = "Readme.txt"
- rmfile = open(rmfilename, 'wb')
+ rmfile = codecs.open(rmfilename, 'w', encoding="utf-8")
rmfile.write(ReadmeSession)
rmfile.close()
Arc.write(rmfilename)
@@ -572,43 +573,50 @@ def SaveSessionData(sessionfile, Infodict):
os.rmdir(tempdir)
-def ExportCorrelation(exportfile, Page, info, savetrace=True):
- """ Write correlation data to a file
+def ExportCorrelation(exportfile, correlation, page_info, savetrace=True):
+ """ Write correlation data (as displayed in PyCorrFit) to a file
Parameters
----------
exportfile : str
- Absolute filename to save data
- Page : PyCorrFit Page object
+ Absolute file name to save the data to
+ correlation : PyCorrFit "Correlation" object
Contains all correlation data
- info : module
- The `info` tool module. This is a workaround until Page has
- its own class to create info data.
+ page_info : str
+ A multi-line string containing information on the correlation;
+ it is written to the file as a comment
savetrace : bool
Append the trace to the file
+
+ Notes
+ -----
+ Note that this method exports the plotted data:
+ - Correlation.correlation_plot
+ - Correlation.residuals_plot
+ - Correlation.modeled_plot
+ which means that the data could be normalized to, for instance,
+ the total particle number `n`.
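+
+ A minimal call, assuming `page.corr` holds the "Correlation" object
+ and `info_string` the formatted info text:
+
+ ExportCorrelation("out.csv", page.corr, info_string)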
"""
-
openedfile = codecs.open(exportfile, 'w', encoding="utf-8")
## First, some doc text
openedfile.write(ReadmeCSV.replace('\n', '\r\n'))
- # The infos
- InfoMan = info.InfoClass(CurPage=Page)
- PageInfo = InfoMan.GetCurFancyInfo()
- for line in PageInfo.splitlines():
+ # The info
+ for line in page_info.splitlines():
openedfile.write(u"# "+line+"\r\n")
openedfile.write(u"#\r\n#\r\n")
# Get all the data we need from the Page
# Modeled data
- corr = Page.corr
+ corr = correlation
mod = corr.modeled_plot[:,1]
+
if corr.correlation is not None:
# Experimental data
- tau = corr.correlation_fit[:,0]
- exp = corr.correlation_fit[:,1]
- res = corr.residuals_fit[:,1]
+ tau = corr.correlation_plot[:,0]
+ exp = corr.correlation_plot[:,1]
+ res = corr.residuals_plot[:,1]
+
# Plotting! Because we only export plotted area.
-
if corr.is_weighted_fit:
weightname = corr.fit_weight_type
try:
@@ -696,13 +704,13 @@ def ExportCorrelation(exportfile, Page, info, savetrace=True):
dataWriter.writerow(["{:.10e}".format(time[i]),
"{:.10e}".format(intensity[i])])
- openedfile.close()
+ openedfile.close()
session_wildcards = [".pcfs", ".pycorrfit-session.zip", ".fcsfit-session.zip"]
-ReadmeCSV = """# This file was created using PyCorrFit version {}.
+ReadmeCSV = u"""# This file was created using PyCorrFit version {}.
#
# Lines starting with a '#' are treated as comments.
# The data is stored as CSV below this comment section.
@@ -712,10 +720,10 @@ ReadmeCSV = """# This file was created using PyCorrFit version {}.
# If this file is opened by PyCorrFit, only the first two
# columns will be imported as experimental data.
#
-""".format(doc.__version__)
+""".format(__version__)
-ReadmeSession = """This file was created using PyCorrFit version {}.
+ReadmeSession = u"""This file was created using PyCorrFit version {}.
The .zip archive you are looking at is a stored session of PyCorrFit.
If you are interested in how the data is stored, you will find
out here. Most important are the dimensions of units:
@@ -801,4 +809,4 @@ trace*.csv (where * is (Number of page) | appendix "A" or "B" point to
the respective channels (only in cross-correlation mode))
- Contains times [ms]
- Contains countrates [kHz]
-""".format(doc.__version__)
+""".format(__version__)
diff --git a/pycorrfit/readfiles/__init__.py b/pycorrfit/readfiles/__init__.py
index c984144..79926bc 100644
--- a/pycorrfit/readfiles/__init__.py
+++ b/pycorrfit/readfiles/__init__.py
@@ -44,8 +44,21 @@ def AddAllWildcard(Dictionary):
# To increase user comfort, we will now create a file opener thingy that
# knows how to open all files we know.
-def openAny(dirname, filename):
- """ Using the defined Filetypes and BGFiletypes, open the given file """
+def openAny(path, filename=None):
+ """ Using the defined Filetypes and BGFiletypes, open the given file
+
+ Parameters
+ ----------
+ path : str
+ Full path to file or directory containing `filename`
+ filename : str
+ The name of the file if not given in path (optional).
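+
+ Examples
+ --------
+ Both call styles are equivalent (file name illustrative):
+
+ >>> openAny("data/mydata.fcs")
+ >>> openAny("data", "mydata.fcs")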
+ """
+ if filename is None:
+ dirname, filename = os.path.split(path)
+ else:
+ dirname = path
+
wildcard = filename.split(".")[-1]
for key in Filetypes.keys():
# Recurse into the wildcards
@@ -217,6 +230,21 @@ def openZIP(dirname, filename):
dictionary["Filename"] = Filelist
return dictionary
+def get_supported_extensions():
+ """
+ Returns list of extensions of currently supported file types.
+ """
+ extlist = []
+ for kf in list(Filetypes.keys()):
+ ext = kf.split("|")[-1]
+ ext = ext.split(";")
+ ext = [ e.lower().strip("*. ") for e in ext]
+ ext = list(np.unique(ext))
+ extlist += ext
+ extlist = list(np.unique(extlist))
+ extlist.sort()
+ return extlist
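+
+# For instance, depending on the Filetypes dictionary, this returns a
+# sorted list of lower-case extensions such as
+# ["csv", "fcs", "pcfs", "sin", "zip", ...].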
+
# The string that is shown when opening all supported files
# We add an empty space so it is listed first in the dialogs.
@@ -246,5 +274,4 @@ BGFiletypes = { "Correlator.com (*.SIN)|*.SIN;*.sin" : openSIN,
"Zip file (*.zip)|*.zip" : openZIP,
"PyCorrFit session (*.pcfs)|*.pcfs" : openZIP
}
-BGFiletypes = AddAllWildcard(BGFiletypes)
-
+BGFiletypes = AddAllWildcard(BGFiletypes)
\ No newline at end of file
diff --git a/pycorrfit/trace.py b/pycorrfit/trace.py
new file mode 100644
index 0000000..1269f2e
--- /dev/null
+++ b/pycorrfit/trace.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+""" PyCorrFit data set
+
+Classes for FCS data evaluation.
+"""
+from __future__ import print_function, division
+
+import hashlib
+import numpy as np
+import scipy.integrate as spintg
+
+
+class Trace(object):
+ """ unifies trace handling
+ """
+ def __init__(self, trace=None, countrate=None, duration=None,
+ name=None):
+ """ Load trace data
+
+ Parameters
+ ----------
+ trace : ndarray of shape (N, 2)
+ The array contains time [ms] and count rate [kHz].
+ countrate : float
+ Average count rate [kHz].
+ Mandatory if `trace` is None.
+ duration : float
+ Duration of measurement in milliseconds.
+ Mandatory if `trace` is None.
+ name : str
+ The name of the trace.
+ """
+ self._countrate = None
+ self._duration = None
+ self._trace = None
+ self._uid = None
+
+ if trace is None:
+ self.countrate = countrate
+ self.duration = duration
+ else:
+ self.trace = trace
+
+ if name is None:
+ name = "{:.2f}kHz, {:.0f}s".format(self.countrate,
+ self.duration/1000)
+ self.name = name
+
+ def __getitem__(self, idx):
+ return self.trace[idx]
+
+ def __repr__(self):
+ text = "Trace of length {:.3f}s and countrate {:.3f}kHz".format(
+ self.duration/1000, self.countrate)
+ return text
+
+ @property
+ def countrate(self):
+ if self._countrate is None:
+ #self._countrate = np.average(self._trace[:,1])
+ # Take into account traces that have arbitrary sampling
+ self._countrate = spintg.simps(self._trace[:,1], self._trace[:,0]) / self.duration
+ return self._countrate
+
+ @countrate.setter
+ def countrate(self, value):
+ assert value is not None, "Setting value with None forbidden!"
+ assert self._trace is None, "Setting value impossible, "+\
+ "if `self.trace` is set."
+ self._countrate = value
+
+ @property
+ def duration(self):
+ if not hasattr(self, "_duration") or self._duration is None:
+ self._duration = self._trace[-1,0] - self._trace[0,0]
+ return self._duration
+
+ @duration.setter
+ def duration(self, value):
+ assert value is not None, "Setting value with None forbidden!"
+ assert self._trace is None, "Setting value impossible, "+\
+ "if `self.trace` is set."
+ self._duration = value
+
+ @property
+ def uid(self):
+ if self._uid is None:
+ hasher = hashlib.sha256()
+ hasher.update(str(np.random.random()))
+ hasher.update(str(self.trace))
+ hasher.update(self.name)
+ self._uid = hasher.hexdigest()
+ return self._uid
+
+ @property
+ def trace(self):
+ if self._trace is None:
+ self._trace = np.array([ [0, self.countrate],
+ [self.duration, self.countrate]
+ ])
+ return self._trace
+
+ @trace.setter
+ def trace(self, value):
+ assert value is not None, "Setting value with None forbidden!"
+ assert isinstance(value, np.ndarray), "value must be array!"
+ assert value.shape[1] == 2, "shape of array must be (N,2)!"
+ self._trace = value
+ # self.countrate is set automagically
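+
+
+# Usage sketch (values illustrative): a trace is an (N, 2) array of
+# time [ms] versus count rate [kHz]:
+#
+# t = Trace(trace=np.array([[0.0, 10.0], [1000.0, 12.0]]))
+# t.duration # 1000.0 [ms]
+# t.countrate # 11.0 [kHz] (integrated average)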
diff --git a/setup.cfg b/setup.cfg
index 861a9f5..9c27bff 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,6 @@
+[aliases]
+test=pytest
+
[egg_info]
tag_build =
tag_date = 0
diff --git a/setup.py b/setup.py
index ab7a44c..b2e3a6c 100644
--- a/setup.py
+++ b/setup.py
@@ -8,9 +8,8 @@
# pip install wheel twine
# python setup.py bdist wheel
from __future__ import print_function
-from setuptools import setup, Extension, Command
+from setuptools import setup, Extension
import sys
-import subprocess
from os.path import join, dirname, realpath, exists
from warnings import warn
@@ -40,21 +39,6 @@ else:
)
]
-
-class PyTest(Command):
- """ Perform pytests
- """
- user_options = []
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
- def run(self):
- errno = subprocess.call([sys.executable, 'tests/runtests.py'])
- raise SystemExit(errno)
-
# Download documentation if it was not compiled
Documentation = join(dirname(realpath(__file__)), "doc/PyCorrFit_doc.pdf")
webdoc = "https://github.com/FCS-analysis/PyCorrFit/wiki/PyCorrFit_doc.pdf"
@@ -94,12 +78,12 @@ setup(
],
data_files=[('pycorrfit_doc', ['ChangeLog.txt', 'doc/PyCorrFit_doc.pdf'])],
description=description,
+ long_description=open('README.rst').read() if exists('README.rst') else '',
include_package_data=True,
keywords=["fcs", "fluorescence", "correlation", "spectroscopy",
"tir", "fitting"
],
license="GPL v2",
- long_description=open(join(dirname(__file__), 'Readme.txt')).read(),
name=name,
platforms=['ALL'],
url='https://github.com/FCS-analysis/PyCorrFit',
@@ -108,18 +92,18 @@ setup(
packages=['pycorrfit',
'pycorrfit.models',
'pycorrfit.readfiles',
- 'pycorrfit.tools'
+ 'pycorrfit.gui',
+ 'pycorrfit.gui.tools',
],
package_dir={'pycorrfit': 'pycorrfit',
'pycorrfit.models': 'pycorrfit/models',
'pycorrfit.readfiles': 'pycorrfit/readfiles',
- 'pycorrfit.tools': 'pycorrfit/tools'
+ 'pycorrfit.gui': 'pycorrfit/gui',
+ 'pycorrfit.gui.tools': 'pycorrfit/gui/tools',
},
# cython
ext_modules=EXTENSIONS,
- cmdclass={'build_ext': build_ext,
- 'test': PyTest,
- },
+ cmdclass={'build_ext': build_ext},
# requirements
extras_require = {
# If you need the GUI of this project in your project, add
@@ -133,7 +117,8 @@ setup(
"PyYAML >= 3.09",
"lmfit >= 0.9.2",
],
- setup_requires=["cython"],
+ setup_requires=["cython", 'pytest-runner'],
+ tests_require=["pytest", "urllib3", "simplejson"],
# scripts
entry_points={
"gui_scripts": ["{name:s}={name:s}:Main".format(
diff --git a/tests/README.md b/tests/README.md
index 64694ba..26d1293 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -1,13 +1,9 @@
### Test Scripts
-This will run all tests:
+Execute all tests using `setup.py` in the parent directory:
- python runtests.py
-
-Beautiful html output is possible with (Unix, package `aha` required)
-
- ./runtests_html.sh
+ python setup.py test
### Running single tests
diff --git a/tests/data_file_dl.py b/tests/data_file_dl.py
new file mode 100644
index 0000000..285ced2
--- /dev/null
+++ b/tests/data_file_dl.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Download experimental data files from https://github.com/FCS-analysis/FCSdata
+
+This module establishes
+"""
+from __future__ import division, print_function
+
+import os
+from os.path import abspath, dirname, join, exists
+
+import simplejson as json
+import urllib3
+import warnings
+
+# Download path root
+raw_origin = "https://github.com/FCS-analysis/FCSdata/raw/master/"
+# GitHub API root
+api_origin = "https://api.github.com/repos/FCS-analysis/FCSdata/git/"
+# Download directory
+dldir = join(dirname(abspath(__file__)), "data")
+# Pool Manager handles all requests
+pool_manager = urllib3.PoolManager()
+
+_fcs_data_tree = None
+
+def dl_file(url, dest, chunk_size=6553,
+ http=pool_manager):
+ """
+ Download `url` to `dest`.
+
+ Parameters
+ ----------
+ url : str
+ Full download URL
+ dest : str
+ Full download path. Directory will be created if non-existent.
+ chunk_size : int
+ Chunk size of download (download buffer size).
+ http : instance of `urllib3.PoolManager`
+ Manages all connections. Must implement the
+ `request` method.
+ """
+ if not exists(dirname(dest)):
+ os.makedirs(dirname(dest))
+ r = http.request('GET', url, preload_content=False)
+ with open(dest, 'wb') as out:
+ while True:
+ data = r.read(chunk_size)
+ if data is None or len(data)==0:
+ break
+ out.write(data)
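+
+# Usage sketch (file name hypothetical):
+# dl_file(raw_origin + "sin/example.sin", join(dldir, "sin/example.sin"))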
+
+
+def get_data_file(filename, dldir=dldir, pool_manager=pool_manager,
+ api_origin=api_origin, raw_origin=raw_origin):
+ """
+ Return first occurrence of `filename` in the data file tree.
+ """
+ _f, ext = os.path.splitext(filename)
+ assert ext != "", "filename has no extension!"
+ files = get_data_files_ext(extension=ext, dldir=dldir,
+ pool_manager=pool_manager,
+ api_origin=api_origin,
+ raw_origin=raw_origin)
+
+ files = [ f for f in files if f.count(filename) ]
+ assert len(files) != 0, "filename not found"
+ return files[0]
+
+
+def get_data_files_ext(extension, dldir=dldir, pool_manager=pool_manager,
+ api_origin=api_origin, raw_origin=raw_origin):
+ """
+ Get all files in the repository `origin` that are
+ in the folder `extension` and have a file-ending
+ that matches `extension` (case-insensitive).
+
+ The files are downloaded and local paths in the
+ `dldir` directory are returned. If no internet
+ connection is available, previously downloaded
+ files will be used.
+
+ Parameters
+ ----------
+ extension : str
+ A file extension such as `fcs` or `sin`.
+ dldir : str
+ Path to download directory.
+ pool_manager : instance of `urllib3.PoolManager`
+ Manages all connections. Must implement the
+ `request` method.
+ raw_origin : str
+ Web root for downloads, e.g.
+ "https://raw.github.com/FCS-analysis/FCSdata"
+ api_origin : str
+ GitHub api URL, e.g.
+ "https://api.github.com/repos/FCS-analysis/FCSdata/git/"
+
+
+ Notes
+ -----
+ The files in the remote location must be sorted according to
+ file extension, e.g. all `*.sin` files must be located in a
+ folder named `sin` in the root directory.
+ """
+ ext = extension.lower()
+ if not ext.startswith("."):
+ ext = "."+ext
+ try:
+ # Get file list and download
+ files = get_data_tree_remote(pool_manager=pool_manager, api_origin=api_origin)
+ extfiles = [ f for f in files if f.lower().startswith(ext[1:]+"/") and f.lower().endswith(ext)]
+ extfiles.sort()
+
+ dl_files = []
+
+ for f in extfiles:
+ dest = join(dldir, f)
+ if not exists(dest):
+ dl_file(join(raw_origin, f), dest)
+ dl_files.append(dest)
+
+ except urllib3.exceptions.MaxRetryError:
+ # e.g. no internet connection
+ warnings.warn("Non connection, using previuously downloaded files only.")
+ files = get_data_tree_local(dldir=dldir)
+ dl_files = [ f for f in files if f.lower().endswith(ext)]
+
+
+ return dl_files
+
+
+def get_data_tree_local(dldir=dldir):
+ """
+ Returns FCSdata repository tree of local files.
+ """
+ loc_files = []
+ for root, _dirs, files in os.walk(dldir):
+ # sorting convention: the top-level folder name is the extension
+ relpath = os.path.relpath(root, dldir)
+ # extract the first path component of `relpath`
+ ext = os.path.basename(relpath[::-1])[::-1]
+ for f in files:
+ if f.lower().endswith(ext):
+ loc_files.append(os.path.join(root, f))
+
+ return loc_files
+
+
+def get_data_tree_remote(pool_manager=pool_manager, api_origin=api_origin):
+ """
+ Returns FCSdata repository tree.
+ The tree is saved in the global variable `_fcs_data_tree` to reduce
+ number of GitHub API requests.
+ """
+ global _fcs_data_tree
+ if _fcs_data_tree is None:
+ url = api_origin+"trees/master?recursive=1"
+ # headers
+ headers = {'User-Agent': __file__}
+ # GitHub API token to prevent rate-limits
+ # Key is generated with
+ #
+ # gem install travis
+ # travis encrypt GH_READ_API_TOKEN=secret-token
+ #
+ # Add the result to env in travis.yml.
+ if "GH_READ_API_TOKEN" in os.environ:
+ headers["Authorization"] = "token {}".format(os.environ["GH_READ_API_TOKEN"])
+ r = pool_manager.request("GET", url, headers=headers)
+ jd = json.loads(r.data)
+ tree = jd["tree"]
+ _fcs_data_tree = [ t["path"] for t in tree ]
+ return _fcs_data_tree
\ No newline at end of file
diff --git a/tests/test_constraints.py b/tests/test_constraints.py
new file mode 100644
index 0000000..84a4d8c
--- /dev/null
+++ b/tests/test_constraints.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Test if constraints work with model functions.
+"""
+from __future__ import division, print_function
+
+import sys
+from os.path import abspath, dirname, split
+import numpy as np
+import os
+
+
+# Add parent directory to beginning of path variable
+sys.path.insert(0, dirname(dirname(abspath(__file__))))
+import data_file_dl
+import pycorrfit as pcf
+
+
+def test_fit_constraint_simple_inequality():
+ """ Check "smaller than" relation during fitting.
+ """
+ dfile = data_file_dl.get_data_file("019_cp_KIND+BFA.fcs")
+ data = pcf.readfiles.openAny(dfile)
+ corr = pcf.Correlation(correlation=data["Correlation"][0],
+ traces=data["Trace"][0],
+ corr_type=data["Type"][0],
+ filename=os.path.basename(dfile),
+ title="test correlation",
+ fit_model=6035 # confocal 3D+3D
+ )
+ corr.fit_parameters_variable = [True, True, True, True, False, False, False]
+ # crop triplet data
+ corr.fit_ival[0] = 8
+ pcf.Fit(corr)
+ assert corr.fit_parameters[1] <= corr.fit_parameters[2]
+ # -> deliberately reverse everything and try again
+ corr.fit_parameters[1], corr.fit_parameters[2] = corr.fit_parameters[2], corr.fit_parameters[1]
+ corr.fit_parameters[3] = 1-corr.fit_parameters[3]
+ pcf.Fit(corr)
+ # This tests also for equality
+ assert corr.fit_parameters[1] <= corr.fit_parameters[2]
+ if corr.fit_parameters[1] == corr.fit_parameters[2]:
+ print("found identity of fit parameters - multiplying by two to see if relation holds")
+ corr.fit_parameters[2] *= 2
+ pcf.Fit(corr)
+ assert corr.fit_parameters[1] < corr.fit_parameters[2]
+
+
+def test_fit_constraint_sum_smaller_one():
+ """ Check "a+b<c" relation during fitting.
+ """
+ dfile = data_file_dl.get_data_file("019_cp_KIND+BFA.fcs")
+ data = pcf.readfiles.openAny(dfile)
+ corr = pcf.Correlation(correlation=data["Correlation"][0],
+ traces=data["Trace"][0],
+ corr_type=data["Type"][0],
+ filename=os.path.basename(dfile),
+ title="test correlation",
+ fit_model=6081 # confocal T+3D+3D+3D
+ )
+ pcf.Fit(corr)
+ assert corr.fit_parameters[4] + corr.fit_parameters[5] < 1
+ parms0 = np.array([
+ 1.13827592342, # n
+ 3.0918704e-05, # τ₁ [ms]
+ 1.98835792339, # τ₂ [ms]
+ 2000.0, # τ₃ [ms]
+ 0.972264423555, # F₁
+ 0.021400173882, # F₂
+ 5.0, # SP
+ 1.0, # α₂₁
+ 1.0, # α₃₁
+ 1e-08, # τ_trip [ms]
+ 0.0, # T
+ 0.0, # offset
+ ])
+ corr.fit_parameters = parms0
+
+ vary = [False] * 12
+ vary[4] = vary[5] = True
+ corr.fit_parameters_variable = vary
+ # crop triplet data
+ pcf.Fit(corr)
+ assert corr.fit_parameters[4] + corr.fit_parameters[5] < 1
+ # -> deliberately reverse everything and try again
+ corr.fit_parameters[4], corr.fit_parameters[5] = corr.fit_parameters[5], corr.fit_parameters[4]
+ pcf.Fit(corr)
+ # This tests also for equality
+ assert corr.fit_parameters[4] + corr.fit_parameters[5] < 1
+
+
+if __name__ == "__main__":
+ # Run all tests
+ loc = locals()
+ for key in list(loc.keys()):
+ if key.startswith("test_") and hasattr(loc[key], "__call__"):
+ loc[key]()
diff --git a/tests/test_file_formats.py b/tests/test_file_formats.py
new file mode 100644
index 0000000..d43a125
--- /dev/null
+++ b/tests/test_file_formats.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Test if pycorrfit can open all file formats.
+"""
+from __future__ import division, print_function
+
+import sys
+from os.path import abspath, dirname, split
+import numpy as np
+
+# Add parent directory to beginning of path variable
+sys.path.insert(0, dirname(dirname(abspath(__file__))))
+import data_file_dl
+import pycorrfit
+
+# Files that are known to not work
+exclude = ["sin/Correlator.com_Integer-Mode.SIN"]
+
+
+def test_open():
+ """
+ Try to open all supported file formats
+ """
+ # get list of supported file extensions
+ for ext in pycorrfit.readfiles.get_supported_extensions():
+ files = data_file_dl.get_data_files_ext(ext)
+ for f in files:
+ if len([ex for ex in exclude if f.endswith(ex) ]):
+ continue
+ print(f)
+ dn, fn = split(f)
+ pycorrfit.readfiles.openAny(dn, fn)
+
+
+if __name__ == "__main__":
+ # Run all tests
+ loc = locals()
+ for key in list(loc.keys()):
+ if key.startswith("test_") and hasattr(loc[key], "__call__"):
+ loc[key]()
diff --git a/tests/test_fit_model_gaussian.py b/tests/test_fit_model_gaussian.py
index 303c6f9..7c653f2 100644
--- a/tests/test_fit_model_gaussian.py
+++ b/tests/test_fit_model_gaussian.py
@@ -18,12 +18,11 @@ from pycorrfit import models as mdls
# GLOBAL PARAMETERS FOR THIS TEST:
TAU = 1.468e-6
-
def test_6001():
#2D
model = mdls.modeldict[6001]
parms = [4.874, 0.2476, 0.015]
- assert model(parms, tau=TAU) - 0.22016907491127263 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.22016907491127263) < 1e-14
def test_6002():
@@ -31,12 +30,12 @@ def test_6002():
model = mdls.modeldict[6002]
# n τ_diff τ_trip T offset
parms = [4.891, 0.853, 0.00141, 0.0121, 0.034]
- assert model(parms, tau=TAU) - 0.24095843709396209 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.24095843709396209) < 1e-14
model2 = mdls.modeldict[6001]
parms2 = [4.891, 0.853, 0.034]
parms1 = [4.891, 0.853, 0.0, 0.0, 0.034]
- assert model(parms1, tau=TAU) - model2(parms2, tau=TAU) < 1e-14
+ assert abs(model(parms1, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
def test_6031():
@@ -52,7 +51,7 @@ def test_6031():
0.0314, # T
0.00021 # offset
]
- assert model(parms, tau=TAU) - 0.41629799102222742 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.41629799102222742) < 1e-14
model2 = mdls.modeldict[6002]
parms2 = [4.891, 0.853, 0.0012, 0.108, 0.034]
@@ -66,7 +65,7 @@ def test_6031():
0.108, # T
0.034 # offset
]
- assert model(parms1, tau=TAU) - model2(parms2, tau=TAU) < 1e-14
+ assert abs(model(parms1, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
def test_6011():
@@ -74,19 +73,19 @@ def test_6011():
model = mdls.modeldict[6011]
# n T τ_trip τ_diff SP offset
parms = [2.168, 0.1682, 0.00028, 0.54, 5.864, 0.0053]
- assert model(parms, tau=TAU) - 0.55933660640533278 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.55933660640533278) < 1e-14
model2 = mdls.modeldict[6012]
parms2 = [2.168, 0.54, 5.864, 0.0053]
parms1 = [2.168, 0, 1.0, 0.54, 5.864, 0.0053]
- assert model(parms1, tau=TAU) - model2(parms2, tau=TAU) < 1e-14
+ assert abs(model(parms1, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
def test_6012():
#3D
model = mdls.modeldict[6012]
parms = [2.168, 0.54, 5.864, 0.0053]
- assert model(parms, tau=TAU) - 0.46655334038750634 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.46655334038750634) < 1e-14
def test_6030():
@@ -103,7 +102,7 @@ def test_6030():
0.1151, # T
0.008 # offset
]
- assert model(parms, tau=TAU) - 0.53367456244118261 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.53367456244118261) < 1e-14
model2 = mdls.modeldict[6011]
# n T τ_trip τ_diff SP offset
@@ -119,7 +118,7 @@ def test_6030():
0.1682, # T
0.0053 # offset
]
- assert model(parms1, tau=TAU) - model2(parms2, tau=TAU) < 1e-14
+ assert abs(model(parms1, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
def test_6032():
@@ -136,7 +135,7 @@ def test_6032():
0.108, # T
0.008 # offset
]
- assert model(parms, tau=TAU) - 0.72001694812574801 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.72001694812574801) < 1e-14
#->T+3D
model2 = mdls.modeldict[6011]
@@ -153,7 +152,7 @@ def test_6032():
0.1682, # T
0.0053 # offset
]
- assert model(parms1a, tau=TAU) - model2(parms2, tau=TAU) < 1e-14
+ assert abs(model(parms1a, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
#->T+2D
model3 = mdls.modeldict[6002]
@@ -170,10 +169,11 @@ def test_6032():
0.0121, # T
0.034 # offset
]
- assert model(parms1b, tau=TAU) - model3(parms3, tau=TAU) < 1e-14
+ assert abs(model(parms1b, tau=TAU) - model3(parms3, tau=TAU)) < 1e-14
def test_6043():
+ # TT+3D+3D
model = mdls.modeldict[6043]
parms = [
1.452, # n
@@ -188,7 +188,7 @@ def test_6043():
0.0102, # T2
0.00004 # offset
]
- assert model(parms, tau=TAU) - 0.70599013426715551 < 1e-14
+ assert abs(model(parms, tau=TAU) - 0.70599013426715551) < 1e-14
#->T+3D+3D
model2 = mdls.modeldict[6030]
@@ -217,7 +217,251 @@ def test_6043():
0.008 # offset
]
- assert model(parms1, tau=TAU) - model2(parms2, tau=TAU) < 1e-14
+ assert abs(model(parms1, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+
+
+def test_6044():
+ # TT+2D+2D
+ model = mdls.modeldict[6044]
+ parms = [
+ 1.452, # n
+ 4.48, # taud1
+ 8438, # taud2
+ 0.425, # F
+ 0.876, # alpha
+ 0.0012, # tautrip1
+ 0.0101, # T1
+ 0.0021, # tautrip2
+ 0.0102, # T2
+ 0.00004 # offset
+ ]
+ assert abs(model(parms, tau=TAU) - 0.70599013619282502) < 1e-14
+
+ #->T+2D+2D
+ model2 = mdls.modeldict[6031]
+ parms2 = [
+ 2.153, # n
+ 5.54, # taud1
+ 1532, # taud2
+ 0.4321, # F
+ 0.9234, # alpha
+ 0.002648, # tautrip
+ 0.1151, # T
+ 0.008 # offset
+ ]
+ parms1 = [
+ 2.153, # n
+ 5.54, # taud1
+ 1532, # taud2
+ 0.4321, # F
+ 0.9234, # alpha
+ 0.002648, # tautrip1
+ 0.1151, # T1
+ 0.0021, # tautrip2
+ 0.0, # T2
+ 0.008 # offset
+ ]
+
+ assert abs(model(parms1, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+
+
+def test_6045():
+ #TT+3D+2D
+ model = mdls.modeldict[6045]
+ parms = [
+ 25.123, # n
+ 240.123, # taud2D
+ 0.1125, # taud3D
+ 0.3512, # F3D
+ 5.312, # SP
+ 0.87671, # alpha
+ 0.0021987, # tautrip1
+ 0.032341, # T1
+ 0.0013243, # tautrip2
+ 0.014341, # T2
+ 0.12310 # offset
+ ]
+ assert abs(model(parms, tau=TAU) - 0.16498917764250026) < 1e-14
+
+ #->T+3D+2D
+ model2 = mdls.modeldict[6032]
+ parms2 = [
+ 25.123, # n
+ 240.123, # taud2D
+ 0.1125, # taud3D
+ 0.3512, # F3D
+ 5.312, # SP
+ 0.87671, # alpha
+ 0.0021987, # tautrip1
+ 0.032341, # T1
+ 0.12310 # offset
+ ]
+
+ parms1a = [
+ 25.123, # n
+ 240.123, # taud2D
+ 0.1125, # taud3D
+ 0.3512, # F3D
+ 5.312, # SP
+ 0.87671, # alpha
+ 0.0021987, # tautrip1
+ 0.032341, # T1
+ 0.1, # tautrip2
+ 0.0, # T2
+ 0.12310 # offset
+ ]
+ parms1b = [
+ 25.123, # n
+ 240.123, # taud2D
+ 0.1125, # taud3D
+ 0.3512, # F3D
+ 5.312, # SP
+ 0.87671, # alpha
+ 0.1, # tautrip1
+ 0.0, # T1
+ 0.0021987, # tautrip2
+ 0.032341, # T2
+ 0.12310 # offset
+ ]
+ assert abs(model(parms1a, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+ assert abs(model(parms1b, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+
+
+def test_6081():
+ # T+3D+3D+3D
+ model = mdls.modeldict[6081]
+ parms = [
+ 1.412, # n
+ 4.498, # taud1
+ 245, # taud2
+ 2910, # taud3
+ 0.123, # F1
+ 0.321, # F2
+ 5.12, # SP
+ 0.876, # alpha21
+ 0.378, # alpha31
+ 0.0021, # tautrip
+ 0.021, # T
+ -0.0004 # offset
+ ]
+ assert abs(model(parms, tau=TAU) - 0.85970140411643392) < 1e-14
+
+ #->T+3D+3D
+ model2 = mdls.modeldict[6030]
+ parms2 = [
+ 2.153, # n
+ 1.120, # taud1
+ 30.120, # taud2
+ 0.4321, # F
+ 4.4387, # SP
+ 0.4321, # alpha
+ 0.002, # tautrip
+ 0.1151, # T
+ 1.2008 # offset
+ ]
+
+ parmsa = [
+ 2.153, # n
+ 1.120, # taud1
+ 30.120, # taud2
+ 100.00, # taud3
+ 0.4321, # F1
+ 1-0.4321, # F2
+ 4.4387, # SP
+ 0.4321, # alpha21
+ 1, # alpha31
+ 0.002, # tautrip
+ 0.1151, # T
+ 1.2008 # offset
+ ]
+
+ parmsb = [
+ 2.153, # n
+ 1.120, # taud1
+ 10.000, # taud2
+ 30.120, # taud3
+ 0.4321, # F1
+ 0, # F2
+ 4.4387, # SP
+ 1, # alpha21
+ .4321, # alpha31
+ 0.002, # tautrip
+ 0.1151, # T
+ 1.2008 # offset
+ ]
+
+
+ assert abs(model(parmsa, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+ assert abs(model(parmsb, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+
+
+def test_6082():
+ # T+3D+3D+2D
+ model = mdls.modeldict[6082]
+ parms = [
+ 1.412, # n
+ 4.498, # taud1
+ 245, # taud2
+ 2910, # taud3
+ 0.123, # F1
+ 0.321, # F2
+ 5.12, # SP
+ 0.876, # alpha21
+ 0.378, # alpha31
+ 0.0021, # tautrip
+ 0.021, # T
+ -0.0004 # offset
+ ]
+ assert abs(model(parms, tau=TAU) - 0.85970140411789908) < 1e-14
+
+ #->T+3D+2D
+ model2 = mdls.modeldict[6032]
+ parms2 = [
+ 2.153, # n
+ 30.120, # taud1
+ 1.234, # taud2
+ 0.4321, # F
+ 4.4387, # SP
+ 1.341, # alpha
+ 0.002, # tautrip
+ 0.1151, # T
+ 1.2008 # offset
+ ]
+
+ parmsa = [
+ 2.153, # n
+ 1.234, # taud1
+ 1, # taud2
+ 30.120, # taud3
+ 0.4321, # F1
+ 0, # F2
+ 4.4387, # SP
+ 1., # alpha21
+ 1/1.341, # alpha31
+ 0.002, # tautrip
+ 0.1151, # T
+ 1.2008 # offset
+ ]
+
+ parmsb = [
+ 2.153, # n
+ 1, # taud1
+ 1.234, # taud2
+ 30.120, # taud3
+ 0, # F1
+ 0.4321, # F2
+ 4.4387, # SP
+ 1, # alpha21
+ 1/1.341, # alpha31
+ 0.002, # tautrip
+ 0.1151, # T
+ 1.2008 # offset
+ ]
+
+
+ assert abs(model(parmsa, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+ assert abs(model(parmsb, tau=TAU) - model2(parms2, tau=TAU)) < 1e-14
+
if __name__ == "__main__":
diff --git a/tests/test_fit_models.py b/tests/test_fit_models.py
index de78490..400ae99 100644
--- a/tests/test_fit_models.py
+++ b/tests/test_fit_models.py
@@ -14,7 +14,8 @@ import numpy as np
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import pycorrfit
-from pycorrfit.fcs_data_set import Correlation, Fit
+from pycorrfit.correlation import Correlation
+from pycorrfit.fit import Fit
# GLOBAL PARAMETERS FOR THIS TEST:
@@ -71,6 +72,9 @@ def test_fit_single_parameter():
"""
Deviate a single parameter and fit it back.
"""
+ allow_fail = [
+ [6082, "SP"],
+ ]
faillist=list()
for model in pycorrfit.models.models:
fullparms = model.default_values
@@ -79,7 +83,8 @@ def test_fit_single_parameter():
fitval = fit_single_parameter(model.id, fullparms, ii, newval, noise=False)
#print(val-fitval)
if not np.allclose([val], [fitval]):
- faillist.append([model.id, model.parameters[0][ii], val, fitval])
+ if not [model.id, model.parameters[0][ii]] in allow_fail:
+ faillist.append([model.id, model.parameters[0][ii], val, fitval])
if len(faillist) != 0:
raise ValueError("Model tests failed for:\n", faillist)
@@ -89,6 +94,7 @@ def fit_single_parameter_with_noise(noise=0.005):
Deviate a single parameter and fit it back.
"""
faillist=list()
+ succlist=list()
for model in pycorrfit.models.models:
fullparms = model.default_values
for ii, val in enumerate(fullparms):
@@ -96,28 +102,27 @@ def fit_single_parameter_with_noise(noise=0.005):
fitval = fit_single_parameter(model.id, fullparms, ii, newval, noise=noise)
if not np.allclose([val], [fitval], atol=.1, rtol=.1):
faillist.append([model.id, model.parameters[0][ii], val, fitval])
- return faillist
+ else:
+ succlist.append([model.id, model.parameters[0][ii], val, fitval])
+ return succlist, faillist
def test_fit_single_parameter_with_noise_one_permille():
- faillist = fit_single_parameter_with_noise(noise=0.001)
- if len(faillist) > 1:
+ succlist, faillist = fit_single_parameter_with_noise(noise=0.001)
+ if len(faillist)/len(succlist) > .01:
raise ValueError("Model tests failed for:\n", faillist)
def test_fit_single_parameter_with_noise_two_percent():
- faillist = fit_single_parameter_with_noise(noise=0.02)
- if len(faillist) > 5:
+ succlist, faillist = fit_single_parameter_with_noise(noise=0.02)
+ if len(faillist)/len(succlist) > .05:
raise ValueError("Model tests failed for:\n", faillist)
def test_fit_single_parameter_with_noise_five_percent():
- faillist = fit_single_parameter_with_noise(noise=0.05)
- if len(faillist) > 10:
+ succlist, faillist = fit_single_parameter_with_noise(noise=0.05)
+ if len(faillist)/len(succlist) > .10:
raise ValueError("Model tests failed for:\n", faillist)
-
-
-
if __name__ == "__main__":
# Run all tests
loc = locals()
diff --git a/tests/test_global_fit.py b/tests/test_global_fit.py
index 18c6d7a..a6b5767 100644
--- a/tests/test_global_fit.py
+++ b/tests/test_global_fit.py
@@ -7,8 +7,8 @@ import numpy as np
# Add parent directory to beginning of path variable
sys.path.insert(0, dirname(dirname(abspath(__file__))))
-import pycorrfit # @UnusedImport
-from pycorrfit.fcs_data_set import Correlation, Fit
+from pycorrfit.correlation import Correlation
+from pycorrfit.fit import Fit
def create_corr():
diff --git a/tests/test_simple.py b/tests/test_simple.py
index fe0a869..50234ad 100644
--- a/tests/test_simple.py
+++ b/tests/test_simple.py
@@ -10,7 +10,8 @@ import numpy as np
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import pycorrfit # @UnusedImport
-from pycorrfit.fcs_data_set import Correlation, Fit
+from pycorrfit.correlation import Correlation
+from pycorrfit.fit import Fit
def create_corr():
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/pycorrfit.git