[Python-modules-commits] [colorspacious] 01/08: import colorspacious_1.0.0.orig.tar.xz

Sandro Tosi morph at moszumanska.debian.org
Sun Sep 18 19:29:54 UTC 2016


This is an automated email from the git hooks/post-receive script.

morph pushed a commit to branch master
in repository colorspacious.

commit a640388be2969b4dcf64ea82faf38514daa09ac6
Author: Sandro Tosi <morph at debian.org>
Date:   Sun Sep 18 19:48:37 2016 +0100

    import colorspacious_1.0.0.orig.tar.xz
---
 MANIFEST.in                                 |   3 +
 PKG-INFO                                    |  93 ++++
 README.rst                                  |  77 ++++
 colorspacious.egg-info/PKG-INFO             |  93 ++++
 colorspacious.egg-info/SOURCES.txt          |  39 ++
 colorspacious.egg-info/dependency_links.txt |   1 +
 colorspacious.egg-info/requires.txt         |   0
 colorspacious.egg-info/top_level.txt        |   1 +
 colorspacious/__init__.py                   |  17 +
 colorspacious/basics.py                     | 225 ++++++++++
 colorspacious/ciecam02.py                   | 551 +++++++++++++++++++++++
 colorspacious/comparison.py                 |  67 +++
 colorspacious/conversion.py                 | 406 +++++++++++++++++
 colorspacious/cvd.py                        | 301 +++++++++++++
 colorspacious/gold_values.py                | 248 +++++++++++
 colorspacious/illuminants.py                | 125 ++++++
 colorspacious/luoetal2006.py                |  60 +++
 colorspacious/testing.py                    |  63 +++
 colorspacious/transform_graph.py            | 667 ++++++++++++++++++++++++++++
 colorspacious/util.py                       |  41 ++
 colorspacious/version.py                    |  21 +
 doc/Makefile                                | 177 ++++++++
 doc/_static/closelabel.png                  | Bin 0 -> 168 bytes
 doc/_static/facebox.css                     |  80 ++++
 doc/_static/facebox.js                      | 312 +++++++++++++
 doc/_static/loading.gif                     | Bin 0 -> 2767 bytes
 doc/_static/show-code.js                    |  38 ++
 doc/bibliography.bib                        |  34 ++
 doc/bibliography.rst                        |   5 +
 doc/changes.rst                             |  76 ++++
 doc/conf.py                                 | 306 +++++++++++++
 doc/grace_hopper.png                        | Bin 0 -> 628280 bytes
 doc/index.rst                               |  40 ++
 doc/make.bat                                | 242 ++++++++++
 doc/overview.rst                            |  49 ++
 doc/reference.rst                           | 360 +++++++++++++++
 doc/requirements.txt                        |  10 +
 doc/tutorial.rst                            | 324 ++++++++++++++
 setup.cfg                                   |   8 +
 setup.py                                    |  35 ++
 40 files changed, 5195 insertions(+)

diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..2e87bbd
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,3 @@
+include LICENSE.txt README.rst
+recursive-include doc *
+prune doc/_build
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..122015d
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,93 @@
+Metadata-Version: 1.1
+Name: colorspacious
+Version: 1.0.0
+Summary: A powerful, accurate, and easy-to-use Python library for doing colorspace conversions
+Home-page: https://github.com/njsmith/colorspacious
+Author: Nathaniel J. Smith
+Author-email: njs at pobox.com
+License: MIT
+Description: colorspacious
+        =============
+        
+        .. image:: https://travis-ci.org/njsmith/colorspacious.png?branch=master
+           :target: https://travis-ci.org/njsmith/colorspacious
+        .. image:: https://coveralls.io/repos/njsmith/colorspacious/badge.png?branch=master
+           :target: https://coveralls.io/r/njsmith/colorspacious?branch=master
+        
+        Colorspacious is a powerful, accurate, and easy-to-use library for
+        performing colorspace conversions.
+        
+        In addition to the most common standard colorspaces (sRGB, XYZ, xyY,
+        CIELab, CIELCh), we also include: color vision deficiency ("color
+        blindness") simulations using the approach of Machado et al (2009); a
+        complete implementation of `CIECAM02
+        <https://en.wikipedia.org/wiki/CIECAM02>`_; and the perceptually
+        uniform CAM02-UCS / CAM02-LCD / CAM02-SCD spaces proposed by Luo et al
+        (2006).
+        
+        To get started, simply write::
+        
+          from colorspacious import cspace_convert
+        
+          Jp, ap, bp = cspace_convert([64, 128, 255], "sRGB255", "CAM02-UCS")
+        
+        This converts an sRGB value (represented as integers between 0-255) to
+        CAM02-UCS `J'a'b'` coordinates (assuming standard sRGB viewing
+        conditions by default). This requires passing through 4 intermediate
+        colorspaces; ``cspace_convert`` automatically finds the optimal route
+        and applies all conversions in sequence:
+        
+        This function also of course accepts arbitrary NumPy arrays, so
+        converting a whole image is just as easy as converting a single value.
+        
+        Documentation:
+          http://colorspacious.readthedocs.org/
+        
+        Installation:
+          ``pip install colorspacious``
+        
+        Downloads:
+          https://pypi.python.org/pypi/colorspacious/
+        
+        Code and bug tracker:
+          https://github.com/njsmith/colorspacious
+        
+        Contact:
+          Nathaniel J. Smith <njs at pobox.com>
+        
+        Dependencies:
+          * Python 2.6+, or 3.3+
+          * NumPy
+        
+        Developer dependencies (only needed for hacking on source):
+          * nose: needed to run tests
+        
+        License:
+          MIT, see LICENSE.txt for details.
+        
+        References:
+        
+        * Luo, M. R., Cui, G., & Li, C. (2006). Uniform colour spaces based on
+          CIECAM02 colour appearance model. Color Research & Application, 31(4),
+          320–330. doi:10.1002/col.20227
+        
+        * Machado, G. M., Oliveira, M. M., & Fernandes, L. A. (2009). A
+          physiologically-based model for simulation of color vision
+          deficiency. Visualization and Computer Graphics, IEEE Transactions on,
+          15(6), 1291–1298. http://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html
+        
+        Other Python packages with similar functionality that you might want
+        to check out as well or instead:
+        
+        * ``colour``: http://colour-science.org/
+        * ``colormath``: http://python-colormath.readthedocs.org/
+        * ``ciecam02``: https://pypi.python.org/pypi/ciecam02/
+        * ``ColorPy``: http://markkness.net/colorpy/ColorPy.html
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..c93daeb
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,77 @@
+colorspacious
+=============
+
+.. image:: https://travis-ci.org/njsmith/colorspacious.png?branch=master
+   :target: https://travis-ci.org/njsmith/colorspacious
+.. image:: https://coveralls.io/repos/njsmith/colorspacious/badge.png?branch=master
+   :target: https://coveralls.io/r/njsmith/colorspacious?branch=master
+
+Colorspacious is a powerful, accurate, and easy-to-use library for
+performing colorspace conversions.
+
+In addition to the most common standard colorspaces (sRGB, XYZ, xyY,
+CIELab, CIELCh), we also include: color vision deficiency ("color
+blindness") simulations using the approach of Machado et al (2009); a
+complete implementation of `CIECAM02
+<https://en.wikipedia.org/wiki/CIECAM02>`_; and the perceptually
+uniform CAM02-UCS / CAM02-LCD / CAM02-SCD spaces proposed by Luo et al
+(2006).
+
+To get started, simply write::
+
+  from colorspacious import cspace_convert
+
+  Jp, ap, bp = cspace_convert([64, 128, 255], "sRGB255", "CAM02-UCS")
+
+This converts an sRGB value (represented as integers between 0-255) to
+CAM02-UCS `J'a'b'` coordinates (assuming standard sRGB viewing
+conditions by default). This requires passing through 4 intermediate
+colorspaces; ``cspace_convert`` automatically finds the optimal route
+and applies all conversions in sequence:
+
+This function also of course accepts arbitrary NumPy arrays, so
+converting a whole image is just as easy as converting a single value.
+
+Documentation:
+  http://colorspacious.readthedocs.org/
+
+Installation:
+  ``pip install colorspacious``
+
+Downloads:
+  https://pypi.python.org/pypi/colorspacious/
+
+Code and bug tracker:
+  https://github.com/njsmith/colorspacious
+
+Contact:
+  Nathaniel J. Smith <njs at pobox.com>
+
+Dependencies:
+  * Python 2.6+, or 3.3+
+  * NumPy
+
+Developer dependencies (only needed for hacking on source):
+  * nose: needed to run tests
+
+License:
+  MIT, see LICENSE.txt for details.
+
+References:
+
+* Luo, M. R., Cui, G., & Li, C. (2006). Uniform colour spaces based on
+  CIECAM02 colour appearance model. Color Research & Application, 31(4),
+  320–330. doi:10.1002/col.20227
+
+* Machado, G. M., Oliveira, M. M., & Fernandes, L. A. (2009). A
+  physiologically-based model for simulation of color vision
+  deficiency. Visualization and Computer Graphics, IEEE Transactions on,
+  15(6), 1291–1298. http://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html
+
+Other Python packages with similar functionality that you might want
+to check out as well or instead:
+
+* ``colour``: http://colour-science.org/
+* ``colormath``: http://python-colormath.readthedocs.org/
+* ``ciecam02``: https://pypi.python.org/pypi/ciecam02/
+* ``ColorPy``: http://markkness.net/colorpy/ColorPy.html
diff --git a/colorspacious.egg-info/PKG-INFO b/colorspacious.egg-info/PKG-INFO
new file mode 100644
index 0000000..122015d
--- /dev/null
+++ b/colorspacious.egg-info/PKG-INFO
@@ -0,0 +1,93 @@
+Metadata-Version: 1.1
+Name: colorspacious
+Version: 1.0.0
+Summary: A powerful, accurate, and easy-to-use Python library for doing colorspace conversions
+Home-page: https://github.com/njsmith/colorspacious
+Author: Nathaniel J. Smith
+Author-email: njs at pobox.com
+License: MIT
+Description: colorspacious
+        =============
+        
+        .. image:: https://travis-ci.org/njsmith/colorspacious.png?branch=master
+           :target: https://travis-ci.org/njsmith/colorspacious
+        .. image:: https://coveralls.io/repos/njsmith/colorspacious/badge.png?branch=master
+           :target: https://coveralls.io/r/njsmith/colorspacious?branch=master
+        
+        Colorspacious is a powerful, accurate, and easy-to-use library for
+        performing colorspace conversions.
+        
+        In addition to the most common standard colorspaces (sRGB, XYZ, xyY,
+        CIELab, CIELCh), we also include: color vision deficiency ("color
+        blindness") simulations using the approach of Machado et al (2009); a
+        complete implementation of `CIECAM02
+        <https://en.wikipedia.org/wiki/CIECAM02>`_; and the perceptually
+        uniform CAM02-UCS / CAM02-LCD / CAM02-SCD spaces proposed by Luo et al
+        (2006).
+        
+        To get started, simply write::
+        
+          from colorspacious import cspace_convert
+        
+          Jp, ap, bp = cspace_convert([64, 128, 255], "sRGB255", "CAM02-UCS")
+        
+        This converts an sRGB value (represented as integers between 0-255) to
+        CAM02-UCS `J'a'b'` coordinates (assuming standard sRGB viewing
+        conditions by default). This requires passing through 4 intermediate
+        colorspaces; ``cspace_convert`` automatically finds the optimal route
+        and applies all conversions in sequence:
+        
+        This function also of course accepts arbitrary NumPy arrays, so
+        converting a whole image is just as easy as converting a single value.
+        
+        Documentation:
+          http://colorspacious.readthedocs.org/
+        
+        Installation:
+          ``pip install colorspacious``
+        
+        Downloads:
+          https://pypi.python.org/pypi/colorspacious/
+        
+        Code and bug tracker:
+          https://github.com/njsmith/colorspacious
+        
+        Contact:
+          Nathaniel J. Smith <njs at pobox.com>
+        
+        Dependencies:
+          * Python 2.6+, or 3.3+
+          * NumPy
+        
+        Developer dependencies (only needed for hacking on source):
+          * nose: needed to run tests
+        
+        License:
+          MIT, see LICENSE.txt for details.
+        
+        References:
+        
+        * Luo, M. R., Cui, G., & Li, C. (2006). Uniform colour spaces based on
+          CIECAM02 colour appearance model. Color Research & Application, 31(4),
+          320–330. doi:10.1002/col.20227
+        
+        * Machado, G. M., Oliveira, M. M., & Fernandes, L. A. (2009). A
+          physiologically-based model for simulation of color vision
+          deficiency. Visualization and Computer Graphics, IEEE Transactions on,
+          15(6), 1291–1298. http://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html
+        
+        Other Python packages with similar functionality that you might want
+        to check out as well or instead:
+        
+        * ``colour``: http://colour-science.org/
+        * ``colormath``: http://python-colormath.readthedocs.org/
+        * ``ciecam02``: https://pypi.python.org/pypi/ciecam02/
+        * ``ColorPy``: http://markkness.net/colorpy/ColorPy.html
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
diff --git a/colorspacious.egg-info/SOURCES.txt b/colorspacious.egg-info/SOURCES.txt
new file mode 100644
index 0000000..b4e3b2a
--- /dev/null
+++ b/colorspacious.egg-info/SOURCES.txt
@@ -0,0 +1,39 @@
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+colorspacious/__init__.py
+colorspacious/basics.py
+colorspacious/ciecam02.py
+colorspacious/comparison.py
+colorspacious/conversion.py
+colorspacious/cvd.py
+colorspacious/gold_values.py
+colorspacious/illuminants.py
+colorspacious/luoetal2006.py
+colorspacious/testing.py
+colorspacious/transform_graph.py
+colorspacious/util.py
+colorspacious/version.py
+colorspacious.egg-info/PKG-INFO
+colorspacious.egg-info/SOURCES.txt
+colorspacious.egg-info/dependency_links.txt
+colorspacious.egg-info/requires.txt
+colorspacious.egg-info/top_level.txt
+doc/Makefile
+doc/bibliography.bib
+doc/bibliography.rst
+doc/changes.rst
+doc/conf.py
+doc/grace_hopper.png
+doc/index.rst
+doc/make.bat
+doc/overview.rst
+doc/reference.rst
+doc/requirements.txt
+doc/tutorial.rst
+doc/_static/closelabel.png
+doc/_static/facebox.css
+doc/_static/facebox.js
+doc/_static/loading.gif
+doc/_static/show-code.js
\ No newline at end of file
diff --git a/colorspacious.egg-info/dependency_links.txt b/colorspacious.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/colorspacious.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/colorspacious.egg-info/requires.txt b/colorspacious.egg-info/requires.txt
new file mode 100644
index 0000000..e69de29
diff --git a/colorspacious.egg-info/top_level.txt b/colorspacious.egg-info/top_level.txt
new file mode 100644
index 0000000..5399317
--- /dev/null
+++ b/colorspacious.egg-info/top_level.txt
@@ -0,0 +1 @@
+colorspacious
diff --git a/colorspacious/__init__.py b/colorspacious/__init__.py
new file mode 100644
index 0000000..c0175b3
--- /dev/null
+++ b/colorspacious/__init__.py
@@ -0,0 +1,17 @@
+# This file is part of colorspacious
+# Copyright (C) 2014 Nathaniel Smith <njs at pobox.com>
+# See file LICENSE.txt for license information.
+
+from .version import __version__
+
+from .illuminants import standard_illuminant_XYZ100, as_XYZ100_w
+
+from .cvd import machado_et_al_2009_matrix
+
+from .ciecam02 import CIECAM02Space, CIECAM02Surround, NegativeAError, JChQMsH
+
+from .luoetal2006 import LuoEtAl2006UniformSpace, CAM02UCS, CAM02SCD, CAM02LCD
+
+from .conversion import cspace_converter, cspace_convert
+
+from .comparison import deltaE
diff --git a/colorspacious/basics.py b/colorspacious/basics.py
new file mode 100644
index 0000000..7f6efbb
--- /dev/null
+++ b/colorspacious/basics.py
@@ -0,0 +1,225 @@
+# This file is part of colorspacious
+# Copyright (C) 2014-2015 Nathaniel Smith <njs at pobox.com>
+# See file LICENSE.txt for license information.
+
+# Basic colorspaces: conversions between sRGB, XYZ, xyY, CIELab
+
+import numpy as np
+
+from .util import stacklast, color_cart2polar, color_polar2cart
+from .illuminants import as_XYZ100_w
+from .testing import check_conversion
+
+################################################################
+# sRGB <-> sRGB-linear <-> XYZ100
+################################################################
+
+# https://en.wikipedia.org/wiki/SRGB#The_reverse_transformation
+def C_linear(C_srgb):
+    out = np.empty(C_srgb.shape, dtype=float)
+    linear_portion = (C_srgb < 0.04045)
+    a = 0.055
+    out[linear_portion] = C_srgb[linear_portion] / 12.92
+    out[~linear_portion] = ((C_srgb[~linear_portion] + a) / (a + 1)) ** 2.4
+    return out
+
+def C_srgb(C_linear):
+    out = np.empty(C_linear.shape, dtype=float)
+    linear_portion = (C_linear <= 0.0031308)
+    a = 0.055
+    out[linear_portion] = C_linear[linear_portion] * 12.92
+    out[~linear_portion] = (1+a) * C_linear[~linear_portion] ** (1/2.4) - a
+    return out
+
+XYZ100_to_sRGB1_matrix = np.array([
+    # This is the exact matrix specified in IEC 61966-2-1:1999
+    [ 3.2406, -1.5372, -0.4986],
+    [-0.9689,  1.8758,  0.0415],
+    [ 0.0557, -0.2040,  1.0570],
+    ])
+
+# Condition number is 4.3, inversion is safe:
+sRGB1_to_XYZ100_matrix = np.linalg.inv(XYZ100_to_sRGB1_matrix)
+
+def XYZ100_to_sRGB1_linear(XYZ100):
+    """Convert XYZ to linear sRGB, where XYZ is normalized so that reference
+    white D65 is X=95.05, Y=100, Z=108.90 and sRGB is on the 0-1 scale. Linear
+    sRGB has a linear relationship to actual light, so it is an appropriate
+    space for simulating light (e.g. for alpha blending).
+
+    """
+    XYZ100 = np.asarray(XYZ100, dtype=float)
+    # this is broadcasting matrix * array-of-vectors, where the vector is the
+    # last dim
+    RGB_linear = np.einsum("...ij,...j->...i", XYZ100_to_sRGB1_matrix, XYZ100 / 100)
+    return RGB_linear
+
+def sRGB1_linear_to_sRGB1(sRGB1_linear):
+    return C_srgb(np.asarray(sRGB1_linear, dtype=float))
+
+def sRGB1_to_sRGB1_linear(sRGB1):
+    """Convert sRGB (as floats in the 0-to-1 range) to linear sRGB."""
+    sRGB1 = np.asarray(sRGB1, dtype=float)
+    sRGB1_linear = C_linear(sRGB1)
+    return sRGB1_linear
+
+def sRGB1_linear_to_XYZ100(sRGB1_linear):
+    sRGB1_linear = np.asarray(sRGB1_linear, dtype=float)
+    # this is broadcasting matrix * array-of-vectors, where the vector is the
+    # last dim
+    XYZ100 = np.einsum("...ij,...j->...i", sRGB1_to_XYZ100_matrix, sRGB1_linear)
+    XYZ100 *= 100
+    return XYZ100
+
+def test_sRGB1_to_sRGB1_linear():
+    from .gold_values import sRGB1_sRGB1_linear_gold
+    check_conversion(sRGB1_to_sRGB1_linear, sRGB1_linear_to_sRGB1,
+                     sRGB1_sRGB1_linear_gold,
+                     a_max=1, b_max=1)
+
+def test_sRGB1_linear_to_XYZ100():
+    from .gold_values import sRGB1_linear_XYZ100_gold
+    check_conversion(sRGB1_linear_to_XYZ100, XYZ100_to_sRGB1_linear,
+                     sRGB1_linear_XYZ100_gold,
+                     a_max=1, b_max=100)
+
+################################################################
+# XYZ <-> xyY
+################################################################
+
+# These functions work identically for both the 0-100 and 0-1 versions of
+# XYZ/xyY.
+def XYZ_to_xyY(XYZ):
+    XYZ = np.asarray(XYZ, dtype=float)
+    norm = np.sum(XYZ, axis=-1, keepdims=True)
+    xy = XYZ[..., :2] / norm
+    return np.concatenate((xy, XYZ[..., 1:2]), axis=-1)
+
+def xyY_to_XYZ(xyY):
+    xyY = np.asarray(xyY, dtype=float)
+    x = xyY[..., 0]
+    y = xyY[..., 1]
+    Y = xyY[..., 2]
+    X = Y / y * x
+    Z = Y / y * (1 - x - y)
+    return stacklast(X, Y, Z)
+
+_XYZ100_to_xyY100_test_vectors = [
+    ([10, 20, 30], [ 10. / 60,  20. / 60, 20]),
+    ([99, 98,  3], [99. / 200, 98. / 200, 98]),
+    ]
+
+_XYZ1_to_xyY1_test_vectors = [
+    ([0.10, 0.20, 0.30], [ 0.10 / 0.60,  0.20 / 0.60, 0.20]),
+    ([0.99, 0.98, 0.03], [0.99 / 2.00, 0.98 / 2.00, 0.98]),
+    ]
+
+def test_XYZ_to_xyY():
+    check_conversion(XYZ_to_xyY, xyY_to_XYZ,
+                     _XYZ100_to_xyY100_test_vectors, b_max=[1, 1, 100])
+
+    check_conversion(XYZ_to_xyY, xyY_to_XYZ,
+                     _XYZ1_to_xyY1_test_vectors, b_max=[1, 1, 1])
+
+################################################################
+# XYZ100 <-> CIEL*a*b*
+################################################################
+
+# https://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions
+def _f(t):
+    out = np.empty(t.shape, dtype=float)
+    linear_portion = (t < (6. / 29) ** 3)
+    out[linear_portion] = ((1. / 3) * (29. / 6) ** 2 * t[linear_portion]
+                           + 4. / 29)
+    out[~linear_portion] = t[~linear_portion] ** (1. / 3)
+    return out
+
+def XYZ100_to_CIELab(XYZ100, XYZ100_w):
+    XYZ100 = np.asarray(XYZ100, dtype=float)
+    XYZ100_w = as_XYZ100_w(XYZ100_w)
+
+    fXYZ100_norm = _f(XYZ100 / XYZ100_w)
+    L = 116 * fXYZ100_norm[..., 1:2] - 16
+    a = 500 * (fXYZ100_norm[..., 0:1] - fXYZ100_norm[..., 1:2])
+    b = 200 * (fXYZ100_norm[..., 1:2] - fXYZ100_norm[..., 2:3])
+    return np.concatenate((L, a, b), axis=-1)
+
+def _finv(t):
+    linear_portion = (t <= 6. / 29)
+    out = np.select([linear_portion, ~linear_portion],
+                    [3 * (6. / 29) ** 2 * (t - 4. / 29),
+                     t ** 3])
+    return out
+
+def CIELab_to_XYZ100(CIELab, XYZ100_w):
+    CIELab = np.asarray(CIELab, dtype=float)
+    XYZ100_w = as_XYZ100_w(XYZ100_w)
+
+    L = CIELab[..., 0]
+    a = CIELab[..., 1]
+    b = CIELab[..., 2]
+    X_w = XYZ100_w[..., 0]
+    Y_w = XYZ100_w[..., 1]
+    Z_w = XYZ100_w[..., 2]
+
+    l_piece = 1. / 116 * (L + 16)
+    X = X_w * _finv(l_piece + 1. / 500 * a)
+    Y = Y_w * _finv(l_piece)
+    Z = Z_w * _finv(l_piece - 1. / 200 * b)
+
+    return stacklast(X, Y, Z)
+
+def test_XYZ100_to_CIELab():
+    from .gold_values import XYZ100_CIELab_gold_D65, XYZ100_CIELab_gold_D50
+
+    check_conversion(XYZ100_to_CIELab, CIELab_to_XYZ100,
+                     XYZ100_CIELab_gold_D65,
+                     # Stick to randomized values in the mid-range to avoid
+                     # hitting negative luminances
+                     b_min=[10, -30, -30], b_max=[90, 30, 30],
+                     XYZ100_w="D65")
+
+    check_conversion(XYZ100_to_CIELab, CIELab_to_XYZ100,
+                     XYZ100_CIELab_gold_D50,
+                     # Stick to randomized values in the mid-range to avoid
+                     # hitting negative luminances
+                     b_min=[10, -30, -30], b_max=[90, 30, 30],
+                     XYZ100_w="D50")
+
+    XYZ100_1 = np.asarray(XYZ100_CIELab_gold_D65[0][0])
+    CIELab_1 = np.asarray(XYZ100_CIELab_gold_D65[0][1])
+
+    XYZ100_2 = np.asarray(XYZ100_CIELab_gold_D50[1][0])
+    CIELab_2 = np.asarray(XYZ100_CIELab_gold_D50[1][1])
+
+    XYZ100_mixed = np.concatenate((XYZ100_1[np.newaxis, :],
+                                   XYZ100_2[np.newaxis, :]))
+    CIELab_mixed = np.concatenate((CIELab_1[np.newaxis, :],
+                                   CIELab_2[np.newaxis, :]))
+
+    XYZ100_w_mixed = np.row_stack((as_XYZ100_w("D65"), as_XYZ100_w("D50")))
+
+    assert np.allclose(XYZ100_to_CIELab(XYZ100_mixed, XYZ100_w=XYZ100_w_mixed),
+                       CIELab_mixed, rtol=0.001)
+    assert np.allclose(CIELab_to_XYZ100(CIELab_mixed, XYZ100_w=XYZ100_w_mixed),
+                       XYZ100_mixed, rtol=0.001)
+
+################################################################
+# CIELab <-> CIELCh
+################################################################
+
+def CIELab_to_CIELCh(CIELab):
+    CIELab = np.asarray(CIELab)
+    L = CIELab[..., 0]
+    a = CIELab[..., 1]
+    b = CIELab[..., 2]
+    C, h = color_cart2polar(a, b)
+    return stacklast(L, C, h)
+
+def CIELCh_to_CIELab(CIELCh):
+    CIELCh = np.asarray(CIELCh)
+    L = CIELCh[..., 0]
+    C = CIELCh[..., 1]
+    h = CIELCh[..., 2]
+    a, b = color_polar2cart(C, h)
+    return stacklast(L, a, b)
diff --git a/colorspacious/ciecam02.py b/colorspacious/ciecam02.py
new file mode 100644
index 0000000..fc8988b
--- /dev/null
+++ b/colorspacious/ciecam02.py
@@ -0,0 +1,551 @@
+# This file is part of colorspacious
+# Copyright (C) 2014 Nathaniel Smith <njs at pobox.com>
+# See file LICENSE.txt for license information.
+
+from __future__ import division
+
+from collections import namedtuple
+
+import numpy as np
+
+from .illuminants import as_XYZ100_w
+
+__all__ = [
+    "CIECAM02Surround", "CIECAM02Space", "NegativeAError",
+    "JChQMsH",
+]
+
+
# F, c, Nc: surround parameters
#            F     c      Nc
# Average   1.0   0.69   1.0
# Dim       0.9   0.59   0.95
# Dark      0.8   0.525  0.8
CIECAM02Surround = namedtuple("CIECAM02Surround", ["F", "c", "N_c"])
CIECAM02Surround.AVERAGE = CIECAM02Surround(1.0, 0.69,  1.0)
# BUG FIX: DIM and DARK previously had N_c = 1.95 and 1.8 -- a stray
# leading "1" -- contradicting the table above and the CIECAM02 surround
# parameter table. The correct values are 0.95 and 0.8.
CIECAM02Surround.DIM     = CIECAM02Surround(0.9, 0.59,  0.95)
CIECAM02Surround.DARK    = CIECAM02Surround(0.8, 0.525, 0.8)

# Container for the CIECAM02 appearance correlates returned by
# CIECAM02Space.XYZ100_to_CIECAM02.
JChQMsH = namedtuple("JChQMsH", ["J", "C", "h", "Q", "M", "s", "H"])

# CAT02 chromatic adaptation matrix (XYZ100 -> "sharpened" RGB).
M_CAT02 = np.asarray([[ 0.7328,  0.4296, -0.1624],
                      [-0.7036,  1.6975,  0.0061],
                      [ 0.0030,  0.0136,  0.9834]])

# Hunt-Pointer-Estevez matrix (XYZ100 -> cone-like RGB').
M_HPE = np.asarray([[ 0.38971,  0.68898, -0.07868],
                    [-0.22981,  1.18340,  0.04641],
                    [ 0.00000,  0.00000,  1.00000]])

# These are very well-conditioned matrices (condition numbers <4), so just
# taking the inverse is fine, and it simplifies things below.
M_CAT02_inv = np.linalg.inv(M_CAT02)
M_HPE_M_CAT02_inv = np.dot(M_HPE, M_CAT02_inv)
M_CAT02_M_HPE_inv = np.dot(M_CAT02, np.linalg.inv(M_HPE))

# Unique-hue data used to compute hue quadrature H: hue angles h_i,
# eccentricity factors e_i, and hue quadrature anchors H_i.
h_i = np.asarray([20.14,  90.00, 164.25, 237.53, 380.14])
e_i = np.asarray([ 0.8,    0.7,    1.0,    1.2,    0.8])
H_i = np.asarray([ 0.0,  100.0,  200.0,  300.0,  400.0])
+
def broadcasting_matvec(A, B):
    """Apply np.dot(A, vec) to every vector stacked along B's last axis.

    B always holds vectors of length j along its last axis, i.e. shape
    (..., j).  A is either a matrix of shape (i, j) or a single vector of
    shape (j,).  Contracting the trailing axis of each operand gives:

      A.shape == (...1, j),  B.shape == (...2, j)  ->  result (...2, ...1)

    so a matrix A yields shape (..., i) and a vector A yields shape (...).
    """
    # tensordot over the last axes produces exactly the
    # B.shape[:-1] + A.shape[:-1] result described above.
    return np.tensordot(B, A, axes=((-1,), (-1,)))
+
def require_exactly_one(**kwargs):
    """Raise ValueError unless exactly one keyword argument is non-None."""
    provided = [name for name, value in kwargs.items() if value is not None]
    if len(provided) != 1:
        raise ValueError("Exactly one of %s must be specified"
                         % (", ".join(kwargs)))
+
class NegativeAError(ValueError):
    """Raised when converting to CIECAM02 hits a negative achromatic signal.

    A subclass of :class:`ValueError`; see
    :meth:`CIECAM02Space.XYZ100_to_CIECAM02` for details.
    """
+
+class CIECAM02Space(object):
+    """An object representing a particular set of CIECAM02 viewing conditions.
+
+    :param XYZ100_w: The whitepoint. Either a string naming one of the known
+         standard whitepoints like ``"D65"``, or else a point in XYZ100 space.
+    :param Y_b: Background luminance.
+    :param L_A: Luminance of the adapting field (in cd/m^2).
+    :param surround: A :class:`CIECAM02Surround` object.
+
+    """
    def __init__(self, XYZ100_w, Y_b, L_A,
                 surround=CIECAM02Surround.AVERAGE):
        # Precompute all viewing-condition-dependent constants used by the
        # forward and reverse transforms. See the class docstring for
        # parameter meanings.
        self.XYZ100_w = as_XYZ100_w(XYZ100_w)
        # as_XYZ100_w allows for multiple whitepoints to be returned, but we
        # aren't vectorized WRT whitepoint
        if self.XYZ100_w.shape != (3,):
            raise ValueError("Hey! XYZ100_w should have shape (3,)!")
        self.Y_b = float(Y_b)
        self.L_A = float(L_A)
        self.surround = surround
        self.F = float(surround.F)
        self.c = float(surround.c)
        self.N_c = float(surround.N_c)

        # Whitepoint expressed in CAT02 RGB space.
        self.RGB_w = np.dot(M_CAT02, self.XYZ100_w)
        # Degree of chromatic adaptation D, clipped to its valid [0, 1]
        # range.
        self.D = self.F * (1 - (1/3.6) * np.exp((-self.L_A - 42) / 92))
        self.D = np.clip(self.D, 0, 1)

        self.D_RGB = self.D * self.XYZ100_w[1] / self.RGB_w + 1 - self.D
        # Fairchild (2013), pages 290-292, recommends using this equation
        # instead, though notes that it doesn't make much difference as part
        # of a full CIECAM02 system. (It matters more if you're only using
        # pieces.)
        #self.D_RGB = self.D * 100 / self.RGB_w + 1 - self.D
        # Luminance-level adaptation factor F_L, computed from L_A.
        self.k = 1 / (5 * self.L_A + 1)
        self.F_L = (0.2 * self.k ** 4 * (5 * self.L_A)
                    + 0.1 * (1 - self.k**4)**2 * (5 * self.L_A) ** (1./3))
        # Background induction factors, derived from the relative
        # background luminance n = Y_b / Y_w.
        self.n = self.Y_b / self.XYZ100_w[1]
        self.z = 1.48 + np.sqrt(self.n)
        self.N_bb = 0.725 * (1 / self.n)**0.2
        # N_cb is taken equal to N_bb here (the original author flagged this
        # line with "??" -- presumably following the reference formulas;
        # verify against the CIECAM02 spec if touching this).
        self.N_cb = self.N_bb

        # Adapt the whitepoint in CAT02 space, convert to
        # Hunt-Pointer-Estevez space, and apply the nonlinear response
        # compression, yielding the achromatic response of the white, A_w.
        self.RGB_wc = self.D_RGB * self.RGB_w
        self.RGBprime_w = np.dot(M_HPE_M_CAT02_inv, self.RGB_wc)
        tmp = ((self.F_L * self.RGBprime_w) / 100) ** 0.42
        self.RGBprime_aw = 400 * (tmp / (tmp + 27.13)) + 0.1
        self.A_w = ((np.dot([2, 1, 1. / 20], self.RGBprime_aw) - 0.305)
                    * self.N_bb)
+
+    def __repr__(self):
+        surround_string = ", surround=%r" % (self.surround,)
+        if self.surround == CIECAM02Surround.AVERAGE:
+            surround_string = ""
+        return "%s(%r, %r, %r%s) " % (
+            self.__class__.__name__,
+            list(self.XYZ100_w),
+            self.Y_b,
+            self.L_A,
+            surround_string)
+
    # XYZ100 must have shape (..., 3) -- enforced by the check below.
    def XYZ100_to_CIECAM02(self, XYZ100, on_negative_A="raise"):
        """Computes CIECAM02 appearance correlates for the given tristimulus
        value(s) XYZ (normalized to be on the 0-100 scale).

        Example: ``vc.XYZ100_to_CIECAM02([30.0, 45.5, 21.0])``

        :param XYZ100: An array-like of tristimulus values. These should be
          given on the 0-100 scale, not the 0-1 scale. The array-like should
          have shape ``(..., 3)``; e.g., you can use a simple 3-item list
          (shape = ``(3,)``), or to efficiently perform multiple computations
          at once, you could pass a higher-dimensional array, e.g. an image.
        :arg on_negative_A: A known infelicity of the CIECAM02 model is that
          for some inputs, the achromatic signal :math:`A` can be negative,
          which makes it impossible to compute :math:`J`, :math:`C`,
          :math:`Q`, :math:`M`, or :math:`s` -- only :math:`h`: and :math:`H`
          are spared. (See, e.g., section 2.6.4.1 of :cite:`Luo-CIECAM02` for
          discussion.) This argument allows you to specify a strategy for
          handling such points. Options are:

          * ``"raise"``: throws a :class:`NegativeAError` (a subclass of
            :class:`ValueError`)
          * ``"nan"``: return not-a-number values for the affected
            elements. (This may be particularly useful if converting a large
            number of points at once.)

        :returns: A named tuple of type :class:`JChQMsH`, with attributes
          ``J``, ``C``, ``h``, ``Q``, ``M``, ``s``, and ``H`` containing the
          CIECAM02 appearance correlates.

        """

        #### Argument checking

        XYZ100 = np.asarray(XYZ100, dtype=float)
        if XYZ100.shape[-1] != 3:
            raise ValueError("XYZ100 shape must be (..., 3)")

        #### Step 1: convert to CAT02 RGB space

        RGB = broadcasting_matvec(M_CAT02, XYZ100)

        #### Step 2: chromatic adaptation (scale by the per-channel
        #### adaptation factors computed in __init__)

        RGB_C = self.D_RGB * RGB

        #### Step 3: convert to Hunt-Pointer-Estevez space

        RGBprime = broadcasting_matvec(M_HPE_M_CAT02_inv, RGB_C)

        #### Step 4: nonlinear response compression (applied to the
        #### magnitude, with the sign restored afterwards)

        RGBprime_signs = np.sign(RGBprime)

        tmp = (self.F_L * RGBprime_signs * RGBprime / 100) ** 0.42
        RGBprime_a = RGBprime_signs * 400 * (tmp / (tmp + 27.13)) + 0.1

        #### Step 5: opponent responses a (red-green), b (yellow-blue),
        #### and hue angle h in degrees within [0, 360)

        a = broadcasting_matvec([1, -12. / 11, 1. / 11], RGBprime_a)
        b = broadcasting_matvec([1. / 9, 1. / 9, -2. / 9], RGBprime_a)
        h_rad = np.arctan2(b, a)
        h = np.rad2deg(h_rad) % 360

        #### Step 6: hue quadrature H, by interpolating between the
        #### unique-hue anchors (h_i, e_i, H_i)

        # hprime = h, unless h < 20.14, in which case hprime = h + 360.
        hprime = np.select([h < h_i[0], True], [h + 360, h])
        # we use 0-based indexing, so our i is one less than the reference
        # formulas' i.
        i = np.searchsorted(h_i, hprime, side="right") - 1
        tmp = (hprime - h_i[i]) / e_i[i]
        H = H_i[i] + ((100 * tmp)
                      / (tmp + (h_i[i + 1] - hprime) / e_i[i + 1]))

        #### Step 7: achromatic response A

        A = ((broadcasting_matvec([2, 1, 1. / 20], RGBprime_a) - 0.305)
             * self.N_bb)

        # Negative A makes the correlates below (other than h and H)
        # uncomputable; dispatch on the caller's chosen policy.
        if on_negative_A == "raise":
            if np.any(A < 0):
                raise NegativeAError("attempted to convert a tristimulus "
                                     "value whose achromatic signal was "
                                     "negative, and on_negative_A=\"raise\"")
        elif on_negative_A == "nan":
            A = np.select([A < 0, True], [np.nan, A])
        else:
            raise ValueError("Invalid on_negative_A argument: got %r, "
                             "expected \"raise\" or \"nan\""
                             % (on_negative_A,))

        #### Step 8: lightness J, relative to the white's achromatic
        #### response A_w

        J = 100 * (A / self.A_w) ** (self.c * self.z)

        #### Step 9: brightness Q

        Q = self._J_to_Q(J)

        #### Step 10: chroma C, colorfulness M, and saturation s

        e = (12500. / 13) * self.N_c * self.N_cb * (np.cos(h_rad + 2) + 3.8)
        t = (e * np.sqrt(a ** 2 + b ** 2)
             / broadcasting_matvec([1, 1, 21. / 20], RGBprime_a))

        C = t**0.9 * (J / 100)**0.5 * (1.64 - 0.29**self.n)**0.73
        M = C * self.F_L**0.25
        s = 100 * (M / Q)**0.5

        return JChQMsH(J, C, h, Q, M, s, H)
+
+    def _J_to_Q(self, J):
+        return ((4 / self.c) * (J / 100) ** 0.5
+                * (self.A_w + 4) * self.F_L**0.25)
+
+    def CIECAM02_to_XYZ100(self, J=None, C=None, h=None,
+                           Q=None, M=None, s=None, H=None):
+        """Return the unique tristimulus values that have the given CIECAM02
+        appearance correlates under these viewing conditions.
+
+        You must specify 3 arguments:
+
+        * Exactly one of ``J`` and ``Q``
+        * Exactly one of ``C``, ``M``, and ``s``
+        * Exactly one of ``h`` and ``H``.
+
+        Arguments can be vectors, in which case they will be broadcast against
+        each other.
+
+        Returned tristimulus values will be on the 0-100 scale, not the 0-1
+        scale.
+        """
+
+        #### Argument checking
+
+        require_exactly_one(J=J, Q=Q)
+        require_exactly_one(C=C, M=M, s=s)
+        require_exactly_one(h=h, H=H)
+
+        if J is not None:
+            J = np.asarray(J, dtype=float)
+        if C is not None:
+            C = np.asarray(C, dtype=float)
+        if h is not None:
+            h = np.asarray(h, dtype=float)
+        if Q is not None:
+            Q = np.asarray(Q, dtype=float)
+        if M is not None:
+            M = np.asarray(M, dtype=float)
+        if s is not None:
+            s = np.asarray(s, dtype=float)
+        if H is not None:
+            H = np.asarray(H, dtype=float)
+
+        #### Step 1: conversions to get JCh
+
+        if J is None:
+            J = 6.25 * ((self.c * Q) / ((self.A_w + 4) * self.F_L**0.25)) ** 2
+
+        if C is None:
+            if M is not None:
+                C = M / self.F_L**0.25
+            else:
+                assert s is not None
+                # when starting from s, we need Q
+                if Q is None:
+                    Q = self._J_to_Q(J)
+                C = (s / 100) ** 2 * (Q / self.F_L**0.25)
+
+        if h is None:
+            i = np.searchsorted(H_i, H, side="right") - 1
+            # BROKEN:
+            num1 = (H - H_i[i]) * (e_i[i + 1] * h_i[i] - e_i[i] * h_i[i + 1])
+            num2 = -100 * h_i[i] * e_i[i + 1]
+            denom1 = (H - H_i[i]) * (e_i[i + 1] - e_i[i])
+            denom2 = -100 * e_i[i + 1]
+            hprime = (num1 + num2) / (denom1 + denom2)
+            h = np.select([hprime > 360, True], [hprime - 360, hprime])
+
+        J, C, h = np.broadcast_arrays(J, C, h)
+        target_shape = J.shape
+
+        # 0d arrays break indexing stuff
+        if J.ndim == 0:
+            J = np.atleast_1d(J)
+            C = np.atleast_1d(C)
+            h = np.atleast_1d(h)
+
+        #### Step 2
+
+        t = (C
+             / (np.sqrt(J / 100) * (1.64 - 0.29**self.n) ** 0.73)
+            ) ** (1 / 0.9)
+        e_t = 0.25 * (np.cos(np.deg2rad(h) + 2) + 3.8)
+        A = self.A_w * (J / 100) ** (1 / (self.c * self.z))
+
+        # an awkward way of calculating 1/t such that 1/0 -> inf
+        with np.errstate(divide="ignore"):
... 4476 lines suppressed ...

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/colorspacious.git



More information about the Python-modules-commits mailing list