[pyresample] 01/06: New upstream version 1.3.0

Antonio Valentino a_valentino-guest at moszumanska.debian.org
Sun Mar 12 08:16:58 UTC 2017


This is an automated email from the git hooks/post-receive script.

a_valentino-guest pushed a commit to branch master
in repository pyresample.

commit 48eb8e02a42722c6dda3cc3098e7f5aeca4ae33f
Author: Antonio Valentino <antonio.valentino at tiscali.it>
Date:   Sat Mar 11 20:13:30 2017 +0000

    New upstream version 1.3.0
---
 .bumpversion.cfg                                 |   2 +-
 .travis.yml                                      |   1 +
 appveyor.yml                                     |   4 +-
 changelog.rst                                    | 142 ++++++
 docs/source/API.rst                              |   7 +-
 docs/source/_static/images/bilinear_overview.png | Bin 0 -> 1152178 bytes
 docs/source/_static/images/nearest_overview.png  | Bin 0 -> 801023 bytes
 docs/source/swath.rst                            | 114 +++++
 pyresample/bilinear/__init__.py                  | 533 +++++++++++++++++++++++
 pyresample/geometry.py                           |  37 +-
 pyresample/kd_tree.py                            |   4 -
 pyresample/test/__init__.py                      |   4 +-
 pyresample/test/test_bilinear.py                 | 239 ++++++++++
 pyresample/version.py                            |   2 +-
 14 files changed, 1051 insertions(+), 38 deletions(-)

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index dfc5ee1..3553896 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 1.2.9
+current_version = 1.3.0
 commit = True
 tag = True
 
diff --git a/.travis.yml b/.travis.yml
index 481c053..78fb339 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,6 +15,7 @@ install:
 - if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then pip install unittest2; fi
 - if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then pip install importlib; fi
 - if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then pip install "sphinx<1.5.0"; fi
+- if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then pip install "pillow<4.0.0"; fi
 - if [[ $TRAVIS_PYTHON_VERSION == "2.7" ]]; then pip install "matplotlib>=1.5.0"; fi
 - if [[ $TRAVIS_PYTHON_VERSION == "2.7" ]]; then pip install "sphinx>=1.5.0"; fi
 - if [[ $TRAVIS_PYTHON_VERSION == "3.3" ]]; then pip install "matplotlib<1.5.0"; fi
diff --git a/appveyor.yml b/appveyor.yml
index 9975c62..b85c25e 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -11,12 +11,12 @@ environment:
     - PYTHON: "C:\\Python27_32"
       PYTHON_VERSION: "2.7.8"
       PYTHON_ARCH: "32"
-      MINICONDA_VERSION: ""
+      MINICONDA_VERSION: "2"
 
     - PYTHON: "C:\\Python27_64"
       PYTHON_VERSION: "2.7.8"
       PYTHON_ARCH: "64"
-      MINICONDA_VERSION: ""
+      MINICONDA_VERSION: "2"
 
     - PYTHON: "C:\\Python34_32"
       PYTHON_VERSION: "3.4.1"
diff --git a/changelog.rst b/changelog.rst
index 979a912..5a68a7a 100644
--- a/changelog.rst
+++ b/changelog.rst
@@ -1,6 +1,148 @@
 Changelog
 =========
 
+v1.3.0 (2017-02-07)
+-------------------
+
+- update changelog. [Martin Raspaud]
+
+- Bump version: 1.2.9 → 1.3.0. [Martin Raspaud]
+
+- Merge pull request #55 from pytroll/feature-bilinear. [Martin Raspaud]
+
+  Feature bilinear
+
+- Add Python2 miniconda version number. [Panu Lahtinen]
+
+- Rename *area_in* to *source_geo_def* and *area_out* to
+  *target_area_def* [Panu Lahtinen]
+
+- Fix search radius from 50e5 meters to 50e3 meters. [Panu Lahtinen]
+
+- Add access to kd_tree parameters reduce_data, segments and epsilon.
+  [Panu Lahtinen]
+
+- Add missing return value to docstring. [Panu Lahtinen]
+
+- Remove possibility to use tuple of coordinates as "in_area" [Panu
+  Lahtinen]
+
+- Try if older version of Pillow is installable with Python 2.6. [Panu
+  Lahtinen]
+
+- Remove obsolete tests + minor adjustments + comments. [Panu Lahtinen]
+
+  Remove tests for functions that were removed.  Add test for getting
+  coefficients for quadratic equations.  Add test for _get_ts().  Test
+  that small variations doesn't cause failures when solving the quadratic
+  equation.  Check all pixels of the output in test_get_bil_info().
+
+
+- Adjust order so that most common case is first. [Panu Lahtinen]
+
+- Remove parallelity checks. [Panu Lahtinen]
+
+  Don't bother checking if lines area parallel, just run the most common
+  (irregular rectangle) for all data, and run the two algorigthms
+  consecutively for those where no valid data is yet present (ie. have
+  np.nan).
+
+
+- Test failure of _get_ts_irregular when verticals are parallel. [Panu
+  Lahtinen]
+
+- Refactor numpyfying. [Panu Lahtinen]
+
+- Clarify function name. [Panu Lahtinen]
+
+- Refactor. [Panu Lahtinen]
+
+  Move common parts of _get_ts_irregular() and _get_ts_uprights_parallel()
+  to two functions: one to get the parameters for quadratic equation and
+  one to solve the other fractional distance not solved from the quadratic
+  equation.
+
+
+- Fix example code. [Panu Lahtinen]
+
+- Enable doctest for resampling from bilinear coefficients. [Panu
+  Lahtinen]
+
+- Fix unittest which had wrong "correct" value. [Panu Lahtinen]
+
+- Replace np.ma.masked_where() with np.ma.masked_invalid() [Panu
+  Lahtinen]
+
+- Move input checks to a function. [Panu Lahtinen]
+
+- Add more unit tests. [Panu Lahtinen]
+
+- Move check of source area to get_bil_info() [Panu Lahtinen]
+
+- Ensure data is not a masked array. [Panu Lahtinen]
+
+- Remove indexing which isn't used. [Panu Lahtinen]
+
+- Unpack result one step further to get a float instead of ndarray.
+  [Panu Lahtinen]
+
+- Mask out warnings about invalid values in less and greater. [Panu
+  Lahtinen]
+
+- Documentation for pyresample.bilinear. [Panu Lahtinen]
+
+- Add few tests for bilinear interpolation. [Panu Lahtinen]
+
+- Fix typos, fix _get_ts_parallellogram() [Panu Lahtinen]
+
+- Adjust comment. [Panu Lahtinen]
+
+- Ignore messages about invalid values due to np.nan. [Panu Lahtinen]
+
+- Handle cases with parallel sides in the rectangle formed by
+  neighbours. [Panu Lahtinen]
+
+- Make it possible to give input coordinates instead of area definition.
+  [Panu Lahtinen]
+
+- Fixes: check for # datasets, output shape for multiple datasets,
+  masking, make output reshaping optional. [Panu Lahtinen]
+
+- Add convenience function resample_bilinear(), remove unused logging.
+  [Panu Lahtinen]
+
+- Rename get_corner() as _get_corner() [Panu Lahtinen]
+
+- Add better docstrings, rename helper functions private. [Panu
+  Lahtinen]
+
+- Cleanup code. [Panu Lahtinen]
+
+- Extend docstrings, add a keyword to return masked arrays or arrays
+  with np.nan:s. [Panu Lahtinen]
+
+- Add default value for search radius, adjust default number of
+  neighbours. [Panu Lahtinen]
+
+- Initial version of bilinear resampling. [Panu Lahtinen]
+
+  NOTE: Only works if both source and destination are area definitions.
+  Also to be added is handling for the cases where a__ equals zero (use
+  linear solution of bx + c = 0), testing, logging and all the error
+  handling.
+
+
+- Allow areas to be flipped. [Martin Raspaud]
+
+- Factorize get_xy_from_lonlat and get_xy_from_proj_coords. [Martin
+  Raspaud]
+
+- Remove `fill_value` documentation for get_neighbour_info. [davidh-
+  ssec]
+
+  Fix #50
+
+
 v1.2.9 (2016-12-13)
 -------------------
 
diff --git a/docs/source/API.rst b/docs/source/API.rst
index 234b086..1731916 100644
--- a/docs/source/API.rst
+++ b/docs/source/API.rst
@@ -21,6 +21,11 @@ pyresample.kd_tree
 .. automodule:: kd_tree
 	:members:
 	
+pyresample.bilinear
+---------------------------------
+.. automodule:: bilinear
+	:members:
+	
 pyresample.utils
 ---------------------------------
 .. automodule:: utils
@@ -41,4 +46,4 @@ pyresample.ewa
 .. automodule:: ewa
     :members:
 
-	
\ No newline at end of file
+	
diff --git a/docs/source/_static/images/bilinear_overview.png b/docs/source/_static/images/bilinear_overview.png
new file mode 100644
index 0000000..a1121cc
Binary files /dev/null and b/docs/source/_static/images/bilinear_overview.png differ
diff --git a/docs/source/_static/images/nearest_overview.png b/docs/source/_static/images/nearest_overview.png
new file mode 100644
index 0000000..84ef12b
Binary files /dev/null and b/docs/source/_static/images/nearest_overview.png differ
diff --git a/docs/source/swath.rst b/docs/source/swath.rst
index 779ab59..b370996 100644
--- a/docs/source/swath.rst
+++ b/docs/source/swath.rst
@@ -242,6 +242,120 @@ Speedup using pykdtree
 
 pykdtree can be used instead of scipy to gain significant speedup for large datasets. See :ref:`multi`. 
 
+pyresample.bilinear
+-------------------
+
+Compared to nearest neighbour resampling, bilinear interpolation
+produces smoother results near swath edges of polar satellite data and
+edges of geostationary satellites.
+
+The algorithm is implemented from http://www.ahinson.com/algorithms_general/Sections/InterpolationRegression/InterpolationIrregularBilinear.pdf
+
+Below is shown a comparison between image generated with nearest
+neighbour resampling (top) and with bilinear interpolation
+(bottom):
+
+.. image:: _static/images/nearest_overview.png
+   :width: 50%
+.. image:: _static/images/bilinear_overview.png
+   :width: 50%
+
+Click images to see the full resolution versions.
+
+The *perceived* sharpness of the bottom image is lower, but there is more detail present.
+
+resample_bilinear
+*****************
+
+Function for resampling using bilinear interpolation for irregular source grids.
+
+.. doctest::
+
+ >>> import numpy as np
+ >>> from pyresample import bilinear, geometry
+ >>> target_def = geometry.AreaDefinition('areaD',
+ ...                                      'Europe (3km, HRV, VTC)',
+ ...                                      'areaD',
+ ...                                      {'a': '6378144.0', 'b': '6356759.0',
+ ...                                       'lat_0': '50.00', 'lat_ts': '50.00',
+ ...                                       'lon_0': '8.00', 'proj': 'stere'},
+ ...                                      800, 800,
+ ...                                      [-1370912.72, -909968.64,
+ ...                                       1029087.28, 1490031.36])
+ >>> data = np.fromfunction(lambda y, x: y*x, (50, 10))
+ >>> lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
+ >>> lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
+ >>> source_def = geometry.SwathDefinition(lons=lons, lats=lats)
+ >>> result = bilinear.resample_bilinear(data, source_def, target_def,
+ ...                                     radius=50e3, neighbours=32,
+ ...                                     nprocs=1, fill_value=0,
+ ...                                     reduce_data=True, segments=None,
+ ...                                     epsilon=0)
+
+The **target_area** needs to be an area definition with **proj4_string**
+attribute.
+
+..
+    The **source_def** can be either an area definition as above,
+    or a 2-tuple of (lons, lats).
+
+Keyword arguments which are passed to **kd_tree**:
+
+* **radius**: radius around each target pixel in meters to search for
+  neighbours in the source data
+* **neighbours**: number of closest locations to consider when
+  selecting the four data points around the target pixel
+* **nprocs**: number of processors to use for finding the closest pixels
+* **fill_value**: fill invalid pixel with this value.  If
+  **fill_value=None** is used, masked arrays will be returned
+* **reduce_data**: do/don't do preliminary data reduction before calculating
+  the neigbour info
+* **segments**: number of segments to use in neighbour search
+* **epsilon**: maximum uncertainty allowed in neighbour search
+
+The example above shows the default value for each keyword argument.
+
+Resampling from bilinear coefficients
+*************************************
+
+As for nearest neighbour resampling, also bilinear interpolation can
+be split in two steps.
+
+* Calculate interpolation coefficients, input data reduction matrix
+  and mapping matrix
+* Use this information to resample several datasets between these two
+  areas/swaths
+
+Only the first step is computationally expensive operation, so by
+re-using this information the overall processing time is reduced
+significantly.  This is also done internally by the
+**resample_bilinear** function, but separating these steps makes it
+possible to cache the coefficients if the same transformation is done
+over and over again.  This is very typical in operational
+geostationary satellite image processing.
+
+.. doctest::
+
+ >>> import numpy as np
+ >>> from pyresample import bilinear, geometry
+ >>> target_def = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)',
+ ...                                      'areaD',
+ ...                                      {'a': '6378144.0', 'b': '6356759.0',
+ ...                                       'lat_0': '50.00', 'lat_ts': '50.00',
+ ...                                       'lon_0': '8.00', 'proj': 'stere'},
+ ...                                      800, 800,
+ ...                                      [-1370912.72, -909968.64,
+ ...                                       1029087.28, 1490031.36])
+ >>> data = np.fromfunction(lambda y, x: y*x, (50, 10))
+ >>> lons = np.fromfunction(lambda y, x: 3 + x, (50, 10))
+ >>> lats = np.fromfunction(lambda y, x: 75 - y, (50, 10))
+ >>> source_def = geometry.SwathDefinition(lons=lons, lats=lats)
+ >>> t_params, s_params, input_idxs, idx_ref = \
+ ...     bilinear.get_bil_info(source_def, target_def, radius=50e3, nprocs=1)
+ >>> res = bilinear.get_sample_from_bil_info(data.ravel(), t_params, s_params,
+ ...                                         input_idxs, idx_ref)
+
+
 pyresample.ewa
 --------------
 
diff --git a/pyresample/bilinear/__init__.py b/pyresample/bilinear/__init__.py
new file mode 100644
index 0000000..7e7f855
--- /dev/null
+++ b/pyresample/bilinear/__init__.py
@@ -0,0 +1,533 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017
+
+# Author(s):
+
+#   Panu Lahtinen <panu.lahtinen at fmi.fi>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Code for resampling using bilinear algorithm for irregular grids.
+
+The algorithm is taken from
+
+http://www.ahinson.com/algorithms_general/Sections/InterpolationRegression/InterpolationIrregularBilinear.pdf
+
+"""
+
+import numpy as np
+from pyproj import Proj
+
+from pyresample import kd_tree
+
+
+def resample_bilinear(data, source_geo_def, target_area_def, radius=50e3,
+                      neighbours=32, nprocs=1, fill_value=0,
+                      reduce_data=True, segments=None, epsilon=0):
+    """Resample using bilinear interpolation.
+
+    data : numpy array
+        Array of single channel data points or
+        (source_geo_def.shape, k) array of k channels of datapoints
+    source_geo_def : object
+        Geometry definition of source data
+    target_area_def : object
+        Geometry definition of target area
+    radius : float, optional
+        Cut-off distance in meters
+    neighbours : int, optional
+        Number of neighbours to consider for each grid point when
+        searching the closest corner points
+    nprocs : int, optional
+        Number of processor cores to be used for getting neighbour info
+    fill_value : {int, None}, optional
+        Set undetermined pixels to this value.
+        If fill_value is None a masked array is returned with undetermined
+        pixels masked
+    reduce_data : bool, optional
+        Perform initial coarse reduction of source dataset in order
+        to reduce execution time
+    segments : int or None
+        Number of segments to use when resampling.
+        If set to None an estimate will be calculated
+    epsilon : float, optional
+        Allowed uncertainty in meters. Increasing uncertainty
+        reduces execution time
+
+    Returns
+    -------
+    data : numpy array
+        Source data resampled to target geometry
+    """
+
+    # Calculate the resampling information
+    t__, s__, input_idxs, idx_ref = get_bil_info(source_geo_def,
+                                                 target_area_def,
+                                                 radius=radius,
+                                                 neighbours=neighbours,
+                                                 nprocs=nprocs,
+                                                 masked=False,
+                                                 reduce_data=reduce_data,
+                                                 segments=segments,
+                                                 epsilon=epsilon)
+
+    data = _check_data_shape(data, input_idxs)
+
+    result = np.nan * np.zeros((target_area_def.size, data.shape[1]))
+    for i in range(data.shape[1]):
+        result[:, i] = get_sample_from_bil_info(data[:, i], t__, s__,
+                                                input_idxs, idx_ref,
+                                                output_shape=None)
+
+    if fill_value is None:
+        result = np.ma.masked_invalid(result)
+    else:
+        result[np.isnan(result)] = fill_value
+
+    return result
+
+
+def get_sample_from_bil_info(data, t__, s__, input_idxs, idx_arr,
+                             output_shape=None):
+    """Resample data using bilinear interpolation.
+
+    Parameters
+    ----------
+    data : numpy array
+        1d array to be resampled
+    t__ : numpy array
+        Vertical fractional distances from corner to the new points
+    s__ : numpy array
+        Horizontal fractional distances from corner to the new points
+    input_idxs : numpy array
+        Valid indices in the input data
+    idx_arr : numpy array
+        Mapping array from valid source points to target points
+    output_shape : tuple, optional
+        Tuple of (y, x) dimension for the target projection.
+        If None (default), do not reshape data.
+
+    Returns
+    -------
+    result : numpy array
+        Source data resampled to target geometry
+    """
+
+    # Reduce data
+    new_data = data[input_idxs]
+    data_min = np.nanmin(new_data)
+    data_max = np.nanmax(new_data)
+
+    new_data = new_data[idx_arr]
+
+    # Get neighbour data to separate variables
+    p_1 = new_data[:, 0]
+    p_2 = new_data[:, 1]
+    p_3 = new_data[:, 2]
+    p_4 = new_data[:, 3]
+
+    result = (p_1 * (1 - s__) * (1 - t__) +
+              p_2 * s__ * (1 - t__) +
+              p_3 * (1 - s__) * t__ +
+              p_4 * s__ * t__)
+
+    if hasattr(result, 'mask'):
+        mask = result.mask
+        result = result.data
+        result[mask] = np.nan
+
+    with np.errstate(invalid='ignore'):
+        idxs = (result > data_max) | (result < data_min)
+
+    result[idxs] = np.nan
+
+    if output_shape is not None:
+        result = result.reshape(output_shape)
+
+    return result
+
+
+def get_bil_info(source_geo_def, target_area_def, radius=50e3, neighbours=32,
+                 nprocs=1, masked=False, reduce_data=True, segments=None,
+                 epsilon=0):
+    """Calculate information needed for bilinear resampling.
+
+    source_geo_def : object
+        Geometry definition of source data
+    target_area_def : object
+        Geometry definition of target area
+    radius : float, optional
+        Cut-off distance in meters
+    neighbours : int, optional
+        Number of neighbours to consider for each grid point when
+        searching the closest corner points
+    nprocs : int, optional
+        Number of processor cores to be used for getting neighbour info
+    masked : bool, optional
+        If true, return masked arrays, else return np.nan values for
+        invalid points (default)
+    reduce_data : bool, optional
+        Perform initial coarse reduction of source dataset in order
+        to reduce execution time
+    segments : int or None
+        Number of segments to use when resampling.
+        If set to None an estimate will be calculated
+    epsilon : float, optional
+        Allowed uncertainty in meters. Increasing uncertainty
+        reduces execution time
+
+    Returns
+    -------
+    t__ : numpy array
+        Vertical fractional distances from corner to the new points
+    s__ : numpy array
+        Horizontal fractional distances from corner to the new points
+    input_idxs : numpy array
+        Valid indices in the input data
+    idx_arr : numpy array
+        Mapping array from valid source points to target points
+    """
+
+    # Check source_geo_def
+    # if isinstance(source_geo_def, tuple):
+    #     from pyresample.geometry import SwathDefinition
+    #     lons, lats = _mask_coordinates(source_geo_def[0], source_geo_def[1])
+    #     source_geo_def = SwathDefinition(lons, lats)
+
+    # Calculate neighbour information
+    (input_idxs, output_idxs, idx_ref, dists) = \
+        kd_tree.get_neighbour_info(source_geo_def, target_area_def,
+                                   radius, neighbours=neighbours,
+                                   nprocs=nprocs, reduce_data=reduce_data,
+                                   segments=segments, epsilon=epsilon)
+
+    del output_idxs, dists
+
+    # Reduce index reference
+    input_size = input_idxs.sum()
+    index_mask = (idx_ref == input_size)
+    idx_ref = np.where(index_mask, 0, idx_ref)
+
+    # Get output projection as pyproj object
+    proj = Proj(target_area_def.proj4_string)
+
+    # Get output x/y coordinates
+    out_x, out_y = _get_output_xy(target_area_def, proj)
+
+    # Get input x/ycoordinates
+    in_x, in_y = _get_input_xy(source_geo_def, proj, input_idxs, idx_ref)
+
+    # Get the four closest corner points around each output location
+    pt_1, pt_2, pt_3, pt_4, idx_ref = \
+        _get_bounding_corners(in_x, in_y, out_x, out_y, neighbours, idx_ref)
+
+    # Calculate vertical and horizontal fractional distances t and s
+    t__, s__ = _get_ts(pt_1, pt_2, pt_3, pt_4, out_x, out_y)
+
+    # Remove mask and put np.nan at the masked locations instead
+    if masked:
+        mask = np.isnan(t__) | np.isnan(s__)
+        t__ = np.ma.masked_where(mask, t__)
+        s__ = np.ma.masked_where(mask, s__)
+
+    return t__, s__, input_idxs, idx_ref
+
+
+def _get_ts(pt_1, pt_2, pt_3, pt_4, out_x, out_y):
+    """Calculate vertical and horizontal fractional distances t and s"""
+
+    # General case, ie. where the the corners form an irregular rectangle
+    t__, s__ = _get_ts_irregular(pt_1, pt_2, pt_3, pt_4, out_y, out_x)
+
+    # Cases where verticals are parallel
+    idxs = np.isnan(t__) | np.isnan(s__)
+    # Remove extra dimensions
+    idxs = idxs.ravel()
+
+    if np.any(idxs):
+        t__[idxs], s__[idxs] = \
+            _get_ts_uprights_parallel(pt_1[idxs, :], pt_2[idxs, :],
+                                      pt_3[idxs, :], pt_4[idxs, :],
+                                      out_y[idxs], out_x[idxs])
+
+    # Cases where both verticals and horizontals are parallel
+    idxs = np.isnan(t__) | np.isnan(s__)
+    # Remove extra dimensions
+    idxs = idxs.ravel()
+    if np.any(idxs):
+        t__[idxs], s__[idxs] = \
+            _get_ts_parallellogram(pt_1[idxs, :], pt_2[idxs, :], pt_3[idxs, :],
+                                   out_y[idxs], out_x[idxs])
+
+    with np.errstate(invalid='ignore'):
+        idxs = (t__ < 0) | (t__ > 1) | (s__ < 0) | (s__ > 1)
+    t__[idxs] = np.nan
+    s__[idxs] = np.nan
+
+    return t__, s__
+
+
+def _get_ts_irregular(pt_1, pt_2, pt_3, pt_4, out_y, out_x):
+    """Get parameters for the case where none of the sides are parallel."""
+
+    # Get parameters for the quadratic equation
+    a__, b__, c__ = _calc_abc(pt_1, pt_2, pt_3, pt_4, out_y, out_x)
+
+    # Get the valid roots from interval [0, 1]
+    t__ = _solve_quadratic(a__, b__, c__, min_val=0., max_val=1.)
+
+    # Calculate parameter s
+    s__ = _solve_another_fractional_distance(t__, pt_1[:, 1], pt_3[:, 1],
+                                             pt_2[:, 1], pt_4[:, 1], out_y)
+
+    return t__, s__
+
+
+def _get_ts_uprights_parallel(pt_1, pt_2, pt_3, pt_4, out_y, out_x):
+    """Get parameters for the case where uprights are parallel"""
+
+    # Get parameters for the quadratic equation
+    a__, b__, c__ = _calc_abc(pt_1, pt_3, pt_2, pt_4, out_y, out_x)
+
+    # Get the valid roots from interval [0, 1]
+    s__ = _solve_quadratic(a__, b__, c__, min_val=0., max_val=1.)
+
+    # Calculate parameter t
+    t__ = _solve_another_fractional_distance(s__, pt_1[:, 1], pt_2[:, 1],
+                                             pt_3[:, 1], pt_4[:, 1], out_y)
+
+    return t__, s__
+
+
+def _get_ts_parallellogram(pt_1, pt_2, pt_3, out_y, out_x):
+    """Get parameters for the case where uprights are parallel"""
+
+    # Pairwise longitudal separations between reference points
+    x_21 = pt_2[:, 0] - pt_1[:, 0]
+    x_31 = pt_3[:, 0] - pt_1[:, 0]
+
+    # Pairwise latitudal separations between reference points
+    y_21 = pt_2[:, 1] - pt_1[:, 1]
+    y_31 = pt_3[:, 1] - pt_1[:, 1]
+
+    t__ = (x_21 * (out_y - pt_1[:, 1]) - y_21 * (out_x - pt_1[:, 0])) / \
+          (x_21 * y_31 - y_21 * x_31)
+    with np.errstate(invalid='ignore'):
+        idxs = (t__ < 0.) | (t__ > 1.)
+    t__[idxs] = np.nan
+
+    s__ = (out_x - pt_1[:, 0] + x_31 * t__) / x_21
+
+    with np.errstate(invalid='ignore'):
+        idxs = (s__ < 0.) | (s__ > 1.)
+    s__[idxs] = np.nan
+
+    return t__, s__
+
+
+def _solve_another_fractional_distance(f__, y_1, y_2, y_3, y_4, out_y):
+    """Solve parameter t__ from s__, or vice versa.  For solving s__,
+    switch order of y_2 and y_3."""
+    y_21 = y_2 - y_1
+    y_43 = y_4 - y_3
+
+    with np.errstate(divide='ignore'):
+        g__ = ((out_y - y_1 - y_21 * f__) /
+               (y_3 + y_43 * f__ - y_1 - y_21 * f__))
+
+    # Limit values to interval [0, 1]
+    with np.errstate(invalid='ignore'):
+        idxs = (g__ < 0) | (g__ > 1)
+    g__[idxs] = np.nan
+
+    return g__
+
+
+def _calc_abc(pt_1, pt_2, pt_3, pt_4, out_y, out_x):
+    """Calculate coefficients for quadratic equation for
+    _get_ts_irregular() and _get_ts_uprights().  For _get_ts_uprights
+    switch order of pt_2 and pt_3.
+    """
+    # Pairwise longitudal separations between reference points
+    x_21 = pt_2[:, 0] - pt_1[:, 0]
+    x_31 = pt_3[:, 0] - pt_1[:, 0]
+    x_42 = pt_4[:, 0] - pt_2[:, 0]
+
+    # Pairwise latitudal separations between reference points
+    y_21 = pt_2[:, 1] - pt_1[:, 1]
+    y_31 = pt_3[:, 1] - pt_1[:, 1]
+    y_42 = pt_4[:, 1] - pt_2[:, 1]
+
+    a__ = x_31 * y_42 - y_31 * x_42
+    b__ = out_y * (x_42 - x_31) - out_x * (y_42 - y_31) + \
+        x_31 * pt_2[:, 1] - y_31 * pt_2[:, 0] + \
+        y_42 * pt_1[:, 0] - x_42 * pt_1[:, 1]
+    c__ = out_y * x_21 - out_x * y_21 + pt_1[:, 0] * pt_2[:, 1] - \
+        pt_2[:, 0] * pt_1[:, 1]
+
+    return a__, b__, c__
+
+
+def _mask_coordinates(lons, lats):
+    """Mask invalid coordinate values"""
+    lons = lons.ravel()
+    lats = lats.ravel()
+    idxs = ((lons < -180.) | (lons > 180.) |
+            (lats < -90.) | (lats > 90.))
+    lons[idxs] = np.nan
+    lats[idxs] = np.nan
+
+    return lons, lats
+
+
+def _get_corner(stride, valid, in_x, in_y, idx_ref):
+    """Get closest set of coordinates from the *valid* locations"""
+    idxs = np.argmax(valid, axis=1)
+    invalid = np.invert(np.max(valid, axis=1))
+    # Replace invalid points with np.nan
+    x__ = in_x[stride, idxs]
+    x__[invalid] = np.nan
+    y__ = in_y[stride, idxs]
+    y__[invalid] = np.nan
+    idx = idx_ref[stride, idxs]
+
+    return x__, y__, idx
+
+
+def _get_bounding_corners(in_x, in_y, out_x, out_y, neighbours, idx_ref):
+    """Get four closest locations from (in_x, in_y) so that they form a
+    bounding rectangle around the requested location given by (out_x,
+    out_y).
+    """
+
+    # Find four closest pixels around the target location
+
+    # Tile output coordinates to same shape as neighbour info
+    out_x_tile = np.tile(out_x, (neighbours, 1)).T
+    out_y_tile = np.tile(out_y, (neighbours, 1)).T
+
+    # Get differences in both directions
+    x_diff = out_x_tile - in_x
+    y_diff = out_y_tile - in_y
+
+    stride = np.arange(x_diff.shape[0])
+
+    # Upper left source pixel
+    valid = (x_diff > 0) & (y_diff < 0)
+    x_1, y_1, idx_1 = _get_corner(stride, valid, in_x, in_y, idx_ref)
+
+    # Upper right source pixel
+    valid = (x_diff < 0) & (y_diff < 0)
+    x_2, y_2, idx_2 = _get_corner(stride, valid, in_x, in_y, idx_ref)
+
+    # Lower left source pixel
+    valid = (x_diff > 0) & (y_diff > 0)
+    x_3, y_3, idx_3 = _get_corner(stride, valid, in_x, in_y, idx_ref)
+
+    # Lower right source pixel
+    valid = (x_diff < 0) & (y_diff > 0)
+    x_4, y_4, idx_4 = _get_corner(stride, valid, in_x, in_y, idx_ref)
+
+    # Combine sorted indices to idx_ref
+    idx_ref = np.vstack((idx_1, idx_2, idx_3, idx_4)).T
+
+    return (np.vstack((x_1, y_1)).T, np.vstack((x_2, y_2)).T,
+            np.vstack((x_3, y_3)).T, np.vstack((x_4, y_4)).T, idx_ref)
+
+
+def _solve_quadratic(a__, b__, c__, min_val=0.0, max_val=1.0):
+    """Solve quadratic equation and return the valid roots from interval
+    [*min_val*, *max_val*]
+
+    """
+
+    def int_and_float_to_numpy(val):
+        if not isinstance(val, np.ndarray):
+            if isinstance(val, (int, float)):
+                val = [val]
+            val = np.array(val)
+        return val
+
+    a__ = int_and_float_to_numpy(a__)
+    b__ = int_and_float_to_numpy(b__)
+    c__ = int_and_float_to_numpy(c__)
+
+    discriminant = b__ * b__ - 4 * a__ * c__
+
+    # Solve the quadratic polynomial
+    with np.errstate(invalid='ignore', divide='ignore'):
+        x_1 = (-b__ + np.sqrt(discriminant)) / (2 * a__)
+        x_2 = (-b__ - np.sqrt(discriminant)) / (2 * a__)
+
+    # Find valid solutions, ie. 0 <= t <= 1
+    x__ = x_1.copy()
+    with np.errstate(invalid='ignore'):
+        idxs = (x_1 < min_val) | (x_1 > max_val)
+    x__[idxs] = x_2[idxs]
+
+    with np.errstate(invalid='ignore'):
+        idxs = (x__ < min_val) | (x__ > max_val)
+    x__[idxs] = np.nan
+
+    return x__
+
+
+def _get_output_xy(target_area_def, proj):
+    """Get x/y coordinates of the target grid."""
+    # Read output coordinates
+    out_lons, out_lats = target_area_def.get_lonlats()
+    out_lons, out_lats = _mask_coordinates(out_lons, out_lats)
+
+    out_x, out_y = proj(out_lons, out_lats)
+
+    return out_x, out_y
+
+
+def _get_input_xy(source_geo_def, proj, input_idxs, idx_ref):
+    """Get x/y coordinates for the input area and reduce the data."""
+    in_lons, in_lats = source_geo_def.get_lonlats()
+
+    # Select valid locations
+    in_lons = in_lons.ravel()[input_idxs]
+    in_lats = in_lats.ravel()[input_idxs]
+
+    # Mask invalid values
+    in_lons, in_lats = _mask_coordinates(in_lons, in_lats)
+
+    # Expand input coordinates for each output location
+    in_lons = in_lons[idx_ref]
+    in_lats = in_lats[idx_ref]
+
+    # Convert coordinates to output projection x/y space
+    in_x, in_y = proj(in_lons, in_lats)
+
+    return in_x, in_y
+
+
+def _check_data_shape(data, input_idxs):
+    """Check data shape and adjust if necessary."""
+    # Handle multiple datasets
+    if data.ndim > 2 and data.shape[0] * data.shape[1] == input_idxs.shape[0]:
+        data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
+    # Also ravel single dataset
+    elif data.shape[0] != input_idxs.size:
+        data = data.ravel()
+
+    # Ensure two dimensions
+    if data.ndim == 1:
+        data = np.expand_dims(data, 1)
+
+    return data
diff --git a/pyresample/geometry.py b/pyresample/geometry.py
index 8f6a25c..3000598 100644
--- a/pyresample/geometry.py
+++ b/pyresample/geometry.py
@@ -674,29 +674,9 @@ class AreaDefinition(BaseDefinition):
                 raise ValueError("lon and lat is not of the same shape!")
 
         pobj = _spatial_mp.Proj(self.proj4_string)
-        upl_x = self.area_extent[0]
-        upl_y = self.area_extent[3]
-        xscale = abs(self.area_extent[2] -
-                     self.area_extent[0]) / float(self.x_size)
-        yscale = abs(self.area_extent[1] -
-                     self.area_extent[3]) / float(self.y_size)
-
         xm_, ym_ = pobj(lon, lat)
-        x__ = (xm_ - upl_x) / xscale
-        y__ = (upl_y - ym_) / yscale
 
-        if isinstance(x__, np.ndarray) and isinstance(y__, np.ndarray):
-            mask = (((x__ < 0) | (x__ > self.x_size)) |
-                    ((y__ < 0) | (y__ > self.y_size)))
-            return (np.ma.masked_array(x__.astype('int'), mask=mask,
-                                       fill_value=-1),
-                    np.ma.masked_array(y__.astype('int'), mask=mask,
-                                       fill_value=-1))
-        else:
-            if ((x__ < 0 or x__ > self.x_size) or
-                    (y__ < 0 or y__ > self.y_size)):
-                raise ValueError('Point outside area:( %f %f)' % (x__, y__))
-            return int(x__), int(y__)
+        return self.get_xy_from_proj_coords(xm_, ym_)
 
     def get_xy_from_proj_coords(self, xm_, ym_):
         """Retrieve closest x and y coordinates (column, row indices) for a
@@ -732,21 +712,22 @@ class AreaDefinition(BaseDefinition):
 
         upl_x = self.area_extent[0]
         upl_y = self.area_extent[3]
-        xscale = abs(self.area_extent[2] -
-                     self.area_extent[0]) / float(self.x_size)
-        yscale = abs(self.area_extent[1] -
-                     self.area_extent[3]) / float(self.y_size)
+        xscale = (self.area_extent[2] -
+                  self.area_extent[0]) / float(self.x_size)
+        # because rows direction is the opposite of y's
+        yscale = (self.area_extent[1] -
+                  self.area_extent[3]) / float(self.y_size)
 
         x__ = (xm_ - upl_x) / xscale
-        y__ = (upl_y - ym_) / yscale
+        y__ = (ym_ - upl_y) / yscale
 
         if isinstance(x__, np.ndarray) and isinstance(y__, np.ndarray):
             mask = (((x__ < 0) | (x__ > self.x_size)) |
                     ((y__ < 0) | (y__ > self.y_size)))
             return (np.ma.masked_array(x__.astype('int'), mask=mask,
-                                       fill_value=-1),
+                                       fill_value=-1, copy=False),
                     np.ma.masked_array(y__.astype('int'), mask=mask,
-                                       fill_value=-1))
+                                       fill_value=-1, copy=False))
         else:
             if ((x__ < 0 or x__ > self.x_size) or
                     (y__ < 0 or y__ > self.y_size)):
diff --git a/pyresample/kd_tree.py b/pyresample/kd_tree.py
index f787e0e..e08ca84 100644
--- a/pyresample/kd_tree.py
+++ b/pyresample/kd_tree.py
@@ -289,10 +289,6 @@ def get_neighbour_info(source_geo_def, target_geo_def, radius_of_influence,
     epsilon : float, optional
         Allowed uncertainty in meters. Increasing uncertainty
         reduces execution time
-    fill_value : int or None, optional
-            Set undetermined pixels to this value.
-            If fill_value is None a masked array is returned 
-            with undetermined pixels masked    
     reduce_data : bool, optional
         Perform initial coarse reduction of source dataset in order
         to reduce execution time
diff --git a/pyresample/test/__init__.py b/pyresample/test/__init__.py
index f919801..baccb3c 100644
--- a/pyresample/test/__init__.py
+++ b/pyresample/test/__init__.py
@@ -35,7 +35,8 @@ from pyresample.test import (
     test_utils,
     test_ewa_ll2cr,
     test_ewa_fornav,
-    )
+    test_bilinear,
+)
 
 import unittest
 
@@ -55,6 +56,7 @@ def suite():
     mysuite.addTests(test_utils.suite())
     mysuite.addTests(test_ewa_ll2cr.suite())
     mysuite.addTests(test_ewa_fornav.suite())
+    mysuite.addTests(test_bilinear.suite())
 
     return mysuite
 
diff --git a/pyresample/test/test_bilinear.py b/pyresample/test/test_bilinear.py
new file mode 100644
index 0000000..7bb3362
--- /dev/null
+++ b/pyresample/test/test_bilinear.py
@@ -0,0 +1,239 @@
+import unittest
+import numpy as np
+
+from pyproj import Proj
+
+import pyresample.bilinear as bil
+from pyresample import geometry, utils, kd_tree
+
+
class Test(unittest.TestCase):
    """Tests for the bilinear interpolation helpers in pyresample.bilinear."""

    # Corner points of a generic (irregular) quadrilateral
    pts_irregular = (np.array([[-1., 1.], ]),
                     np.array([[1., 2.], ]),
                     np.array([[-2., -1.], ]),
                     np.array([[2., -4.], ]))
    # Corner points where the vertical sides are parallel
    pts_vert_parallel = (np.array([[-1., 1.], ]),
                         np.array([[1., 2.], ]),
                         np.array([[-1., -1.], ]),
                         np.array([[1., -2.], ]))
    # Corner points forming a parallelogram (both side pairs parallel)
    pts_both_parallel = (np.array([[-1., 1.], ]),
                         np.array([[1., 1.], ]),
                         np.array([[-1., -1.], ]),
                         np.array([[1., -1.], ]))

    # Area definition with a 4x4 pixel grid
    target_def = geometry.AreaDefinition('areaD',
                                         'Europe (3km, HRV, VTC)',
                                         'areaD',
                                         {'a': '6378144.0',
                                          'b': '6356759.0',
                                          'lat_0': '50.00',
                                          'lat_ts': '50.00',
                                          'lon_0': '8.00',
                                          'proj': 'stere'},
                                         4, 4,
                                         [-1370912.72,
                                          -909968.64000000001,
                                          1029087.28,
                                          1490031.3600000001])

    # Input data around the target pixel at 0.63388324, 55.08234642,
    in_shape = (100, 100)
    data1 = np.ones((in_shape[0], in_shape[1]))
    data2 = 2. * data1
    lons, lats = np.meshgrid(np.linspace(-5., 5., num=in_shape[0]),
                             np.linspace(50., 60., num=in_shape[1]))
    swath_def = geometry.SwathDefinition(lons=lons, lats=lats)

    radius = 50e3
    neighbours = 32
    # Precompute the kd-tree neighbour mapping shared by all tests
    input_idxs, output_idxs, idx_ref, dists = \
        kd_tree.get_neighbour_info(swath_def, target_def,
                                   radius, neighbours=neighbours,
                                   nprocs=1)
    input_size = input_idxs.sum()
    # Index values equal to input_size mark "no neighbour found";
    # remap those to 0 so they can be used for indexing
    index_mask = (idx_ref == input_size)
    idx_ref = np.where(index_mask, 0, idx_ref)

    def test_calc_abc(self):
        """Test calculation of quadratic coefficients a, b and c."""
        # No np.nan inputs
        pt_1, pt_2, pt_3, pt_4 = self.pts_irregular
        res = bil._calc_abc(pt_1, pt_2, pt_3, pt_4, 0.0, 0.0)
        self.assertFalse(np.isnan(res[0]))
        self.assertFalse(np.isnan(res[1]))
        self.assertFalse(np.isnan(res[2]))
        # np.nan input -> np.nan output
        res = bil._calc_abc(np.array([[np.nan, np.nan]]),
                            pt_2, pt_3, pt_4, 0.0, 0.0)
        self.assertTrue(np.isnan(res[0]))
        self.assertTrue(np.isnan(res[1]))
        self.assertTrue(np.isnan(res[2]))

    def test_get_ts_irregular(self):
        """Test fractional coordinates for an irregular quadrilateral."""
        res = bil._get_ts_irregular(self.pts_irregular[0],
                                    self.pts_irregular[1],
                                    self.pts_irregular[2],
                                    self.pts_irregular[3],
                                    0., 0.)
        self.assertEqual(res[0], 0.375)
        self.assertEqual(res[1], 0.5)
        # Vertical-parallel corners cannot be solved by the irregular
        # formulation -> NaN
        res = bil._get_ts_irregular(self.pts_vert_parallel[0],
                                    self.pts_vert_parallel[1],
                                    self.pts_vert_parallel[2],
                                    self.pts_vert_parallel[3],
                                    0., 0.)
        self.assertTrue(np.isnan(res[0]))
        self.assertTrue(np.isnan(res[1]))

    def test_get_ts_uprights_parallel(self):
        """Test fractional coordinates when the uprights are parallel."""
        res = bil._get_ts_uprights_parallel(self.pts_vert_parallel[0],
                                            self.pts_vert_parallel[1],
                                            self.pts_vert_parallel[2],
                                            self.pts_vert_parallel[3],
                                            0., 0.)
        self.assertEqual(res[0], 0.5)
        self.assertEqual(res[1], 0.5)

    def test_get_ts_parallellogram(self):
        """Test fractional coordinates for a parallelogram."""
        res = bil._get_ts_parallellogram(self.pts_both_parallel[0],
                                         self.pts_both_parallel[1],
                                         self.pts_both_parallel[2],
                                         0., 0.)
        self.assertEqual(res[0], 0.5)
        self.assertEqual(res[1], 0.5)

    def test_get_ts(self):
        """Test the dispatching _get_ts wrapper for all corner layouts."""
        out_x = np.array([[0.]])
        out_y = np.array([[0.]])
        res = bil._get_ts(self.pts_irregular[0],
                          self.pts_irregular[1],
                          self.pts_irregular[2],
                          self.pts_irregular[3],
                          out_x, out_y)
        self.assertEqual(res[0], 0.375)
        self.assertEqual(res[1], 0.5)
        res = bil._get_ts(self.pts_both_parallel[0],
                          self.pts_both_parallel[1],
                          self.pts_both_parallel[2],
                          self.pts_both_parallel[3],
                          out_x, out_y)
        self.assertEqual(res[0], 0.5)
        self.assertEqual(res[1], 0.5)
        res = bil._get_ts(self.pts_vert_parallel[0],
                          self.pts_vert_parallel[1],
                          self.pts_vert_parallel[2],
                          self.pts_vert_parallel[3],
                          out_x, out_y)
        self.assertEqual(res[0], 0.5)
        self.assertEqual(res[1], 0.5)

    def test_solve_quadratic(self):
        """Test the quadratic solver, including NaN and min_val handling."""
        res = bil._solve_quadratic(1, 0, 0)
        self.assertEqual(res[0], 0.0)
        res = bil._solve_quadratic(1, 2, 1)
        self.assertTrue(np.isnan(res[0]))
        res = bil._solve_quadratic(1, 2, 1, min_val=-2.)
        self.assertEqual(res[0], -1.0)
        # Test that small adjustments work
        pt_1, pt_2, pt_3, pt_4 = self.pts_vert_parallel
        pt_1 = self.pts_vert_parallel[0].copy()
        pt_1[0][0] += 1e-7
        res = bil._calc_abc(pt_1, pt_2, pt_3, pt_4, 0.0, 0.0)
        res = bil._solve_quadratic(res[0], res[1], res[2])
        self.assertAlmostEqual(res[0], 0.5, 5)
        res = bil._calc_abc(pt_1, pt_3, pt_2, pt_4, 0.0, 0.0)
        res = bil._solve_quadratic(res[0], res[1], res[2])
        self.assertAlmostEqual(res[0], 0.5, 5)

    def test_get_output_xy(self):
        """Test calculation of target grid x/y coordinates."""
        proj = Proj(self.target_def.proj4_string)
        out_x, out_y = bil._get_output_xy(self.target_def, proj)
        self.assertTrue(out_x.all())
        self.assertTrue(out_y.all())

    def test_get_input_xy(self):
        """Test calculation of input x/y coordinates."""
        proj = Proj(self.target_def.proj4_string)
        # BUGFIX: the original test called bil._get_output_xy here, so
        # _get_input_xy was never exercised
        in_x, in_y = bil._get_input_xy(self.swath_def, proj,
                                       self.input_idxs, self.idx_ref)
        self.assertTrue(in_x.all())
        self.assertTrue(in_y.all())

    def test_get_bounding_corners(self):
        """Test the search for bounding corner points."""
        proj = Proj(self.target_def.proj4_string)
        out_x, out_y = bil._get_output_xy(self.target_def, proj)
        in_x, in_y = bil._get_input_xy(self.swath_def, proj,
                                       self.input_idxs, self.idx_ref)
        res = bil._get_bounding_corners(in_x, in_y, out_x, out_y,
                                        self.neighbours, self.idx_ref)
        for i in range(len(res) - 1):
            pt_ = res[i]
            for j in range(2):
                # Only the sixth output location has four valid corners
                self.assertTrue(np.isfinite(pt_[5, j]))

    def test_get_bil_info(self):
        """Test calculation of bilinear resampling parameters."""
        t__, s__, input_idxs, idx_arr = bil.get_bil_info(self.swath_def,
                                                         self.target_def)
        # Only 6th index should have valid values
        for i in range(len(t__)):
            if i == 5:
                self.assertAlmostEqual(t__[i], 0.684850870155, 5)
                self.assertAlmostEqual(s__[i], 0.775433912393, 5)
            else:
                self.assertTrue(np.isnan(t__[i]))
                self.assertTrue(np.isnan(s__[i]))

    def test_get_sample_from_bil_info(self):
        """Test resampling using precalculated resampling parameters."""
        t__, s__, input_idxs, idx_arr = bil.get_bil_info(self.swath_def,
                                                         self.target_def)
        # Sample from data1
        res = bil.get_sample_from_bil_info(self.data1.ravel(), t__, s__,
                                           input_idxs, idx_arr)
        self.assertEqual(res[5], 1.)
        # Sample from data2
        res = bil.get_sample_from_bil_info(self.data2.ravel(), t__, s__,
                                           input_idxs, idx_arr)
        self.assertEqual(res[5], 2.)
        # Reshaping to the target area shape
        res = bil.get_sample_from_bil_info(self.data2.ravel(), t__, s__,
                                           input_idxs, idx_arr,
                                           output_shape=self.target_def.shape)
        res = res.shape
        self.assertEqual(res[0], self.target_def.shape[0])
        self.assertEqual(res[1], self.target_def.shape[1])

    def test_resample_bilinear(self):
        """Test the full bilinear resampling chain."""
        # Single array
        res = bil.resample_bilinear(self.data1,
                                    self.swath_def,
                                    self.target_def)
        self.assertEqual(res.size, self.target_def.size)
        # There should be only one pixel with value 1, all others are 0
        self.assertEqual(res.sum(), 1)

        # Single array with masked output
        res = bil.resample_bilinear(self.data1,
                                    self.swath_def,
                                    self.target_def, fill_value=None)
        self.assertTrue(hasattr(res, 'mask'))
        # There should be only one valid pixel
        self.assertEqual(self.target_def.size - res.mask.sum(), 1)

        # Two stacked arrays
        data = np.dstack((self.data1, self.data2))
        res = bil.resample_bilinear(data,
                                    self.swath_def,
                                    self.target_def)
        shp = res.shape
        self.assertEqual(shp[0], self.target_def.size)
        self.assertEqual(shp[1], 2)
+
+
def suite():
    """Build and return the test suite for this module."""
    mysuite = unittest.TestSuite()
    mysuite.addTest(unittest.TestLoader().loadTestsFromTestCase(Test))
    return mysuite
diff --git a/pyresample/version.py b/pyresample/version.py
index ef9842d..31f6462 100644
--- a/pyresample/version.py
+++ b/pyresample/version.py
@@ -15,4 +15,4 @@
 # You should have received a copy of the GNU Lesser General Public License along
 # with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-__version__ = '1.2.9'
+__version__ = '1.3.0'

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/pyresample.git



More information about the Pkg-grass-devel mailing list