[python-mpop] 01/06: New upstream version 1.3.0

Antonio Valentino a_valentino-guest at moszumanska.debian.org
Tue Nov 1 10:49:31 UTC 2016


This is an automated email from the git hooks/post-receive script.

a_valentino-guest pushed a commit to branch master
in repository python-mpop.

commit 5189864ec7fd259fba5d36eee0c6716144d61873
Author: Antonio Valentino <antonio.valentino at tiscali.it>
Date:   Tue Nov 1 09:54:36 2016 +0000

    New upstream version 1.3.0
---
 .bumpversion.cfg                                   |    7 +
 .gitchangelog.rc                                   |  192 ++
 .travis.yml                                        |    1 +
 changelog.rst                                      |  443 +++
 ...S-Aqua.cfg.template => DREOS-Aqua.cfg.template} |   14 +-
 ...Terra.cfg.template => DREOS-Terra.cfg.template} |   13 +-
 etc/hsaf10.cfg.template                            |   26 +
 etc/odyssey.cfg.template                           |   25 +
 etc/s2a.cfg.template                               |   88 +
 etc/sentinel1b.cfg.template                        |   11 +
 mpop/afgl.dat                                      |   60 +
 mpop/channel.py                                    |  349 +++
 mpop/imageo/HRWimage.py                            |  680 +++++
 mpop/imageo/TRTimage.py                            |  131 +
 mpop/imageo/formats/ninjotiff.py                   |  282 +-
 mpop/imageo/formats/ninjotiff_example              |   25 +-
 mpop/imageo/formats/writer_options.py              |   29 +
 mpop/imageo/geo_image.py                           |   82 +-
 mpop/imageo/palettes.py                            |  271 +-
 mpop/instruments/s2_composites.py                  |   24 +
 mpop/instruments/seviri.py                         |   84 +-
 mpop/instruments/viirs.py                          |  202 +-
 mpop/instruments/visir.py                          |   48 +-
 mpop/satellites/__init__.py                        |    2 +-
 mpop/satin/aapp1b.py                               |   44 +-
 mpop/satin/fy3_mersi.py                            |  201 ++
 mpop/satin/fy3_virr.py                             |   10 +-
 mpop/satin/gac_l1b.py                              |   37 +-
 mpop/satin/gribformat.py                           |   87 +
 mpop/satin/hdfeos_l1b.py                           |  113 +-
 mpop/satin/helper_functions.py                     |   22 +-
 mpop/satin/hsaf_h03.py                             |  226 ++
 mpop/satin/mipp_xrit.py                            |  139 +-
 mpop/satin/mpef_oca.py                             |  358 +++
 mpop/satin/msg_seviri_hdf.py                       |  265 ++
 mpop/satin/nc_pps_l2.py                            |   44 +-
 mpop/satin/nwcsaf_hrw_hdf.py                       |  355 +++
 mpop/satin/nwcsaf_msg.py                           | 3086 ++++++++++++++++++++
 mpop/satin/odyssey_radar.py                        |  222 ++
 mpop/satin/s2_msi.py                               |   69 +
 mpop/satin/viirs_compact.py                        |  324 +-
 mpop/satin/viirs_sdr.py                            |  117 +-
 mpop/satout/cfscene.py                             |   52 +-
 mpop/satout/netcdf4.py                             |   22 +-
 mpop/scene.py                                      |  206 +-
 mpop/tests/test_geo_image.py                       |  144 +-
 mpop/tests/test_projector.py                       |    5 +-
 mpop/tools.py                                      |  174 ++
 mpop/version.py                                    |    2 +-
 setup.py                                           |    4 +-
 utils/get_tile_def.py                              |   51 +
 51 files changed, 8939 insertions(+), 529 deletions(-)

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
new file mode 100644
index 0000000..b581344
--- /dev/null
+++ b/.bumpversion.cfg
@@ -0,0 +1,7 @@
+[bumpversion]
+current_version = 1.3.0
+commit = True
+tag = True
+
+[bumpversion:file:mpop/version.py]
+
diff --git a/.gitchangelog.rc b/.gitchangelog.rc
new file mode 100644
index 0000000..80c1ea7
--- /dev/null
+++ b/.gitchangelog.rc
@@ -0,0 +1,192 @@
+##
+## Format
+##
+##   ACTION: [AUDIENCE:] COMMIT_MSG [!TAG ...]
+##
+## Description
+##
+##   ACTION is one of 'chg', 'fix', 'new'
+##
+##       Is WHAT the change is about.
+##
+##       'chg' is for refactor, small improvement, cosmetic changes...
+##       'fix' is for bug fixes
+##       'new' is for new features, big improvement
+##
+##   AUDIENCE is optional and one of 'dev', 'usr', 'pkg', 'test', 'doc'
+##
+##       Is WHO is concerned by the change.
+##
+##       'dev'  is for developpers (API changes, refactors...)
+##       'usr'  is for final users (UI changes)
+##       'pkg'  is for packagers   (packaging changes)
+##       'test' is for testers     (test only related changes)
+##       'doc'  is for doc guys    (doc only changes)
+##
+##   COMMIT_MSG is ... well ... the commit message itself.
+##
+##   TAGs are additionnal adjective as 'refactor' 'minor' 'cosmetic'
+##
+##       They are preceded with a '!' or a '@' (prefer the former, as the
+##       latter is wrongly interpreted in github.) Commonly used tags are:
+##
+##       'refactor' is obviously for refactoring code only
+##       'minor' is for a very meaningless change (a typo, adding a comment)
+##       'cosmetic' is for cosmetic driven change (re-indentation, 80-col...)
+##       'wip' is for partial functionality but complete subfunctionality.
+##
+## Example:
+##
+##   new: usr: support of bazaar implemented
+##   chg: re-indentend some lines !cosmetic
+##   new: dev: updated code to be compatible with last version of killer lib.
+##   fix: pkg: updated year of licence coverage.
+##   new: test: added a bunch of test around user usability of feature X.
+##   fix: typo in spelling my name in comment. !minor
+##
+##   Please note that multi-line commit message are supported, and only the
+##   first line will be considered as the "summary" of the commit message. So
+##   tags, and other rules only applies to the summary.  The body of the commit
+##   message will be displayed in the changelog without reformatting.
+
+
+##
+## ``ignore_regexps`` is a line of regexps
+##
+## Any commit having its full commit message matching any regexp listed here
+## will be ignored and won't be reported in the changelog.
+##
+ignore_regexps = [
+        r'@minor', r'!minor',
+        r'@cosmetic', r'!cosmetic',
+        r'@refactor', r'!refactor',
+        r'@wip', r'!wip',
+	r'^Merge commit .* into HEAD',
+        r'^([cC]hg|[fF]ix|[nN]ew)\s*:\s*[p|P]kg:',
+        r'^([cC]hg|[fF]ix|[nN]ew)\s*:\s*[d|D]ev:',
+        r'^(.{3,3}\s*:)?\s*[fF]irst commit.?\s*$',
+  ]
+
+
+## ``section_regexps`` is a list of 2-tuples associating a string label and a
+## list of regexp
+##
+## Commit messages will be classified in sections thanks to this. Section
+## titles are the label, and a commit is classified under this section if any
+## of the regexps associated is matching.
+##
+section_regexps = [
+    ('New', [
+	r'^[nN]ew\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n]*)$',
+     ]),
+    ('Changes', [
+        r'^[cC]hg\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n]*)$',
+     ]),
+    ('Fix', [
+        r'^([Bb]ug)?[fF]ix\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n]*)$',
+     ]),
+
+    ('Other', None ## Match all lines
+     ),
+
+]
+
+
+## ``body_process`` is a callable
+##
+## This callable will be given the original body and result will
+## be used in the changelog.
+##
+## Available constructs are:
+##
+##   - any python callable that take one txt argument and return txt argument.
+##
+##   - ReSub(pattern, replacement): will apply regexp substitution.
+##
+##   - Indent(chars="  "): will indent the text with the prefix
+##     Please remember that template engines gets also to modify the text and
+##     will usually indent themselves the text if needed.
+##git log --pretty=format:"- %s%n%b" --since="$(git show -s --format=%ad `git rev-list --tags --max-count=1`)"
+##   - Wrap(regexp=r"\n\n"): re-wrap text in separate paragraph to fill 80-Columns
+##
+##   - noop: do nothing
+##
+##   - ucfirst: ensure the first letter is uppercase.
+##     (usually used in the ``subject_process`` pipeline)
+##
+##   - final_dot: ensure text finishes with a dot
+##     (usually used in the ``subject_process`` pipeline)
+##
+##   - strip: remove any spaces before or after the content of the string
+##
+## Additionally, you can `pipe` the provided filters, for instance:
+#body_process = Wrap(regexp=r'\n(?=\w+\s*:)') | Indent(chars="  ")
+#body_process = Wrap(regexp=r'\n(?=\w+\s*:)')
+#body_process = noop
+body_process = ReSub(r'(?m)\s*^Signed-off-by: .*$\s*', '')
+
+
+## ``subject_process`` is a callable
+##
+## This callable will be given the original subject and result will
+## be used in the changelog.
+##
+## Available constructs are those listed in ``body_process`` doc.
+subject_process = (strip |
+    ReSub(r'^([cC]hg|[fF]ix|[nN]ew)\s*:\s*((dev|use?r|pkg|test|doc)\s*:\s*)?([^\n@]*)(@[a-z]+\s+)*$', r'\4') |
+    ucfirst | final_dot)
+
+
+## ``tag_filter_regexp`` is a regexp
+##
+## Tags that will be used for the changelog must match this regexp.
+##
+tag_filter_regexp = r'^v[0-9]+\.[0-9]+(\.[0-9]+)?$'
+
+
+## ``unreleased_version_label`` is a string
+##
+## This label will be used as the changelog Title of the last set of changes
+## between last valid tag and HEAD if any.
+unreleased_version_label = "%%version%% (unreleased)"
+
+
+## ``output_engine`` is a callable
+##
+## This will change the output format of the generated changelog file
+##
+## Available choices are:
+##
+##   - rest_py
+##
+##        Legacy pure python engine, outputs ReSTructured text.
+##        This is the default.
+##
+##   - mustache(<template_name>)
+##
+##        Template name could be any of the available templates in
+##        ``templates/mustache/*.tpl``.
+##        Requires python package ``pystache``.
+##        Examples:
+##           - mustache("markdown")
+##           - mustache("restructuredtext")
+##
+##   - makotemplate(<template_name>)
+##
+##        Template name could be any of the available templates in
+##        ``templates/mako/*.tpl``.
+##        Requires python package ``mako``.
+##        Examples:
+##           - makotemplate("restructuredtext")
+##
+output_engine = rest_py
+#output_engine = mustache("restructuredtext")
+#output_engine = mustache("markdown")
+#output_engine = makotemplate("restructuredtext")
+
+
+## ``include_merges`` is a boolean
+##
+## This option tells git-log whether to include merge commits in the log.
+## The default is to include them.
+include_merges = False
diff --git a/.travis.yml b/.travis.yml
index 44b455a..e98e6be 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,6 +7,7 @@ install:
 - pip install coveralls
 - pip install pyorbital
 script: coverage run --source=mpop setup.py test
+sudo: false
 after_success: coveralls
 deploy:
   provider: pypi
diff --git a/changelog.rst b/changelog.rst
index f1287e9..b52a698 100644
--- a/changelog.rst
+++ b/changelog.rst
@@ -1,6 +1,449 @@
 Changelog
 =========
 
+v1.3.0 (2016-10-27)
+-------------------
+
+- Update changelog. [Martin Raspaud]
+
+- Bump version: 1.2.1 → 1.3.0. [Martin Raspaud]
+
+- Add bump and gitchangelog configs. [Martin Raspaud]
+
+- Fix pep8 compliance. [Martin Raspaud]
+
+- Use filenames for mipp only if the files are relevant. [Martin
+  Raspaud]
+
+- Handle time_slot tuples better by splitting them. [Martin Raspaud]
+
+  This allows mpop to be backwards compatible for non mipp-based readers.
+
+
+- Allow providing filenames to the mipp xrit reader. [Martin Raspaud]
+
+  This allows mpop to use filenames as provided by trollduction.
+
+- Make it possible to specify custom stretching of truecolor imagery.
+  [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Merge pull request #36 from khunger/feature-fill-value-substitution.
+  [Martin Raspaud]
+
+  New option fill_value_subst
+
+- New option fill_value_subst. [Christian Kliche]
+
+  This option can be used in conjunction with GeoImage.fill_value. Any occurrences of
+  fill_value within the image data will be replaced with fill_value_subst before storing
+  to image file.
+
+  Example trollduction configuration to use this feature:
+
+  <file>test.tif
+  	<format_params>
+  		<fill_value_subst>1</fill_value_subst>
+  	</format_params>
+  </file>
+
+
+- Merge pull request #37 from khunger/feature-xrit-sublon-metadata.
+  [Martin Raspaud]
+
+  Atmospheric correction and xrit metadata "sublon" in sat scene info
+
+- Algorithm for atmosheric correction. [Christian Kliche]
+
+  Added new algorithm to Channel class to apply atmospheric correction
+  on a copy of channel data using given satellite zenith angle data.
+  Creates a new channel containing the corrected data.
+
+
+- Added xrit metadata "sublon" to sat scene info. [Christian Kliche]
+
+- Bugfix fill_value in cf-output. [Adam.Dybbroe]
+
+- Support PPS on I-band resolution. [Adam.Dybbroe]
+
+- Bugfix platform naming. [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Merge pull request #35 from khunger/feature-writer-options. [Martin
+  Raspaud]
+
+  Feature writer options
+
+- Added tests for save with writer_options. [Christian Kliche]
+
+- Fixed unit tests. [Christian Kliche]
+
+- Changed parameter order for backwards compatibility. [Christian
+  Kliche]
+
+- GeoImage.save extended by writer_options dict. [Christian Kliche]
+
+  Some dict keys for options  used by GeoImage.save
+  are defined in writer_options.py.
+  All options within this dict will be forwarded to custom writers
+  like NinJoTiff writer module.
+
+
+- GeoImage.save extended by writer_options dict. [Christian Kliche]
+
+  Some dict keys for options  used by GeoImage.save
+  are defined in writer_options.py.
+  All options within this dict will be forwarded to custom writers
+  like NinJoTiff writer module.
+
+
+- Allow adding int, float and str attributes to the main info object.
+  [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Merge pull request #33 from meteoswiss-mdr/pre-master. [Martin
+  Raspaud]
+
+  H-SAF and Odyssey reader
+
+- Add odyssey reader. [hau]
+
+- Renamed hsaf reader. [hau]
+
+- Merge branch 'pre-master' of https://github.com/meteoswiss-mdr/mpop
+  into pre-master. [hau]
+
+- Merge branch 'pre-master' of https://github.com/pytroll/mpop into pre-
+  master. [hau]
+
+- Add config file for reading hsaf data. [hau]
+
+- Add new reader for HSAF h03 product. [hau]
+
+- Small bugfix for hdf5 SEVIRI reader. [hau]
+
+- Add option area_aggregation. [Adam.Dybbroe]
+
+  Default is True for backward compatibility. If False, the band_axis p
+  arameter is obsolete and all  bands are separated in 2d arrays. W
+  riting goes faster this way.
+
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Merge pull request #34 from ninahakansson/pre-master. [Martin Raspaud]
+
+  Faster writing in cfscene.py for pps
+
+- Merge branch 'pre-master' of https://github.com/pytroll/mpop into pre-
+  master. [Nina.Hakansson]
+
+- Faster writing with time_dimension by checking the fastest condition
+  first. [Nina.Hakansson]
+
+  The condition "(chn.area, chn.info['units']) in area_units"
+  takes several seconds to check for a npp scene combined of
+  some granules.
+
+
+- Pep8 editorials. [Adam.Dybbroe]
+
+- Adapt writing to new cfscene. [Adam.Dybbroe]
+
+- Bugfix sun-sat angles: Sort geofiles before. [Adam.Dybbroe]
+
+- Make writer able to have time dimension and falt band structure.
+  [Adam.Dybbroe]
+
+  Use time_dimension=True to use this way of storing data
+
+- Bugfix viirs geolocation. [Adam.Dybbroe]
+
+  When geolocation granule files are not ordered in time,
+  geolocation got wrong when calling the loader with a list of files
+
+- Add time_dimension option in CFScene writer. [Adam.Dybbroe]
+
+  Time dimension is used in Diana (visualisation system at SMHI) and in PPS
+
+- Fix netcdf file output. [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Ensure proper handling of lower and uppercase epsg/EPSG init strings.
+  [Panu Lahtinen]
+
+- Merge branch 'pre-master' of https://github.com/pytroll/mpop into pre-
+  master. [Panu Lahtinen]
+
+- Get proper srs when using init=EPSG:<int> in projection definition.
+  [Panu Lahtinen]
+
+- Change keyword argument to filenames. [Adam.Dybbroe]
+
+- Adapt to pillow. [Adam.Dybbroe]
+
+- Fix palettes normalised to 0-1 for mpop. [Adam.Dybbroe]
+
+- Add imagery capability for OCA cloud parameters. [Adam.Dybbroe]
+
+  Only the cloud top pressure (ctp) parameters is okay so far.
+  Need to check effective radius and COT
+
+
+- Add the FY3 MERSI-I reader. [Adam.Dybbroe]
+
+- Add mpef oca reader. [Adam.Dybbroe]
+
+- Add the embeded palette to ninjotiff generation if not overriden.
+  [Martin Raspaud]
+
+- Merge pull request #30 from khunger/fix-read-area-calculation. [Martin
+  Raspaud]
+
+  More conservative approach to handle errors in area_def_names_to_extent()
+
+- Use readers def area_extent if calculation fails. [Christian Kliche]
+
+- Merge pull request #32 from meteoswiss-mdr/pre-master. [Martin
+  Raspaud]
+
+  parallax correction and high resolution winds
+
+- Make use of sat_nr function in nwcsaf_msg.py. [hau]
+
+- Option for estimating cth for parallax correction. [hau]
+
+  introduced a optional argument if cth should be estimated or not.
+  Additionally introduced a small function to extract the satellite number.
+
+
+- User choice of background color for day_mircophysics. [hau]
+
+- Added functionality for parallax correction. [hau]
+
+  added new functions:
+     mpop/tools.py   -> estimate_cth
+     mpop/scene.py   -> get_orbital
+                        parallax_corr
+     mpop/channel.py -> get_viewing_geometry
+                        parallax_corr
+                        vinc_vect
+
+  estimate_cth
+    simple estimation of cloud top height comparing 10.8 micron temperature with temperature profile
+
+  get_orbital
+    small wrapper to get the satellite orbital from pyorbital
+
+  parallax_corr (scene.py)
+    performs parallax correction for all loaded channels
+
+  get_viewing_geometry
+    small function returning viewing azimuth and elevation angle for current channel
+
+  parallax_corr (channel.py)
+    performs parallax correction for a single channel
+
+  vinc_vect
+    parallized version of the vinc function
+
+
+- Copy the information of the palette for NWCSAF products. [hau]
+
+  ... when reprojecting
+
+
+- Add other satellite number definition to reader. [hau]
+
+  add 8 and 9 entry for meteosat 8 and 9
+  before only 08 and 09 were possible
+
+
+- Updated the _Calibrator call msg_seviri_hdf.py. [hau]
+
+  Updated initialization of the _Calibrator function
+  in mpop/satin/msg_seviri_hdf.py
+
+  msg_seviri_hdf.py uses the _Calibrator function in
+  mipp/xrit/MSG.py that was updated by
+  Martin and now takes another number of input arguments:
+
+  before Martin s change
+  class _Calibrator(object):
+      def __init__(self, hdr, channel_name):
+
+  after Martin s change
+  class _Calibrator(object):
+      def __init__(self, hdr, channel_name, bits_per_pixel):
+
+  so now the argument bits_per_pixel is set to 10.
+
+
+- Merge branch 'test' into pre-master. [hau]
+
+- Add file to read high resolution wind data from NWCSAF. [hau]
+
+- Add code to process TRT. [hau]
+
+  TRT is an MeteoSwiss tool to detect
+  thunderstorm cells.
+  The data can be processed with this file.
+
+
+- Add a code file to process HRW data from NWC-SAF. [hau]
+
+- Add reader for geo-hdf format EUMETSAT archive. [hau]
+
+  geo-hdf is a possible data format that you can order
+  from the EUMETSAT data archive
+  It enables to specify smaller regions.
+
+
+- New reader nwcsaf and modified scene.py. [hau]
+
+  a new file for reading NWCSAF data is submitted
+  it can read CTTH, CMa, CT, CCR, CRPh, PC, SPhR
+
+  added a small option to scene.py
+  in order to specify which level specifies the desired format
+  of the input file
+
+
+- Add cloud phase palette and palette2colormap function. [hau]
+
+- Test commit for submodule. [hau]
+
+- Merge pull request #31 from elfsprite/pre-master. [Martin Raspaud]
+
+  Added S2A reader files to fork
+
+- Tile definition is now downloaded and converted automatically. [Matias
+  Takala]
+
+- Added S2A reader files to fork. [Matias Takala]
+
+- Fix typo. [Adam.Dybbroe]
+
+- Make it possible to specify fill-value in overview_sun. [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Fix bug of setting shape to viirs reader using foreign band name.
+  [Martin Raspaud]
+
+- Reorganize imports. [Martin Raspaud]
+
+- Add mercator to the supported ninjo projections. [Martin Raspaud]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Lars Orum Rasmussen]
+
+- Added a config template for sentinel-1b. [Lars Orum Rasmussen]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Ninjotiff.save now supports palette ('P') mode. [ras]
+
+- New option to handle if data is scaled between 0 and 1. [ras]
+
+- Set fill values as a kw arg. [Martin Raspaud]
+
+- Use readers def area_extent if calculation fails. [Christian Kliche]
+
+- Keep mask when stacking segments in aapp1b. [Martin Raspaud]
+
+- Get sun-sat angles for modise reading. [Adam.Dybbroe]
+
+- Bugfix getting satellite zenith and azimuth and document
+  get_sunsat_angles method. [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Keep track of sun corrected channels for eg ears viirs. [Martin
+  Raspaud]
+
+- Fix viirs_compact to remove files even when crashing. [Martin Raspaud]
+
+- Add method to retriev the sun-satellite viewing geometry.
+  [Adam.Dybbroe]
+
+- Pep8. [Adam.Dybbroe]
+
+- Fix the snow_age composite to right luminosity. [Martin Raspaud]
+
+- Add MF's copyright notice for the luts. [Martin Raspaud]
+
+- Use original luts for the snow_age composite. [Martin Raspaud]
+
+- Fix projector test. [Martin Raspaud]
+
+- Work around the problem coming at night when M9-channel would mask all
+  the data away. [Panu Lahtinen]
+
+- Add Snow Age RGB composite. [Panu Lahtinen]
+
+- Handle cases where DNB and/or M channel data are requested but no such
+  files are present. [Panu Lahtinen]
+
+- Bugfix hdfeos. [Martin Raspaud]
+
+  Checkfile was using an undefined variable
+
+- Finish integrating trollsift into hdfeos reader. [Martin Raspaud]
+
+  Some parts were left unfinished. Shoud we fixed now.
+
+- Fix new hdfeos reader to look for data on invalid input. [Martin
+  Raspaud]
+
+  the hdfeos reader would fail looking for data in standard places when an
+  iterable of invalid files would be provided.
+
+- Fix hdfeos reader to look for data on invalid input. [Martin Raspaud]
+
+  the hdfeos reader would fail looking for data in standard places when an
+  iterable of invalid files would be provided.
+
+- Merge branch 'pre-master' of https://github.com/pytroll/mpop into pre-
+  master. [Panu Lahtinen]
+
+- Update config templates for MODIS to match recent updates in EOS
+  reader. [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Run travis tests in a container. [Martin Raspaud]
+
+- Fix bug related to modis DR. [Adam.Dybbroe]
+
+- Mask out dubious lon/lat values. [Adam.Dybbroe]
+
+- Merge branch 'pre-master' of github.com:pytroll/mpop into pre-master.
+  [Adam.Dybbroe]
+
+- Added contributions from Christian (DWD) to ninjotiff. It support
+  input data in the [0.0, 1.0] range. [Lars Orum Rasmussen]
+
+- Print more debug info concerning calibration. [Adam.Dybbroe]
+
+- Fix formattin character in log message. [Panu Lahtinen]
+
 v1.2.1 (2015-12-14)
 -------------------
 
diff --git a/etc/EOS-Aqua.cfg.template b/etc/DREOS-Aqua.cfg.template
similarity index 92%
rename from etc/EOS-Aqua.cfg.template
rename to etc/DREOS-Aqua.cfg.template
index aa49774..840b925 100644
--- a/etc/EOS-Aqua.cfg.template
+++ b/etc/DREOS-Aqua.cfg.template
@@ -1,15 +1,17 @@
 [satellite]
+satname='aqua'
+number=''
+instruments=('modis',)
+
+[modis-level2]
+format=hdfeos_l1b.ModisReader
 instruments = ('modis',)
 
 [modis-level2]
 format=hdfeos_l1b
 dir=/data/prod/satellit/modis/lvl1
-
-filename250 = MYD02Qkm_A%Y%m%d_%H%M%S_*.hdf
-filename500 = MYD02Hkm_A%Y%m%d_%H%M%S_*.hdf
-filename1000 = MYD021km_A%Y%m%d_%H%M%S_*.hdf
-
-geofile1000 = MYD03_A%Y%m%d_%H%M%S_*.hdf
+filename = MYD02{resolution:1s}km_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf
+geofile=MYD03_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf
 
 [modis-level3]
 format = nc_pps_l2.PPSReader
diff --git a/etc/EOS-Terra.cfg.template b/etc/DREOS-Terra.cfg.template
similarity index 92%
rename from etc/EOS-Terra.cfg.template
rename to etc/DREOS-Terra.cfg.template
index 0cb1c58..bb1ea06 100644
--- a/etc/EOS-Terra.cfg.template
+++ b/etc/DREOS-Terra.cfg.template
@@ -1,15 +1,14 @@
 [satellite]
-instruments = ('modis',)
+satname='terra'
+number=''
+instruments=('modis',)
 
 [modis-level2]
-format=hdfeos_l1b
+format=hdfeos_l1b.ModisReader
 dir=/data/prod/satellit/modis/lvl1
+filename = MOD02{resolution:1s}km_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf
+geofile=MOD03_A{start_time:%y%j_%H%M%S}_{processing_time:%Y%j%H%M%S}.hdf
 
-filename250 = MOD02Qkm_A%y%j_%H%M%S_*.hdf
-filename500 = MOD02Hkm_A%y%j_%H%M%S_*.hdf
-filename1000 = MOD021km_A%y%j_%H%M%S_*.hdf
-
-geofile = MOD03_A%y%j_%H%M%S_*.hdf
 
 [modis-level3]
 format = nc_pps_l2.PPSReader
diff --git a/etc/hsaf10.cfg.template b/etc/hsaf10.cfg.template
new file mode 100755
index 0000000..191bdd0
--- /dev/null
+++ b/etc/hsaf10.cfg.template
@@ -0,0 +1,26 @@
+[satellite]
+satname = 'hsaf'
+projection = 'geos(0.0)'
+number = '10'
+instruments = ('seviri',)
+proj4_params = 'proj=geos lon_0=0.0 lat_0=0.00 a=6378169.00 b=6356583.80 h=35785831.00'
+
+#[seviri-level1]
+##filename = h03_%Y%m%d_%H%M_rom.grb.nc4
+#filename = h03_%Y%m%d_%H%M_rom.*
+#format = 'read_h03'
+##dir = /data/COALITION2/database/meteosat/HSAF/%Y/%m/%d
+#dir = /data/cinesat/in/hsaf
+
+[seviri-level2]
+#filename = h03_%Y%m%d_%H%M_rom.grb.nc4
+filename = h03_%Y%m%d_%H%M_rom.*
+format = 'hsaf_h03'
+#dir = /data/COALITION2/database/meteosat/HSAF/%Y/%m/%d
+dir = /data/cinesat/in/hsaf
+
+[seviri-1]
+frequency = (0.00, 0.00, 0.00)
+resolution = 3000.403165817
+name = 'h03'
+size = (1900, 900)
\ No newline at end of file
diff --git a/etc/odyssey.cfg.template b/etc/odyssey.cfg.template
new file mode 100755
index 0000000..cf7c965
--- /dev/null
+++ b/etc/odyssey.cfg.template
@@ -0,0 +1,25 @@
+[satellite]
+satname = 'odyssey'
+variant =
+number = 
+instruments = ('radar',)
+
+[radar-level2]
+dir = /data/cinesat/in/radar
+format = odyssey_radar
+projection = odyssey
+
+[RATE]
+#filename = T_PAAH21_C_EUOC_%Y%m%d%H%M??.hdf
+filename = meteoswiss.radar.euoc_rain_rate.%Y%m%d%H%M.hdf5
+name = 'RATE'
+
+[DBZH]
+#filename = T_PABH21_C_EUOC_%Y%m%d%H%M??.hdf
+filename = meteoswiss.radar.euoc_maximum_reflectivit.%Y%m%d%H%M.hdf5
+name = 'DBZH'
+
+[ACRR]
+#filename = T_PASH21_C_EUOC_%Y%m%d%H%M??.hdf
+filename = meteoswiss.radar.euoc_1h_accumulation.%Y%m%d%H%M.hdf5
+name = 'ACRR'
diff --git a/etc/s2a.cfg.template b/etc/s2a.cfg.template
new file mode 100644
index 0000000..4a18837
--- /dev/null
+++ b/etc/s2a.cfg.template
@@ -0,0 +1,88 @@
+[satellite]
+satname = s2a
+variant = 
+number = 
+instruments = ('msi',)
+
+[msi-level2]
+filename=%(satellite)s_OPER_%(instrument)s_L1C_??_????_%Y%m%dT%H%M%S_A%(orbit)s_T?????_%(band)s.jp2
+dir = /s2a_datadir
+format = s2_msi
+
+[msi-b1]
+frequency = (0.433, 0.443, 0.453)
+resolution = 60
+name = 'B01'
+size = (1830,1830)
+
+[msi-b2]
+frequency = (0.4575, 0.490, 0.5225)
+resolution = 10
+name = 'B02'
+size = (10980,10980)
+
+[msi-b3]
+frequency = (0.5225, 0.560, 0.5775)
+resolution = 10
+name = 'B03'
+size = (10980,10980)
+
+[msi-b4]
+frequency = (0.650, 0.665, 0.680)
+resolution = 10
+name = 'B04'
+size = (10980,10980)
+
+[msi-b5]
+frequency = (0.6975, 0.705, 0.7125)
+resolution = 20
+name = 'B05'
+size = (5490,5490)
+
+[msi-b6]
+frequency = (0.7325, 0.740, 0.7475)
+resolution = 20
+name = 'B06'
+size = (5490,5490)
+
+[msi-b7]
+frequency = (0.773, 0.783, 0.793)
+resolution = 20 
+name = 'B07'
+size = (5490,5490)
+
+[msi-b8]
+frequency = (0.7845, 0.842, 0.8995)
+resolution = 10  
+name = 'B08'
+size = (10980,10980)
+
+[msi-b8a]
+frequency = (0.855,0.865,0.875)
+resolution = 20
+name = 'B8A'
+size = (5490,5490)
+
+[msi-b9]
+frequency = (0.935,0.945,0.955)
+resolution = 60   
+name = 'B09'
+size = (1830,1830)
+
+[msi-b10]
+frequency = (1.360,1.375,1.390)
+resolution = 60
+name = 'B10'
+size = (1830,1830)
+
+[msi-b11]
+frequency = (1.565,1.610,1.655)
+resolution = 20
+name = 'B11'
+size = (5490,5490)
+
+[msi-b12]
+frequency = (2.100,2.190,2.280)
+resolution = 20 
+name = 'B12'
+size = (5490,5490)
diff --git a/etc/sentinel1b.cfg.template b/etc/sentinel1b.cfg.template
new file mode 100644
index 0000000..7c3ffff
--- /dev/null
+++ b/etc/sentinel1b.cfg.template
@@ -0,0 +1,11 @@
+[satellite]
+satname = 'sentinel'
+number = '1b'
+instruments = ('s1b', )
+
+[s1b-level2]
+format = 'mipp_sentinel.GeoTiffReader'
+# S1B_IW_GRDM_1SSH_20120109T054406_20120109T054424_001889_000001_ACD9.SAFE/
+# s1B-iw-grd-hh-20120109t054406-20120109t054424-001889-000001-001.tiff
+dir = /data/ras/sentinel-1/???_*_%Y%m%dT%H%M%S_*_%(orbit)s_*SAFE
+filename = manifest.safe
diff --git a/mpop/afgl.dat b/mpop/afgl.dat
new file mode 100644
index 0000000..cdc2158
--- /dev/null
+++ b/mpop/afgl.dat
@@ -0,0 +1,60 @@
+# different temperature profiles
+# (1) AFGL atmospheric constituent profile. U.S. standard atmosphere 1976. ( AFGL-TR-86-0110) 
+# (2) AFGL atmospheric constituent profile. tropical. ( AFGL-TR-86-0110)
+# (3) AFGL atmospheric constituent profile. midlatitude summer. ( AFGL-TR-86-0110) 
+# (4) AFGL atmospheric constituent profile. midlatitude winter. ( AFGL-TR-86-0110)
+# (5) AFGL atmospheric constituent profile. subarctic summer. ( AFGL-TR-86-0110) 
+# (6) AFGL atmospheric constituent profile. subarctic winter. ( AFGL-TR-86-0110)
+#
+#             (1) us stand  (2) tropic    (3) MS      (4) MW      (5) SS      (6) SW
+#     z(km)      T(K)         T(K)          T(K)       T(K)        T(K)        T(K) 
+    120.000     360.000      380.000     380.000     333.000     380.000     333.000 
+    115.000     300.000      299.700     316.800     293.000     322.700     288.500 
+    110.000     240.000      241.600     262.400     259.500     270.100     252.600 
+    105.000     208.800      212.000     222.200     237.100     226.000     234.000 
+    100.000     195.100      190.700     190.500     218.600     190.400     218.500 
+     95.000     188.400      184.300     178.300     208.300     176.800     211.000 
+     90.000     186.900      177.000     165.000     199.500     161.600     202.300 
+     85.000     188.900      177.100     165.100     199.800     161.700     213.100 
+     80.000     198.600      184.800     174.100     210.100     170.600     223.900 
+     75.000     208.400      201.800     196.100     220.400     193.600     234.700 
+     70.000     219.600      218.900     218.100     230.700     216.600     245.400 
+     65.000     233.300      236.000     240.100     240.900     239.700     248.400 
+     60.000     247.000      253.100     257.100     250.800     262.700     250.900 
+     55.000     260.800      263.400     269.300     260.600     274.000     259.100 
+     50.000     270.700      270.200     275.700     265.700     277.200     259.300 
+     47.500     270.600      269.600     275.200     265.100     276.200     253.200 
+     45.000     264.200      264.800     269.900     258.500     273.600     247.000 
+     42.500     257.300      259.400     263.700     250.800     269.500     240.800 
+     40.000     250.400      254.000     257.500     243.200     262.100     234.700 
+     37.500     243.435      248.500     251.300     235.500     254.600     228.500 
+     35.000     236.500      243.100     245.200     227.900     247.200     222.300 
+     32.500     229.588      237.700     239.000     220.400     240.000     218.500 
+     30.000     226.500      232.300     233.700     217.400     235.100     216.000 
+     27.500     224.000      227.000     228.450     215.500     231.000     213.600 
+     25.000     221.600      221.400     225.100     215.200     228.100     211.200 
+     24.000     220.600      219.200     223.900     215.200     226.600     211.800 
+     23.000     219.600      217.000     222.800     215.200     225.200     212.400 
+     22.000     218.600      214.600     221.600     215.200     225.200     213.000 
+     21.000     217.600      210.700     220.400     215.200     225.200     213.600 
+     20.000     216.700      206.700     219.200     215.200     225.200     214.200 
+     19.000     216.700      202.700     217.900     215.200     225.200     214.800 
+     18.000     216.700      198.800     216.800     215.700     225.200     215.400 
+     17.000     216.700      194.800     215.700     216.200     225.200     216.000 
+     16.000     216.700      197.000     215.700     216.700     225.200     216.600 
+     15.000     216.700      203.700     215.700     217.200     225.200     217.200 
+     14.000     216.700      210.300     215.700     217.700     225.200     217.200 
+     13.000     216.700      217.000     215.800     218.200     225.200     217.200 
+     12.000     216.700      223.600     222.300     218.700     225.200     217.200 
+     11.000     216.800      230.100     228.800     219.200     225.200     217.200 
+     10.000     223.300      237.000     235.300     219.700     225.200     217.200 
+      9.000     229.700      243.600     241.700     225.700     232.200     217.200 
+      8.000     236.200      250.300     248.200     231.700     239.200     220.600 
+      7.000     242.700      257.000     254.700     237.700     246.100     227.300 
+      6.000     249.200      263.600     261.200     243.700     253.100     234.100 
+      5.000     255.700      270.300     267.200     249.700     260.100     240.900 
+      4.000     262.200      277.000     273.200     255.700     265.500     247.700 
+      3.000     268.700      283.700     279.200     261.700     270.900     252.700 
+      2.000     275.200      287.700     285.200     265.200     276.300     255.900 
+      1.000     281.700      293.700     289.700     268.700     281.700     259.100 
+      0.000     288.200      299.700     294.200     272.200     287.200     257.200 
\ No newline at end of file
diff --git a/mpop/channel.py b/mpop/channel.py
index 6c0952a..72a1837 100644
--- a/mpop/channel.py
+++ b/mpop/channel.py
@@ -41,6 +41,7 @@ try:
 except ImportError:
     sza = None
 
+from mpop.tools import viewzen_corr as vz_corr
 
 class GeolocationIncompleteError(Exception):
 
@@ -314,6 +315,8 @@ class Channel(GenericChannel):
                       calibration_unit=self.unit)
         res.area = coverage_instance.out_area
         res.info = self.info
+        if hasattr(self, 'palette'):      # propagate optional palette
+            res.palette = self.palette    # to the projected channel
         if self.is_loaded():
             LOG.info("Projecting channel %s (%fμm)..."
                      % (self.name, self.wavelength_range[1]))
@@ -376,6 +379,10 @@ class Channel(GenericChannel):
         also stored to the info dictionary of the originating channel.
         '''
 
+        if self.info.get('sun_zen_correction_applied'):
+            LOG.debug("Sun zenith correction already applied, skipping")
+            return self
+
         import mpop.tools
 
         try:
@@ -423,6 +430,348 @@ class Channel(GenericChannel):
             LOG.debug("cos_limit = %f", cos_limit)
             # Mask out data where the sun elevation is below a threshold:
             new_ch.data = np.ma.masked_where(cos_zen < cos_limit, new_ch.data, copy=False)
+            new_ch.info["sun_zen_correction_applied"] = True
+        return new_ch
+
+    def get_viewing_geometry(self, orbital, time_slot, altitude=None):
+        '''Calculates the azimuth and elevation angle as seen by the observer 
+           at the position of the current area pixel. 
+           inputs:
+             orbital   an orbital object defined by the tle file 
+                       (see pyorbital.orbital import Orbital or mpop/scene.py get_oribtal)
+             time_slot time object specifying the observation time
+             altitude  optional: altitude of the observer above the earth ellipsoid
+           outputs:
+             azi       azimuth viewing angle in degrees (south is 0, counting clockwise)
+             ele       elevation viewing angle in degrees (zenith is 90, horizon is 0)
+        '''
+
+        try:
+            from pyorbital.orbital import Orbital
+        except ImportError:
+            LOG.warning("Could not load pyorbital.orbial.Orbital")
+            return None
+
+        try:
+            from pyorbital import tlefile
+        except ImportError:
+            LOG.warning("Could not load pyorbital.tlefile")
+            return None
+
+        (lons, lats) = self.area.get_lonlats()
+        # Calculate observer azimuth and elevation
+        if altitude==None:
+            altitude = np.zeros(lons.shape)
+        azi, ele = orbital.get_observer_look(time_slot, lons, lats, altitude)
+
+        return (azi, ele) 
+
+    def vinc_vect(phi, lembda, alpha, s, f=None, a=None, degree=True):
+        """ Vincenty's Direct formula
+
+        Returns the lat and long of the projected point and the reverse azimuth,
+        given a reference point and a distance and azimuth to project.
+        lats, longs and azimuths are in radians (or degrees if *degree* is True, the default).
+
+        Keyword arguments:
+            phi    Latitude in degree/radians
+            lembda Longitude in degree/radians
+            alpha    Geodetic azimuth in degree/radians
+            s    Ellipsoidal distance in meters
+            f    WGS84 parameter
+            a    WGS84 parameter
+            degree Boolean if in/out values are in degree or radians.
+                   Default is in degree
+
+        Returns:
+            (phiout,  lembdaout,  alphaout ) as a tuple
+
+        """
+        if degree:
+            phi = np.deg2rad(phi)
+            lembda = np.deg2rad(lembda)
+            alpha = np.deg2rad(alpha)
+
+        if f is None:
+            f = 1/298.257223563
+        if a is None:
+            a = 6378137
+
+        two_pi = 2.0*np.pi
+
+        if isinstance(alpha, np.ndarray):
+            alpha[alpha < 0.0] += two_pi
+            alpha[alpha > two_pi] -= two_pi
+
+        else:
+            if alpha < 0.0:
+                alpha = alpha + two_pi
+            if (alpha > two_pi):
+                alpha = alpha - two_pi
+        """
+        alphama = np.ma.masked_less_equal(alphama, two_pi)
+        alpha = alphama - two_pi
+        alpha.mask = np.ma.nomask
+        logger.debug(alpha)
+        """
+        b = a * (1.0 - f)
+
+        tan_u1 = (1-f) * np.tan(phi)
+        u_1 = np.arctan(tan_u1)
+        sigma1 = np.arctan2(tan_u1, np.cos(alpha))
+
+        sinalpha = np.cos(u_1) * np.sin(alpha)
+        cosalpha_sq = 1.0 - sinalpha * sinalpha
+
+        u_2 = cosalpha_sq * (a * a - b * b) / (b * b)
+        aa_ = 1.0 + (u_2 / 16384) * (4096 + u_2 * (-768 + u_2 *
+                                                   (320 - 175 * u_2)))
+        bb_ = (u_2 / 1024) * (256 + u_2 * (-128 + u_2 * (74 - 47 * u_2)))
+
+        # Starting with the approximation
+        sigma = (s / (b * aa_))
+        last_sigma = 2.0 * sigma + 2.0  # something impossible
+
+        # Iterate the following three equations
+        # until there is no significant change in sigma
+
+        # two_sigma_m , delta_sigma
+
+        def iter_sigma(sigma, last_sigma, sigma1, s, b, aa_, bb_):
+            while (abs((last_sigma - sigma) / sigma) > 1.0e-9):
+                two_sigma_m = 2 * sigma1 + sigma
+
+                delta_sigma = (bb_ * np.sin(sigma) *
+                               (np.cos(two_sigma_m) + (bb_/4) *
+                                (np.cos(sigma) *
+                                 (-1 + 2 * np.power(np.cos(two_sigma_m), 2) -
+                                  (bb_ / 6) * np.cos(two_sigma_m) *
+                                  (-3 + 4 * np.power(np.sin(sigma), 2)) *
+                                  (-3 + 4 * np.power(np.cos(two_sigma_m), 2))))))
+                last_sigma = sigma
+                sigma = (s / (b * aa_)) + delta_sigma
+
+            return(sigma, two_sigma_m)
+
+        # Check for array inputs
+        arraybool = [isinstance(ele, np.ndarray) for ele in (sigma, last_sigma,
+                                                             sigma1)]
+        logger.debug("Sigma Arrays?: " + str(arraybool))
+        if all(arraybool):
+            viter_sigma = np.vectorize(iter_sigma)
+            sigma, two_sigma_m = viter_sigma(sigma, last_sigma, sigma1, s, b, aa_,
+                                             bb_)
+
+        else:
+            sigma, two_sigma_m = iter_sigma(sigma, last_sigma, sigma1, s, b, aa_,
+                                            bb_)
+
+        phiout = np.arctan2((np.sin(u_1) * np.cos(sigma) +
+                             np.cos(u_1) * np.sin(sigma) * np.cos(alpha)),
+                            ((1 - f) * np.sqrt(np.power(sinalpha, 2) +
+                                               pow(np.sin(u_1) *
+                                               np.sin(sigma) -
+                                               np.cos(u_1) *
+                                               np.cos(sigma) *
+                                               np.cos(alpha), 2))))
+
+        deltalembda = np.arctan2((np.sin(sigma) * np.sin(alpha)),
+                                 (np.cos(u_1) * np.cos(sigma) -
+                                  np.sin(u_1) * np.sin(sigma) * np.cos(alpha)))
+
+        cc_ = (f/16) * cosalpha_sq * (4 + f * (4 - 3 * cosalpha_sq))
+
+        omega = (deltalembda - (1 - cc_) * f * sinalpha *
+                 (sigma + cc_ * np.sin(sigma) * (np.cos(two_sigma_m) + cc_ *
+                                                 np.cos(sigma) *
+                                                 (-1 + 2 *
+                                                  np.power(np.cos(two_sigma_m),
+                                                           2)))))
+
+        lembdaout = lembda + omega
+
+        alphaout = np.arctan2(sinalpha, (-np.sin(u_1) * np.sin(sigma) +
+                                         np.cos(u_1) * np.cos(sigma) *
+                                         np.cos(alpha)))
+
+        alphaout = alphaout + two_pi / 2.0
+
+        if isinstance(alphaout, np.ndarray):
+            alphaout[alphaout < 0.0] += two_pi
+            alphaout[alphaout > two_pi] -= two_pi
+
+        else:
+            if alphaout < 0.0:
+                alphaout = alphaout + two_pi
+            if (alphaout > two_pi):
+                alphaout = alphaout - two_pi
+
+        if degree:
+            phiout = np.rad2deg(phiout)
+            lembdaout = np.rad2deg(lembdaout)
+            alphaout = np.rad2deg(alphaout)
+
+        return(phiout, lembdaout, alphaout)
+
+
+    def parallax_corr(self, cth=None, time_slot=None, orbital=None, azi=None, ele=None, fill="False"):
+        '''Perform the parallax correction for channel at
+        *time_slot* (datetime.datetime() object), assuming the cloud top height cth
+        and the viewing geometry given by the satellite orbital "orbital" and return the
+        corrected channel. 
+        Authors: Ulrich Hamann (MeteoSwiss), Thomas Leppelt (DWD)
+        Example calls:
+            * calling this function (using orbital and time_slot)
+                 orbital = data.get_oribtal()
+                 data["VIS006"].parallax_corr(cth=data["CTTH"].height, time_slot=data.time_slot, orbital=orbital)
+            * calling this function (using viewing geometry)
+                 orbital = data.get_oribtal()
+                 (azi, ele) = get_viewing_geometry(self, orbital, time_slot)
+                 data["VIS006"].parallax_corr(cth=data["CTTH"].height, azi=azi, ele=ele)
+        Optional input:
+          cth        The parameter cth is the cloud top height 
+                     (or  the altitude of the object that should be shifted).
+                     cth must have the same size and projection as the channel
+
+          orbital    an orbital object defined by the tle file 
+                     (see pyorbital.orbital import Orbital or mpop/scene.py get_oribtal)
+          azi        azimuth viewing angle in degrees (south is 0, counting clockwise)
+                     e.g. as given by self.get_viewing_geometry
+          ele        elevation viewing angle in degrees (zenith is 90, horizon is 0)
+                     e.g. as given by self.get_viewing_geometry
+          fill       specifies the interpolation method to fill the gaps
+                     (basically areas behind the cloud that can't be observed by the satellite instrument)
+                     "False" (default): no interpolation, gaps are np.nan values and mask is set accordingly
+                     "nearest": fill gaps with nearest neighbour
+                     "bilinear": use scipy.interpolate.griddata with linear interpolation 
+                                 to fill the gaps
+                   
+        output: 
+          parallax corrected channel
+                     the content of the channel will be parallax corrected.
+                     The name of the new channel will be
+                     *original_chan.name+'_PC'*, eg. "IR_108_PC". This name is
+                     also stored to the info dictionary of the originating channel.
+        '''
+
+        # get time_slot from info, if present
+        if time_slot==None:
+            if "time" in self.info.keys():
+                time_slot=self.info["time"]
+
+        if azi==None or ele==None:
+            if time_slot==None or orbital==None:
+                print "*** Error in parallax_corr (mpop/channel.py)"
+                print "    parallax_corr needs either time_slot and orbital"
+                print "    data[\"IR_108\"].parallax_corr(data[\"CTTH\"].height, time_slot=data.time_slot, orbital=orbital)"
+                print "    or the azimuth and elevation angle"
+                print "    data[\"IR_108\"].parallax_corr(data[\"CTTH\"].height, azi=azi, ele=ele)"
+                quit()
+            else:
+                print ("... calculate viewing geometry (orbit and time are given)")
+                (azi, ele) = self.get_viewing_geometry(orbital, time_slot)
+        else:
+            print ("... azimuth and elevation angle given")
+
+        # mask the cloud top height
+        cth_ = np.ma.masked_where(cth < 0, cth, copy=False)
+
+        # Elevation displacement
+        dz = cth_ / np.tan(np.deg2rad(ele))
+
+        # Create the new channel (by copying) and initialize the data with None values
+        new_ch = copy.deepcopy(self)
+        new_ch.data[:,:] = np.nan
+
+        # Set the name
+        new_ch.name += '_PC'
+
+        # Add information about the corrected version to original channel
+        self.info["parallax_corrected"] = self.name + '_PC'
+
+        # get projection coordinates in meter
+        (proj_x,proj_y) = self.area.get_proj_coords()
+
+        print "... calculate parallax shift"
+        # shifting pixels according to parallax correction 
+        proj_x_pc = proj_x - np.sin(np.deg2rad(azi)) * dz # shift West-East   in m  # ??? sign correct ??? 
+        proj_y_pc = proj_y + np.cos(np.deg2rad(azi)) * dz # shift North-South in m
+
+        # get indices for the pixels for the original position 
+        (y,x)  = self.area.get_xy_from_proj_coords(proj_x, proj_y)
+            # comment: might be done more efficiently with meshgrid
+            # >>> x = np.arange(-5.01, 5.01, 0.25)
+            # >>> y = np.arange(-5.01, 5.01, 0.25)
+            # >>> xx, yy = np.meshgrid(x, y)
+        # get indices for the pixels at the parallax corrected position 
+        (y_pc,x_pc) = self.area.get_xy_from_proj_coords(proj_x_pc, proj_y_pc)
+
+        # copy cloud free satellite pixels (surface observations)
+        ind = np.where(cth_.mask == True)
+        new_ch.data[x[ind],y[ind]] = self.data[x[ind],y[ind]]
+
+        print "... copy data to parallax corrected position"
+        # copy cloudy pixel with new position modified with parallax shift
+        ind = np.where(x_pc.mask == False)
+        new_ch.data[x_pc[ind],y_pc[ind]] = self.data[x[ind],y[ind]]
+
+        # Mask out data gaps (areas behind the clouds)
+        new_ch.data = np.ma.masked_where(np.isnan(new_ch.data), new_ch.data, copy=False)
+
+        if fill.lower()=="false":
+            return new_ch
+        elif fill=="nearest":
+            print "*** fill missing values with nearest neighbour" 
+            from scipy.ndimage import distance_transform_edt
+            invalid = np.isnan(new_ch.data)
+            ind = distance_transform_edt(invalid, return_distances=False, return_indices=True)
+            new_ch.data = new_ch.data[tuple(ind)]
+        elif fill=="bilinear":
+            # this function does not interpolate at the outer boundaries 
+            from scipy.interpolate import griddata
+            ind = np.where(new_ch.data.mask == False)
+            points = np.transpose(np.append([y[ind]], [x[ind]], axis=0))
+            values = new_ch.data[ind]
+            new_ch.data = griddata(points, values, (y, x), method='linear')
+
+            # fill the remaining pixels with nearest neighbour
+            from scipy.ndimage import distance_transform_edt
+            invalid = np.isnan(new_ch.data)
+            ind = distance_transform_edt(invalid, return_distances=False, return_indices=True)
+            new_ch.data = new_ch.data[tuple(ind)]
+        else:
+            print "*** Error in parallax_corr (channel.py)"
+            print "    unknown gap fill method ", fill
+            quit()
+
+        return new_ch
+
+    def viewzen_corr(self, view_zen_angle_data):
+        """Apply atmospheric correction on a copy of this channel data
+        using the given satellite zenith angle data of the same shape.
+        Returns a new channel containing the corrected data.
+        The name of the new channel will be *original_chan.name+'_VZC'*,
+        eg. "IR108_VZC".  This name is also stored to the info dictionary of
+        the originating channel.
+        """
+
+        # copy channel data which will be corrected in place
+        chn_data = self.data.copy()
+        CHUNK_SZ = 500
+        for start in xrange(0, chn_data.shape[1], CHUNK_SZ):
+            # apply correction on channel data
+            vz_corr(chn_data[:, start:start + CHUNK_SZ],
+                    view_zen_angle_data[:, start:start + CHUNK_SZ])
+
+        new_ch = Channel(name=self.name + "_VZC",
+                         resolution=self.resolution,
+                         wavelength_range=self.wavelength_range,
+                         data=chn_data,
+                         calibration_unit=self.unit)
+
+        # Add information about the corrected version to original channel
+        self.info["view_zen_corrected"] = self.name + '_VZC'
+
         return new_ch
 
     # Arithmetic operations on channels.
diff --git a/mpop/imageo/HRWimage.py b/mpop/imageo/HRWimage.py
new file mode 100644
index 0000000..511e72e
--- /dev/null
+++ b/mpop/imageo/HRWimage.py
@@ -0,0 +1,680 @@
+import matplotlib as mpl   # these HAVE TO BE the very first lines (before any other matplotlib functions are imported) 
+mpl.use('Agg')             # these HAVE TO BE the very first lines (before any other matplotlib functions are imported) 
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+from matplotlib.figure import Figure
+import matplotlib.pyplot as plt
+from matplotlib import rcParams
+from PIL import Image as PIL_Image
+from TRTimage import fig2data, fig2img
+from pylab import text as pylab_text
+from numpy import sin, cos, radians, where, nonzero, transpose, arange, append, meshgrid, mgrid, empty, isnan, nan, percentile
+from numpy import sum as np_sum
+from scipy.interpolate import griddata  # interp2d
+from matplotlib.patches import Rectangle
+from matplotlib.colors import Normalize
+
+def prepare_figure(obj_area):
+
+    # create new figure 
+    #fig = Figure()  # old version, does not work for the stream plot 
+
+    ## Turn interactive plotting off
+    #plt.ioff()
+    fig = plt.figure()   # needs a DISPLAY environment variable (simulated here with mpl.use('Agg'))
+
+    # define size of image 
+    nx = obj_area.x_size
+    ny = obj_area.y_size
+    # canvas figure 
+    canvas = FigureCanvas(fig)
+    # get dots per inch of the screen
+    DPI = fig.get_dpi()
+    # print "DPI", DPI
+    fig.set_size_inches(nx/float(DPI),ny/float(DPI))
+    # set fonts to bold
+    plt.rc('font', weight='bold')
+    # get axis object 
+    ax = fig.add_subplot(111, aspect='equal')
+    ## eliminates margins totally 
+    fig.subplots_adjust(left=0.0,right=1.0,bottom=0.0,top=1.0, wspace=0, hspace=0)
+    # set limits of the axis
+    ax.set_xlim(0, nx)
+    ax.set_ylim(0, ny)
+    # set transparent background 
+    fig.patch.set_alpha(0.0)        # transparent outside of diagram  
+    ax.set_axis_bgcolor([1,0,0,0])  # transparent color inside diagram 
+
+    return fig, ax 
+
+
+def HRWimage( HRW_data, obj_area, hrw_channels=None, min_correlation=None, cloud_type=None, style='barbs', \
+                        barb_length=None, color_mode='channel', pivot='middle', legend=True, legend_loc=3):
+
+    """Create a PIL image from high resolution wind data
+       required input:
+         HRW data [HRW_class]:      HRW_class instance containing the data, see mpop/satin/nwcsaf_hrw_hdf.py
+         obj_area [area_class]:     instance of area class, returned by area_def
+       optional input
+       hrw_channels [string array]: giving the channels that are used to derive the HRW vectors
+                                    e.g. hrw_channels['HRV','WV_073']
+       min_correlation [int]:       minimum correlation of tracking, if below arrow is not shown
+       cloud_type [int array]:      cloud types of the wind vectors, e.g. cloud_type=[8,10,11]
+       style [string]:              different styles of plotting
+                                    style='barbs' or style='5min_displacement' or style='15min_displacement'
+       color_mode [string]:         choose color of the wind symbols, possible choices:
+                                    color_mode='channel'      -> one color per SEVIRI channel used to derive HRW
+                                    color_mode='pressure'     -> colorcoded cloud top pressure
+                                    color_mode='temperature'  -> colorcoded cloud top temperature
+                                    color_mode='cloud_type'   -> NWC-SAF cloud types
+                                    color_mode='correlation'  80 ... 100
+                                    color_mode='conf_nwp'      70 ... 100
+                                    color_mode='conf_no_nwp'   70 ... 100
+       pivot [string]:              position of the barb, e.g. pivot='middle' == center of barb at origin
+       legend [True or False] :     show legend or not 
+       legend_loc [string or int]:  location of the legend 
+                                    upper right    1
+                                    upper left     2
+                                    lower left     3
+                                    lower right    4
+                                    right          5
+                                    center left    6
+                                    center right   7
+                                    lower center   8
+                                    upper center   9
+                                    center         10
+                                    best
+    """
+
+    #if min_correlation != None:
+    #    print "    filter for min_correlation = ", min_correlation
+    #    inds = where(HRW_data.correlation > min_correlation)
+    #    HRW_data = HRW_data.subset(inds)
+     
+    print "... create HRWimage, color_mode = ", color_mode
+
+    # get an empty figure with transparent background, no axis and no margins outside the diagram
+    fig, ax = prepare_figure(obj_area)
+
+    # define arrow properties 
+    head_width  = 0.006 * min(obj_area.x_size,obj_area.x_size)
+    head_length = 2 * head_width
+    m_per_s_to_knots = 1.944
+
+    #barb_length = 0.008 * min(obj_area.x_size,obj_area.x_size)
+    
+    if barb_length == None:
+        n_winds = len(HRW_data.wind_id)
+        if n_winds < 300:
+            barb_length = 5.68
+        elif n_winds < 500:
+            barb_length = 5.43
+        elif n_winds < 700:
+            barb_length = 5.18
+        elif n_winds < 900:
+            barb_length = 4.68
+        else:          
+            barb_length = 4.00
+    print "barb_length", barb_length
+
+    if color_mode == 'channel':
+        classes = ('HRV',          'VIS008 ', 'WV_062 ',   'WV_073 ',   'IR_120 ')
+        colors   = ['mediumorchid', 'red',     'limegreen', 'darkgreen', 'darkturquoise']
+    elif color_mode == 'pressure':
+        classes = ['<200hPa',  '200-300hPa','300-400hPa','400-500hPa','500-600hPa','600-700hPa','700-800hPa', '800-900hPa','>900hPa']
+        colors   = ['darksalmon', 'red'     ,'darkorange','yellow'    ,'lime'      ,'seagreen',  'deepskyblue','blue',      'mediumorchid']
+        classes = tuple(['CTP '+cl for cl in classes])
+    elif color_mode == 'cloud_type' or color_mode == 'cloudtype':
+        classes=['non-processed','cloud free land', 'cloud free sea', 'land snow', 'sea ice',\
+                 'very low cum.', 'very low', 'low cum.', 'low', 'med cum.', 'med', 'high cum.', 'high', 'very high cum.', 'very high', \
+                 'sem. thin', 'sem. med.', 'sem. thick', 'sem. above', 'broken', 'undefined']
+
+        colors = empty( (len(classes),3), dtype=int )
+        colors[ 0,:] = [100, 100, 100]
+        colors[ 1,:] = [  0, 120,   0]
+        colors[ 2,:] = [  0,   0,   0]
+        colors[ 3,:] = [250, 190, 250]
+        colors[ 4,:] = [220, 160, 220]
+        colors[ 5,:] = [255, 150,   0]
+        colors[ 6,:] = [255, 100,   0]
+        colors[ 7,:] = [255, 220,   0]
+        colors[ 8,:] = [255, 180,   0]
+        colors[ 9,:] = [255, 255, 140]
+        colors[10,:] = [240, 240,   0]
+        colors[11,:] = [250, 240, 200]
+        colors[12,:] = [215, 215, 150]
+        colors[13,:] = [255, 255, 255]
+        colors[14,:] = [230, 230, 230]
+        colors[15,:] = [  0,  80, 215]
+        colors[16,:] = [  0, 180, 230]
+        colors[17,:] = [  0, 240, 240]
+        colors[18,:] = [ 90, 200, 160]
+        colors[19,:] = [200,   0, 200]
+        colors[20,:] = [ 95,  60,  30]
+        colors = colors/255.
+    elif color_mode in ['correlation','conf_nwp','conf_no_nwp']:
+        classes  = ['<70',    '<75',     '<80',    '<85',   '<90',  '<95',   '>95' ]
+        colors   = ['indigo', 'darkred', 'red','darkorange','gold', 'lime', 'green']
+        classes = tuple([color_mode+' '+cl for cl in classes])
+    else:
+          print "*** Error in HRW_streamplot (mpop/imageo/HRWimage.py)"
+          print "    unknown color_mode"
+          quit()
+
+    for wid in range(len(HRW_data.wind_id)):
+
+        if color_mode == 'channel':
+
+            if HRW_data.channel[wid].find('HRV') != -1: # HRV
+                barbcolor = colors[0]
+            elif HRW_data.channel[wid].find('VIS008') != -1: #  0.8 micro m
+                barbcolor = colors[1]
+            elif HRW_data.channel[wid].find('WV_062') != -1: #  6.2 micro m
+                barbcolor = colors[2]
+            elif HRW_data.channel[wid].find('WV_073') != -1: #  7.3 micro m
+                barbcolor = colors[3]
+            elif HRW_data.channel[wid].find('IR_120') != -1: # 12.0 micro m
+                barbcolor = colors[4]
+
+        elif color_mode == 'pressure':
+
+            if HRW_data.pressure[wid] < 20000:
+                barbcolor = colors[0]
+            elif HRW_data.pressure[wid] < 30000:
+                barbcolor = colors[1]
+            elif HRW_data.pressure[wid] < 40000:
+                barbcolor = colors[2]
+            elif HRW_data.pressure[wid] < 50000:
+                barbcolor = colors[3] 
+            elif HRW_data.pressure[wid] < 60000:
+                barbcolor = colors[4]
+            elif HRW_data.pressure[wid] < 70000:
+                barbcolor = colors[5]
+            elif HRW_data.pressure[wid] < 80000:
+                barbcolor = colors[6]
+            elif HRW_data.pressure[wid] < 90000:
+                barbcolor = colors[7]
+            else:
+                barbcolor = colors[8]
+
+        elif color_mode == 'cloud_type' or color_mode == 'cloudtype':
+
+            barbcolor = list(colors[HRW_data.cloud_type[wid], :])
+
+        elif color_mode in ['correlation','conf_nwp','conf_no_nwp']:
+            if color_mode == 'correlation':
+                cdata = HRW_data.correlation
+            elif color_mode == 'conf_nwp':
+                cdata = HRW_data.conf_nwp
+            elif color_mode == 'conf_no_nwp':
+                cdata = HRW_data.conf_no_nwp
+
+            if cdata[wid] < 70:
+                barbcolor = colors[0]
+            elif cdata[wid] < 75:
+                barbcolor = colors[1]
+            elif cdata[wid] < 80:
+                barbcolor = colors[2]
+            elif cdata[wid] < 85:
+                barbcolor = colors[3] 
+            elif cdata[wid] < 90:
+                barbcolor = colors[4]
+            elif cdata[wid] < 95:
+                barbcolor = colors[5]
+            else:
+                barbcolor = colors[6]
+        else:
+              print "*** Error in HRW_streamplot (mpop/imageo/HRWimage.py)"
+              print "    unknown color_mode"
+              quit()
+      
+        x0, y0 = obj_area.get_xy_from_lonlat( HRW_data.lon[wid], HRW_data.lat[wid], outside_error=False) #, return_int=True
+
+        u = HRW_data.wind_speed[wid] * -1 * sin(radians(HRW_data.wind_direction[wid])) 
+        v = HRW_data.wind_speed[wid] * -1 * cos(radians(HRW_data.wind_direction[wid]))
+
+        #print '%6s %3d %10.7f %10.7f %7.2f %7.1f %8.1f %10s' % (HRW_data.channel[wid], HRW_data.wind_id[wid], \
+        #                                                        HRW_data.lon[wid], HRW_data.lat[wid], \
+        #                                                        HRW_data.wind_speed[wid]*m_per_s_to_knots, \
+        #                                                        HRW_data.wind_direction[wid], HRW_data.pressure[wid], barbcolor)
+
+
+        if style == 'barbs':
+            u = HRW_data.wind_speed[wid] * -1 * sin(radians(HRW_data.wind_direction[wid])) * m_per_s_to_knots
+            v = HRW_data.wind_speed[wid] * -1 * cos(radians(HRW_data.wind_direction[wid])) * m_per_s_to_knots
+            ax.barbs(x0, obj_area.y_size - y0, u * m_per_s_to_knots, v * m_per_s_to_knots, length = barb_length, pivot='middle', barbcolor=barbcolor)
+
+        elif style == '5min_displacement' or style == '15min_displacement':
+            if style == '5min_displacement':
+                t_in_s =  5*60
+            elif style == '15min_displacement':
+                t_in_s = 15*60
+            dx = u * t_in_s / obj_area.pixel_size_x
+            dy = v * t_in_s / obj_area.pixel_size_y
+            ax.arrow(x0, y0, dx, dy, head_width = head_width, head_length = head_length, fc=barbcolor, ec=barbcolor)
+
+    if legend:
+
+        rcParams['legend.handlelength'] = 0
+        rcParams['legend.numpoints'] = 1
+
+        # create blank rectangle
+        rec = Rectangle((0, 0), 0, 0, fc="w", fill=False, edgecolor='none', linewidth=0)
+
+        ##  *fontsize*: [size in points | 'xx-small' | 'x-small' | 'small' |
+        ##              'medium' | 'large' | 'x-large' | 'xx-large']
+
+
+        alpha=1.0
+        bbox={'facecolor':'white', 'alpha':alpha, 'pad':10}
+
+        print "... add legend: color is a function of ",  color_mode
+        
+        recs = empty( len(classes), dtype=object)
+        recs[:] = rec 
+
+        #if color_mode == 'pressure':
+        #    recs = [rec, rec, rec, rec, rec, rec, rec, rec, rec]
+        #if color_mode == 'channel':
+        #    recs = [rec, rec, rec, rec, rec]
+        #if color_mode in ['correlation','conf_nwp','conf_no_nwp']:
+        #    recs = [rec, rec, rec, rec, rec, rec, rec]
+
+        size=12
+        if color_mode=='cloud_type':
+            size=10
+
+        leg = ax.legend(recs, classes, loc=legend_loc, prop={'size':size})
+
+        for color,text in zip(colors,leg.get_texts()):
+            text.set_color(color)
+
+
+    return fig2img ( fig )
+
+
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+
+def HRWstreamplot( u2d, v2d, obj_area, interpol_method, color_mode='speed', density=(3,3), linewidth_mode="scaled", linewidth_max=2.5, \
+                   min_correlation=None, legend=True, legend_loc=3, vmax=None, colorbar=True, fontcolor='w'):
+
+    """ Create a streamplot image in PIL format of a 2d wind field
+           u2d, v2d [2d arrays]     wind components on the obj_area grid;
+                                    NaNs indicate missing observations (see HRW_2dfield)
+           obj_area                 area definition, used by prepare_figure for the figure size
+           interpol_method [string] label only; it is printed onto the image, the
+                                    interpolation itself is done upstream
+           color_mode [string]:     choose color of the stream lines,
+                                    color_mode='speed' -> wind speed
+                                    color_mode='u'     -> u-wind component
+                                    color_mode='v'     -> v-wind component
+           density [2 int tuple]    density of stream lines, default density = (4,4)
+           linewidth_mode [string]  "scaled" to color_mode data
+                                    "const" always linewith_max
+           min_correlation          unused in this function -- TODO confirm it can be removed
+           vmax                     if given, fixes the upper bound of the color and
+                                    linewidth scaling; otherwise the data maximum is used
+           Returns the rendered figure as a PIL RGBA image (via fig2img).
+    """
+
+
+    ## get a empty figure with transparent background, no axis and no margins outside the diagram
+    fig, ax = prepare_figure(obj_area)
+    #print dir(ax)
+
+    # check whether there have been enough observations (missing data is marked with NaNs)
+    if isnan(np_sum(u2d)):
+        print "... there are not enough observations"
+        ax.text(0.95, 0.01, 'currently not enough observations',
+                verticalalignment='bottom', horizontalalignment='right',
+                transform=ax.transAxes, color='red', fontsize=15)
+    else:
+        print "there is enough data, interpolation method: ", interpol_method
+
+        # create grid for the wind data
+        [nx, ny] = u2d.shape  # for ccs4 this is (640, 710)
+        # y axis is reversed: array row 0 corresponds to the top of the image
+        Y, X = mgrid[nx-1:-1:-1, 0:ny] # watch out for Y->nx and X->ny
+        #print "X.shape ", Y.shape
+        #print Y[:,0]
+
+        print "   calculate color data ", color_mode
+        if color_mode == 'speed':
+            from numpy import sqrt
+            cdata = sqrt(u2d*u2d + v2d*v2d)
+        elif color_mode == 'u':
+            cdata = u2d
+        elif color_mode == 'v':
+            cdata = v2d
+        else:
+            print "*** Error in HRW_streamplot (mpop/imageo/HRWimage.py)"
+            print "    unknown color_mode"
+            quit()
+
+        print "   calculate linewidth ", linewidth_mode
+        if linewidth_mode == "const":
+            linewidth = linewidth_max
+        elif linewidth_mode == "scaled":
+            # linewidth grows linearly with cdata from 1 up to 1 + linewidth_max
+            if vmax != None:
+                linewidth = 1 + linewidth_max*(cdata) / vmax
+            else:
+                linewidth = 1 + linewidth_max*(cdata) / cdata.max()
+        else:
+            print "*** Error in HRW_streamplot (mpop/imageo/HRWimage.py)"
+            print "    unknown linewidth_mode"
+            quit()
+
+        print "... data_max =", cdata.max() ,", vmax=", vmax
+
+        if vmax != None:
+            norm = Normalize(vmin=0, vmax=vmax)
+        else:
+            norm = Normalize(vmin=0, vmax=cdata.max())
+
+        #optional arguments of streamplot
+        #           density=1, linewidth=None, color=None,
+        #           cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
+        #           minlength=0.1, transform=None, zorder=1, start_points=None, INTEGRATOR='RK4',density=(10,10)
+        plt.streamplot(X, Y, u2d, v2d, color=cdata, linewidth=linewidth, cmap=plt.cm.rainbow, density=density, norm=norm) 
+
+        if colorbar:
+            colorbar_ax = fig.add_axes([0.9, 0.1, 0.05, 0.8])
+            cbar = plt.colorbar(cax=colorbar_ax)
+            # colorbar tick labels in the requested font color (default white, for dark backgrounds)
+            plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color=fontcolor)
+            xlabel = cbar.ax.set_xlabel('m/s', weight='bold') #get the title property handler
+            plt.setp(xlabel, color=fontcolor) 
+
+        #plt.savefig("test_streamplot.png")
+
+        # add information about interpolation method
+        ax.text(0.95, 0.01, interpol_method,
+                verticalalignment='bottom', horizontalalignment='right',
+                transform=ax.transAxes, color='green', fontsize=15)
+
+    return fig2img ( fig )
+
+
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+
+
+def fill_with_closest_pixel(data, invalid=None):
+    """
+    Replace the value of invalid 'data' cells (indicated by 'invalid') 
+    by the value of the nearest valid data cell
+
+    Input:
+        data:    numpy array of any dimension
+        invalid: a binary array of same shape as 'data'. True cells set where data
+                 value should be replaced.
+                 If None (default), use: invalid  = np.isnan(data)
+
+    Output: 
+        Return a filled array. 
+    """
+
+    from numpy import isnan 
+    from scipy.ndimage import distance_transform_edt
+
+    if invalid is None: invalid = isnan(data)
+
+    ind = distance_transform_edt(invalid, return_distances=False, return_indices=True)
+
+    return data[tuple(ind)]
+
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+def HRWscatterplot( HRW_data, title='', hrw_channels=None, min_correlation=None, cloud_type=None, color_mode='direction'):
+
+    ## get a empty figure with transparent background, no axis and no margins outside the diagram
+    # fig = plt.figure()
+    import pylab 
+    fig = pylab.figure()
+    ax = plt.subplot(111)
+    ax.set_yscale("log", nonposx='clip')
+    plt.scatter(HRW_data.wind_speed, HRW_data.pressure/100, s=5, c=HRW_data.wind_direction, alpha=0.5, edgecolor='none')
+    pylab.title(title)
+    pylab.ylim([1000,100])
+    plt.yticks([1000,900,800,700,600,500,400,300,200,100], ['1000','900','800','700','600','500','400','300','200','100'], rotation='horizontal')
+
+    p = percentile(HRW_data.wind_speed, 95)
+    vmax = (round(p/10)+1)*10
+    print "... vmax:", vmax 
+
+    plt.plot([0,vmax], [680,680], color='g')
+    plt.plot([0,vmax], [440,440], color='b')
+
+    pylab.xlim([0,vmax])
+    ax.set_xlabel('HRW [m/s]')
+    ax.set_ylabel('p [hPa]')
+
+    cbar = plt.colorbar()
+    cbar.ax.set_ylabel('wind direction')
+
+    return fig2img ( fig )
+
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+# ----------------------------------------------------------------------------------------------------------
+
+def HRW_2dfield( HRW_data, obj_area, interpol_method=None, hrw_channels=None, min_correlation=None, level=''):
+
+    print "... calculate 2d wind field (HRW_2dfield)"
+
+    if min_correlation != None:
+        print "    filter for min_correlation = ", min_correlation
+        inds = where(HRW_data.correlation > min_correlation)
+        HRW_data.subset(inds)
+
+    xx, yy = obj_area.get_xy_from_lonlat( HRW_data.lon, HRW_data.lat, outside_error=False, return_int=False) #, return_int=True
+
+    yy = obj_area.y_size - yy  
+
+    uu = - HRW_data.wind_speed * sin(radians(HRW_data.wind_direction))
+    vv = - HRW_data.wind_speed * cos(radians(HRW_data.wind_direction))
+
+    # get rid of all vectors outside of the field 
+    index = nonzero(xx)
+    xx = xx[index]
+    yy = yy[index]
+    uu = uu[index]
+    vv = vv[index]
+
+    points = transpose(append([xx], [yy], axis=0))
+    #print type(uu), uu.shape
+    #print type(points), points.shape
+    #print points[0], yy[0], xx[0]
+    #print uu[0]
+
+    nx = obj_area.x_size
+    ny = obj_area.y_size
+
+    x2 = arange(nx)
+    y2 = (ny-1) - arange(ny)
+
+    grid_x, grid_y = meshgrid(x2, y2)
+    
+    if interpol_method == None:
+        # we need at least 2 winds to interpolate 
+        if uu.size < 4:  
+            print "*** Warning, not wnough wind data available, n_winds = ", uu.size
+            fake = empty(grid_x.shape)
+            fake[:,:] = nan
+            HRW_data.interpol_method = None
+            return fake, fake
+
+        elif uu.size < 50:
+            interpol_method = "RBF"
+
+        else:
+            interpol_method = "linear + nearest"
+     
+    #interpol_method = "nearest"
+    #interpol_method = "cubic + nearest" # might cause unrealistic overshoots
+    #interpol_method = "kriging"
+    #interpol_method = "..."
+
+    print "... min windspeed (org data): ", HRW_data.wind_speed.min()
+    print "... max windspeed (org data): ", HRW_data.wind_speed.max()
+
+    for i_iteration in [0,1]:
+
+        if interpol_method == "nearest":
+
+            print '... fill with nearest neighbour'
+            # griddata, see http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.griddata.html
+            grid_u1x = griddata(points, uu, (grid_x, grid_y), method='nearest')
+            grid_v1x = griddata(points, vv, (grid_x, grid_y), method='nearest')
+
+        elif interpol_method == "RBF":
+
+            print '... inter- and extrapolation using radial basis functions'
+            # https://www.youtube.com/watch?v=_cJLVhdj0j4
+            print "... start Rbf"
+            from scipy.interpolate import Rbf
+            # rbfu = Rbf(xx, yy, uu, epsilon=0.1) #
+            rbfu = Rbf(xx, yy, uu, epsilon=0.2)
+            grid_u1x = rbfu(grid_x, grid_y)
+            rbfv = Rbf(xx, yy, vv, epsilon=0.1) #
+            grid_v1x = rbfv(grid_x, grid_y)
+            print "... finish Rbf"
+            # !very! slow for a large number of observations 
+
+        elif interpol_method == "linear + nearest" or interpol_method == "cubic + nearest":
+
+            if interpol_method == "linear + nearest":
+                print '... calculate linear interpolation'
+                # griddata, see http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.griddata.html
+                grid_u1 = griddata(points, uu, (grid_x, grid_y), method='linear')
+                grid_v1 = griddata(points, vv, (grid_x, grid_y), method='linear')
+            elif interpol_method == "cubic + nearest":
+                # smoother, but can cause unrealistic overshoots
+                print '... calculate cubic interpolation'
+                grid_u1 = griddata(points, uu, (grid_x, grid_y), method='cubic')
+                grid_v1 = griddata(points, vv, (grid_x, grid_y), method='cubic')
+            else:
+                print "*** Error in mpop/imageo/HRWimage.py"
+                print "    unknown interpolation method: ", interpol_method
+                quit()
+
+            if 1==1:
+                # use faster function to extrapolate with closest neighbour
+                print "... fill outside area with closest value"
+                grid_u1x = fill_with_closest_pixel(grid_u1, invalid=None) 
+                grid_v1x = fill_with_closest_pixel(grid_v1, invalid=None) 
+            else:
+                # use griddata to extrapolate with closest neighbour
+                points2 = transpose(append([grid_x.flatten()], [grid_y.flatten()], axis=0))
+                print type(grid_x.flatten()), grid_x.flatten().shape
+                print type(points2), points2.shape
+                mask = ~isnan(grid_v1.flatten())
+                inds = where(mask)[0]
+                grid_u1x = griddata(points2[inds], grid_u1.flatten()[inds], (grid_x, grid_y), method='nearest')
+                grid_v1x = griddata(points2[inds], grid_v1.flatten()[inds], (grid_x, grid_y), method='nearest')
+
+            if 1==0:
+                # add othermost points as additional data
+                y_add = [0,    0, ny-1, ny-1]
+                x_add = [0, nx-1,    0, nx-1]
+                for (i,j) in zip(x_add,y_add):
+                    uu = append(uu, grid_u0[i,j])
+                    vv = append(vv, grid_v0[i,j])
+                xx = append(xx, x_add)
+                yy = append(yy, y_add)
+                points = transpose(append([yy], [xx], axis=0))
+
+                print 'calc extent1'
+                grid_u1e = griddata(points, uu, (grid_x, grid_y), method='linear')
+                grid_v1e = griddata(points, vv, (grid_x, grid_y), method='linear')
+
+        else:
+            print "*** Error in mpop/imageo/HRWimage.py"
+            print "    unknown interpol_method", interpol_method
+            quit()
+
+        ##http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
+        ##http://stackoverflow.com/questions/3526514/problem-with-2d-interpolation-in-scipy-non-rectangular-grid
+        #print "SmoothBivariateSpline:"
+        #from scipy.interpolate import SmoothBivariateSpline
+        #fitu = SmoothBivariateSpline( xx, yy, uu, s=1000) # , kx=3, ky=3, s = smooth * z2sum m 
+        #from numpy import empty 
+        #grid_u_SBP = empty(grid_x.shape)
+        #for i in range(0,nx-1):       # starting upper right going down
+        #    for j in range(0,ny-1):   # starting lower right going right
+        #        #print i,j
+        #        grid_u_SBP[j,i] = fitu(j,i)
+
+        #grid_u_SBP = np.array([k.predict([x,y]) for x,y in zip(np.ravel(grid_x), np.ravel(grid_y))])
+        #grid_u_SBP = grid_u_SBP.reshape(grid_x.shape)
+
+        ##print x2
+        ##print y2
+        #grid_u_SBP = fitu(x2,y2)
+        ##print "grid_u_SBP.shape", grid_u_SBP.shape
+        ###print grid_u_SBP
+        #print "End SmoothBivariateSpline:"
+
+        #print "bisplrep:"
+        #from scipy import interpolate
+        #tck = interpolate.bisplrep(xx, yy, uu)
+        #grid_u_BSR = interpolate.bisplev(grid_x[:,0], grid_y[0,:], tck)
+        #print grid_u_BSR.shape
+        #print "bisplrep"
+        #print "grid_v1x.shape", grid_v1x.shape
+
+        extent=(0,nx,0,ny)
+        origin='lower'
+        origin='upper'
+        origin=None 
+
+        # show different stages of 2d inter- and extra-polation 
+        if 1==0:
+            print 'make matplotlib.pyplot'
+            import matplotlib.pyplot as plt
+            vmin=-10
+            vmax=10
+            fig = plt.figure()
+            plt.subplot(221)
+            plt.title('u '+interpol_method)
+            plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1)
+            plt.imshow(grid_u1x, vmin=vmin, vmax=vmax) #, extent=extent
+            #plt.colorbar()
+            plt.subplot(222)
+            plt.title('v '+interpol_method)
+            plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1)
+            plt.imshow(grid_v1x, origin=origin, vmin=vmin, vmax=vmax) #, extent=extent
+            #plt.colorbar()
+
+            # standard calculation for comparison 
+            print '... calculate linear interpolation'
+            grid_u1 = griddata(points, uu, (grid_x, grid_y), method='linear')
+            grid_v1 = griddata(points, vv, (grid_x, grid_y), method='linear')
+            grid_u1xx = fill_with_closest_pixel(grid_u1, invalid=None) 
+            grid_v1xx = fill_with_closest_pixel(grid_v1, invalid=None) 
+
+            plt.subplot(223)
+            plt.title('U Linear+Nearest')
+            plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1)
+            plt.imshow(grid_u1xx, origin=origin, vmin=vmin, vmax=vmax) #, extent=extent
+            #plt.colorbar()
+            plt.subplot(224)
+            plt.title('V Linear+Nearest') 
+            plt.plot(points[:,0], ny-1-points[:,1], 'k.', ms=1)
+            #plt.title('Cubic')
+            plt.imshow(grid_v1xx, origin=origin, vmin=vmin, vmax=vmax) #, extent=extent
+            #plt.colorbar()
+            plt.gcf().set_size_inches(6, 6)
+            #plt.show()  # does not work with AGG
+            tmpfile="test_hrw"+level+".png"
+            fig.savefig(tmpfile)
+            print "display "+tmpfile+" &"
+
+
+        if grid_u1x.min() < -150 or grid_v1x.min() < -150 or grid_u1x.max() > 150 or grid_v1x.max() > 150:
+            print "*** Warning, numerical instability detected, interpolation method: ", interpol_method
+            print "    min u windspeed (u 2dimensional): ", grid_u1x.min()
+            print "    min v windspeed (v 2dimensional): ", grid_v1x.min()
+            print "    max u windspeed (u 2dimensional): ", grid_u1x.max()
+            print "    max v windspeed (v 2dimensional): ", grid_v1x.max()
+            interpol_method = "glinear + nearest"
+            print "... try another interpolation method: ", interpol_method
+        else:
+            # (hopefully) numerical stable interpolation, exit the interpolation loop
+            break 
+
+    HRW_data.interpol_method = interpol_method
+
+    return grid_u1x, grid_v1x
diff --git a/mpop/imageo/TRTimage.py b/mpop/imageo/TRTimage.py
new file mode 100644
index 0000000..10f3e80
--- /dev/null
+++ b/mpop/imageo/TRTimage.py
@@ -0,0 +1,131 @@
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+from matplotlib.figure import Figure
+#import matplotlib as mpl
+#mpl.use('Agg')
+#from pylab import figure
+from pylab import rand
+import matplotlib.pyplot as plt
+from matplotlib.patches import Ellipse
+from uuid import uuid4
+import subprocess
+from PIL import Image as PIL_Image
+import numpy
+
+def fig2data ( fig ):
+    """
+    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
+    @param fig a matplotlib figure
+    @return a numpy 3D array of RGBA values
+    """
+    # draw the renderer
+    fig.canvas.draw ( )
+
+    # Get the RGBA buffer from the figure
+    w,h = fig.canvas.get_width_height()
+    buf = numpy.fromstring ( fig.canvas.tostring_argb(), dtype=numpy.uint8 )
+    buf.shape = ( w, h, 4 )
+
+    # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
+    buf = numpy.roll ( buf, 3, axis = 2 )
+    return buf
+
+def fig2img ( fig ):
+    """
+    @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
+    @param fig a matplotlib figure
+    @return a Python Imaging Library ( PIL ) image
+    """
+    # put the figure pixmap into a numpy array
+    buf = fig2data ( fig )
+    w, h, d = buf.shape
+    return PIL_Image.frombytes( "RGBA", ( w ,h ), buf.tostring( ) )
+
+
+def TRTimage( TRTcell_IDs, TRTcells, obj_area, minRank=8, alpha_max=1.0, plot_vel=True):
+    """ Draw TRT thunderstorm cells as ellipses onto a transparent image of
+        the size of obj_area and return the result as a PIL image.
+
+        TRTcell_IDs  iterable of cell keys into TRTcells
+        TRTcells     mapping; each cell provides lon, lat, RANKr, vel_x, vel_y,
+                     ell_S, ell_L [km] and angle [deg]
+        obj_area     area definition (x_size, y_size, pixel_size_x/y,
+                     get_xy_from_lonlat)
+        minRank      only cells with RANKr > minRank are drawn
+        alpha_max    maximum opacity used for the ellipses (0..1)
+        plot_vel     if True, additionally draw the cell motion as an arrow
+    """
+
+    # define size of image 
+    nx = obj_area.x_size
+    ny = obj_area.y_size
+
+    # create new figure 
+    fig = Figure()
+    # canvas figure 
+    canvas = FigureCanvas(fig)
+    # get dots per inch of the screen
+    DPI = fig.get_dpi()
+    # print "DPI", DPI
+    # size the figure so that one pixel of the area maps to one image pixel
+    fig.set_size_inches(nx/float(DPI),ny/float(DPI))
+    # get axis object 
+    ax = fig.add_subplot(111, aspect='equal')
+    ## eliminates margins totally 
+    fig.subplots_adjust(left=0.0,right=1.0,bottom=0.0,top=1.0, wspace=0, hspace=0)
+    #plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0) # does only work with x11 display 
+    # set limits of the axis
+    ax.set_xlim(0, nx)
+    ax.set_ylim(0, ny)
+    # set transparent background
+    fig.patch.set_alpha(0.0)        # transparent outside of diagram  
+    ax.set_axis_bgcolor([1,0,0,0])  # transparent color inside diagram 
+
+    # define arrow properties 
+    # NOTE(review): min(x_size, x_size) is always x_size -- presumably
+    # min(x_size, y_size) was intended; TODO confirm
+    head_width  = 0.006 * min(obj_area.x_size,obj_area.x_size)
+    head_length = 2 * head_width
+
+    # pixel sizes converted from m to km (ell_S / ell_L are given in km)
+    pixel_size_x_km = 0.001 * obj_area.pixel_size_x
+    pixel_size_y_km = 0.001 * obj_area.pixel_size_y
+
+    for cell in TRTcell_IDs:
+
+        # skip weak cells below the rank threshold
+        if TRTcells[cell].RANKr > minRank:
+
+            (x0,y0) = obj_area.get_xy_from_lonlat(TRTcells[cell].lon, TRTcells[cell].lat, outside_error=False, return_int=False)
+            # flip y: row 0 of the area is the top of the image
+            y0 = (obj_area.y_size-1)-y0
+            # print (x0,y0)
+
+            vx = TRTcells[cell].vel_x
+            vy = TRTcells[cell].vel_y
+   
+            # !!!scaling of width and height is not correct, that is on map projection, but not on the ground!!!
+            e = Ellipse( xy     =  (x0, y0),                           \
+                         width  =  2*TRTcells[cell].ell_S / pixel_size_x_km, \
+                         height =  2*TRTcells[cell].ell_L / pixel_size_y_km, \
+                         angle  = -TRTcells[cell].angle )
+            
+            ax.add_artist(e)
+            e.set_clip_box(ax.bbox)
+            
+            # color and opacity encode the cell rank:
+            # rank <= 12 fades in linearly, then white -> green -> yellow -> red
+            if TRTcells[cell].RANKr <= 12:
+                cell_color="white"
+                alpha = (alpha_max-0.2) / 12. * TRTcells[cell].RANKr
+            elif TRTcells[cell].RANKr <= 15:
+                cell_color="white"
+                alpha = alpha_max
+            elif TRTcells[cell].RANKr <= 25:
+                cell_color="green"
+                alpha = alpha_max
+            elif TRTcells[cell].RANKr <= 35:
+                cell_color="yellow"
+                alpha = alpha_max
+            else:
+                cell_color="red"
+                alpha = alpha_max
+            # print "cell ID: %s, cell rank: %2d, cell_color:%7s, alpha = %4.1f" % (cell, TRTcells[cell].RANKr, cell_color, alpha)
+            e.set_alpha(alpha)       # transparency: 0.0 transparent, 1 total visible  
+            e.set_facecolor(cell_color)  # "white" or [1,1,1]
+
+            if plot_vel:
+                ax.arrow(x0, y0, vx, vy, head_width = head_width, head_length = head_length, fc=cell_color, ec=cell_color)
+
+    if 1==1:
+        # print " !!! convert fig to image by function fig2img !!!"
+        ### this would avoid saving into a file, but it fills the transparent areas with "white"
+        PIL_image = fig2img ( fig )  
+    else: 
+        # alternative path: render to a temporary PNG and read it back
+        tmp_file = '/tmp/TRT_'+str(uuid4())+'.png'
+        # print tmp_file
+        plt.savefig(tmp_file, dpi=DPI, transparent=True) #, bbox_inches='tight'
+        # subprocess.call("display "+tmp_file+" &", shell=True) 
+        PIL_image = PIL_Image.open(tmp_file)
+        subprocess.call("rm "+tmp_file+" &", shell=True) 
+
+    return PIL_image
diff --git a/mpop/imageo/formats/ninjotiff.py b/mpop/imageo/formats/ninjotiff.py
index f179a4e..cbbfe75 100644
--- a/mpop/imageo/formats/ninjotiff.py
+++ b/mpop/imageo/formats/ninjotiff.py
@@ -35,22 +35,24 @@ Edited by Christian Kliche (Ernst Basler + Partner) to replace pylibtiff with
 a modified version of tifffile.py (created by Christoph Gohlke)
 """
 
-import os
-import logging
 import calendar
-from datetime import datetime
+import logging
+import os
 from copy import deepcopy
+from datetime import datetime
+
 import numpy as np
 
 from mpop.imageo.formats import tifffile
+import mpop.imageo.formats.writer_options as write_opts
 
 log = logging.getLogger(__name__)
 
-#-------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
 #
 # Ninjo tiff tags from DWD
 #
-#-------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
 # Geotiff tags.
 GTF_ModelPixelScale = 33550
 GTF_ModelTiepoint = 33922
@@ -114,14 +116,15 @@ NINJO_TAGS_INV = dict((v, k) for k, v in NINJO_TAGS.items())
 # model_pixel_scale_tag_count ? ...
 # Sometimes DWD product defines an array of length 2 (instead of 3 (as in geotiff)).
 #
-MODEL_PIXEL_SCALE_COUNT = int(os.environ.get("GEOTIFF_MODEL_PIXEL_SCALE_COUNT", 3))
+MODEL_PIXEL_SCALE_COUNT = int(os.environ.get(
+    "GEOTIFF_MODEL_PIXEL_SCALE_COUNT", 3))
 
 
-#-------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
 #
 # Read Ninjo products config file.
 #
-#-------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
 def get_product_config(product_name, force_read=False):
     """Read Ninjo configuration entry for a given product name.
 
@@ -143,6 +146,7 @@ def get_product_config(product_name, force_read=False):
 
 
 class _Singleton(type):
+
     def __init__(cls, name_, bases_, dict_):
         super(_Singleton, cls).__init__(name_, bases_, dict_)
         cls.instance = None
@@ -178,7 +182,6 @@ class ProductConfigs(object):
                 return str(val)
 
         filename = self._find_a_config_file()
-        #print "Reading Ninjo config file: '%s'" % filename
         log.info("Reading Ninjo config file: '%s'" % filename)
 
         cfg = ConfigParser()
@@ -202,11 +205,11 @@ class ProductConfigs(object):
         raise ValueError("Could not find a Ninjo tiff config file")
 
 
-#-------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
 #
 # Write Ninjo Products
 #
-#-------------------------------------------------------------------------------
+#-------------------------------------------------------------------------
 def _get_physic_value(physic_unit):
     # return Ninjo's physics unit and value.
     if physic_unit.upper() in ('K', 'KELVIN'):
@@ -226,6 +229,8 @@ def _get_projection_name(area_def):
     proj_name = area_def.proj_dict['proj']
     if proj_name in ('eqc',):
         return 'PLAT'
+    elif proj_name in ('merc',):
+        return 'MERC'
     elif proj_name in ('stere',):
         lat_0 = area_def.proj_dict['lat_0']
         if lat_0 < 0:
@@ -236,11 +241,12 @@ def _get_projection_name(area_def):
 
 
 def _get_pixel_size(projection_name, area_def):
-    if projection_name == 'PLAT':
+    if projection_name in ['PLAT', 'MERC']:
         upper_left = area_def.get_lonlat(0, 0)
-        lower_right = area_def.get_lonlat(area_def.shape[0], area_def.shape[1])
-        pixel_size = abs(lower_right[0] - upper_left[0]) / area_def.shape[1],\
-            abs(upper_left[1] - lower_right[1]) / area_def.shape[0]
+        lower_right = area_def.get_lonlat(
+            area_def.shape[0] - 1, area_def.shape[1] - 1)
+        pixel_size = abs(lower_right[0] - upper_left[0]) / (area_def.shape[1] - 1),\
+            abs(upper_left[1] - lower_right[1]) / (area_def.shape[0] - 1)
     elif projection_name in ('NPOL', 'SPOL'):
         pixel_size = (np.rad2deg(area_def.pixel_size_x / float(area_def.proj_dict['a'])),
                       np.rad2deg(area_def.pixel_size_y / float(area_def.proj_dict['b'])))
@@ -263,7 +269,7 @@ def _get_satellite_altitude(filename):
     return None
 
 
-def _finalize(geo_image, dtype=np.uint8, value_range=None):
+def _finalize(geo_image, dtype=np.uint8, value_range_measurement_unit=None, data_is_scaled_01=True):
     """Finalize a mpop GeoImage for Ninjo. Specialy take care of phycical scale
     and offset.
 
@@ -271,9 +277,11 @@ def _finalize(geo_image, dtype=np.uint8, value_range=None):
         geo_image : mpop.imageo.geo_image.GeoImage
             See MPOP's documentation.
         dtype : bits per sample np.unit8 or np.unit16 (default: np.unit8)
-        value_range: list or tuple
+        value_range_measurement_unit: list or tuple
             Defining minimum and maximum value range. Data will be clipped into
             that range. Default is no clipping and auto scale.
+        data_is_scaled_01: boolean
+            If true (default), input data is assumed to be in the [0.0, 1.0] range.
 
     :Returns:
         image : numpy.array
@@ -287,42 +295,73 @@ def _finalize(geo_image, dtype=np.uint8, value_range=None):
 
     **Notes**:
         physic_val = image*scale + offset
+        Example values for value_range_measurement_unit are (0, 125) or (40.0, -87.5)
     """
     if geo_image.mode == 'L':
         # PFE: mpop.satout.cfscene
         data = geo_image.channels[0]
         fill_value = np.iinfo(dtype).min
         log.debug("Transparent pixel are forced to be %d" % fill_value)
-        log.debug("Before scaling: %.2f, %.2f, %.2f" % (data.min(), data.mean(), data.max()))
+        log.debug("Before scaling: %.2f, %.2f, %.2f" %
+                  (data.min(), data.mean(), data.max()))
         if np.ma.count_masked(data) == data.size:
             # All data is masked
             data = np.ones(data.shape, dtype=dtype) * fill_value
             scale = 1
             offset = 0
         else:
-            if value_range:
-                data.clip(value_range[0], value_range[1], data)
-                chn_min = value_range[0]
-                chn_max = value_range[1]
-                log.debug("Scaling, using value range %.2f - %.2f" %
-                          (value_range[0], value_range[1]))
+            if value_range_measurement_unit and data_is_scaled_01:
+                # No additional scaling of the input data - assume that data is within [0.0, 1.0]
+                # and interpret 0.0 as value_range_measurement_unit[0]
+                # and 1.0 as value_range_measurement_unit[1]
+                channels, fill_value = geo_image._finalize(dtype)
+                fill_value = fill_value or (0,)
+                data = channels[0]
+
+                scale = ((value_range_measurement_unit[1] - value_range_measurement_unit[0]) /
+                         (np.iinfo(dtype).max - 1.0))
+
+                # Handle the case where all data has the same value.
+                scale = scale or 1
+                offset = value_range_measurement_unit[0]
+
+                mask = data.mask
+
+                # Make room for transparent pixel.
+                scale_fill_value = (
+                    (np.iinfo(dtype).max - 1.0) / np.iinfo(dtype).max)
+                data = 1 + (data.data * scale_fill_value).astype(dtype)
+
+                offset -= scale
+                scale /= scale_fill_value
+
             else:
-                chn_max = data.max()
-                chn_min = data.min()
-                log.debug("Doing auto scaling")
-
-            # Make room for transparent pixel.
-            scale = ((chn_max - chn_min) /
-                     (np.iinfo(dtype).max - 1.0))
-
-            # Handle the case where all data has the same value.
-            scale = scale or 1
-            offset = chn_min
-
-            # Scale data to dtype, and adjust for transparent pixel forced to be minimum.
-            mask = data.mask
-            data = 1 + ((data.data - offset) / scale).astype(dtype)
-            offset -= scale
+                if value_range_measurement_unit:
+                    data.clip(value_range_measurement_unit[
+                              0], value_range_measurement_unit[1], data)
+                    chn_min = value_range_measurement_unit[0]
+                    chn_max = value_range_measurement_unit[1]
+                    log.debug("Scaling, using value range %.2f - %.2f" %
+                              (value_range_measurement_unit[0], value_range_measurement_unit[1]))
+                else:
+                    chn_max = data.max()
+                    chn_min = data.min()
+                    log.debug("Doing auto scaling")
+
+                # Make room for transparent pixel.
+                scale = ((chn_max - chn_min) /
+                         (np.iinfo(dtype).max - 1.0))
+
+                # Handle the case where all data has the same value.
+                scale = scale or 1
+                offset = chn_min
+
+                # Scale data to dtype, and adjust for transparent pixel forced
+                # to be minimum.
+                mask = data.mask
+                data = 1 + ((data.data - offset) / scale).astype(dtype)
+                offset -= scale
+
             data[mask] = fill_value
 
             if log.getEffectiveLevel() == logging.DEBUG:
@@ -356,12 +395,23 @@ def _finalize(geo_image, dtype=np.uint8, value_range=None):
                           channels[3].filled(fill_value[3])))
         return data, 1.0, 0.0, fill_value[0]
 
+    elif geo_image.mode == 'P':
+        fill_value = 0
+        data = geo_image.channels[0]
+        if isinstance(data, np.ma.core.MaskedArray):
+            data = data.filled(fill_value)
+        data = data.astype(dtype)
+        log.debug("Value range: %.2f, %.2f, %.2f" %
+                  (data.min(), data.mean(), data.max()))
+        return data, 1.0, 0.0, fill_value
+
     else:
         raise ValueError("Don't known how til handle image mode '%s'" %
                          str(geo_image.mode))
 
 
-def save(geo_image, filename, ninjo_product_name=None, **kwargs):
+def save(geo_image, filename, ninjo_product_name=None, writer_options=None,
+         **kwargs):
     """MPOP's interface to Ninjo TIFF writer.
 
     :Parameters:
@@ -372,6 +422,9 @@ def save(geo_image, filename, ninjo_product_name=None, **kwargs):
     :Keywords:
         ninjo_product_name : str
             Optional index to Ninjo configuration file.
+        writer_options : dict
+            Options dictionary as defined in the MPOP interface.
+            See _write
         kwargs : dict
             See _write
 
@@ -380,31 +433,54 @@ def save(geo_image, filename, ninjo_product_name=None, **kwargs):
         * 8 bits grayscale with a colormap (if specified, inverted for IR channels).
         * 16 bits grayscale with no colormap (if specified, MinIsWhite is set for IR).
         * min value will be reserved for transparent color.
-        * RGB images will use mpop.imageo.image's standard finalize.
+        * If possible mpop.imageo.image's standard finalize will be used.
     """
 
-    dtype = np.uint8  # @UndefinedVariable
+    if writer_options:
+        # add writer_options
+        kwargs.update(writer_options)
+        if 'ninjo_product_name' in writer_options:
+            ninjo_product_name = writer_options['ninjo_product_name']
+
+    dtype = np.uint8
     if 'nbits' in kwargs:
         nbits = int(kwargs['nbits'])
         if nbits == 16:
-            dtype = np.uint16  # @UndefinedVariable
+            dtype = np.uint16
 
     try:
-        # TODO: don't force min and max to integers.
-        value_range = int(kwargs["ch_min"]), int(kwargs["ch_max"])
+        value_range_measurement_unit = (float(kwargs["ch_min_measurement_unit"]),
+                                        float(kwargs["ch_max_measurement_unit"]))
     except KeyError:
-        value_range = None
+        value_range_measurement_unit = None
+
+    data_is_scaled_01 = bool(kwargs.get("data_is_scaled_01", True))
+
+    data, scale, offset, fill_value = _finalize(geo_image,
+                                                dtype=dtype,
+                                                data_is_scaled_01=data_is_scaled_01,
+                                                value_range_measurement_unit=value_range_measurement_unit,)
 
-    data, scale, offset, fill_value = _finalize(geo_image, dtype=dtype, value_range=value_range)
     area_def = geo_image.area
     time_slot = geo_image.time_slot
 
     # Some Ninjo tiff names
-    kwargs['image_dt'] = time_slot
-    kwargs['transparent_pix'] = fill_value
     kwargs['gradient'] = scale
     kwargs['axis_intercept'] = offset
+    kwargs['transparent_pix'] = fill_value
+    kwargs['image_dt'] = time_slot
     kwargs['is_calibrated'] = True
+    if geo_image.mode == 'P' and 'cmap' not in kwargs:
+        r, g, b = zip(*geo_image.palette)
+        r = list((np.array(r) * 255).astype(np.uint8))
+        g = list((np.array(g) * 255).astype(np.uint8))
+        b = list((np.array(b) * 255).astype(np.uint8))
+        if len(r) < 256:
+            r += [0] * (256 - len(r))
+            g += [0] * (256 - len(g))
+            b += [0] * (256 - len(b))
+        kwargs['cmap'] = r, g, b
+
     write(data, filename, area_def, ninjo_product_name, **kwargs)
 
 
@@ -431,20 +507,21 @@ def write(image_data, output_fn, area_def, product_name=None, **kwargs):
             See _write
     """
     upper_left = area_def.get_lonlat(0, 0)
-    lower_right = area_def.get_lonlat(area_def.shape[0], area_def.shape[1])
+    lower_right = area_def.get_lonlat(
+        area_def.shape[0] - 1, area_def.shape[1] - 1)
 
     if len(image_data.shape) == 3:
         if image_data.shape[2] == 4:
             shape = (area_def.y_size, area_def.x_size, 4)
-            log.info("Will generate RGBA product '%s'" % product_name)
+            log.info("Will generate RGBA product")
         else:
             shape = (area_def.y_size, area_def.x_size, 3)
-            log.info("Will generate RGB product '%s'" % product_name)
+            log.info("Will generate RGB product")
         write_rgb = True
     else:
         shape = (area_def.y_size, area_def.x_size)
         write_rgb = False
-        log.info("Will generate product '%s'" % product_name)
+        log.info("Will generate single band product")
 
     if image_data.shape != shape:
         raise ValueError("Raster shape %s does not correspond to expected shape %s" % (
@@ -701,7 +778,8 @@ def _write(image_data, output_fn, write_rgb=False, **kwargs):
             # Always generate colormap for 8 bit gray scale.
             cmap = _default_colormap(reverse)
         elif reverse:
-            # No colormap for 16 bit gray scale, but for IR, specify white is minimum.
+            # No colormap for 16 bit gray scale, but for IR, specify white is
+            # minimum.
             min_is_white = True
 
     if cmap and len(cmap) != 3:
@@ -757,7 +835,8 @@ def _write(image_data, output_fn, write_rgb=False, **kwargs):
             else:
                 args["photometric"] = 'minisblack'
 
-        # planarconfig, samples_per_pixel, orientation, sample_format set by tifffile.py
+        # planarconfig, samples_per_pixel, orientation, sample_format set by
+        # tifffile.py
 
         args["tile_width"] = tile_width
         args["tile_length"] = tile_length
@@ -770,7 +849,8 @@ def _write(image_data, output_fn, write_rgb=False, **kwargs):
 
         # NinJo specific tags
         if description is not None:
-            extra_tags.append((NINJO_TAGS["NTD_Description"], 's', 0, description, True))
+            extra_tags.append(
+                (NINJO_TAGS["NTD_Description"], 's', 0, description, True))
 
         # Geo tiff tags
         if MODEL_PIXEL_SCALE_COUNT == 3:
@@ -782,19 +862,24 @@ def _write(image_data, output_fn, write_rgb=False, **kwargs):
         extra_tags.append((GTF_ModelTiepoint,
                            'd', 6, [0.0, 0.0, 0.0, origin_lon, origin_lat, 0.0], True))
         extra_tags.append((NINJO_TAGS["NTD_Magic"], 's', 0, "NINJO", True))
-        extra_tags.append((NINJO_TAGS["NTD_SatelliteNameID"], 'I', 1, sat_id, True))
-        extra_tags.append((NINJO_TAGS["NTD_DateID"], 'I', 1, image_epoch, True))
-        extra_tags.append((NINJO_TAGS["NTD_CreationDateID"], 'I', 1, file_epoch, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_SatelliteNameID"], 'I', 1, sat_id, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_DateID"], 'I', 1, image_epoch, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_CreationDateID"], 'I', 1, file_epoch, True))
         extra_tags.append((NINJO_TAGS["NTD_ChannelID"], 'I', 1, chan_id, True))
         extra_tags.append((NINJO_TAGS["NTD_HeaderVersion"], 'i', 1, 2, True))
         if omit_filename_path:
             extra_tags.append((NINJO_TAGS["NTD_FileName"], 's', 0,
                                os.path.basename(output_fn), True))
         else:
-            extra_tags.append((NINJO_TAGS["NTD_FileName"], 's', 0, output_fn, True))
+            extra_tags.append(
+                (NINJO_TAGS["NTD_FileName"], 's', 0, output_fn, True))
         extra_tags.append((NINJO_TAGS["NTD_DataType"], 's', 0, data_cat, True))
         # Hardcoded to 0
-        extra_tags.append((NINJO_TAGS["NTD_SatelliteNumber"], 's', 0, "\x00", True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_SatelliteNumber"], 's', 0, "\x00", True))
 
         if write_rgb:
             extra_tags.append((NINJO_TAGS["NTD_ColorDepth"], 'i', 1, 24, True))
@@ -803,14 +888,20 @@ def _write(image_data, output_fn, write_rgb=False, **kwargs):
         else:
             extra_tags.append((NINJO_TAGS["NTD_ColorDepth"], 'i', 1, 8, True))
 
-        extra_tags.append((NINJO_TAGS["NTD_DataSource"], 's', 0, data_source, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_DataSource"], 's', 0, data_source, True))
         extra_tags.append((NINJO_TAGS["NTD_XMinimum"], 'i', 1, 1, True))
-        extra_tags.append((NINJO_TAGS["NTD_XMaximum"], 'i', 1, image_data.shape[1], True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_XMaximum"], 'i', 1, image_data.shape[1], True))
         extra_tags.append((NINJO_TAGS["NTD_YMinimum"], 'i', 1, 1, True))
-        extra_tags.append((NINJO_TAGS["NTD_YMaximum"], 'i', 1, image_data.shape[0], True))
-        extra_tags.append((NINJO_TAGS["NTD_Projection"], 's', 0, projection, True))
-        extra_tags.append((NINJO_TAGS["NTD_MeridianWest"], 'f', 1, meridian_west, True))
-        extra_tags.append((NINJO_TAGS["NTD_MeridianEast"], 'f', 1, meridian_east, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_YMaximum"], 'i', 1, image_data.shape[0], True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_Projection"], 's', 0, projection, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_MeridianWest"], 'f', 1, meridian_west, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_MeridianEast"], 'f', 1, meridian_east, True))
 
         if radius_a is not None:
             extra_tags.append((NINJO_TAGS["NTD_EarthRadiusLarge"],
@@ -818,28 +909,39 @@ def _write(image_data, output_fn, write_rgb=False, **kwargs):
         if radius_b is not None:
             extra_tags.append((NINJO_TAGS["NTD_EarthRadiusSmall"],
                                'f', 1, float(radius_b), True))
-        # extra_tags.append((NINJO_TAGS["NTD_GeodeticDate"], 's', 0, "\x00", True)) # ---?
+        # extra_tags.append((NINJO_TAGS["NTD_GeodeticDate"], 's', 0, "\x00",
+        # True)) # ---?
         if ref_lat1 is not None:
-            extra_tags.append((NINJO_TAGS["NTD_ReferenceLatitude1"], 'f', 1, ref_lat1, True))
+            extra_tags.append(
+                (NINJO_TAGS["NTD_ReferenceLatitude1"], 'f', 1, ref_lat1, True))
         if ref_lat2 is not None:
-            extra_tags.append((NINJO_TAGS["NTD_ReferenceLatitude2"], 'f', 1, ref_lat2, True))
+            extra_tags.append(
+                (NINJO_TAGS["NTD_ReferenceLatitude2"], 'f', 1, ref_lat2, True))
         if central_meridian is not None:
             extra_tags.append((NINJO_TAGS["NTD_CentralMeridian"],
                                'f', 1, central_meridian, True))
-        extra_tags.append((NINJO_TAGS["NTD_PhysicValue"], 's', 0, physic_value, True))
-        extra_tags.append((NINJO_TAGS["NTD_PhysicUnit"], 's', 0, physic_unit, True))
-        extra_tags.append((NINJO_TAGS["NTD_MinGrayValue"], 'i', 1, min_gray_val, True))
-        extra_tags.append((NINJO_TAGS["NTD_MaxGrayValue"], 'i', 1, max_gray_val, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_PhysicValue"], 's', 0, physic_value, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_PhysicUnit"], 's', 0, physic_unit, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_MinGrayValue"], 'i', 1, min_gray_val, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_MaxGrayValue"], 'i', 1, max_gray_val, True))
         extra_tags.append((NINJO_TAGS["NTD_Gradient"], 'f', 1, gradient, True))
-        extra_tags.append((NINJO_TAGS["NTD_AxisIntercept"], 'f', 1, axis_intercept, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_AxisIntercept"], 'f', 1, axis_intercept, True))
         if altitude is not None:
-            extra_tags.append((NINJO_TAGS["NTD_Altitude"], 'f', 1, altitude, True))
+            extra_tags.append(
+                (NINJO_TAGS["NTD_Altitude"], 'f', 1, altitude, True))
         extra_tags.append((NINJO_TAGS["NTD_IsBlackLineCorrection"],
                            'i', 1, is_blac_corrected, True))
         extra_tags.append((NINJO_TAGS["NTD_IsAtmosphereCorrected"],
                            'i', 1, is_atmo_corrected, True))
-        extra_tags.append((NINJO_TAGS["NTD_IsCalibrated"], 'i', 1, is_calibrated, True))
-        extra_tags.append((NINJO_TAGS["NTD_IsNormalized"], 'i', 1, is_normalized, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_IsCalibrated"], 'i', 1, is_calibrated, True))
+        extra_tags.append(
+            (NINJO_TAGS["NTD_IsNormalized"], 'i', 1, is_normalized, True))
         extra_tags.append((NINJO_TAGS["NTD_TransparentPixel"],
                            'i', 1, transparent_pix, True))
 
@@ -861,17 +963,17 @@ def _write(image_data, output_fn, write_rgb=False, **kwargs):
         tifargs['bigtiff'] = True
 
     with tifffile.TiffWriter(output_fn, **tifargs) as tif:
-            tif.save(image_data, **args)
-            for _, scale in enumerate((2, 4, 8, 16)):
-                shape = (image_data.shape[0] / scale,
-                         image_data.shape[1] / scale)
-                if shape[0] > tile_width and shape[1] > tile_length:
-                    args = _create_args(image_data[::scale, ::scale],
-                                        pixel_xres * scale, pixel_yres * scale)
-                    for key in header_only_keys:
-                        if key in args:
-                            del args[key]
-                    tif.save(image_data[::scale, ::scale], **args)
+        tif.save(image_data, **args)
+        for _, scale in enumerate((2, 4, 8, 16)):
+            shape = (image_data.shape[0] / scale,
+                     image_data.shape[1] / scale)
+            if shape[0] > tile_width and shape[1] > tile_length:
+                args = _create_args(image_data[::scale, ::scale],
+                                    pixel_xres * scale, pixel_yres * scale)
+                for key in header_only_keys:
+                    if key in args:
+                        del args[key]
+                tif.save(image_data[::scale, ::scale], **args)
 
     log.info("Successfully created a NinJo tiff file: '%s'" % (output_fn,))
 
diff --git a/mpop/imageo/formats/ninjotiff_example b/mpop/imageo/formats/ninjotiff_example
index 1660f87..28a7206 100755
--- a/mpop/imageo/formats/ninjotiff_example
+++ b/mpop/imageo/formats/ninjotiff_example
@@ -33,12 +33,15 @@ Saving an image for 'chn' will then go like:
     image.save(filename,
                fformat='mpop.imageo.formats.ninjotiff',
                physic_unit=physic_unit,
-               value_range=value_range,
+               ch_min_measurement_unit=min_value,
+               ch_max_measurement_unit=max_value,
                **ninjotiff_config[chn])
 """
 import sys
 import os
 from datetime import datetime, timedelta
+import logging
+logging.basicConfig(level=logging.DEBUG)
 
 # Basic check.
 try:
@@ -78,7 +81,6 @@ CHANNEL_DICT = {
     }
 
 BITS_PER_SAMPLE = 8
-
 DO_CONVECTION = False
 
 for area_name, area_in, area_out in AREAS:
@@ -106,17 +108,20 @@ for area_name, area_in, area_out in AREAS:
             physic_unit = scene[chn].unit = 'C'
 
         # Value range as DWD
-        value_range = None
         if physic_unit in ('C', 'CELSIUS'):
             # IR
-            value_range = [-88.5, 40]
+            value_range = (-88.5, 40.)
         else:
             # VIS
-            value_range = [0., 125.]
+            value_range = (0., 125.)
         
-
-        # GeoImage without any data scaling or enhancement.
-        img = scene.image(chn, mode="L")
+        #
+        # A GeoImage specifying a color range.
+        #
+    # If no color_range specified, MPOP will not scale the data into the [0., 1.] range.
+        # In that case set the data_is_scaled_01 option to False in img.save
+        #
+        img = scene.image(chn, mode="L", crange=[value_range])
         LOG.info("%s (%s, %s) %.2f %.2f %.2f" % (chn_name, physic_unit,
                                                  img.channels[0].dtype,
                                                  img.channels[0].min(),
@@ -136,8 +141,8 @@ for area_name, area_in, area_out in AREAS:
                  fformat='mpop.imageo.formats.ninjotiff',
                  physic_unit=physic_unit,
                  ninjo_product_name=chn_name,
-                 ch_min=value_range[0],
-                 ch_max=value_range[1],
+                 ch_min_measurement_unit=value_range[0],
+                 ch_max_measurement_unit=value_range[1],
                  nbits=BITS_PER_SAMPLE)
 
         # Cleanup.
diff --git a/mpop/imageo/formats/writer_options.py b/mpop/imageo/formats/writer_options.py
new file mode 100644
index 0000000..df8294d
--- /dev/null
+++ b/mpop/imageo/formats/writer_options.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016.
+
+# Author(s):
+
+#   Christian Kliche <christian.kliche at ebp.de>
+
+# This file is part of the mpop.
+
+# mpop is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# mpop is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with mpop.  If not, see <http://www.gnu.org/licenses/>.
+'''
+Module for writer option constants
+'''
+WR_OPT_NBITS = 'nbits'
+WR_OPT_COMPRESSION = 'compression'
+WR_OPT_BLOCKSIZE = 'blocksize'
+WR_OPT_FILL_VALUE_SUBST = 'fill_value_subst'
diff --git a/mpop/imageo/geo_image.py b/mpop/imageo/geo_image.py
index c1157b0..2e15f73 100644
--- a/mpop/imageo/geo_image.py
+++ b/mpop/imageo/geo_image.py
@@ -32,22 +32,25 @@
 
 """Module for geographic images.
 """
+import logging
 import os
 
 import numpy as np
 
+from mpop import CONFIG_PATH
+from mpop.utils import ensure_dir
+
 try:
     from trollimage.image import Image, UnknownImageFormat
 except ImportError:
     from mpop.imageo.image import Image, UnknownImageFormat
 
 
-from mpop import CONFIG_PATH
-import logging
-from mpop.utils import ensure_dir
+import mpop.imageo.formats.writer_options as write_opts
 
 logger = logging.getLogger(__name__)
 
+
 class GeoImage(Image):
     """This class defines geographic images. As such, it contains not only data
     of the different *channels* of the image, but also the area on which it is
@@ -74,7 +77,7 @@ class GeoImage(Image):
 
     def save(self, filename, compression=6,
              tags=None, gdal_options=None,
-             fformat=None, blocksize=256, **kwargs):
+             fformat=None, blocksize=256, writer_options=None, **kwargs):
         """Save the image to the given *filename*. If the extension is "tif",
         the image is saved to geotiff_ format, in which case the *compression*
         level can be given ([0, 9], 0 meaning off). See also
@@ -84,18 +87,44 @@ class GeoImage(Image):
         options for the gdal saving driver. A *blocksize* other than 0 will
         result in a tiled image (if possible), with tiles of size equal to
         *blocksize*.
-
         If the specified format *fformat* is not know to MPOP (and PIL), we
         will try to import module *fformat* and call the method `fformat.save`.
 
+        Use *writer_options* to define parameters that should be forwarded to
+        custom writers. Dictionary keys listed in
+        mpop.imageo.formats.writer_options will be interpreted by this
+        function instead of *compression*, *blocksize* and nbits in
+        *tags* dict.
 
         .. _geotiff: http://trac.osgeo.org/geotiff/
         """
         fformat = fformat or os.path.splitext(filename)[1][1:]
 
+        # prefer parameters in writer_options dict
+        # fill dict if parameters are missing
+        writer_options = writer_options or {}
+        tags = tags or {}
+        if writer_options.get(write_opts.WR_OPT_COMPRESSION, None):
+            compression = writer_options[write_opts.WR_OPT_COMPRESSION]
+        elif compression is not None:
+            writer_options[write_opts.WR_OPT_COMPRESSION] = compression
+
+        if writer_options.get(write_opts.WR_OPT_BLOCKSIZE, None):
+            blocksize = writer_options[write_opts.WR_OPT_BLOCKSIZE]
+        elif blocksize is not None:
+            writer_options[write_opts.WR_OPT_BLOCKSIZE] = blocksize
+
+        if writer_options.get(write_opts.WR_OPT_NBITS, None):
+            tags['NBITS'] = writer_options[write_opts.WR_OPT_NBITS]
+        elif tags.get('NBITS') is not None:
+            writer_options[write_opts.WR_OPT_NBITS] = tags.get('NBITS')
+
         if fformat.lower() in ('tif', 'tiff'):
+            kwargs = kwargs or {}
+            kwargs['writer_options'] = writer_options
             return self.geotiff_save(filename, compression, tags,
-                                     gdal_options, blocksize, **kwargs)
+                                     gdal_options, blocksize,
+                                     **kwargs)
         try:
             # Let image.pil_save it ?
             Image.save(self, filename, compression, fformat=fformat)
@@ -105,8 +134,10 @@ class GeoImage(Image):
             try:
                 saver = __import__(fformat, globals(), locals(), ['save'])
             except ImportError:
-                raise  UnknownImageFormat(
+                raise UnknownImageFormat(
                     "Unknown image format '%s'" % fformat)
+            kwargs = kwargs or {}
+            kwargs['writer_options'] = writer_options
             saver.save(self, filename, **kwargs)
 
     def _gdal_write_channels(self, dst_ds, channels, opacity, fill_value):
@@ -136,7 +167,8 @@ class GeoImage(Image):
     def geotiff_save(self, filename, compression=6,
                      tags=None, gdal_options=None,
                      blocksize=0, geotransform=None,
-                     spatialref=None, floating_point=False):
+                     spatialref=None, floating_point=False,
+                     writer_options=None):
         """Save the image to the given *filename* in geotiff_ format, with the
         *compression* level in [0, 9]. 0 means not compressed. The *tags*
         argument is a dict of tags to include in the image (as metadata).  By
@@ -144,6 +176,9 @@ class GeoImage(Image):
         spatialref information, this can be overwritten by the arguments
         *geotransform* and *spatialref*. *floating_point* allows the saving of
         'L' mode images in floating point format if set to True.
+        When argument *writer_options* is not None and entry 'fill_value_subst'
+        is included, its numeric value will be used to substitute image data
+        that would be equal to the fill_value (used to replace masked data).
 
         .. _geotiff: http://trac.osgeo.org/geotiff/
         """
@@ -152,6 +187,7 @@ class GeoImage(Image):
         raster = gdal.GetDriverByName("GTiff")
 
         tags = tags or {}
+        writer_options = writer_options or {}
 
         if floating_point:
             if self.mode != "L":
@@ -179,6 +215,12 @@ class GeoImage(Image):
             opacity = np.iinfo(dtype).max
             channels, fill_value = self._finalize(dtype)
 
+            fill_value_subst = writer_options.get(
+                write_opts.WR_OPT_FILL_VALUE_SUBST, None)
+            if fill_value is not None and fill_value_subst is not None:
+                for i, chan in enumerate(channels):
+                    np.place(chan, chan == fill_value[i], int(fill_value_subst))
+
         logger.debug("Saving to GeoTiff.")
 
         if tags is not None:
@@ -264,9 +306,7 @@ class GeoImage(Image):
                                       fill_value)
         else:
             raise NotImplementedError("Saving to GeoTIFF using image mode"
-                                      " %s is not implemented."%self.mode)
-
-
+                                      " %s is not implemented." % self.mode)
 
         # Create raster GeoTransform based on upper left corner and pixel
         # resolution ... if not overwritten by argument geotransform.
@@ -285,7 +325,6 @@ class GeoImage(Image):
             except (utils.AreaNotFound, AttributeError):
                 area = self.area
 
-
             try:
                 adfgeotransform = [area.area_extent[0], area.pixel_size_x, 0,
                                    area.area_extent[3], 0, -area.pixel_size_y]
@@ -300,9 +339,8 @@ class GeoImage(Image):
                     pass
                 try:
                     # Check for epsg code.
-                    srs.SetAuthority('PROJCS', 'EPSG',
-                                     int(area.proj_dict['init'].
-                                         split('epsg:')[1]))
+                    srs.ImportFromEPSG(int(area.proj_dict['init'].
+                                           lower().split('epsg:')[1]))
                 except (KeyError, IndexError):
                     pass
                 srs = srs.ExportToWkt()
@@ -319,7 +357,6 @@ class GeoImage(Image):
 
         dst_ds = None
 
-
     def add_overlay(self, color=(0, 0, 0), width=0.5, resolution=None):
         """Add coastline and political borders to image, using *color* (tuple
         of integers between 0 and 255).
@@ -335,8 +372,6 @@ class GeoImage(Image):
         +-----+-------------------------+---------+
         """
 
-
-
         img = self.pil_image()
 
         import ConfigParser
@@ -394,12 +429,10 @@ class GeoImage(Image):
             for idx in range(len(self.channels)):
                 self.channels[idx] = np.ma.array(arr[:, :, idx] / 255.0)
 
-
     def add_overlay_config(self, config_file):
         """Add overlay to image parsing a configuration file.
-           
-        """
 
+        """
 
         import ConfigParser
         conf = ConfigParser.ConfigParser()
@@ -417,7 +450,6 @@ class GeoImage(Image):
             logger.warning("AGGdraw lib not installed...width and opacity properties are not available for overlays.")
             from pycoast import ContourWriter
             cw_ = ContourWriter(coast_dir)
-            
 
         logger.debug("Getting area for overlay: " + str(self.area))
 
@@ -429,15 +461,14 @@ class GeoImage(Image):
 
         img = self.pil_image()
 
-
         from mpop.projector import get_area_def
         if isinstance(self.area, str):
             self.area = get_area_def(self.area)
         logger.info("Add overlays to image.")
         logger.debug("Area = " + str(self.area.area_id))
 
-        foreground=cw_.add_overlay_from_config(config_file, self.area)
-        img.paste(foreground,mask=foreground.split()[-1])
+        foreground = cw_.add_overlay_from_config(config_file, self.area)
+        img.paste(foreground, mask=foreground.split()[-1])
 
         arr = np.array(img)
 
@@ -446,4 +477,3 @@ class GeoImage(Image):
         else:
             for idx in range(len(self.channels)):
                 self.channels[idx] = np.ma.array(arr[:, :, idx] / 255.0)
-
diff --git a/mpop/imageo/palettes.py b/mpop/imageo/palettes.py
index 5e04603..5ab221b 100644
--- a/mpop/imageo/palettes.py
+++ b/mpop/imageo/palettes.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2009, 2013, 2015.
+# Copyright (c) 2009, 2013, 2015, 2016.
 
 # SMHI,
 # Folkborgsvägen 1,
@@ -30,6 +30,8 @@
 """Palette holder module.
 """
 
+import numpy as np
+
 
 def tv_legend():
     """Palette for TV.
@@ -68,6 +70,18 @@ def vv_legend():
     return convert_palette(legend)
 
 
+def cloud_phase():
+    """Palette for cloud phase.
+    """
+    legend = []
+    legend.append((0,    0,   0))  # Unprocessed: Black
+    legend.append((0,    0, 215))  # Water Clouds: Blue
+    legend.append((240, 240, 240))  # Ice Clouds: Almost White
+    legend.append((120, 120,   0))  # Uncertain Phase: ?
+
+    return convert_palette(legend)
+
+
 def cms_modified():
     """Palette for regular cloud classification.
     """
@@ -109,7 +123,7 @@ def ctth_height():
     """CTTH height palette.
     """
     legend = []
-    legend.append((0,     0,   0))
+    legend.append((0,   0,   0))
     legend.append((255,   0, 216))  # 0 meters
     legend.append((126,   0,  43))
     legend.append((153,  20,  47))
@@ -121,11 +135,11 @@ def ctth_height():
     legend.append((216, 255,   0))
     legend.append((178, 255,   0))
     legend.append((153, 255,   0))
-    legend.append((0,   255,   0))
-    legend.append((0,   140,  48))
-    legend.append((0,   178, 255))
-    legend.append((0,   216, 255))
-    legend.append((0,   255, 255))
+    legend.append((0, 255,   0))
+    legend.append((0, 140,  48))
+    legend.append((0, 178, 255))
+    legend.append((0, 216, 255))
+    legend.append((0, 255, 255))
     legend.append((238, 214, 210))
     legend.append((239, 239, 223))
     legend.append((255, 255, 255))  # 10,000 meters
@@ -141,38 +155,38 @@ def ctth_height_pps():
     Identical to the one found in the hdf5 files.
     """
     legend = []
-    legend.append((255, 0, 216))  # 0 meters
-    legend.append((255, 0, 216))  # 0 meters
-    legend.append((255, 0, 216))  # 0 meters
-    legend.append((126, 0, 43))
-    legend.append((126, 0, 43))
-    legend.append((153, 20, 47))
-    legend.append((153, 20, 47))
-    legend.append((153, 20, 47))
-    legend.append((178, 51, 0))
-    legend.append((178, 51, 0))
-    legend.append((255, 76, 0))
-    legend.append((255, 76, 0))
-    legend.append((255, 76, 0))
-    legend.append((255, 102, 0))
-    legend.append((255, 102, 0))
-    legend.append((255, 164, 0))
-    legend.append((255, 164, 0))
-    legend.append((255, 164, 0))
-    legend.append((255, 216, 0))
-    legend.append((255, 216, 0))
-    legend.append((216, 255, 0))
-    legend.append((216, 255, 0))
-    legend.append((178, 255, 0))
-    legend.append((178, 255, 0))
-    legend.append((178, 255, 0))
-    legend.append((153, 255, 0))
-    legend.append((153, 255, 0))
-    legend.append((0, 255, 0))
-    legend.append((0, 255, 0))
-    legend.append((0, 255, 0))
-    legend.append((0, 140, 48))
-    legend.append((0, 140, 48))
+    legend.append((255,   0, 216))  # 0 meters
+    legend.append((255,   0, 216))  # 0 meters
+    legend.append((255,   0, 216))  # 0 meters
+    legend.append((126,   0,  43))
+    legend.append((126,   0,  43))
+    legend.append((153,  20,  47))
+    legend.append((153,  20,  47))
+    legend.append((153,  20,  47))
+    legend.append((178,  51,   0))
+    legend.append((178,  51,   0))
+    legend.append((255,  76,   0))
+    legend.append((255,  76,   0))
+    legend.append((255,  76,   0))
+    legend.append((255, 102,   0))
+    legend.append((255, 102,   0))
+    legend.append((255, 164,   0))
+    legend.append((255, 164,   0))
+    legend.append((255, 164,   0))
+    legend.append((255, 216,   0))
+    legend.append((255, 216,   0))
+    legend.append((216, 255,   0))
+    legend.append((216, 255,   0))
+    legend.append((178, 255,   0))
+    legend.append((178, 255,   0))
+    legend.append((178, 255,   0))
+    legend.append((153, 255,   0))
+    legend.append((153, 255,   0))
+    legend.append((0, 255,   0))
+    legend.append((0, 255,   0))
+    legend.append((0, 255,   0))
+    legend.append((0, 140,  48))
+    legend.append((0, 140,  48))
     legend.append((0, 178, 255))
     legend.append((0, 178, 255))
     legend.append((0, 178, 255))
@@ -280,3 +294,182 @@ def convert_palette(palette):
                             i[1] / 255.0,
                             i[2] / 255.0))
     return new_palette
+
+
+def convert_palette2colormap(palette):
+    """Convert a palette to a trollimage Colormap, normalising colors to [0, 1].
+    """
+    from trollimage.colormap import Colormap
+    j = 0
+    n_pal = len(palette) - 1
+    values = []
+    colors = []
+
+    red = [r for (r, g, b) in palette]
+    green = [g for (r, g, b) in palette]
+    blue = [b for (r, g, b) in palette]
+
+    max_pal = max(max(red), max(blue), max(green))
+    if max_pal <= 1.0:
+        # print "palette already normalized"
+        denom = 1.0
+    else:
+        # print "palette normalized to 255"
+        denom = 255.0
+
+    for i in palette:
+        values.append((n_pal - j) / float(n_pal))
+        colors.append((i[0] / denom, i[1] / denom, i[2] / denom))
+        j = j + 1
+    # reverse order to the entries
+    values = values[::-1]
+    colors = colors[::-1]
+
+    # for i in palette:
+    #    values.append( j /  float(n_pal))
+    #    colors.append((i[0] / 255.0, i[1] / 255.0, i[2] / 255.0))
+    #    j=j+1
+
+    # attention:
+    # Colormap(values, colors) uses the second input option of Colormap
+    # values has to be a list (not a tuple) and
+    # colors has to be the corresponding list of color tuples
+
+    return Colormap(values, colors)
+
+
+class LogColors(object):
+
+    """
+    Defines colors to use with `logdata2image`
+
+    """
+
+    def __init__(self, nodata, zeros, over, breaks):
+        self.nodata = nodata
+        self.zeros = zeros
+        self.over = over
+        self.breaks = breaks
+
+    def palette(self, N=256):
+        """
+        Build a palette for logarithmic data images.
+
+        """
+
+        max_value = self.breaks[-1][0]
+
+        pal = np.zeros((N, 3), dtype=np.uint8)
+
+        b_last, rgb_last = self.breaks[0]
+        for b, rgb in self.breaks[1:]:
+            # Get a slice of the palette array for the current interval
+            p = pal[
+                np.log(b_last + 1) * N / np.log(max_value):np.log(b + 1) * N / np.log(max_value)]
+            for i in range(3):  # red, green, blue
+                p[:, i] = np.linspace(rgb_last[i], rgb[i], p.shape[0])
+            b_last = b
+            rgb_last = rgb
+
+        pal[0] = self.nodata
+        pal[1] = self.zeros
+        pal[-1] = self.over
+
+        return pal
+
+
+class TriColors(LogColors):
+
+    """
+    Use three color tones in the intervals between the elements of *breaks*.
+
+    """
+    color_tones = [((0, 0, 200), (150, 150, 255)),  # dark to light blue
+                   ((150, 150, 0), (255, 255, 8)),  # greyish to bright yellow
+                   ((230, 150, 100), (230, 0, 0))]  # orange to red
+
+    nodata = (0, 0, 0)  # black
+    # zeros = (20, 0, 20) # dark purple
+    # black - there is no need to mark zeros with another color
+    zeros = (0, 0, 0)
+    over = (255, 0, 0)  # bright red
+
+    def __init__(self, breaks):
+        breaks = [(breaks[0], TriColors.color_tones[0][0]),
+                  (breaks[1], TriColors.color_tones[0][1]),
+
+                  (breaks[1], TriColors.color_tones[1][0]),
+                  (breaks[2], TriColors.color_tones[1][1]),
+
+                  (breaks[2], TriColors.color_tones[2][0]),
+                  (breaks[3], TriColors.color_tones[2][1])]
+
+        LogColors.__init__(self, TriColors.nodata, TriColors.zeros,
+                           TriColors.over, breaks)
+
+CPP_COLORS = {'cpp_cot': TriColors([0, 3.6, 23, 700]),  # ISCCP intervals
+              'cpp_reff': TriColors([0, 10, 20, 1000])}
+
+CPP_COLORS['cot'] = CPP_COLORS['cpp_cot']
+CPP_COLORS['reff'] = CPP_COLORS['cpp_reff']
+
+
+def get_ctp_legend():
+    """
+    Get the Cloud Top Pressure color palette
+    """
+
+    legend = []
+    legend.append((0, 0, 0))     # No data
+    legend.append((255, 0, 216))  # 0: 1000-1050 hPa (=100000-105000 Pa)
+    legend.append((126, 0, 43))  # 1: 950-1000 hPa
+    legend.append((153, 20, 47))  # 2: 900-950 hPa
+    legend.append((178, 51, 0))  # 3: 850-900 hPa
+    legend.append((255, 76, 0))  # 4: 800-850 hPa
+    legend.append((255, 102, 0))  # 5: 750-800 hPa
+    legend.append((255, 164, 0))  # 6: 700-750 hPa
+    legend.append((255, 216, 0))  # 7: 650-700 hPa
+    legend.append((216, 255, 0))  # 8: 600-650 hPa
+    legend.append((178, 255, 0))  # 9: 550-600 hPa
+    legend.append((153, 255, 0))  # 10: 500-550 hPa
+    legend.append((0, 255, 0))   # 11: 450-500 hPa
+    legend.append((0, 140, 48))  # 12: 400-450 hPa
+    legend.append((0, 178, 255))  # 13: 350-400 hPa
+    legend.append((0, 216, 255))  # 14: 300-350 hPa
+    legend.append((0, 255, 255))  # 15: 250-300 hPa
+    legend.append((238, 214, 210))  # 16: 200-250 hPa
+    legend.append((239, 239, 223))  # 17: 150-200 hPa
+    legend.append((255, 255, 255))  # 18: 100-150 hPa
+    legend.append((255, 255, 255))  # 19: 50-100 hPa
+    legend.append((255, 255, 255))  # 20: 0-50 hPa  (=0-5000 Pa)
+
+    palette = convert_palette(legend)
+    return palette
+
+
+def get_reff_legend():
+    return get_log_legend('reff')
+
+
+def get_cot_legend():
+    return get_log_legend('cot')
+
+
+def get_log_legend(product_name):
+    # This is the same data as is used in logdata2image (for input data as in
+    # the calls from cppimage).
+    legend = CPP_COLORS[product_name].palette()
+    palette = convert_palette(legend)
+    return palette
+
+
+def oca_get_scenetype_legend():
+
+    # Colorize using PPS/CPP palette
+    legend = np.array([[170, 130, 255],  # purple/blue for liquid (cph == 1)
+                       [220, 200, 255],  # almost white for ice (cph == 2)
+                       [255, 200, 200]   # Redish for multi layer clouds
+                       ])
+    legend = np.vstack([np.zeros((111, 3)), legend])
+    palette = convert_palette(legend)
+    return palette
diff --git a/mpop/instruments/s2_composites.py b/mpop/instruments/s2_composites.py
new file mode 100644
index 0000000..e3b7f9e
--- /dev/null
+++ b/mpop/instruments/s2_composites.py
@@ -0,0 +1,24 @@
+from mpop.imageo.geo_image import GeoImage
+
+def s2_truecolor(self):
+	
+	self.check_channels('B02','B03','B04')
+    
+	ch1 = self['B04'].data
+	ch2 = self['B03'].data
+	ch3 = self['B02'].data
+
+	img = GeoImage((ch1, ch2, ch3),
+				self.area,
+                                 self.time_slot,
+                                 fill_value=None,
+                                 mode="RGB")
+
+	img.enhance(stretch="linear")
+	#img.enhance(stretch="histogram")
+	img.enhance(gamma=2.0)
+    
+	return img
+
+s2_truecolor.prerequisites = set(['B02', 'B03','B04'])
+msi=[s2_truecolor]
diff --git a/mpop/instruments/seviri.py b/mpop/instruments/seviri.py
index 016e78f..649a2cc 100644
--- a/mpop/instruments/seviri.py
+++ b/mpop/instruments/seviri.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2010, 2011, 2013, 2014.
+# Copyright (c) 2010, 2011, 2013, 2014, 2016.
 
 # Author(s):
 
@@ -31,13 +31,49 @@ import logging
 
 LOG = logging.getLogger(__name__)
 
-import os.path
-
 try:
     from pyorbital.astronomy import sun_zenith_angle as sza
 except ImportError:
     sza = None
 
+
+from mpop.imageo import palettes
+
+oca_palette_func = {'ll_ctp': palettes.get_ctp_legend,
+                    'ul_ctp': palettes.get_ctp_legend,
+                    'ul_cot': palettes.get_cot_legend,
+                    'll_cot': palettes.get_cot_legend,
+                    'reff': palettes.get_reff_legend,
+                    'scenetype': palettes.oca_get_scenetype_legend}
+
+
+def _arrange_log_data(arr, max_value, no_data_value):
+    """
+    Prepare logarithmic data for creating an image.
+
+    """
+    MAX_IM_VAL = 2**8 - 1
+
+    # Logarithmic data should never be negative
+    assert ((arr >= 0) + (arr == no_data_value)).all(), \
+        "Negative values encountered in cloud optical thickness"
+
+    # Confine image data values to the range [2, MAX_IM_VAL]
+    arr_log = np.log(arr + 1.)  # arr == 0 -> arr_log = 0
+    cot_im_data = arr_log * (MAX_IM_VAL - 3) / np.log(max_value + 1.) + 2.
+    cot_im_data[cot_im_data > MAX_IM_VAL] = MAX_IM_VAL
+
+    # Now that the data is adjusted, cast it to uint8 ([0, 2**8 - 1])
+    cot_im_data = cot_im_data.astype(np.uint8)
+
+    # Give no-data values a special color
+    cot_im_data[arr == no_data_value] = 0
+    # Give arr == 0 a special color
+    cot_im_data[arr == 0] = 1
+
+    return cot_im_data
+
+
 class SeviriCompositer(VisirCompositer):
 
     """This class sets up the Seviri instrument channel list.
@@ -138,7 +174,7 @@ class SeviriCompositer(VisirCompositer):
 
         r39 = self[3.9].get_reflectance(self[10.8].data,
                                         sun_zenith=None,
-                                        tb13_4=self[13.4].data,)
+                                        tb13_4=self[13.4].data)
 
         if r39 is None:
             raise RuntimeError("Couldn't derive 3.x reflectance. " +
@@ -300,11 +336,12 @@ class SeviriCompositer(VisirCompositer):
     snow.prerequisites = refl39_chan.prerequisites | set(
         [0.8, 1.63, 3.75])
 
-    def day_microphysics(self, wintertime=False):
+    def day_microphysics(self, wintertime=False, fill_value=None):
         """Make a 'Day Microphysics' RGB as suggested in the MSG interpretation guide
         (rgbpart04.ppt). It is kind of special as it requires the derivation of
         the daytime component of the mixed Terrestrial/Solar 3.9 micron
         channel. Furthermore the sun zenith angle is used.
+        For a black background specify fill_value=(0, 0, 0).
         """
 
         self.refl39_chan()
@@ -335,7 +372,7 @@ class SeviriCompositer(VisirCompositer):
                                  self.area,
                                  self.time_slot,
                                  crange=crange,
-                                 fill_value=None, mode="RGB")
+                                 fill_value=fill_value, mode="RGB")
         if wintertime:
             img.gamma((1.0, 1.5, 1.0))
         else:
@@ -345,3 +382,38 @@ class SeviriCompositer(VisirCompositer):
 
     day_microphysics.prerequisites = refl39_chan.prerequisites | set(
         [0.8, 10.8])
+
+    def oca(self, fieldname):
+        """Make an OCA cloud parameter image"""
+
+        palette = oca_palette_func[fieldname]()
+        data = getattr(getattr(self['OCA'], fieldname), 'data')
+        if fieldname in ['scenetype']:
+            data = data.astype('uint8')
+
+        elif fieldname in ['ul_ctp', 'll_ctp']:
+            data = (22. - data / 5000.).astype('Int16')
+
+        elif fieldname in ['reff']:
+            data = (data * 1000000. + 0.5).astype('uint8')
+            data.fill_value = 255
+
+        elif fieldname in ['ul_cot', 'll_cot']:
+            data = np.ma.exp(data * np.ma.log(10))
+            max_value = palettes.CPP_COLORS['cot'].breaks[-1][0]
+            data.fill_value = 255
+            no_data = 255  # FIXME!
+            data = _arrange_log_data(data.filled(), max_value, no_data)
+
+        else:
+            raise NotImplementedError(
+                "No imagery for parameter %s implemented yet...", fieldname)
+
+        img = geo_image.GeoImage(data, self.area,
+                                 self.time_slot,
+                                 fill_value=(0),
+                                 mode="P",
+                                 palette=palette)
+        return img
+
+    oca.prerequisites = set(['OCA'])
diff --git a/mpop/instruments/viirs.py b/mpop/instruments/viirs.py
index 32061f5..d738c55 100644
--- a/mpop/instruments/viirs.py
+++ b/mpop/instruments/viirs.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015.
+# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2016.
 
 # Author(s):
 
@@ -23,11 +23,13 @@
 """This modules describes the viirs instrument.
 It provides VIIRS specific methods for RGB-compositing.
 """
+import logging
+
 import numpy as np
 
 import mpop.imageo.geo_image as geo_image
 from mpop.instruments.visir import VisirCompositer
-import logging
+
 LOG = logging.getLogger(__name__)
 
 try:
@@ -37,6 +39,7 @@ except ImportError:
                 "Check the availability of the pyorbital module in your environment")
     sza = None
 
+
 # VIIRS
 # Since there is overlap between I-bands and M-bands we need to
 # specifically re-define some of the RGB composites already defined
@@ -78,13 +81,34 @@ class ViirsCompositer(VisirCompositer):
 
     overview.prerequisites = set(['M05', 'M07', 'M15'])
 
-    def overview_sun(self, stretch='linear', gamma=1.6):
-        """Make an Overview RGB image composite from VIIRS
-        channels. Sun-zenith correction is implicit for VIIRS
+    def overview_sun(self, stretch='linear', gamma=1.6, fill_value=(0, 0, 0)):
+        """Make an overview RGB image composite normalising with cosine to the
+        sun zenith angle.
         """
-        return self.overview(stretch=stretch, gamma=gamma)
+        self.check_channels('M05', 'M07', 'M15')
+
+        lonlats = self['M15'].area.get_lonlats()
+
+        red = self['M05'].sunzen_corr(self.time_slot, lonlats, limit=88.,
+                                      sunmask=95).data
+        green = self['M07'].sunzen_corr(self.time_slot, lonlats, limit=88.,
+                                        sunmask=95).data
+        blue = -self['M15'].data
 
-    overview_sun.prerequisites = overview.prerequisites
+        img = geo_image.GeoImage((red, green, blue),
+                                 self.area,
+                                 self.time_slot,
+                                 fill_value=fill_value,
+                                 mode="RGB")
+
+        if stretch:
+            img.enhance(stretch=stretch)
+        if gamma:
+            img.enhance(gamma=gamma)
+
+        return img
+
+    overview_sun.prerequisites = set(['M05', 'M07', 'M15'])
 
     def hr_overview(self):
         """Make a high resolution Overview RGB image composite 
@@ -109,7 +133,7 @@ class ViirsCompositer(VisirCompositer):
 
     hr_overview.prerequisites = set(['I01', 'I02', 'I05'])
 
-    def truecolor(self):
+    def truecolor(self, stretch='linear', gamma=2.0):
         """Make a True Color RGB image composite from
         M-bands only.
         """
@@ -125,8 +149,10 @@ class ViirsCompositer(VisirCompositer):
                                  fill_value=None,
                                  mode="RGB")
 
-        img.enhance(stretch="linear")
-        img.enhance(gamma=2.0)
+        if stretch:
+            img.enhance(stretch=stretch)
+        if gamma:
+            img.enhance(gamma=gamma)
 
         return img
 
@@ -548,7 +574,7 @@ class ViirsCompositer(VisirCompositer):
 
         a0, a1, a2, a3, a4 = (0.2228, -2.4683, 1.5867, -0.4275, -0.7768)
 
-        #X = np.maximum(self["M02"].data, self["M03"].data)/self["M04"].data
+        # X = np.maximum(self["M02"].data, self["M03"].data)/self["M04"].data
         X = self["M02"].data / self["M04"].data
         X = np.log10(X)
         chlor_a = 10 ** (a0 + a1 * X + a2 * (X ** 2) +
@@ -587,3 +613,157 @@ class ViirsCompositer(VisirCompositer):
         return img
 
     hr_cloudtop.prerequisites = set(['I04', 'I05'])
+
+    def snow_age(self):
+        """Make a Snow age RGB image composite.
+        """
+        self.check_channels('M07', 'M08', 'M09', 'M10', 'M11')
+
+        coeff = 255. / 160.
+
+        lonlats = self['M11'].area.get_lonlats()
+
+        m07 = self['M07'].sunzen_corr(
+            self.time_slot, lonlats, limit=88., sunmask=95).data * coeff
+        m08 = self['M08'].sunzen_corr(
+            self.time_slot, lonlats, limit=88., sunmask=95).data * coeff
+        m09 = self['M09'].sunzen_corr(
+            self.time_slot, lonlats, limit=88., sunmask=95).data * coeff
+        m10 = self['M10'].sunzen_corr(
+            self.time_slot, lonlats, limit=88., sunmask=95).data * coeff
+        m11 = self['M11'].sunzen_corr(
+            self.time_slot, lonlats, limit=88., sunmask=95).data * coeff
+
+        refcu = m11 - m10
+        refcu[refcu < 0] = 0
+
+        ch1 = m07 - refcu / 2. - m09 / 4.
+        ch2 = m08 + refcu / 4. + m09 / 4.
+        ch3 = m11 + m09
+
+        # Bernard Bellec snow Look-Up Tables V 1.0 (c) Meteo-France
+        # These Look-up Tables allow you to create the RGB snow product
+        # for SUOMI-NPP VIIRS Imager according to the algorithm
+        # presented at the second CSPP/IMAPP users' meeting at Eumetsat
+        # in Darmstadt on 14-16 April 2015
+        # The algorithm and the product are described in this
+        # presentation :
+        # http://www.ssec.wisc.edu/meetings/cspp/2015/Agenda%20PDF/Wednesday/Roquet_snow_product_cspp2015.pdf
+        #
+        # For further information you may contact
+        # Bernard Bellec at Bernard.Bellec at meteo.fr
+        # or
+        # Pascale Roquet at Pascale.Roquet at meteo.fr
+
+        luts = np.array([[0, 0, 0], [1, 2, 2], [3, 8, 5], [4, 12, 8], [6, 15, 10], [8, 18, 13], [9, 21, 16],
+                         [11, 24, 19], [13, 26, 21], [14, 28, 24], [
+                             16, 30, 27], [18, 32, 30], [19, 34, 32],
+                         [21, 36, 35], [22, 38, 38], [24, 40, 40], [
+                             26, 42, 43], [27, 43, 46], [29, 45, 49],
+                         [31, 47, 51], [32, 49, 54], [34, 50, 57], [
+                             36, 52, 60], [37, 54, 62], [39, 55, 65],
+                         [40, 57, 68], [42, 59, 70], [44, 60, 73], [
+                             45, 62, 76], [47, 64, 79], [49, 66, 81],
+                         [50, 67, 84], [52, 69, 87], [53, 71, 90], [
+                             55, 73, 92], [56, 75, 95], [58, 77, 98],
+                         [59, 79, 100], [61, 81, 103], [62, 83, 106], [
+                             64, 85, 109], [65, 86, 111], [67, 88, 114],
+                         [68, 90, 117], [70, 92, 119], [71, 94, 121], [
+                             73, 96, 124], [74, 98, 126], [76, 100, 129],
+                         [77, 102, 131], [79, 104, 134], [80, 106, 136], [
+                             82, 107, 139], [83, 109, 141], [85, 111, 144],
+                         [86, 113, 146], [88, 115, 149], [89, 117, 151], [
+                             91, 118, 154], [92, 120, 156], [94, 122, 159],
+                         [95, 124, 161], [97, 126, 162], [98, 128, 164], [
+                             100, 129, 166], [101, 131, 168],
+                         [103, 133, 170], [104, 135, 172], [106, 137, 173], [
+                             107, 138, 175], [109, 140, 177],
+                         [110, 142, 179], [112, 144, 181], [113, 145, 183], [
+                             114, 147, 184], [116, 149, 186],
+                         [117, 151, 188], [118, 152, 190], [120, 154, 192], [
+                             121, 156, 193], [123, 158, 194],
+                         [124, 159, 196], [125, 161, 197], [127, 163, 199], [
+                             128, 165, 200], [130, 166, 202],
+                         [131, 168, 203], [132, 170, 205], [134, 172, 206], [
+                             135, 173, 206], [136, 175, 207],
+                         [138, 177, 208], [139, 178, 209], [141, 180, 210], [
+                             142, 182, 211], [143, 184, 212],
+                         [145, 185, 213], [146, 187, 214], [148, 189, 215], [
+                             149, 191, 216], [150, 192, 217],
+                         [152, 194, 218], [153, 196, 219], [154, 198, 220], [
+                             156, 200, 220], [157, 201, 221],
+                         [159, 203, 221], [160, 205, 222], [161, 207, 223], [
+                             162, 209, 223], [163, 210, 224],
+                         [164, 212, 225], [166, 213, 225], [167, 214, 226], [
+                             168, 216, 227], [169, 217, 227],
+                         [171, 218, 228], [173, 220, 228], [174, 221, 228], [
+                             175, 222, 229], [176, 224, 229],
+                         [177, 225, 229], [178, 226, 230], [179, 227, 230], [
+                             181, 228, 230], [182, 229, 231],
+                         [183, 230, 231], [184, 231, 232], [185, 232, 232], [
+                             186, 233, 232], [187, 234, 233],
+                         [188, 235, 233], [190, 236, 233], [191, 237, 234], [
+                             192, 237, 234], [193, 238, 234],
+                         [194, 239, 235], [195, 240, 235], [196, 240, 236], [
+                             196, 241, 236], [197, 242, 236],
+                         [198, 243, 237], [199, 243, 237], [200, 244, 237], [
+                             201, 245, 238], [202, 245, 238],
+                         [203, 245, 238], [204, 246, 239], [205, 246, 239], [
+                             206, 246, 239], [207, 247, 239],
+                         [208, 247, 239], [209, 247, 239], [209, 248, 240], [
+                             210, 248, 240], [210, 248, 240],
+                         [211, 248, 240], [212, 248, 240], [212, 248, 241], [
+                             213, 248, 241], [214, 248, 241],
+                         [215, 248, 241], [216, 248, 241], [217, 248, 242], [
+                             217, 248, 242], [218, 248, 242],
+                         [219, 248, 242], [219, 248, 242], [220, 248, 243], [
+                             221, 248, 243], [221, 249, 243],
+                         [222, 249, 243], [223, 249, 243], [223, 249, 244], [
+                             223, 249, 244], [224, 249, 244],
+                         [224, 249, 244], [225, 249, 245], [225, 249, 245], [
+                             226, 249, 245], [226, 249, 245],
+                         [227, 249, 245], [227, 249, 246], [228, 249, 246], [
+                             228, 250, 246], [229, 250, 246],
+                         [229, 250, 246], [230, 250, 247], [230, 250, 247], [
+                             231, 250, 247], [231, 250, 247],
+                         [232, 250, 247], [233, 250, 248], [233, 250, 248], [
+                             233, 250, 248], [234, 250, 248],
+                         [234, 250, 248], [234, 250, 249], [235, 251, 249], [
+                             235, 251, 249], [235, 251, 249],
+                         [236, 251, 249], [236, 251, 250], [237, 251, 250], [
+                             237, 251, 250], [237, 251, 250],
+                         [238, 251, 250], [238, 251, 250], [238, 251, 250], [
+                             239, 251, 250], [239, 251, 250],
+                         [240, 251, 250], [240, 251, 250], [240, 252, 250], [
+                             241, 252, 250], [241, 252, 251],
+                         [241, 252, 251], [242, 252, 251], [242, 252, 251], [
+                             242, 252, 251], [243, 252, 251],
+                         [243, 252, 251], [244, 252, 251], [244, 252, 251], [
+                             244, 252, 251], [245, 252, 252],
+                         [245, 252, 252], [245, 253, 252], [246, 253, 252], [
+                             246, 253, 252], [247, 253, 252],
+                         [248, 253, 252], [248, 253, 252], [248, 253, 252], [
+                             249, 253, 252], [249, 253, 253],
+                         [249, 253, 253], [250, 253, 253], [250, 253, 253], [
+                             250, 253, 253], [250, 253, 253],
+                         [251, 254, 253], [251, 254, 253], [251, 254, 253], [
+                             252, 254, 253], [252, 254, 254],
+                         [252, 254, 254], [253, 254, 254], [253, 254, 254], [
+                             253, 254, 254], [253, 254, 254],
+                         [254, 254, 254], [254, 254, 254], [254, 254, 254], [254, 254, 254], [255, 255, 255]]) / 255.0
+        np.ma.clip(ch1, 0, 255, ch1)
+        np.ma.clip(ch2, 0, 255, ch2)
+        np.ma.clip(ch3, 0, 255, ch3)
+        ch1 = np.ma.array(
+            luts[:, 0][ch1.astype(np.uint8)], copy=False, mask=ch1.mask)
+        ch2 = np.ma.array(
+            luts[:, 1][ch2.astype(np.uint8)], copy=False, mask=ch2.mask)
+        ch3 = np.ma.array(
+            luts[:, 2][ch3.astype(np.uint8)], copy=False, mask=ch3.mask)
+
+        img = geo_image.GeoImage(
+            (ch1, ch2, ch3), self.area, self.time_slot, mode="RGB")
+
+        return img
+
+    snow_age.prerequisites = set(['M07', 'M08', 'M09', 'M10', 'M11'])
diff --git a/mpop/instruments/visir.py b/mpop/instruments/visir.py
index 7a673cb..91125ab 100644
--- a/mpop/instruments/visir.py
+++ b/mpop/instruments/visir.py
@@ -102,7 +102,7 @@ class VisirCompositer(Compositer):
         img.enhance(stretch="crude")
         return img
 
-    def overview(self, stretch='crude', gamma=1.6):
+    def overview(self, stretch='crude', gamma=1.6, fill_value=(0, 0, 0)):
         """Make an overview RGB image composite.
 
         +--------------------+--------------------+
@@ -126,7 +126,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB")
 
         if stretch:
@@ -139,7 +139,7 @@ class VisirCompositer(Compositer):
     overview.prerequisites = set([0.635, 0.85, 10.8])
 
     # def overview_sun(self, stretch='crude', gamma=1.6):
-    def overview_sun(self, stretch='linear', gamma=1.6):
+    def overview_sun(self, stretch='linear', gamma=1.6, fill_value=(0, 0, 0)):
         """Make an overview RGB image composite normalising with cosine to the
         sun zenith angle.
         """
@@ -156,7 +156,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((red, green, blue),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB")
 
         if stretch:
@@ -187,7 +187,7 @@ class VisirCompositer(Compositer):
 
     night_overview.prerequisites = set([3.75, 10.8, 12.0])
 
-    def natural(self, stretch=None, gamma=1.8):
+    def natural(self, stretch=None, gamma=1.8, fill_value=(0, 0, 0)):
         """Make a Natural Colors RGB image composite.
 
         +--------------------+--------------------+--------------------+
@@ -209,7 +209,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB",
                                  crange=((0, 90),
                                          (0, 90),
@@ -224,7 +224,7 @@ class VisirCompositer(Compositer):
 
     natural.prerequisites = set([0.635, 0.85, 1.63])
 
-    def airmass(self):
+    def airmass(self, fill_value=(0, 0, 0)):
         """Make an airmass RGB image composite.
 
         +--------------------+--------------------+--------------------+
@@ -246,7 +246,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB",
                                  crange=((-25, 0),
                                          (-40, 5),
@@ -319,7 +319,7 @@ class VisirCompositer(Compositer):
 
     wv_low.prerequisites = set([7.3])
 
-    def green_snow(self):
+    def green_snow(self, fill_value=(0, 0, 0)):
         """Make a Green Snow RGB image composite.
 
         +--------------------+--------------------+
@@ -343,7 +343,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB")
 
         img.enhance(stretch="crude")
@@ -353,7 +353,7 @@ class VisirCompositer(Compositer):
 
     green_snow.prerequisites = set([0.635, 1.63, 10.8])
 
-    def red_snow(self):
+    def red_snow(self, fill_value=(0, 0, 0)):
         """Make a Red Snow RGB image composite.
 
         +--------------------+--------------------+
@@ -377,7 +377,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB")
 
         img.enhance(stretch="crude")
@@ -386,7 +386,7 @@ class VisirCompositer(Compositer):
 
     red_snow.prerequisites = set([0.635, 1.63, 10.8])
 
-    def convection(self):
+    def convection(self, fill_value=(0, 0, 0)):
         """Make a Severe Convection RGB image composite.
 
         +--------------------+--------------------+--------------------+
@@ -408,7 +408,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB",
                                  crange=((-30, 0),
                                          (0, 55),
@@ -418,7 +418,7 @@ class VisirCompositer(Compositer):
 
     convection.prerequisites = set([0.635, 1.63, 3.75, 6.7, 7.3, 10.8])
 
-    def dust(self):
+    def dust(self, fill_value=(0, 0, 0)):
         """Make a Dust RGB image composite.
 
         +--------------------+--------------------+--------------------+
@@ -439,7 +439,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB",
                                  crange=((-4, 2),
                                          (0, 15),
@@ -451,7 +451,7 @@ class VisirCompositer(Compositer):
 
     dust.prerequisites = set([8.7, 10.8, 12.0])
 
-    def ash(self):
+    def ash(self, fill_value=(0, 0, 0)):
         """Make a Ash RGB image composite.
 
         +--------------------+--------------------+--------------------+
@@ -472,7 +472,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB",
                                  crange=((-4, 2),
                                          (-4, 5),
@@ -482,7 +482,7 @@ class VisirCompositer(Compositer):
 
     ash.prerequisites = set([8.7, 10.8, 12.0])
 
-    def fog(self):
+    def fog(self, fill_value=(0, 0, 0)):
         """Make a Fog RGB image composite.
 
         +--------------------+--------------------+--------------------+
@@ -503,7 +503,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB",
                                  crange=((-4, 2),
                                          (0, 6),
@@ -515,7 +515,7 @@ class VisirCompositer(Compositer):
 
     fog.prerequisites = set([8.7, 10.8, 12.0])
 
-    def night_fog(self):
+    def night_fog(self, fill_value=(0, 0, 0)):
         """Make a Night Fog RGB image composite.
 
         +--------------------+--------------------+--------------------+
@@ -537,7 +537,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB",
                                  crange=((-4, 2),
                                          (0, 6),
@@ -549,7 +549,7 @@ class VisirCompositer(Compositer):
 
     night_fog.prerequisites = set([3.75, 10.8, 12.0])
 
-    def cloudtop(self, stretch=(0.005, 0.005), gamma=None):
+    def cloudtop(self, stretch=(0.005, 0.005), gamma=None, fill_value=(0, 0, 0)):
         """Make a Cloudtop RGB image composite.
 
         +--------------------+--------------------+
@@ -573,7 +573,7 @@ class VisirCompositer(Compositer):
         img = geo_image.GeoImage((ch1, ch2, ch3),
                                  self.area,
                                  self.time_slot,
-                                 fill_value=(0, 0, 0),
+                                 fill_value=fill_value,
                                  mode="RGB")
 
         if stretch:
diff --git a/mpop/satellites/__init__.py b/mpop/satellites/__init__.py
index da13422..d0d1132 100644
--- a/mpop/satellites/__init__.py
+++ b/mpop/satellites/__init__.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2010, 2011, 2014, 2015.
+# Copyright (c) 2010, 2011, 2014, 2015, 2016.
 
 # SMHI,
 # Folkborgsvägen 1,
diff --git a/mpop/satin/aapp1b.py b/mpop/satin/aapp1b.py
index 56d050a..40f9088 100644
--- a/mpop/satin/aapp1b.py
+++ b/mpop/satin/aapp1b.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright (c) 2012, 2013, 2014, 2015 SMHI
+# Copyright (c) 2012, 2013, 2014, 2015, 2016 SMHI
 
 # Author(s):
 
@@ -167,21 +167,21 @@ def load_avhrr(satscene, options):
         import h5py
         LOGGER.info("Reading external calibration coefficients.")
         try:
-            fid = h5py.File(os.path.join(CONFIG_PATH, satscene.satname + \
+            fid = h5py.File(os.path.join(CONFIG_PATH, satscene.satname +
                                          '_calibration_data.h5'), 'r')
             calib_coeffs = {}
             for key in fid.keys():
                 date_diffs = []
                 for dat in fid[key]['datetime']:
-                    date_diffs.append(np.abs(satscene.time_slot - \
+                    date_diffs.append(np.abs(satscene.time_slot -
                                              datetime.datetime(dat[0],
                                                                dat[1],
                                                                dat[2])))
                 idx = date_diffs.index(min(date_diffs))
                 date_diff = satscene.time_slot - \
-                            datetime.datetime(fid[key]['datetime'][idx][0],
-                                              fid[key]['datetime'][idx][1],
-                                              fid[key]['datetime'][idx][2])
+                    datetime.datetime(fid[key]['datetime'][idx][0],
+                                      fid[key]['datetime'][idx][1],
+                                      fid[key]['datetime'][idx][2])
                 if date_diff.days < 0:
                     older_or_newer = "newer"
                 else:
@@ -513,10 +513,10 @@ class AAPP1b(object):
                     coeffs = calib_coeffs['ch1']
                 else:
                     coeffs = None
-                channels['1'].append( _vis_calibrate(data, 0,
-                                                     calibrate,
-                                                     pre_launch_coeffs,
-                                                     coeffs))
+                channels['1'].append(_vis_calibrate(data, 0,
+                                                    calibrate,
+                                                    pre_launch_coeffs,
+                                                    coeffs))
                 self.units['1'] = '%'
                 if calibrate == 0:
                     self.units['1'] = ''
@@ -562,7 +562,7 @@ class AAPP1b(object):
                 ch3b = _ir_calibrate(self._header[i], data, 0, calibrate)
                 channels['3B'].append(
                     np.ma.masked_array(ch3b,
-                                       np.logical_or((is3b is False) * \
+                                       np.logical_or((is3b is False) *
                                                      ch3b,
                                                      ch3b < 0.1)))
                 if calibrate == 1:
@@ -599,7 +599,7 @@ class AAPP1b(object):
         # transfer channel data to class attributes
         for ch_ in channels:
             try:
-                self.channels[ch_] = np.vstack(channels[ch_])
+                self.channels[ch_] = np.ma.vstack(channels[ch_])
             except ValueError:
                 self.channels[ch_] = None
         if "3A" in chns or "3B" in chns:
@@ -607,7 +607,7 @@ class AAPP1b(object):
         if "3A" in chns:
             self.channels['3A'].mask = self._is3b * self.channels['3A']
         if "3B" in chns:
-            self.channels['3B'].mask = np.logical_or((self._is3b is False) * \
+            self.channels['3B'].mask = np.logical_or((self._is3b is False) *
                                                      self.channels['3B'],
                                                      self.channels['3B'] < 0.1)
 
@@ -755,12 +755,12 @@ def _ir_calibrate(header, data, irchn, calib_type):
 def show(data, negate=False):
     """Show the stetched data.
     """
-    import Image as pil
+    from PIL import Image
     data = np.array((data - data.min()) * 255.0 /
                     (data.max() - data.min()), np.uint8)
     if negate:
         data = 255 - data
-    img = pil.fromarray(data)
+    img = Image.fromarray(data)
     img.show()
 
 CASES = {
@@ -781,15 +781,15 @@ if __name__ == "__main__":
     starttime = datetime.datetime(SCENE._header[0][0]["startdatayr"],
                                   1, 1, 0, 0)
     starttime += \
-            datetime.timedelta(days=int(SCENE._header[0][0]["startdatady"]) - 1,
-                               seconds=SCENE._header[0][0]["startdatatime"] / \
-                               1000.0)
+        datetime.timedelta(days=int(SCENE._header[0][0]["startdatady"]) - 1,
+                           seconds=SCENE._header[0][0]["startdatatime"] /
+                           1000.0)
     print "starttime:", starttime
     endtime = datetime.datetime(SCENE._header[-1][0]["enddatayr"], 1, 1, 0, 0)
     endtime += \
-            datetime.timedelta(days=int(SCENE._header[-1][0]["enddatady"]) - 1,
-                               seconds=SCENE._header[-1][0]["enddatatime"] / \
-                               1000.0)
+        datetime.timedelta(days=int(SCENE._header[-1][0]["enddatady"]) - 1,
+                           seconds=SCENE._header[-1][0]["enddatatime"] /
+                           1000.0)
     print "endtime:", endtime
     # print SCENE._data['hrpt'].shape
     #show(SCENE._data['hrpt'][:, :, 4].astype(np.float))
@@ -798,7 +798,7 @@ if __name__ == "__main__":
     SCENE.navigate()
     for i__ in AVHRR_CHANNEL_NAMES:
         data_ = SCENE.channels[i__]
-        print >> sys.stderr, "%-3s" % i_, \
+        print >> sys.stderr, "%-3s" % i__, \
             "%6.2f%%" % (100. * (float(np.ma.count(data_)) / data_.size)), \
             "%6.2f, %6.2f, %6.2f" % (data_.min(), data_.mean(), data_.max())
     show(SCENE.channels['2'], negate=False)
diff --git a/mpop/satin/fy3_mersi.py b/mpop/satin/fy3_mersi.py
new file mode 100644
index 0000000..bfe3799
--- /dev/null
+++ b/mpop/satin/fy3_mersi.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015 Adam.Dybbroe
+
+# Author(s):
+
+#   Adam.Dybbroe <a000680 at c20671.ad.smhi.se>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""A reader for the FY3 MERSI-1 instrument level-1 data.
+"""
+
+
+import numpy as np
+import os
+import logging
+from datetime import datetime
+import glob
+from ConfigParser import ConfigParser
+from mpop import CONFIG_PATH
+import h5py
+import pdb
+
+LOGGER = logging.getLogger('mersi-1')
+
+
+def load(satscene, *args, **kwargs):
+    """Read data from file and load it into *satscene*.
+    A possible *calibrate* keyword argument is passed on to the reader.
+    Should be 0 for off (counts), 1 for default (brightness temperatures and
+    reflectances), and 2 for radiances only.
+
+    If *use_extern_calib* keyword argument is set True, use external
+    calibration data.
+
+    """
+    del args
+
+    conf = ConfigParser()
+    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
+    options = {}
+    for option, value in conf.items(satscene.instrument_name + "-level2",
+                                    raw=True):
+        options[option] = value
+
+    if kwargs.get("filename") is not None:
+        options["full_filename"] = kwargs["filename"]
+    if kwargs.get("calibrate") is not None:
+        options["calibrate"] = kwargs["calibrate"]
+    else:
+        options["calibrate"] = True
+
+    LOGGER.info("Loading instrument '%s'", satscene.instrument_name)
+
+    try:
+        CASES[satscene.instrument_name](satscene, options)
+    except KeyError:
+        raise KeyError("Unknown instrument '%s'" % satscene.instrument_name)
+
+
+def load_mersi(satscene, options):
+    """Read the Mersi-1 hdf file"""
+
+    if "filename_1000m" not in options:
+        raise IOError("No 1km mersi-1 filename given, cannot load.")
+
+    values = {"orbit": satscene.orbit,
+              "satname": satscene.satname,
+              "instrument": satscene.instrument_name,
+              "satellite": satscene.fullname
+              }
+
+    filename_1000m = \
+        os.path.join(satscene.time_slot.strftime(options["dir"]) % values,
+                     satscene.time_slot.strftime(
+                         options["filename_1000m"])
+                     % values)
+
+    LOGGER.debug("Filename= %s", filename_1000m)
+
+    datasets = ['EV_250_Aggr.1KM_RefSB',
+                'EV_250_Aggr.1KM_Emissive',
+                'EV_1KM_RefSB']
+
+    calibrate = options['calibrate']
+
+    # Get the calibration information:
+    h5f = h5py.File(filename_1000m)
+    # The K0, K1 and K2 coefficients:
+    vis_cal_coeff = h5f['Calibration']['VIS_Cal_Coeff'][:]
+    # See also "Update of Calibration for Reflective Solar Bands of MERSI / FY-3C"
+    # http://satellite.cma.gov.cn/PortalSite/Download/FY3C/CalibrationCoefficient/Update%20of%20Calibration%20for%20Reflective%20Solar%20Bands%20of%20MERSI_20140618.doc
+
+    sv_dn_average = h5f['Calibration']['SV_DN_average'][:]
+    # Expand array over all lines (10 lines per scan):
+    sv_dn_average = np.repeat(sv_dn_average, 10, axis=1)
+
+    date_orig = h5f.attrs['DN_Normalized_LUT_UpdateDate']
+    dtobj_orig = datetime.strptime(date_orig, '%Y-%m-%d')
+    obs_beg_date = h5f.attrs["Observing Beginning Date"]
+    obs_beg_time = h5f.attrs["Observing Beginning Time"]
+    dtobj_obs = datetime.strptime(
+        obs_beg_date + obs_beg_time, '%Y-%m-%d%H:%M:%S.%f')
+    h5f.close()
+
+    # Get the days since 'launch' or since coefficients update:
+    dsl = (dtobj_obs - dtobj_orig).days
+    slopes = (vis_cal_coeff[:, 0] +
+              vis_cal_coeff[:, 1] * dsl +
+              vis_cal_coeff[:, 2] * dsl * dsl)
+    # The slopes are available for band 1-4 and 6-20.
+    # To keep consistency with the other cal-coefficients we add the IR band as
+    # well, and set the slope to 1:
+    slopes = np.concatenate((slopes[0:4], [1], slopes[4:]))
+
+    mersi_band_index = 0
+    with h5py.File(filename_1000m) as h5f:
+
+        for dset in datasets:
+            band_data = h5f['Data'][dset]
+            valid_range = band_data.attrs['valid_range']
+            LOGGER.debug("valid-range = " + str(valid_range))
+            # FIXME! There seem to be useful data outside the valid range!
+            valid_range = (0, 65535)
+            fillvalue = band_data.attrs['FillValue']
+            band_names = band_data.attrs['band_name'].split(',')
+            slope = band_data.attrs['Slope']
+            intercept = band_data.attrs['Intercept']
+
+            LOGGER.debug('band names = ' + str(band_names))
+            for (i, band) in enumerate(band_names):
+                if band not in satscene.channels_to_load:
+                    continue
+
+                LOGGER.debug("Reading channel %s, i=%d", band, i)
+
+                # Take care of the case when there is only one
+                # single band (band 5: IR) in the dataset:
+                if len(band_data.shape) == 2:
+                    data = band_data
+                else:
+                    data = band_data[i]
+
+                bandmask = np.logical_or(np.less(data, valid_range[0]),
+                                         np.greater(data, valid_range[1]))
+
+                if calibrate:
+                    data = slopes[mersi_band_index] * (
+                        data - np.array([sv_dn_average[mersi_band_index]]).transpose())
+
+                satscene[band] = np.ma.masked_array(data,
+                                                    mask=bandmask,
+                                                    copy=False)
+
+                satscene[band].info = {
+                    'var_name': 'ch' + str(band),
+                    'var_data': satscene[band].data,
+                    'var_dim_names': ('x', 'y'),
+                    '_FillValue': fillvalue,
+                    'standard_name': '',
+                    'short_name': band,
+                    'scale_factor': slope,
+                    'add_offset': intercept,
+                }
+
+                mersi_band_index = mersi_band_index + 1
+
+    satscene.info = {
+        'Antenna': 'None',
+        'Receiver': 'Unknown',
+        'Time': satscene.time_slot.strftime("%Y-%m-%d %H:%M:%S UTC"),
+        'Area_Name': "swath",
+        #'Projection': 'satproj',
+        'Platform Name': satscene.fullname,
+        'Service': '',
+        #'Columns' : satscene.channels[0].shape[1],
+        #'Lines' : satscene.channels[0].shape[0],
+        'SampleX': 1.0,
+        'SampleY': 1.0,
+        'title': 'MERSI Level 1',
+    }
+
+    # Get geolocation information
+
+
+CASES = {
+    "mersi/1": load_mersi,
+}
diff --git a/mpop/satin/fy3_virr.py b/mpop/satin/fy3_virr.py
index c673e6f..4513977 100644
--- a/mpop/satin/fy3_virr.py
+++ b/mpop/satin/fy3_virr.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright (c) 2015 Adam.Dybbroe
+# Copyright (c) 2015, 2016 Adam.Dybbroe
 
 # Author(s):
 
@@ -61,6 +61,7 @@ def load(satscene, *args, **kwargs):
     else:
         options["calibrate"] = True
 
+    LOGGER.debug("Calibrate = " + str(options["calibrate"]))
     LOGGER.info("Loading instrument '%s'", satscene.instrument_name)
 
     try:
@@ -93,12 +94,17 @@ def load_virr(satscene, options):
                 'EV_RefSB']
 
     calibrate = options['calibrate']
+    LOGGER.debug("Calibrate = " + str(calibrate))
 
     h5f = h5py.File(filename, 'r')
 
     # Get geolocation information
     lons = h5f['Longitude'][:]
     lats = h5f['Latitude'][:]
+    # Mask out unrealistic values:
+    mask = np.logical_or(lats > 90., lons > 90.)
+    lons = np.ma.masked_array(lons, mask=mask)
+    lats = np.ma.masked_array(lats, mask=mask)
     sunz = h5f['SolarZenith'][:]
     slope = h5f['SolarZenith'].attrs['Slope'][0]
     intercept = h5f['SolarZenith'].attrs['Intercept'][0]
@@ -156,6 +162,8 @@ def load_virr(satscene, options):
                     # Therefore multply wavenumber by 100 and radiances by
                     # 10^-5
                     data = rad2temp(emiss_centroid_wn[i] * 100., data * 1e-5)
+                    LOGGER.debug("IR data calibrated")
+
                 if dset in ['EV_RefSB']:
                     data = (visnir_offs[i] +
                             data * visnir_scales[i]) / np.cos(np.deg2rad(sunz))
diff --git a/mpop/satin/gac_l1b.py b/mpop/satin/gac_l1b.py
index 9c7dd74..d6313b5 100644
--- a/mpop/satin/gac_l1b.py
+++ b/mpop/satin/gac_l1b.py
@@ -22,7 +22,6 @@
 
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
 """Read a gac file.
 Reads L1b GAC data from KLM series of satellites (NOAA-15 and later) and does most of the computations.
 Format specification can be found here:
@@ -30,22 +29,23 @@ http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/c8/sec83142-1.htm
 
 """
 
-import os
 import glob
-from ConfigParser import ConfigParser
 import logging
+import os
+from ConfigParser import ConfigParser
 
 import numpy as np
+
+from mpop import CONFIG_PATH
 from pygac.gac_klm import KLMReader
 from pygac.gac_pod import PODReader
-from mpop import CONFIG_PATH
 
 LOGGER = logging.getLogger(__name__)
 
 
 def load(satscene, *args, **kwargs):
     """Read data from file and load it into *satscene*.
-    A possible *calibrate* keyword argument is passed to the AAPP reader. 
+    A possible *calibrate* keyword argument is passed to the AAPP reader.
     Should be 0 for off (counts), 1 for default (brightness temperatures and
     reflectances), and 2 for radiances only.
     """
@@ -78,20 +78,18 @@ def load_avhrr(satscene, options):
     if "filename" not in options:
         raise IOError("No filename given, cannot load.")
 
-    values = {"orbit":      satscene.orbit,
-              "satname":    satscene.satname,
-              "number":     satscene.number,
+    values = {"orbit": satscene.orbit,
+              "satname": satscene.satname,
+              "number": satscene.number,
               "instrument": satscene.instrument_name,
-              "satellite":  satscene.fullname
-              }
+              "satellite": satscene.fullname}
 
     if options["dir"] is None:
         filename = options["filename"]
     else:
-        filename = os.path.join(satscene.time_slot.strftime(options["dir"]) % values,
-                                satscene.time_slot.strftime(
-                                    options["filename"])
-                                % values)
+        filename = os.path.join(
+            satscene.time_slot.strftime(options["dir"]) % values,
+            satscene.time_slot.strftime(options["filename"]) % values)
 
         file_list = glob.glob(filename)
 
@@ -113,7 +111,6 @@ def load_avhrr(satscene, options):
         reader = PODReader
         chn_dict = AVHRR_CHANNEL_NAMES
 
-
     chns = satscene.channels_to_load & set(chn_dict.keys())
     LOGGER.info("Loading channels " + str(sorted(list(chns))))
 
@@ -139,8 +136,7 @@ def load_avhrr(satscene, options):
         satscene.area = geometry.SwathDefinition(lons=scene.lons,
                                                  lats=scene.lats)
         area_name = ("swath_" + satscene.fullname + "_" +
-                     str(satscene.time_slot) + "_"
-                     + str(scene.lats.shape))
+                     str(satscene.time_slot) + "_" + str(scene.lats.shape))
         satscene.area.area_id = area_name
         satscene.area.name = "Satellite projection"
         satscene.area_id = area_name
@@ -155,9 +151,4 @@ def load_avhrr(satscene, options):
 AVHRR3_CHANNEL_NAMES = {"1": 0, "2": 1, "3A": 2, "3B": 3, "4": 4, "5": 5}
 AVHRR_CHANNEL_NAMES = {"1": 0, "2": 1, "3": 2, "4": 3, "5": 4}
 
-CASES = {
-    "avhrr/1": load_avhrr,
-    "avhrr/2": load_avhrr,
-    "avhrr/3": load_avhrr,
-}
-
+CASES = {"avhrr/1": load_avhrr, "avhrr/2": load_avhrr, "avhrr/3": load_avhrr, }
diff --git a/mpop/satin/gribformat.py b/mpop/satin/gribformat.py
new file mode 100644
index 0000000..87f27f8
--- /dev/null
+++ b/mpop/satin/gribformat.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016 Adam.Dybbroe
+
+# Author(s):
+
+#   Adam.Dybbroe <a000680 at c20671.ad.smhi.se>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Utility functions to read GRIB messages
+"""
+
+import os
+import pygrib
+import os.path
+
+
+class Grib(object):
+
+    def __init__(self, fname):
+
+        self._abspath = os.path.abspath(fname)
+
+    @property
+    def nmsgs(self):
+        '''Number of GRIB messages in file.
+        '''
+
+        prop = 'nmsgs'
+        attr = '_{}'.format(prop)
+
+        if not hasattr(self, attr):
+            grbs = pygrib.open(self._abspath)
+            nmsgs = grbs.messages
+            grbs.close()
+
+            setattr(self, attr, nmsgs)
+
+        return getattr(self, attr)
+
+    def get(self, gmessage, key='values'):
+        '''
+        Return the value of *key* for the GRIB message identified either by
+        its message number or by its parameter name, given in 'gmessage'.
+        '''
+
+        grbs = pygrib.open(self._abspath)
+
+        if type(gmessage) == int:
+            mnbr = gmessage
+        elif type(gmessage) == str:
+            msg_found = False
+            msgnum = 1
+            while msgnum < self.nmsgs + 1:
+                if grbs[msgnum]['parameterName'] == gmessage:
+                    msg_found = True
+                    break
+                msgnum = msgnum + 1
+
+            if msg_found:
+                mnbr = msgnum
+            else:
+                print("No Grib message found with parameter name = %s" %
+                      gmessage)
+                return None
+
+        if grbs[mnbr].valid_key(key):
+
+            arr = grbs[mnbr][key]
+            grbs.close()
+            return arr
+        else:
+            grbs.close()
+            return
diff --git a/mpop/satin/hdfeos_l1b.py b/mpop/satin/hdfeos_l1b.py
index cf77183..738523b 100644
--- a/mpop/satin/hdfeos_l1b.py
+++ b/mpop/satin/hdfeos_l1b.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2010-2014.
+# Copyright (c) 2010-2014, 2016.
 
 # SMHI,
 # Folkborgsvägen 1,
@@ -38,7 +38,7 @@ from fnmatch import fnmatch
 import os.path
 from ConfigParser import ConfigParser
 import multiprocessing
-from trollsift.parser import Parser
+from trollsift.parser import Parser, globify
 
 import math
 import numpy as np
@@ -66,6 +66,15 @@ def get_filename(template, time_slot):
     return file_list[0]
 
 
+def check_filename(tmpl):
+    file_list = glob.glob(tmpl)
+    if len(file_list) > 1:
+        raise IOError("More than 1 file matching template %s", tmpl)
+    elif len(file_list) == 0:
+        raise IOError("No EOS MODIS file matching " + tmpl)
+    return file_list[0]
+
+
 class ModisReader(Reader):
 
     pformat = "hdfeos_l1b"
@@ -74,6 +83,10 @@ class ModisReader(Reader):
            "Q": 250,
            "H": 500}
 
+    inv_res = {1000: "1",
+               250: "Q",
+               500: "H"}
+
     def __init__(self, *args, **kwargs):
         Reader.__init__(self, *args, **kwargs)
         self.datafiles = {}
@@ -82,6 +95,45 @@ class ModisReader(Reader):
         self.data = None
         self.areas = {}
 
+    def get_sunsat_angles(self, resolution=1000):
+        """Get sun-satellite viewing geometry for the 1km resolution data
+        Optional arguments:
+            None
+        Return
+            sun-zenith, sun-azimuth, sat-zenith, sat-azimuth
+
+        """
+        logger.debug("generating sun-sat viewing angles at %d", resolution)
+        if self.geofile is not None:
+            coarse_resolution = 1000
+            filename = self.geofile
+        else:
+            coarse_resolution = 5000
+            logger.info("Using 5km Sun-Sat viewing geometry and interpolating")
+            filename = (self.datafiles.get(1000) or
+                        self.datafiles.get(500) or
+                        self.datafiles.get(250))
+            raise NotImplementedError("Not yet implemented...")
+
+        logger.debug("Loading sun-sat angles from file: " + str(filename)
+                     + " at resolution " + str(coarse_resolution))
+
+        eosdata = SD(str(filename))
+        hdf_names = ['SolarZenith', 'SolarAzimuth',
+                     'SensorZenith', 'SensorAzimuth']
+        local_names = ['sunz', 'sun_azi',
+                       'satz', 'sat_azi']
+        data = {}
+        for lname, dname in zip(local_names, hdf_names):
+            data[lname] = eosdata.select(dname)
+            fill_value = data[lname].attributes()["_FillValue"]
+            scale = data[lname].attributes()["scale_factor"]
+            data[lname] = np.ma.masked_equal(data[lname].get(), fill_value)
+            data[lname] = data[lname] * scale
+
+        return (data['sunz'], data['sun_azi'],
+                data['satz'], data['sat_azi'])
+
     def load(self, satscene, filename=None, *args, **kwargs):
         conf = ConfigParser()
         conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
@@ -91,8 +143,8 @@ class ModisReader(Reader):
         options["geofile"] = os.path.join(options["dir"], options["geofile"])
         options.update(kwargs)
 
-        fparser = Parser(options["filename"])
-        gparser = Parser(options["geofile"])
+        fparser = Parser(options.get("filename"))
+        gparser = Parser(options.get("geofile"))
 
         if filename is not None:
             datasets = {}
@@ -102,10 +154,12 @@ class ModisReader(Reader):
             for fname in filename:
                 if fnmatch(os.path.basename(fname), fparser.globify()):
                     metadata = fparser.parse(os.path.basename(fname))
-                    datasets.setdefault(metadata["start_time"], []).append(fname)
+                    datasets.setdefault(
+                        metadata["start_time"], []).append(fname)
                 elif fnmatch(os.path.basename(fname), gparser.globify()):
                     metadata = fparser.parse(fname)
-                    datasets.setdefault(metadata["start_time"], []).append(fname)
+                    datasets.setdefault(
+                        metadata["start_time"], []).append(fname)
 
             scenes = []
             for start_time, dataset in datasets.iteritems():
@@ -114,15 +168,19 @@ class ModisReader(Reader):
                 self.load_dataset(newscn, filename=dataset, *args, **kwargs)
                 scenes.append(newscn)
 
-            entire_scene = assemble_segments(sorted(scenes, key=lambda x: x.time_slot))
-            satscene.channels = entire_scene.channels
-            satscene.area = entire_scene.area
-            satscene.orbit = int(entire_scene.orbit)
-            satscene.info["orbit_number"] = int(entire_scene.orbit)
+            if not scenes:
+                logger.debug("Looking for files")
+                self.load_dataset(satscene, *args, **kwargs)
+            else:
+                entire_scene = assemble_segments(
+                    sorted(scenes, key=lambda x: x.time_slot))
+                satscene.channels = entire_scene.channels
+                satscene.area = entire_scene.area
+                satscene.orbit = int(entire_scene.orbit)
+                satscene.info["orbit_number"] = int(entire_scene.orbit)
         else:
             self.load_dataset(satscene, *args, **kwargs)
 
-
     def load_dataset(self, satscene, filename=None, *args, **kwargs):
         """Read data from file and load it into *satscene*.
         """
@@ -135,8 +193,8 @@ class ModisReader(Reader):
         options["geofile"] = os.path.join(options["dir"], options["geofile"])
         options.update(kwargs)
 
-        fparser = Parser(options["filename"])
-        gparser = Parser(options["geofile"])
+        fparser = Parser(options.get("filename"))
+        gparser = Parser(options.get("geofile"))
 
         if isinstance(filename, (list, set, tuple)):
             # we got the entire dataset.
@@ -154,24 +212,27 @@ class ModisReader(Reader):
             filename = options["filename"]
             resolution = self.res[os.path.basename(filename)[5]]
             self.datafiles[resolution] = filename
-        else:
+        if not self.datafiles:
             # find files according to config
+            logger.debug(
+                "Didn't get any valid file as input, looking in defined places")
             resolution = int(options["resolution"]) or 1000
 
             for res in [250, 500, 1000]:
-                datafile = os.path.join(options['dir'],
-                                        options["filename" + str(res)])
+                datafile = globify(os.path.join(options['dir'],
+                                                options["filename"]),
+                                   {'resolution': self.inv_res[res],
+                                    'start_time': satscene.time_slot})
                 try:
-                    self.datafiles[res] = get_filename(datafile,
-                                                       satscene.time_slot)
+                    self.datafiles[res] = check_filename(datafile)
                 except IOError:
                     self.datafiles[res] = None
                     logger.warning("Can't find file for resolution %s with template: %s",
                                    str(res), datafile)
 
             try:
-                self.geofile = get_filename(options["geofile"],
-                                            satscene.time_slot)
+                self.geofile = check_filename(globify(options["geofile"],
+                                                      {'start_time': satscene.time_slot}))
             except IOError:
                 self.geofile = None
                 logger.warning("Can't find geofile with template: %s",
@@ -244,7 +305,8 @@ class ModisReader(Reader):
         #    return
 
         for band_name in loaded_bands:
-            lon, lat = self.get_lonlat(satscene[band_name].resolution, satscene.time_slot, cores)
+            lon, lat = self.get_lonlat(
+                satscene[band_name].resolution, satscene.time_slot, cores)
             area = geometry.SwathDefinition(lons=lon, lats=lat)
             satscene[band_name].area = area
 
@@ -1018,21 +1080,20 @@ if __name__ == "__main__":
                  u'/data/prod/satellit/modis/lvl1/thin_MYD021KM.A2015287.0300.005.2015287050819.NRT.hdf',
                  u'/data/prod/satellit/modis/lvl1/thin_MYD021KM.A2015287.0305.005.2015287050825.NRT.hdf']
 
-
     from mpop.utils import debug_on
     debug_on()
     from mpop.satellites import PolarFactory
     from datetime import datetime
     time_slot = datetime(2015, 10, 14, 2, 55)
     orbit = "18181"
-    global_data = PolarFactory.create_scene("EARSEOS-Aqua", "", "modis", time_slot, orbit)
+    global_data = PolarFactory.create_scene(
+        "EARSEOS-Aqua", "", "modis", time_slot, orbit)
 
     global_data.load([3.75, 0.555, 0.551, 7.3, 1.63, 10.8, 0.488, 12.0, 0.85, 0.469, 0.748, 0.443, 0.645, 6.7, 0.635,
                       8.7, 0.412], filename=filenames)
 
-
     #global_data.channels_to_load = set(['31'])
     #reader = ModisReader(global_data)
     #reader.load(global_data, filename=filenames)
     print global_data
-    #global_data[10.8].show()
\ No newline at end of file
+    # global_data[10.8].show()
diff --git a/mpop/satin/helper_functions.py b/mpop/satin/helper_functions.py
index ec2d647..c92da66 100644
--- a/mpop/satin/helper_functions.py
+++ b/mpop/satin/helper_functions.py
@@ -61,12 +61,15 @@ def area_def_names_to_extent(area_def_names, proj4_str,
         except AttributeError:
             boundaries = name.get_boundary_lonlats()
 
-        if (all(boundaries[0].side1 > 1e20) or
-                all(boundaries[0].side2 > 1e20) or
-                all(boundaries[0].side3 > 1e20) or
-                all(boundaries[0].side4 > 1e20)):
-            maximum_extent = list(default_extent)
-            continue
+        if (any(boundaries[0].side1 > 1e20) or
+                any(boundaries[0].side2 > 1e20) or
+                any(boundaries[0].side3 > 1e20) or
+                any(boundaries[0].side4 > 1e20)):
+            if default_extent:
+                maximum_extent = list(default_extent)
+                continue
+            else:
+                return None
 
         lon_sides = (boundaries[0].side1, boundaries[0].side2,
                      boundaries[0].side3, boundaries[0].side4)
@@ -76,6 +79,8 @@ def area_def_names_to_extent(area_def_names, proj4_str,
         maximum_extent = boundaries_to_extent(proj4_str, maximum_extent,
                                               default_extent,
                                               lon_sides, lat_sides)
+        if not maximum_extent:
+            return None
 
     maximum_extent[0] -= 10000
     maximum_extent[1] -= 10000
@@ -104,6 +109,11 @@ def boundaries_to_extent(proj4_str, maximum_extent, default_extent,
     # replace invalid values with NaN
     x_dir[np.abs(x_dir) > 1e20] = np.nan
     y_dir[np.abs(y_dir) > 1e20] = np.nan
+    
+    # return None when no default specified
+    if not default_extent:
+        if any(np.isnan(x_dir)) or any(np.isnan(y_dir)):
+            return None
 
     # Get the maximum needed extent from different corners.
     extent = [np.nanmin(x_dir),
diff --git a/mpop/satin/hsaf_h03.py b/mpop/satin/hsaf_h03.py
new file mode 100644
index 0000000..7243a5b
--- /dev/null
+++ b/mpop/satin/hsaf_h03.py
@@ -0,0 +1,226 @@
+"""
+Reader for EUMETSAT's Hydrology SAF (HSAF) h03 product
+HSAF website http://hsaf.meteoam.it
+h03 product is precipitation rate at the ground 
+by GEO(MSG)/Infrared supported by LEO/Microwave
+http://hsaf.meteoam.it/precipitation.php?tab=3
+
+After registration the data is available from 
+ftp://ftphsaf.meteoam.it/h03
+
+possible accepted formats for this reader are:
+* grib as provided by HSAF
+* netCDF (grib file converted with cdo)
+
+- Initial version: 
+  2015-07-23 Ulrich Hamann (MeteoSwiss)
+"""
+
+from ConfigParser import ConfigParser
+from mpop import CONFIG_PATH
+import os
+import numpy.ma as ma
+from glob import glob
+import datetime
+
+def load(satscene, **kargs):
+    """Reader for EUMETSATs Hydrology SAF (HSAF) h03 product
+    h03 product is precipitation rate at the ground 
+    by GEO(MSG)/Infrared supported by LEO/Microwave
+    http://hsaf.meteoam.it/precipitation.php?tab=3
+    """
+
+    # Read config file content
+    conf = ConfigParser()
+    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
+    values = {"orbit": satscene.orbit,
+    "satname": satscene.satname,
+    "number": satscene.number,
+    "instrument": satscene.instrument_name,
+    "satellite": satscene.fullname
+    }
+
+    # end of scan time 12min after start 
+    end_time = satscene.time_slot + datetime.timedelta(minutes=12)
+
+    filepath    = end_time.strftime(conf.get("seviri-level2", "dir",raw=True))
+    filepattern = end_time.strftime(conf.get("seviri-level2", "filename",raw=True)) % values
+    filename = os.path.join( filepath, filepattern)
+
+    print "... search for file: ", filename
+    filenames=glob(str(filename))
+    if len(filenames) == 0:
+        print "*** Error, no file found"
+        quit()
+    elif len(filenames) > 1:
+        print "*** Warning, more than 1 datafile found: "
+        for filename in filenames:
+            print "    ", filename
+        
+    # possible formats: h03_20150513_1557_rom.grb.gz, h03_20150513_1612_rom.grb, h03_20150513_1612_rom.nc
+    fileformats = [filename.split(".")[-1] for filename in filenames]
+    # try to find grb file 
+
+    if 'grb' in fileformats:
+        # read grib 
+        data, fill_value, units, long_name = read_h03_grib(filenames[fileformats.index('grb')])
+    elif 'nc' in fileformats:
+        # read netCDF
+        data, fill_value, units, long_name = read_h03_netCDF(filenames[fileformats.index('nc')])
+    elif 'gz' in fileformats:
+        # unzip 
+        from subprocess import call
+        infile = filenames[fileformats.index('gz')]
+        outfile = infile[:-3]
+        print "    unizp ", infile 
+        # gunzip -c h03_20150513_1557_rom.grb.gz > h03_20150513_1557_rom.grb
+        # call("/bin/gunzip "+ infile                +" 2>&1", shell=True) # don't keep gz file 
+        call("/bin/gunzip -c "+ infile+" > "+ outfile  +" 2>&1", shell=True) # keep gz file 
+        # check format of gunziped file
+        if outfile.split(".")[-1] == 'grb':
+            data, fill_value, units, long_name = read_h03_grib(outfile)
+        elif outfile.split(".")[-1] == 'nc':
+            data, fill_value, units, long_name = read_h03_netCDF(outfile)
+
+    if units == "kg m**-2 s**-1" or units == "kg m-2s-1":
+        data *= 3600 
+        units = "kg m-2 h-1"
+
+    satscene['h03'] = data
+    satscene['h03'].fill_value = fill_value
+    satscene['h03'].units      = units
+    satscene['h03'].long_name  = long_name
+    satscene['h03'].product_name = 'h03'
+
+    # personal communication with help desk
+    # Each H03 grib file contains precipitation data of a 900x1900 pixel sub-area of the SEVIRI full disk area (3712x3712 pixels). 
+    # The first pixel of H03 (pixel (1,1)) grib file corresponds to Seviri pixel (1095,85) if the Seviri pixel (1,1) is in the North-East. 
+    # I can confirm that only the prime satellite is used (position subsatellite longitude 0 degree East).
+    # For the future we are thinking to disseminate the h03 outputs already corrected in parallax.
+
+    # conversion of above information to correct AreaDefinition 
+    # full_disk = get_area_def("SeviriDiskFull")
+    # from mpop.projector import get_area_def
+    # import numpy as np
+    # np.array(area_def.get_proj_coords(data_slice=(85+900,1095     ))) - 3000.40316582 / 2.
+    #    array([-2284807.01076965,  2611850.9558437 ])
+    # np.array(area_def.get_proj_coords(data_slice=(85    ,1095+1900))) + 3000.40316582 / 2.
+    #                                        array([ 3418959.40744847,  5315214.20824482])
+    # or 
+    # aex = full_disk.get_area_extent_for_subsets(985,1095,85,2995)
+
+    proj = {'proj': 'geos', 'a': '6378169.0', 'b': '6356583.8', 'h': '35785831.0', 'lon_0': '0.0'}
+    aex =      (-2284807.01076965, 2611850.9558437,  3418959.40744847,  5315214.20824482)
+
+    from pyresample.geometry import AreaDefinition
+    satscene.area = AreaDefinition("hsaf",
+                                   "hsaf",
+                                   "geos0",
+                                   proj,
+                                   1900,
+                                   900,
+                                   aex)
+
+def read_h03_grib(filename):
+
+    try:
+        import pygrib
+    except ImportError:
+        print "... module pygrib needs to be installed"
+        quit()
+
+    # see http://pygrib.googlecode.com/svn/trunk/docs/pygrib-module.html
+    print("... read data from %s" % str(filename))
+
+    grbs = pygrib.open(filename)
+
+    #print(grbs)
+
+    #print 'inventory'
+    #for grb in grbs:
+    #    print(grb)
+    #print 'end inventory'
+
+    long_name  = 'Instantaneous rain rate'
+    units      = 'kg m**-2 s**-1'
+    _FillValue = 0.0 
+    grb = grbs.select(name=long_name)[0]
+    # print(grb)
+    data = ma.asarray(grb.values)
+    data.mask = (data == 0.0) 
+
+    print '    fill_value: ', 0 
+    print '    units:      ', units
+    print '    long_name:  ', long_name 
+    print '    datatype:   ', type(data)
+    print '    shape:      ', data.shape
+    print '    min/max:    ', data.min(), data.max()
+
+    return data, _FillValue, units, long_name 
+
+
+def read_h03_netCDF(filename):
+
+    try:
+        from netCDF4 import Dataset
+    except ImportError:
+        print "... module netCDF4 needs to be installed"
+        quit()
+
+    print("... read data from %s" % str(filename))
+
+    # Load data from netCDF file
+    # see also http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
+    ds = Dataset(filename, 'r')
+
+    if 'irrate' in ds.variables:
+        print '    found variable irrate'
+        var_name='irrate'            # converted with: cdo -f nc4c -z zip copy infile outfile
+    elif 'IRRATE_P30_GSV0' in ds.variables:
+        print '    found variable IRRATE_P30_GSV0'
+        var_name='IRRATE_P30_GSV0'   # converted with: ncl_convert2nc h03_20150529_0827_rom.grb -e grb -nc4c -cl 9
+        #variables:
+        #    float IRRATE_P30_GSV0(ygrid_0, xgrid_0) ;
+        #            IRRATE_P30_GSV0:initial_time = "05/29/2015 (08:27)" ;
+        #            IRRATE_P30_GSV0:parameter_template_discipline_category_number = 30, 3, 1, 1 ;
+        #            IRRATE_P30_GSV0:parameter_discipline_and_category = "Space products, Quantitative products" ;
+        #            IRRATE_P30_GSV0:grid_type = "Space view perspective or orthographic" ;
+        #            IRRATE_P30_GSV0:_FillValue = 1.e+20f ;
+        #            IRRATE_P30_GSV0:units = "kg m-2s-1" ;
+        #            IRRATE_P30_GSV0:long_name = "Instantaneous rain rate" ;
+        #            IRRATE_P30_GSV0:production_status = "Operational test products" ;
+        #            IRRATE_P30_GSV0:center = "Rome (RSMC)" ;
+        print '*** Error, does not work for unknown reason'
+        print '    data.mask = (data == _FillValue) | (data == 0.0) produce error'
+        quit()
+
+    #print type(ds.variables[var_name])
+    #print dir(ds.variables[var_name])
+  
+    _FillValue = ds.variables[var_name]._FillValue
+    # or fill_value = ds.variables[var_name].getncattr('_FillValue')
+    units      = ds.variables[var_name].units
+    long_name  = ds.variables[var_name].long_name 
+
+    # Read variable corresponding to channel name
+    data = ma.asarray(ds.variables[var_name])
+
+    print '    fill_value: ', ds.variables[var_name]._FillValue 
+    print '    units:      ', ds.variables[var_name].units
+    print '    long_name:  ', ds.variables[var_name].long_name 
+    print '    datatype:   ', ds.variables[var_name].datatype
+    print '    shape:      ', data.shape
+    print '    min/max:    ', data.min(), data.max()
+
+    if len(data.shape) == 3:
+        if data.shape[0] == 1:
+            print "   reduce to 2 dimensions (skip time dimension)"
+            data = ma.asarray(ds.variables[var_name][0,:,:])
+        else:
+            print "*** Error, unknown netCDF file format in h03_nc.py"
+            print "    probably more time steps in one file (not implemented yet)"
+            quit()
+
+    data.mask = (data == _FillValue) | (data == 0.0) 
+
+    return data, _FillValue, units, long_name 
diff --git a/mpop/satin/mipp_xrit.py b/mpop/satin/mipp_xrit.py
index 2832c92..ef02046 100644
--- a/mpop/satin/mipp_xrit.py
+++ b/mpop/satin/mipp_xrit.py
@@ -31,15 +31,15 @@
 """Interface to Eumetcast level 1.5 HRIT/LRIT format. Uses the MIPP reader.
 """
 import ConfigParser
+import fnmatch
+import logging
 import os
-from pyproj import Proj
 
-from mipp import xrit
-from mipp import CalibrationError, ReaderError
+from pyproj import Proj
 
+from mipp import CalibrationError, ReaderError, xrit
 from mpop import CONFIG_PATH
-import logging
-
+from mpop.plugin_base import Reader
 from mpop.satin.helper_functions import area_def_names_to_extent
 
 LOGGER = logging.getLogger(__name__)
@@ -54,8 +54,6 @@ try:
 except ImportError:
     LOGGER.warning("pyresample missing. Can only work in satellite projection")
 
-from mpop.plugin_base import Reader
-
 
 class XritReader(Reader):
 
@@ -73,8 +71,8 @@ def load(satscene, calibrate=True, area_extent=None, area_def_names=None,
     argument is passed to mipp (should be 0 for off, 1 for default, and 2 for
     radiances only).
     """
-    del kwargs
-    conf = ConfigParser.ConfigParser()
+
+    conf = ConfigParser.RawConfigParser()
     conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
     options = {}
     for option, value in conf.items(satscene.instrument_name + "-level2"):
@@ -83,22 +81,24 @@ def load(satscene, calibrate=True, area_extent=None, area_def_names=None,
     for section in conf.sections():
         if(section.startswith(satscene.instrument_name) and
            not (section == "satellite") and
-           not section[:-1].endswith("-level") and
+           # not section[:-1].endswith("-level") and
            not section.endswith("-granules")):
-            options[section] = conf.items(section)
+            options[section] = dict(conf.items(section))
+
+    filenames = kwargs.get('filename')
 
     CASES.get(satscene.instrument_name, load_generic)(satscene,
                                                       options,
                                                       calibrate,
                                                       area_extent,
-                                                      area_def_names)
+                                                      area_def_names,
+                                                      filenames)
 
 
 def load_generic(satscene, options, calibrate=True, area_extent=None,
-                 area_def_names=None):
+                 area_def_names=None, filenames=None):
     """Read imager data from file and load it into *satscene*.
     """
-    del options
 
     os.environ["PPP_CONFIG_DIR"] = CONFIG_PATH
 
@@ -117,6 +117,11 @@ def load_generic(satscene, options, calibrate=True, area_extent=None,
 
     from_area = False
 
+    if satscene.end_time is not None:
+        time_slot = satscene.time_slot, satscene.end_time
+    else:
+        time_slot = satscene.time_slot
+
     if area_extent is None and satscene.area is not None:
         if not satscene.area_def:
             satscene.area = get_area_def(satscene.area_id)
@@ -126,10 +131,56 @@ def load_generic(satscene, options, calibrate=True, area_extent=None,
     area_converted_to_extent = False
 
     for chn in satscene.channels_to_load:
+        use_filenames = False
+        # Sort out filenames
+        if filenames is not None:
+            for section in options.keys():
+                if section.endswith('-level1'):
+                    break
+            pattern_pro = eval(options[section].get('filename_pro'))
+            pattern_epi = eval(options[section].get('filename_epi'))
+            pattern = eval(options[section].get('filename'))
+
+            epilogue = None
+            prologue = None
+            image_files = []
+
+            if pattern_epi is not None:
+                glob_epi = satscene.time_slot.strftime(
+                    pattern_epi) % ({'segment': "EPI".ljust(9, '_')})
+
+            if pattern_pro is not None:
+                glob_pro = satscene.time_slot.strftime(
+                    pattern_pro) % ({'segment': "PRO".ljust(9, '_')})
+
+            glob_img = satscene.time_slot.strftime(
+                pattern) % ({'segment': "*", 'channel': chn + '*'})
+
+            for filename in filenames:
+                if fnmatch.fnmatch(os.path.basename(filename), glob_img):
+                    image_files.append(filename)
+                elif fnmatch.fnmatch(os.path.basename(filename), glob_pro):
+                    prologue = filename
+                elif fnmatch.fnmatch(os.path.basename(filename), glob_epi):
+                    epilogue = filename
+            if len(image_files) == 0 and prologue is None and epilogue is None:
+                use_filenames = False
+            else:
+                use_filenames = True
+
         if from_area:
             try:
-                metadata = xrit.sat.load(satscene.fullname, satscene.time_slot,
-                                         chn, only_metadata=True)
+                if use_filenames:
+                    metadata = xrit.sat.load_files(prologue,
+                                                   image_files,
+                                                   epilogue,
+                                                   platform_name=satscene.fullname,
+                                                   only_metadata=True)
+                else:
+                    metadata = xrit.sat.load(satscene.fullname,
+                                             time_slot,
+                                             chn,
+                                             only_metadata=True)
                 if(satscene.area_def.proj_dict["proj"] != "geos" or
                    float(satscene.area_def.proj_dict["lon_0"]) !=
                    metadata.sublon):
@@ -144,8 +195,17 @@ def load_generic(satscene, options, calibrate=True, area_extent=None,
         # Convert area definitions to maximal area_extent
         if not area_converted_to_extent and area_def_names is not None:
             try:
-                metadata = xrit.sat.load(satscene.fullname, satscene.time_slot,
-                                         chn, only_metadata=True)
+                if use_filenames:
+                    metadata = xrit.sat.load_files(prologue,
+                                                   image_files,
+                                                   epilogue,
+                                                   platform_name=satscene.fullname,
+                                                   only_metadata=True)
+                else:
+                    metadata = xrit.sat.load(satscene.fullname,
+                                             time_slot,
+                                             chn,
+                                             only_metadata=True)
             except ReaderError as err:
                 LOGGER.warning(str(err))
                 continue
@@ -159,16 +219,28 @@ def load_generic(satscene, options, calibrate=True, area_extent=None,
             # lon0=0.0), that is, do not pass default_extent=area_extent
             else:
                 area_extent = area_def_names_to_extent(area_def_names,
-                                                       metadata.proj4_params)
+                                                       metadata.proj4_params,
+                                                       default_extent=None)
+
+            if area_extent is None:
+                LOGGER.info('Could not derive area_extent from area_def_names')
 
             area_converted_to_extent = True
 
         try:
-            image = xrit.sat.load(satscene.fullname,
-                                  satscene.time_slot,
-                                  chn,
-                                  mask=True,
-                                  calibrate=calibrate)
+            if use_filenames:
+                image = xrit.sat.load_files(prologue,
+                                            image_files,
+                                            epilogue,
+                                            platform_name=satscene.fullname,
+                                            mask=True,
+                                            calibrate=calibrate)
+            else:
+                image = xrit.sat.load(satscene.fullname,
+                                      time_slot,
+                                      chn,
+                                      mask=True,
+                                      calibrate=calibrate)
             if area_extent:
                 metadata, data = image(area_extent)
             else:
@@ -176,11 +248,19 @@ def load_generic(satscene, options, calibrate=True, area_extent=None,
         except CalibrationError:
             LOGGER.warning(
                 "Loading non calibrated data since calibration failed.")
-            image = xrit.sat.load(satscene.fullname,
-                                  satscene.time_slot,
-                                  chn,
-                                  mask=True,
-                                  calibrate=False)
+            if use_filenames:
+                image = xrit.sat.load_files(prologue,
+                                            image_files,
+                                            epilogue,
+                                            platform_name=satscene.fullname,
+                                            mask=True,
+                                            calibrate=False)
+            else:
+                image = xrit.sat.load(satscene.fullname,
+                                      time_slot,
+                                      chn,
+                                      mask=True,
+                                      calibrate=False)
             if area_extent:
                 metadata, data = image(area_extent)
             else:
@@ -194,6 +274,7 @@ def load_generic(satscene, options, calibrate=True, area_extent=None,
         satscene[chn] = data
 
         satscene[chn].info['units'] = metadata.calibration_unit
+        satscene[chn].info['sublon'] = metadata.sublon
         satscene[chn].info['satname'] = satscene.satname
         satscene[chn].info['satnumber'] = satscene.number
         satscene[chn].info['instrument_name'] = satscene.instrument_name
diff --git a/mpop/satin/mpef_oca.py b/mpop/satin/mpef_oca.py
new file mode 100644
index 0000000..95381e9
--- /dev/null
+++ b/mpop/satin/mpef_oca.py
@@ -0,0 +1,358 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016 Adam.Dybbroe
+
+# Author(s):
+
+#   Adam.Dybbroe <a000680 at c20671.ad.smhi.se>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""A reader for the EUMETSAT MPEF OCA cloud parameters. Data are segmented LRIT
+encoded Grib messages
+
+"""
+
+import tempfile
+import os.path
+from ConfigParser import ConfigParser
+from mpop.satin.gribformat import Grib
+import mpop.channel
+from mpop import CONFIG_PATH
+from mpop.plugin_base import Reader
+from trollsift import parser
+from glob import glob
+import pyresample as pr
+import numpy as np
+
+import logging
+LOG = logging.getLogger(__name__)
+
# Directory with the pytroll/mpop configuration files; falls back to the
# current directory when PPP_CONFIG_DIR is not set in the environment.
CFG_DIR = os.environ.get('PPP_CONFIG_DIR', './')
AREA_DEF_FILE = os.path.join(CFG_DIR, "areas.def")
if not os.path.exists(AREA_DEF_FILE):
    raise IOError('Config file %s does not exist!' % AREA_DEF_FILE)


# trollsift pattern matching the LRIT segment file names of the MPEF OCA
# product (platform name, segment id, nominal time, compression flag).
LRIT_PATTERN = "L-000-{platform_name:_<5s}_-MPEF________-OCAE_____-{segment:_<9s}-{nominal_time:%Y%m%d%H%M}-{compressed:_<2s}"


# GRIB 'Pixel scene type' code -> human readable cloud layering label.
SCENE_TYPE_LAYERS = {111: 'Single Layer Water Cloud',
                     112: 'Single Layer Ice Cloud',
                     113: 'Multi Layer Cloud'}

# Metadata for each GRIB parameter of the OCA product.  Each dict is keyed
# by the GRIB parameter number (as a string) mapped to the parameter's long
# name, with optional 'units' and 'abbrev' (used as standard_name) entries.
OCA_FIELDS = [{'Pixel scene type': 'Scene type'},
              {'24': 'Measurement Cost',
               'abbrev': 'JM', 'units': ''},
              {'25': 'Upper Layer Cloud Optical Thickness', 'units': '',
               'abbrev': 'ULCOT'},
              {'26': 'Upper Layer Cloud Top Pressure', 'units': 'Pa',
               'abbrev': 'ULCTP'},
              {'27': 'Upper Layer Cloud Effective Radius', 'units': 'm',
               'abbrev': 'ULCRE'},
              {'28': 'Error in Upper Layer Cloud Optical Thickness', 'units': '',
               'abbrev': 'ERR-ULCOT'},
              {'29': 'Error in Upper Layer Cloud Top Pressure', 'units': 'Pa',
               'abbrev': 'ERR-ULCTP'},
              {'30': 'Error in Upper Layer Cloud Effective Radius', 'units': 'm',
               'abbrev': 'ERR-ULCRE'},
              {'31': 'Lower Layer Cloud Optical Thickness',
                  'units': '', 'abbrev': 'LLCOT'},
              {'32': 'Lower Layer Cloud Top Pressure',
                  'units': 'Pa', 'abbrev': 'LLCTP'},
              {'33': 'Error in Lower Layer Cloud Optical Thickness',
                  'units': '', 'abbrev': 'ERR-LLCOT'},
              {'34': 'Error in Lower Layer Cloud Top Pressure', 'units': 'Pa',
               'abbrev': 'ERR-LLCTP'}]

# OCAData attribute name -> (GRIB parameter number, GRIB number of the
# corresponding error field or None when the parameter has no error field).
FIELDNAMES = {'scenetype': ('Pixel scene type', None),
              'cost': ('24', None),
              'ul_cot': ('25', '28'),
              'ul_ctp': ('26', '29'),
              'reff': ('27', '30'),
              'll_cot': ('31', '33'),
              'll_ctp': ('32', '34')}
+
+
class OCAField(object):

    """Container for one OCA parameter and its metadata.

    Holds the data array, an optional error array and CF-style
    attributes (units, long_name, standard_name) plus a free-form
    ``info`` dictionary that is filled in for the netCDF writer.
    """

    def __init__(self, units=None, long_name='', standard_name=''):
        # CF attributes describing the field
        self.units, self.long_name = units, long_name
        self.standard_name = standard_name
        # data and error arrays are assigned later by the reader
        self.data, self.error = None, None
        # extra attributes attached when the field is written out
        self.info = {}
+
+
class OCAData(mpop.channel.GenericChannel):

    """The OCA scene data.

    Holds one :class:`OCAField` per OCA parameter (scene type, cost and
    the upper/lower layer cloud parameters) read from the segmented
    LRIT encoded GRIB product.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self)
        self.name = "OCA"
        self.mda = {}
        self._keys = []
        self._refs = {}

        # Input LRIT segment files and the (possibly temporary) GRIB file
        # they get concatenated into; _store_grib decides whether that GRIB
        # file is kept on disk after reading.
        self._lritfiles = None
        self._gribfilename = None
        self._store_grib = False

        # Nominal pixel resolution in meters.
        self.resolution = 3000

        # One field object per OCA parameter; filled by readgrib().
        self.scenetype = OCAField()
        self.cost = OCAField()
        self.ul_cot = OCAField()
        self.ll_cot = OCAField()
        self.ul_ctp = OCAField()
        self.ll_ctp = OCAField()
        self.reff = OCAField()
        self._projectables = []
        for field in FIELDNAMES.keys():
            self._projectables.append(field)

        self.timeslot = None
        # Full disk Meteosat-9 area as defined in the areas.def file.
        self.area_def = pr.utils.load_area(AREA_DEF_FILE, 'met09globeFull')
        self.shape = None

    def readgrib(self):
        """Read the data from the GRIB file *self._gribfilename*.

        Fills data, error and metadata of every field listed in
        FIELDNAMES.  Each array is flipped in both dimensions to match
        the expected image orientation.
        """

        oca = Grib(self._gribfilename)
        self.scenetype.data = oca.get('Pixel scene type')[::-1, ::-1]
        self.scenetype.long_name = OCA_FIELDS[0]['Pixel scene type']

        # NOTE(review): 'scenetype' is also a key of FIELDNAMES, so its
        # data is read a second time in this loop - redundant but harmless.
        for field in FIELDNAMES.keys():

            setattr(getattr(self, field), 'data', oca.get(
                FIELDNAMES[field][0])[::-1, ::-1])
            # Find the OCA_FIELDS entry keyed by this GRIB parameter number.
            param = [s for s in OCA_FIELDS if FIELDNAMES[field][0] in s][0]
            if 'units' in param:
                setattr(getattr(self, field), 'units', param['units'])
            if 'abbrev' in param:
                setattr(getattr(self, field), 'standard_name', param['abbrev'])
            setattr(getattr(self, field), 'long_name',
                    param[FIELDNAMES[field][0]])
            # Second tuple entry is the GRIB number of the error field
            # (or None when the parameter has no error field).
            param_name = FIELDNAMES[field][1]
            if param_name:
                setattr(
                    getattr(self, field), 'error', oca.get(param_name)[::-1, ::-1])

        # Remove the temporary GRIB file unless the caller asked to keep it.
        if not self._store_grib:
            os.remove(self._gribfilename)

    def read_from_lrit(self, filenames, gribfilename=None):
        """Read and concatenate the LRIT segments.

        The GRIB payload of each (non prologue) segment starts at byte
        offset 103; the payloads are concatenated in segment order and
        written to *gribfilename* (or a temporary file) before being
        parsed by readgrib().
        """

        self._lritfiles = filenames

        if len(filenames) == 0:
            print("No files provided!")
            return

        if gribfilename:
            self._store_grib = True
            self._gribfilename = gribfilename
        else:
            self._store_grib = False
            # NOTE(review): tempfile.mktemp is race-prone; a
            # NamedTemporaryFile would be safer - left unchanged here.
            self._gribfilename = tempfile.mktemp(suffix='.grb')

        p__ = parser.Parser(LRIT_PATTERN)

        bstr = {}
        nsegments = 0
        for lritfile in self._lritfiles:
            # Prologue files carry no image data - skip them.
            if os.path.basename(lritfile).find('PRO') > 0:
                print("PRO file... %s: Skip it..." % lritfile)
                continue

            res = p__.parse(os.path.basename(lritfile))
            segm = int(res['segment'].strip('_'))
            if not self.timeslot:
                self.timeslot = res['nominal_time']
            LOG.debug("Segment = %d", segm)
            nsegments = nsegments + 1

            with open(lritfile) as fpt:
                # Skip the 103 byte LRIT header to reach the GRIB payload.
                fpt.seek(103)
                bstr[segm] = fpt.read()

        # Concatenate the payloads in segment order.
        # NOTE(review): assumes segments 1..nsegments are all present;
        # a missing segment number would raise KeyError here - confirm.
        fstr = bstr[1]
        for idx in range(2, nsegments + 1):
            fstr = fstr + bstr[idx]

        with open(self._gribfilename, 'wb') as fpt:
            fpt.write(fstr)

        self.readgrib()

    def project(self, coverage):
        """Project the data.

        Return a copy of this channel with every projectable field
        remapped onto ``coverage.out_area`` and its ``info`` dictionary
        prepared for the netCDF writer (a leading time dimension is
        added to the data).
        """
        LOG.debug("Projecting channel %s...", (self.name))
        import copy
        res = copy.copy(self)

        res.name = self.name
        res.resolution = self.resolution
        res.filled = True
        res.area = coverage.out_area
        resolution_str_x = str(int(res.area.pixel_size_x)) + 'm'
        resolution_str_y = str(int(res.area.pixel_size_y)) + 'm'

        # Index of the time dimension prepended to the projected data.
        time_axis = 0

        # Project the data
        for var in self._projectables:
            LOG.info("Projecting " + str(var))
            res.__dict__[var] = copy.copy(self.__dict__[var])
            data = coverage.project_array(self.__dict__[var].data)
            valid_min = np.min(data)
            valid_max = np.max(data)

            res.__dict__[var].data = data
            res.__dict__[var].info['var_name'] = var
            res.__dict__[var].info[
                'var_data'] = np.ma.expand_dims(data, time_axis)

            # Dimension names carry the pixel size, e.g. 'y3000m'.
            dim_names = ['y' + resolution_str_y,
                         'x' + resolution_str_x]
            dim_names.insert(0, 'time')
            res.__dict__[var].info['var_dim_names'] = dim_names
            res.__dict__[var].info['long_name'] = res.__dict__[var].long_name
            res.__dict__[var].info[
                'standard_name'] = res.__dict__[var].standard_name
            res.__dict__[var].info['valid_range'] = np.array(
                [valid_min, valid_max])
            # res.__dict__[var].info['resolution'] = res.resolution

        return res

    def is_loaded(self):
        """Tells if the channel contains loaded data.

        Always True here: the fields are filled when the reader creates
        the channel object.
        """
        return True
+
+
class OCAReader(Reader):

    """mpop plugin reader for the EUMETSAT MPEF OCA cloud product."""

    pformat = "mpef_oca"

    def load(self, satscene, *args, **kwargs):
        """Read data from files and load it into *satscene*.

        The LRIT segment files may be passed via the ``filenames``
        keyword argument; otherwise they are located on disk from the
        satellite configuration and the scene time slot.  The resulting
        OCAData channel is appended to ``satscene.channels``.
        """
        # NOTE(review): this flag is never used below.
        lonlat_is_loaded = False

        lritfiles = kwargs.get('filenames')

        if "OCA" not in satscene.channels_to_load:
            LOG.warning("No OCA product requested. Nothing to be done...")
            return

        area_name = satscene.area_id or satscene.area.area_id
        # platform_name = satscene.satname

        # NOTE(review): this config object is read but never used here;
        # get_lrit_filenames re-reads the configuration itself.
        conf = ConfigParser()
        conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))

        # Reading the products
        product = "oca"
        classes = {product: OCAData}

        LOG.debug("Loading " + product)

        if not lritfiles:
            dummy, lritfiles = get_lrit_filenames(satscene, area_name)

        LOG.info("Filenames = " + str(lritfiles))

        chn = classes[product]()
        chn.read_from_lrit(lritfiles)

        # Prepare info object for netCDF writer:
        resolution_str = str(int(chn.resolution)) + 'm'
        for field in chn._projectables:

            getattr(chn, field).info['var_name'] = field
            getattr(chn, field).info['var_data'] = getattr(chn, field).data
            # NOTE(review): 'var_dir_names' looks like a typo for
            # 'var_dim_names' and stores the data array, not dimension
            # names - confirm the intent before relying on this key.
            getattr(chn, field).info[
                'var_dir_names'] = getattr(chn, field).data

            getattr(chn, field).info['var_dim_names'] = ('y' + resolution_str,
                                                         'x' + resolution_str)
            getattr(chn, field).info['long_name'] = getattr(
                chn, field).long_name
            getattr(chn, field).info['standard_name'] = getattr(
                chn, field).standard_name
            valid_min = np.min(getattr(chn, field).data)
            valid_max = np.max(getattr(chn, field).data)
            getattr(chn, field).info['valid_range'] = np.array(
                [valid_min, valid_max])
            getattr(chn, field).info['resolution'] = chn.resolution

        satscene.channels.append(chn)

        LOG.info("Loading MPEF OCA cloud parameters done")

        return
+
+
def get_lrit_filenames(scene, area_name):
    """Get the set of lrit filenames for the given scene.

    Locate the LRIT segment files (and the prologue file) matching the
    time slot of *scene*, using the path pattern from the satellite
    configuration file (section "<instrument>-level4").

    :param scene: satellite scene object providing ``fullname``,
        ``instrument_name`` and ``time_slot``
    :param area_name: area identifier (currently unused; kept for
        interface compatibility)
    :returns: tuple ``(prologue filename or None, list of segment files)``
    """

    conf = ConfigParser()
    conf.read(os.path.join(CONFIG_PATH, scene.fullname + ".cfg"))

    filename = conf.get(scene.instrument_name + "-level4",
                        "filename",
                        raw=True,
                        vars=os.environ)
    directory = conf.get(scene.instrument_name + "-level4",
                         "dir",
                         vars=os.environ)
    pathname_tmpl = os.path.join(directory, filename)
    # Lazy %-style logging args instead of eager string concatenation.
    LOG.debug("Path = %s", str(pathname_tmpl))

    fparser = parser.Parser(pathname_tmpl)

    lrit_files = glob(
        parser.globify(pathname_tmpl, {'nominal_time': scene.time_slot}))

    prologue = None
    segmfiles = []
    segm_numbers = []
    for item in lrit_files:
        res = fparser.parse(item)
        segm = res['segment'].strip('_')
        if segm == 'PRO':
            # The prologue segment carries metadata, not image data.
            prologue = item
        else:
            segm_numbers.append(int(segm))
            segmfiles.append(item)

    if not prologue:
        LOG.warning("No prologue file found for timeslot")

    # The product is distributed in ten segments; warn when any is missing.
    # Compare against an explicit list: on Python 3 ``range(1, 11)`` is not
    # a list, so the original ``range(1, 11) == segm_numbers`` comparison
    # would always be False there.
    if sorted(segm_numbers) == list(range(1, 11)):
        LOG.info("All ten segment files found")
    else:
        LOG.warning("Less than 10 segments found: %s", str(segm_numbers))

    return prologue, segmfiles
diff --git a/mpop/satin/msg_seviri_hdf.py b/mpop/satin/msg_seviri_hdf.py
new file mode 100755
index 0000000..60e18ea
--- /dev/null
+++ b/mpop/satin/msg_seviri_hdf.py
@@ -0,0 +1,265 @@
+"""Loader for MSG, netcdf format.
+"""
+from ConfigParser import ConfigParser
+from mpop import CONFIG_PATH
+import os
+import numpy.ma as ma
+from numpy import array as np_array
+from numpy import nan as np_nan
+from glob import glob
+from mpop.projector import get_area_def
+import datetime 
+
+try:
+    import h5py
+except ImportError:
+    print "... module h5py needs to be installed"
+    quit()
+
+from mipp.xrit.MSG import _Calibrator
+
+import logging
+LOG = logging.getLogger(__name__)
+#from mpop.utils import debug_on
+#debug_on()
+
# Meteosat satellite number (as a string, with or without a leading zero)
# -> EUMETSAT satellite identifier used in the level 1.5 headers.
SatelliteIds = { '08': 321,  # Meteosat 8 
                  '8': 321,  # Meteosat 8 
                 '09': 322,  # Meteosat 9 
                  '9': 322,  # Meteosat 9 
                 '10': 323,  # Meteosat 10
                 '11': 324 } # Meteosat 11

# SEVIRI channel name -> channel number (1-12; HRV is channel 12).
channel_numbers = {"VIS006": 1,
                   "VIS008": 2,
                   "IR_016": 3,
                   "IR_039": 4,
                   "WV_062": 5,
                   "WV_073": 6,
                   "IR_087": 7,
                   "IR_097": 8,
                   "IR_108": 9,
                   "IR_120": 10,
                   "IR_134": 11,
                   "HRV": 12}

# SEVIRI channel name -> HDF5 group name ("Channel NN") in the U-MARF file.
dict_channel= {'VIS006':'Channel 01','VIS008':'Channel 02','IR_016':'Channel 03','IR_039':'Channel 04','WV_062':'Channel 05','WV_073':'Channel 06',\
               'IR_087':'Channel 07','IR_097':'Channel 08','IR_108':'Channel 09','IR_120':'Channel 10','IR_134':'Channel 11','HRV':'Channel 12'}
+
+
def load(satscene, calibrate=True, area_extent=None, **kwargs):
    """Load MSG SEVIRI data from hdf5 format.

    Reads a EUMETSAT U-MARF level 1.5 HDF5 archive file for the time
    slot of *satscene*, derives the sub-area geometry from the file
    metadata, optionally calibrates the counts with mipp's
    ``_Calibrator`` and stores the requested channels in *satscene*.

    Keyword arguments:
    - ``reader_level``: config section providing "dir" and "filename"
      patterns (default "seviri-level4")
    - ``RSS``: True for the rapid scan service (4 min scans), False for
      the full disk service (12 min scans); when absent the service is
      looked up via ``my_msg_module.check_RSS``
    """

    # Read config file content
    conf = ConfigParser()
    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
    values = {"orbit": satscene.orbit,
    "satname": satscene.satname,
    "number": satscene.number,
    "instrument": satscene.instrument_name,
    "satellite": satscene.fullname
    }

    LOG.info("assume seviri-level4")
    print "... assume seviri-level4"

    satscene.add_to_history("hdf5 data read by mpop/msg_seviri_hdf.py")


    # Config section used to build the file path.
    if "reader_level" in kwargs.keys():
        reader_level = kwargs["reader_level"]
    else:
        reader_level = "seviri-level4"

    # Scan duration: 4 min for the rapid scan service, 12 min otherwise.
    if "RSS" in kwargs.keys():
        if kwargs["RSS"]:
            dt_end =  4
        else:
            dt_end = 12
    else:
        from my_msg_module import check_RSS
        RSS = check_RSS(satscene.sat_nr(), satscene.time_slot)
        if RSS == None:
            print "*** Error in mpop/satin/msg_seviri_hdf.py"
            print "    satellite MSG", satscene.sat_nr() ," is not active yet"
            quit()
        else:
            if RSS:
                dt_end =  4
            else:
                dt_end = 12

    print "... hdf file name is specified by observation end time"
    print "    assume ", dt_end, " min between start and end time of observation"

    # end of scan time 4 min after start 
    end_time = satscene.time_slot + datetime.timedelta(minutes=dt_end)

    filename = os.path.join( end_time.strftime(conf.get(reader_level, "dir", raw=True)),
                             end_time.strftime(conf.get(reader_level, "filename", raw=True)) % values )
    
    print "... search for file: ", filename
    filenames=glob(str(filename))
    if len(filenames) == 0:
        print "*** Error, no file found"
        return # just return without exit the program 
    elif len(filenames) > 1:
        print "*** Warning, more than 1 datafile found: ", filenames 
    filename = filenames[0]
    print("... read data from %s" % str(filename))

    # read data from hdf5 file 
    data_folder='U-MARF/MSG/Level1.5/'

    # Load data from hdf file
    with h5py.File(filename,'r') as hf:

        # Pick the selected-rectangle boundaries of the archived sub-area
        # from the SUBSET metadata table.
        subset_info=hf.get(data_folder+'METADATA/SUBSET')
        for i in range(subset_info.len()):
            #print subset_info[i]['EntryName'], subset_info[i]['Value']
            if subset_info[i]['EntryName'] == "VIS_IRSouthLineSelectedRectangle":
                VIS_IRSouthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IRNorthLineSelectedRectangle":
                VIS_IRNorthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IREastColumnSelectedRectangle":
                VIS_IREastColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IRWestColumnSelectedRectangle":
                VIS_IRWestColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerNorthLineSelectedRectangle":
                HRVLowerNorthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerSouthLineSelectedRectangle":
                HRVLowerSouthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerEastColumnSelectedRectangle":
                HRVLowerEastColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerWestColumnSelectedRectangle":
                HRVLowerWestColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVUpperSouthLineSelectedRectangle":
                HRVUpperSouthLine = int(subset_info[i]['Value'])  # 0
            if subset_info[i]['EntryName'] == "HRVUpperNorthLineSelectedRectangle":
                HRVUpperNorthLine = int(subset_info[i]['Value'])  # 0
            if subset_info[i]['EntryName'] == "HRVUpperEastColumnSelectedRectangle":
                HRVUpperEastColumn = int(subset_info[i]['Value']) # 0
            if subset_info[i]['EntryName'] == "HRVUpperWestColumnSelectedRectangle":
                HRVUpperWestColumn = int(subset_info[i]['Value']) # 0

        # Nominal sub-satellite longitude, needed to build the projection.
        # NOTE(review): the loop below uses subset_info.len() to iterate
        # sat_status - looks like a copy-paste slip; confirm both tables
        # have compatible lengths.
        sat_status=hf.get(data_folder+'METADATA/HEADER/SatelliteStatus/SatelliteStatus_DESCR')
        for i in range(subset_info.len()):
            if sat_status[i]['EntryName']=="SatelliteDefinition-NominalLongitude":
                sat_lon = sat_status[i]['Value']
                break

        #print 'VIS_IRSouthLine', VIS_IRSouthLine
        #print 'VIS_IRNorthLine', VIS_IRNorthLine
        #print 'VIS_IREastColumn', VIS_IREastColumn
        #print 'VIS_IRWestColumn', VIS_IRWestColumn
        #print 'sat_longitude', sat_lon, type(sat_lon), 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>' 

        # Disabled alternative area computation (kept for reference).
        if 1 == 0:
            # works only if all pixels are on the disk 
            from msg_pixcoord2area import msg_pixcoord2area
            print "VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn: ", VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn
            area_def = msg_pixcoord2area ( VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn, "vis", sat_lon )
        else:
            # works also for pixels outside of the disk 
            pname = 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>'  # "GEOS<+009.5>"
            proj = {'proj': 'geos', 'a': '6378169.0', 'b': '6356583.8', 'h': '35785831.0', 'lon_0': str(sat_lon)}
            aex=(-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)

            # define full disk projection 
            from pyresample.geometry import AreaDefinition
            full_disk_def = AreaDefinition('full_disk',
                                           'full_disk',
                                           pname,
                                           proj,
                                           3712,
                                           3712,
                                           aex )

            # define name and calculate area for sub-demain 
            area_name= 'MSG_'+'{:04d}'.format(VIS_IRNorthLine)+'_'+'{:04d}'.format(VIS_IRWestColumn)+'_'+'{:04d}'.format(VIS_IRSouthLine)+'_'+'{:04d}'.format(VIS_IREastColumn)
            aex = full_disk_def.get_area_extent_for_subset(3712-VIS_IRSouthLine,3712-VIS_IRWestColumn,3712-VIS_IRNorthLine,3712-VIS_IREastColumn)

            area_def = AreaDefinition(area_name,
                                      area_name,
                                      pname,
                                      proj,
                                      (VIS_IRWestColumn-VIS_IREastColumn)+1,
                                      (VIS_IRNorthLine-VIS_IRSouthLine)+1,
                                      aex )

        #print area_def
        #print "REGION:", area_def.area_id, "{"
        #print "\tNAME:\t", area_def.name
        #print "\tPCS_ID:\t", area_def.proj_id
        #print ("\tPCS_DEF:\tproj="+area_def.proj_dict['proj']+", lon_0=" + area_def.proj_dict['lon_0'] + ", a="+area_def.proj_dict['a']+", b="+area_def.proj_dict['b']+", h="+area_def.proj_dict['h'])
        #print "\tXSIZE:\t", area_def.x_size
        #print "\tYSIZE:\t", area_def.y_size
        #print "\tAREA_EXTENT:\t", area_def.area_extent
        #print "};"

        # copy area to satscene 
        satscene.area = area_def

        # write information used by mipp.xrit.MSG._Calibrator in a fake header file
        hdr = dict()

        # satellite ID number 
        hdr["SatelliteDefinition"] = dict()
        hdr["SatelliteDefinition"]["SatelliteId"] = SatelliteIds[str(satscene.sat_nr())]
        
        # processing 
        hdr["Level 1_5 ImageProduction"] = dict()
        hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"] = np_array([2,2,2,2,2,2,2,2,2,2,2,2], int)
        
        # calibration factors  
        Level15ImageCalibration = hf.get(data_folder+'METADATA/HEADER/RadiometricProcessing/Level15ImageCalibration_ARRAY')
        hdr["Level1_5ImageCalibration"] = dict()

        # Per-channel slope/offset, keyed by zero-based channel index.
        for chn_name in channel_numbers.keys():
            chn_nb = channel_numbers[chn_name]-1
            hdr["Level1_5ImageCalibration"][chn_nb] = dict()
            #print chn_name, chn_nb, Level15ImageCalibration[chn_nb]['Cal_Slope'], Level15ImageCalibration[chn_nb]['Cal_Offset']
            hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Slope']  = Level15ImageCalibration[chn_nb]['Cal_Slope']
            hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Offset'] = Level15ImageCalibration[chn_nb]['Cal_Offset']

        # loop over channels to load 
        for chn_name in satscene.channels_to_load:

            dataset_name = data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA'
            if dataset_name in hf:
                data_tmp = hf.get(data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA')

                LOG.info('hdr["SatelliteDefinition"]["SatelliteId"]: '+str(hdr["SatelliteDefinition"]["SatelliteId"]))
                #LOG.info('hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"]', hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"])
                chn_nb = channel_numbers[chn_name]-1
                LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"]:  '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"]))
                LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"]: '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"]))

                if calibrate:
                    #Calibrator = _Calibrator(hdr, chn_name)
                    bits_per_pixel = 10   ### !!! I have no idea if this is correct !!!
                    Calibrator = _Calibrator(hdr, chn_name, bits_per_pixel) ## changed call in mipp/xrit/MSG.py
                    data, calibration_unit = Calibrator (data_tmp, calibrate=1)
                else:
                    data = data_tmp
                    calibration_unit = "counts"

                LOG.info(chn_name+ " min/max: "+str(data.min())+","+str(data.max())+" "+calibration_unit )

                satscene[chn_name] = ma.asarray(data)

                satscene[chn_name].info['units'] = calibration_unit
                satscene[chn_name].info['satname'] = satscene.satname
                satscene[chn_name].info['satnumber'] = satscene.number
                satscene[chn_name].info['instrument_name'] = satscene.instrument_name
                satscene[chn_name].info['time'] = satscene.time_slot
                satscene[chn_name].info['is_calibrated'] = True

            else: 
                # NOTE(review): data/calibration_unit set here are never
                # used afterwards; the channel is simply not appended.
                print "*** Warning, no data for channel "+ chn_name+ " in file "+ filename
                data = np_nan
                calibration_unit = ""
                LOG.info("*** Warning, no data for channel "+ chn_name+" in file "+filename)
                # do not append the channel chn_name
diff --git a/mpop/satin/nc_pps_l2.py b/mpop/satin/nc_pps_l2.py
index 72ef370..080f4de 100644
--- a/mpop/satin/nc_pps_l2.py
+++ b/mpop/satin/nc_pps_l2.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright (c) 2014, 2015 Adam.Dybbroe
+# Copyright (c) 2014, 2015, 2016 Adam.Dybbroe
 
 # Author(s):
 
@@ -126,6 +126,7 @@ class PpsGeolocationData(object):
                              dtype=np.bool)
 
         swath_index = 0
+
         for idx, filename in enumerate(self.filenames):
 
             y0_ = swath_index
@@ -1117,19 +1118,38 @@ def get_lonlat_into(filename, out_lons, out_lats, out_mask):
     # FIXME: this is to mask out the npp bowtie deleted pixels...
     if "NPP" in h5f.attrs['platform']:
 
-        new_mask = np.zeros((16, 3200), dtype=bool)
-        new_mask[0, :1008] = True
-        new_mask[1, :640] = True
-        new_mask[14, :640] = True
-        new_mask[15, :1008] = True
-        new_mask[14, 2560:] = True
-        new_mask[1, 2560:] = True
-        new_mask[0, 2192:] = True
-        new_mask[15, 2192:] = True
-        new_mask = np.tile(new_mask, (out_lons.shape[0] / 16, 1))
+        if shape[1] == 3200:  # M-bands:
+            new_mask = np.zeros((16, 3200), dtype=bool)
+            new_mask[0, :1008] = True
+            new_mask[1, :640] = True
+            new_mask[14, :640] = True
+            new_mask[15, :1008] = True
+            new_mask[14, 2560:] = True
+            new_mask[1, 2560:] = True
+            new_mask[0, 2192:] = True
+            new_mask[15, 2192:] = True
+            new_mask = np.tile(new_mask, (out_lons.shape[0] / 16, 1))
+        elif shape[1] == 6400:  # I-bands:
+            LOG.info(
+                "PPS on I-band resolution. Mask out bow-tie deletion pixels")
+            LOG.warning("Not yet supported...")
+            new_mask = np.zeros((32, 6400), dtype=bool)
+            new_mask[0:2, :2016] = True
+            new_mask[0:2, 4384:] = True
+            new_mask[2:4, :1280] = True
+            new_mask[2:4, 5120:] = True
+            new_mask[28:30, :1280] = True
+            new_mask[28:30, 5120:] = True
+            new_mask[30:32, :2016] = True
+            new_mask[30:32, 4384:] = True
+            new_mask = np.tile(new_mask, (out_lons.shape[0] / 32, 1))
+        else:
+            LOG.error("VIIRS shape not supported. " +
+                      "No handling of bow-tie deletion pixels: shape = ", str(shape))
 
     out_mask[:] = np.logical_or(
-        new_mask, np.logical_and(out_lats <= fillvalue, out_lons <= fillvalue))
+        new_mask, np.logical_and(out_lats == fillvalue, out_lons == fillvalue))
+    # new_mask, np.logical_and(out_lats <= fillvalue, out_lons <= fillvalue))
 
     h5f.close()
     if unzipped:
diff --git a/mpop/satin/nwcsaf_hrw_hdf.py b/mpop/satin/nwcsaf_hrw_hdf.py
new file mode 100644
index 0000000..d6a22ca
--- /dev/null
+++ b/mpop/satin/nwcsaf_hrw_hdf.py
@@ -0,0 +1,355 @@
+"""Loader for MSG, nwcsaf high resolution hdf5 format.
+"""
+from ConfigParser import ConfigParser
+from mpop import CONFIG_PATH
+import os
+from numpy import array as np_array
+from numpy import empty as np_empty
+from numpy import append as np_append
+from numpy import dtype as np_dtype
+from numpy import append as np_append
+from numpy import where as np_where
+from numpy import in1d as np_in1d
+from numpy import logical_and as np_logical_and
+from glob import glob
+from mpop.projector import get_area_def
+import datetime 
+
+from copy import deepcopy
+
+try:
+    import h5py
+except ImportError:
+    print "... module h5py needs to be installed"
+    quit()
+
+from mipp.xrit.MSG import _Calibrator
+
+import logging
+LOG = logging.getLogger(__name__)
+#from mpop.utils import debug_on
+#debug_on()
+
+GP_IDs = { 321: '08',  # Meteosat 8 
+           322: '09',  # Meteosat 9 
+           323: '10',  # Meteosat 10
+           324: '11' } # Meteosat 11
+
+dict_channel = {'CHANNEL00':'HRV',   'CHANNEL01':'VIS006','CHANNEL02':'VIS008','CHANNEL03':'IR_016','CHANNEL04':'IR_039','CHANNEL05':'WV_062',\
+                'CHANNEL06':'WV_073','CHANNEL07':'IR_087','CHANNEL08':'IR_097','CHANNEL09':'IR_108','CHANNEL10':'IR_120','CHANNEL11':'IR_134'}
+
+
+# class definition of a high resolution wind data
+class HRW_class:
+
+    def __init__(self):
+        # see http://docs.scipy.org/doc/numpy/reference/generated/numpy.dtype.html
+        self.date            = None # datetime of the observation  
+        self.detailed        = None # False-> basic, True -> detailed 
+        self.channel         = np_array([], dtype='|S6') 
+        self.wind_id         = np_array([], dtype=int)
+        self.prev_wind_id    = np_array([], dtype=int)
+        self.segment_X       = np_array([], dtype='f') 
+        self.segment_Y       = np_array([], dtype='f') 
+        self.t_corr_method   = np_array([], dtype=int) 
+        self.lon             = np_array([], dtype='f')     #  6.3862 [longitude in degree E]
+        self.lat             = np_array([], dtype='f')     # 46.8823 [latitude in degree N]
+        self.dlon            = np_array([], dtype='f')     # -0.011  [longitude in degree E]
+        self.dlat            = np_array([], dtype='f')     #  0.01   [latitude in degree N]
+        self.pressure        = np_array([], dtype='f')     # 64200.0 [p in Pa]
+        self.wind_speed      = np_array([], dtype='f')     #   3.1   [v in m/s]
+        self.wind_direction  = np_array([], dtype='f')     #  313.0  [v_dir in deg]
+        self.temperature     = np_array([], dtype='f')     #  272.4  [T in K]
+        self.conf_nwp        = np_array([], dtype='f')
+        self.conf_no_nwp     = np_array([], dtype='f')
+        self.t_type          = np_array([], dtype=int)
+        self.t_level_method  = np_array([], dtype=int)
+        self.t_winds         = np_array([], dtype=int)
+        self.t_corr_test     = np_array([], dtype=int)
+        self.applied_QI      = np_array([], dtype=int)
+        self.NWP_wind_levels = np_array([], dtype=int)
+        self.num_prev_winds  = np_array([], dtype=int)
+        self.orographic_index= np_array([], dtype=int)
+        self.cloud_type      = np_array([], dtype=int)
+        self.wind_channel    = np_array([], dtype=int)
+        self.correlation     = np_array([], dtype=int)
+        self.pressure_error  = np_array([], dtype='f')
+
+    # ---------------- add two data sets e.g. time steps ---------------------
+    def __add__(self, HRW_class2):
+
+        HRW_new = HRW_class()
+
+        HRW_new.date            = self.date      # !!! does not make sense !!! 
+        HRW_new.detailed        = self.detailed  # !!! does not make sense !!!
+        HRW_new.channel         = np_append(self.channel,         HRW_class2.channel)
+        HRW_new.wind_id         = np_append(self.wind_id,         HRW_class2.wind_id)
+        HRW_new.prev_wind_id    = np_append(self.prev_wind_id,    HRW_class2.prev_wind_id)
+        HRW_new.segment_X       = np_append(self.segment_X,       HRW_class2.segment_X)
+        HRW_new.segment_Y       = np_append(self.segment_Y,       HRW_class2.segment_Y)
+        HRW_new.t_corr_method   = np_append(self.t_corr_method,   HRW_class2.t_corr_method)
+        HRW_new.lon             = np_append(self.lon,             HRW_class2.lon)
+        HRW_new.lat             = np_append(self.lat,             HRW_class2.lat)
+        HRW_new.dlon            = np_append(self.dlon,            HRW_class2.dlon)
+        HRW_new.dlat            = np_append(self.dlat,            HRW_class2.dlat)
+        HRW_new.pressure        = np_append(self.pressure,        HRW_class2.pressure)
+        HRW_new.wind_speed      = np_append(self.wind_speed,      HRW_class2.wind_speed)
+        HRW_new.wind_direction  = np_append(self.wind_direction,  HRW_class2.wind_direction)       
+        HRW_new.temperature     = np_append(self.temperature,     HRW_class2.temperature)
+        HRW_new.conf_nwp        = np_append(self.conf_nwp,        HRW_class2.conf_nwp)
+        HRW_new.conf_no_nwp     = np_append(self.conf_no_nwp,     HRW_class2.conf_no_nwp)
+        HRW_new.t_type          = np_append(self.t_type,          HRW_class2.t_type)
+        HRW_new.t_level_method  = np_append(self.t_level_method,  HRW_class2.t_level_method)
+        HRW_new.t_winds         = np_append(self.t_winds,         HRW_class2.t_winds)
+        HRW_new.t_corr_test     = np_append(self.t_corr_test,     HRW_class2.t_corr_test)
+        HRW_new.applied_QI      = np_append(self.applied_QI,      HRW_class2.applied_QI)
+        HRW_new.NWP_wind_levels = np_append(self.NWP_wind_levels, HRW_class2.NWP_wind_levels)
+        HRW_new.num_prev_winds  = np_append(self.num_prev_winds,  HRW_class2.num_prev_winds)
+        HRW_new.orographic_index= np_append(self.orographic_index,HRW_class2.orographic_index)
+        HRW_new.cloud_type      = np_append(self.cloud_type,      HRW_class2.cloud_type)
+        HRW_new.wind_channel    = np_append(self.wind_channel,    HRW_class2.wind_channel)
+        HRW_new.correlation     = np_append(self.correlation,     HRW_class2.correlation)
+        HRW_new.pressure_error  = np_append(self.pressure_error,  HRW_class2.pressure_error)
+
+        return HRW_new
+
+    # ---------------- filter for certain criteria  ---------------------
+    def filter(self, **kwargs):
+
+        # if empty than return self (already empty)
+        if self.channel.size == 0:
+            return self
+
+        HRW_new = deepcopy(self)
+
+        for key_filter in ['min_correlation', 'min_conf_nwp', 'min_conf_no_nwp', 'cloud_type', 'level']:
+            if key_filter in kwargs.keys():
+                
+                # if argument given is None or all keyword then skip this filter 
+                if kwargs[key_filter] == None or kwargs[key_filter] == 'all' or kwargs[key_filter] == 'ALL' or kwargs[key_filter] == 'A':
+                    continue
+
+                n1 = str(HRW_new.channel.size)
+
+                if key_filter == 'min_correlation':
+                    inds = np_where(HRW_new.correlation > kwargs[key_filter])
+                elif key_filter == 'min_conf_nwp':
+                    inds = np_where(HRW_new.conf_nwp    > kwargs[key_filter])
+                elif key_filter == 'min_conf_no_nwp':
+                    inds = np_where(HRW_new.conf_no_nwp > kwargs[key_filter])
+                elif key_filter == 'cloud_type':
+                    mask = np_in1d(HRW_new.cloud_type, kwargs[key_filter]) 
+                    inds = np_where(mask)[0]
+                elif key_filter == 'level':
+                    if kwargs[key_filter] == 'H': # high level: < 440hPa like in the ISCCP
+                        inds = np_where(HRW_new.pressure < 44000 ) 
+                    elif kwargs[key_filter] == 'M': # mid level: 440hPa ... 680hPa like in the ISCCP
+                        inds = np_where( np_logical_and(44000 < HRW_new.pressure, HRW_new.pressure < 68000) ) 
+                    elif kwargs[key_filter] == 'L': # low level: > 680hPa like in the ISCCP
+                        inds = np_where(68000 < HRW_new.pressure)
+
+                HRW_new.subset(inds)
+                print "    filter for "+key_filter+" = ", kwargs[key_filter],' ('+n1+'->'+str(HRW_new.channel.size)+')'
+
+        return HRW_new
+
+    # ---------------- reduce HRW_dataset to the given indices inds ---------------------
+    def subset(self, inds):
+
+        self.channel          = self.channel         [inds]    
+        self.wind_id          = self.wind_id         [inds]    
+        self.prev_wind_id     = self.prev_wind_id    [inds]    
+        self.segment_X        = self.segment_X       [inds]   
+        self.segment_Y        = self.segment_Y       [inds]   
+        self.t_corr_method    = self.t_corr_method   [inds]   
+        self.lon              = self.lon             [inds]   
+        self.lat              = self.lat             [inds]   
+        self.dlon             = self.dlon            [inds]  
+        self.dlat             = self.dlat            [inds]   
+        self.pressure         = self.pressure        [inds]   
+        self.wind_speed       = self.wind_speed      [inds]   
+        self.wind_direction   = self.wind_direction  [inds]  
+        self.temperature      = self.temperature     [inds]   
+        self.conf_nwp         = self.conf_nwp        [inds]   
+        self.conf_no_nwp      = self.conf_no_nwp     [inds]   
+        self.t_type           = self.t_type          [inds]  
+        self.t_level_method   = self.t_level_method  [inds]  
+        self.t_winds          = self.t_winds         [inds] 
+        self.t_corr_test      = self.t_corr_test     [inds]  
+        self.applied_QI       = self.applied_QI      [inds]  
+        self.NWP_wind_levels  = self.NWP_wind_levels [inds] 
+        self.num_prev_winds   = self.num_prev_winds  [inds]
+        self.orographic_index = self.orographic_index[inds]
+        self.cloud_type       = self.cloud_type      [inds]
+        self.wind_channel     = self.wind_channel    [inds]
+        self.correlation      = self.correlation     [inds]
+        self.pressure_error   = self.pressure_error  [inds]
+
+        return self
+
+
+def load(satscene, calibrate=True, area_extent=None, read_basic_or_detailed='both', **kwargs):
+    """Load MSG SEVIRI High Resolution Wind (HRW) data from hdf5 format.
+    """
+
+    # Read config file content
+    conf = ConfigParser()
+    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
+    values = {"orbit": satscene.orbit,
+    "satname": satscene.satname,
+    "number": satscene.number,
+    "instrument": satscene.instrument_name,
+    "satellite": satscene.fullname
+    }
+
+    LOG.info("assume seviri-level5")
+    print "... assume seviri-level5"
+
+    satscene.add_to_history("hdf5 data read by mpop/nwcsaf_hrw_hdf.py")
+
+    # end of scan time 4 min after start 
+    end_time = satscene.time_slot + datetime.timedelta(minutes=4)
+
+    # area !!! satscene.area
+
+    filename = os.path.join( satscene.time_slot.strftime(conf.get("seviri-level5", "dir", raw=True)),
+                             satscene.time_slot.strftime(conf.get("seviri-level5", "filename", raw=True)) % values )
+
+    # define classes before we search for files (in order to return empty class if no file is found)
+    HRW_basic             = HRW_class()
+    HRW_basic.detailed    = False 
+    HRW_basic.date        = satscene.time_slot
+    HRW_detailed          = HRW_class()
+    HRW_detailed.detailed = True
+    HRW_detailed.date     = satscene.time_slot
+
+    print "... search for file: ", filename
+    filenames=glob(str(filename))
+
+    if len(filenames) != 0:
+
+        if len(filenames) > 1:
+            print "*** Warning, more than 1 datafile found: ", filenames 
+
+        filename = filenames[0]
+        print("... read data from %s" % str(filename))
+
+        # create an instance of the HRW_class
+        m_per_s_to_knots = 1.944
+
+        ## limit channels to read 
+        #hrw_channels=['HRV']
+        # limit basic or detailed or both
+        #read_basic_or_detailed='detailed'
+        #read_basic_or_detailed='basic'
+
+
+        with h5py.File(filename,'r') as hf:
+
+            #print hf.attrs.keys()
+            #print hf.attrs.values()
+
+            region_name = hf.attrs['REGION_NAME'].replace("_", "")
+            print "... read HRW data for region ", region_name
+            LOG.info("... read HRW data for region "+region_name)
+            sat_ID = GP_IDs[int(hf.attrs["GP_SC_ID"])]
+            print "... derived from Meteosat ", sat_ID
+            LOG.info("... derived from Meteosat "+sat_ID)
+
+            # print('List of arrays in this file: \n', hf.keys()), len(hf.keys())
+
+            if len(hf.keys()) == 0:
+                print "*** Warning, empty file ", filename
+                print ""
+            else:
+                for key in hf.keys():
+
+                    if key[4:9] == "BASIC":
+                        if 'read_basic_or_detailed' in locals():
+                            if read_basic_or_detailed.lower() == "detailed":
+                                continue
+                        HRW_data = HRW_basic   # shallow copy 
+                    elif key[4:12] == "DETAILED":
+                        if 'read_basic_or_detailed' in locals():
+                            if read_basic_or_detailed.lower() == "basic":
+                                continue
+                        HRW_data = HRW_detailed # shallow copy 
+
+                    hrw_chn = dict_channel[key[len(key)-9:]]
+
+                    if 'hrw_channels' in locals():
+                        if hrw_channels != None:
+                            if hrw_chn not in hrw_channels:
+                                print "... "+hrw_chn+" is not in hrw_channels", hrw_channels 
+                                print "    skip reading this channel" 
+                                continue 
+
+                    # read all  data 
+                    channel = hf.get(key)
+                    # print '... read wind vectors of channel ', channel.name, hrw_chn
+                    # print  "  i    lon        lat      speed[kn] dir   pressure"
+                    #for i in range(channel.len()):
+                    #    print '%3d %10.7f %10.7f %7.2f %7.1f %8.1f' % (channel[i]['wind_id'], channel[i]['lon'], channel[i]['lat'], \
+                    #                                                   channel[i]['wind_speed']*m_per_s_to_knots, \
+                    #                                                   channel[i]['wind_direction'], channel[i]['pressure'])
+                    # create string array with channel names 
+                    channel_chararray = np_empty(channel.len(), dtype='|S6')
+                    channel_chararray[:] = hrw_chn
+
+                    HRW_data.channel          = np_append(HRW_data.channel         , channel_chararray              )
+                    HRW_data.wind_id          = np_append(HRW_data.wind_id         , channel[:]['wind_id']          )    
+                    HRW_data.prev_wind_id     = np_append(HRW_data.prev_wind_id    , channel[:]['prev_wind_id']     )    
+                    HRW_data.segment_X        = np_append(HRW_data.segment_X       , channel[:]['segment_X']        )   
+                    HRW_data.segment_Y        = np_append(HRW_data.segment_Y       , channel[:]['segment_Y']        )   
+                    HRW_data.t_corr_method    = np_append(HRW_data.t_corr_method   , channel[:]['t_corr_method']    )   
+                    HRW_data.lon              = np_append(HRW_data.lon             , channel[:]['lon']              )   
+                    HRW_data.lat              = np_append(HRW_data.lat             , channel[:]['lat']              )   
+                    HRW_data.dlon             = np_append(HRW_data.dlon            , channel[:]['dlon']             )  
+                    HRW_data.dlat             = np_append(HRW_data.dlat            , channel[:]['dlat']             )   
+                    HRW_data.pressure         = np_append(HRW_data.pressure        , channel[:]['pressure']         )   
+                    HRW_data.wind_speed       = np_append(HRW_data.wind_speed      , channel[:]['wind_speed']       )   
+                    HRW_data.wind_direction   = np_append(HRW_data.wind_direction  , channel[:]['wind_direction']   )  
+                    HRW_data.temperature      = np_append(HRW_data.temperature     , channel[:]['temperature']      )   
+                    HRW_data.conf_nwp         = np_append(HRW_data.conf_nwp        , channel[:]['conf_nwp']         )   
+                    HRW_data.conf_no_nwp      = np_append(HRW_data.conf_no_nwp     , channel[:]['conf_no_nwp']      )   
+                    HRW_data.t_type           = np_append(HRW_data.t_type          , channel[:]['t_type']           )  
+                    HRW_data.t_level_method   = np_append(HRW_data.t_level_method  , channel[:]['t_level_method']   )  
+                    HRW_data.t_winds          = np_append(HRW_data.t_winds         , channel[:]['t_winds']          ) 
+                    HRW_data.t_corr_test      = np_append(HRW_data.t_corr_test     , channel[:]['t_corr_test']      )   
+                    HRW_data.applied_QI       = np_append(HRW_data.applied_QI      , channel[:]['applied_QI']       )  
+                    HRW_data.NWP_wind_levels  = np_append(HRW_data.NWP_wind_levels , channel[:]['NWP_wind_levels']  ) 
+                    HRW_data.num_prev_winds   = np_append(HRW_data.num_prev_winds  , channel[:]['num_prev_winds']   )
+                    HRW_data.orographic_index = np_append(HRW_data.orographic_index, channel[:]['orographic_index'] )
+                    HRW_data.cloud_type       = np_append(HRW_data.cloud_type      , channel[:]['cloud_type']       )
+                    HRW_data.wind_channel     = np_append(HRW_data.wind_channel    , channel[:]['wind_channel']     )
+                    HRW_data.correlation      = np_append(HRW_data.correlation     , channel[:]['correlation']      )
+                    HRW_data.pressure_error   = np_append(HRW_data.pressure_error  , channel[:]['pressure_error']   )
+
+                # sort according to wind_id
+                inds = HRW_data.wind_id.argsort()
+                HRW_data.subset(inds) # changes HRW_data itself
+
+                # sorting without conversion to numpy arrays 
+                #[e for (wid,pwid) in sorted(zip(HRW_data.wind_id,HRW_data.prev_wind_id))]
+
+    else:
+        print "*** Error, no file found"
+        print ""
+        sat_ID = "no file"
+        # but we continue the program in order to add an empty channel below 
+
+
+    ## filter data according to the given optional arguments 
+    #n1 = str(HRW_data.channel.size)
+    #HRW_data = HRW_data.filter(**kwargs)   
+    #print "    apply filters "+' ('+n1+'->'+str(HRW_data.channel.size)+')'
+
+    chn_name="HRW"
+    satscene[chn_name].HRW_basic    = HRW_basic.filter(**kwargs)     # returns new object (deepcopied and filtered)
+    satscene[chn_name].HRW_detailed = HRW_detailed.filter(**kwargs)  # returns new object (deepcopied and filtered)
+    satscene[chn_name].info['units'] = 'm/s'
+    satscene[chn_name].info['satname'] = 'meteosat'
+    satscene[chn_name].info['satnumber'] = sat_ID
+    satscene[chn_name].info['instrument_name'] = 'seviri'
+    satscene[chn_name].info['time'] = satscene.time_slot
+    satscene[chn_name].info['is_calibrated'] = True
diff --git a/mpop/satin/nwcsaf_msg.py b/mpop/satin/nwcsaf_msg.py
new file mode 100644
index 0000000..1e71802
--- /dev/null
+++ b/mpop/satin/nwcsaf_msg.py
@@ -0,0 +1,3086 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2010, 2012, 2014.
+
+# SMHI,
+# Folkborgsvägen 1,
+# Norrköping,
+# Sweden
+
+# Author(s):
+
+#   Martin Raspaud <martin.raspaud at smhi.se>
+#   Marco Sassi <marco.sassi at meteoswiss.ch> for CRR, PC (partly), SPhR, PCPh, CRPh
+#   Jörg Asmus <joerg.asmus at dwd.de> for CRR, PC (partly), SPhR, PCPh, CRPH
+#   Ulrich Hamann <ulrich.hamann at meteoswiss.ch> for CMa, bugfix SPhR.cape, 1st version generic class MsgNwcsafClass
+
+
+# This file is part of mpop.
+
+# mpop is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, either version 3 of the License, or (at your option) any later
+# version.
+
+# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License along with
+# mpop.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Plugin for reading NWCSAF MSG products hdf files.
+"""
+import ConfigParser
+import os.path
+from mpop import CONFIG_PATH
+import mpop.channel
+import numpy as np
+import pyresample.utils
+
+import glob
+from mpop.utils import get_logger
+from mpop.projector import get_area_def
+from os.path import basename
+
+LOG = get_logger('satin/nwcsaf_msg')
+COMPRESS_LVL = 6
+
+
+def pcs_def_from_region(region):
+    items = region.proj_dict.items()
+    return ' '.join([t[0] + '=' + t[1] for t in items])
+
+
+def _get_area_extent(cfac, lfac, coff, loff, numcols, numlines):
+    """Get the area extent from msg parameters.
+    """
+
+    # h = 35785831.0, see area_def.cfg
+
+    xur = (numcols - coff) * 2 ** 16 / (cfac * 1.0) 
+    xur = np.deg2rad(xur) * 35785831.0
+    xll = (-1 - coff) * 2 ** 16 / (cfac * 1.0)
+    xll = np.deg2rad(xll) * 35785831.0
+    xres = (xur - xll) / numcols
+    xur, xll = xur - xres / 2, xll + xres / 2
+    yll = (numlines - loff) * 2 ** 16 / (-lfac * 1.0)
+    yll = np.deg2rad(yll) * 35785831.0
+    yur = (-1 - loff) * 2 ** 16 / (-lfac * 1.0)
+    yur = np.deg2rad(yur) * 35785831.0
+    yres = (yur - yll) / numlines
+    yll, yur = yll + yres / 2, yur - yres / 2
+    print "msg_hdf _get_area_extent: xll, yll, xur, yur = ", xll, yll, xur, yur
+    return xll, yll, xur, yur
+
+
+def get_area_extent(filename):
+    """Get the area extent of the data in *filename*.
+    """
+    import h5py
+    h5f = h5py.File(filename, 'r')
+    print "msg_hdf get_area_extent: CFAC, LFAC, COFF, LOFF, NC, NL = ", h5f.attrs["CFAC"], h5f.attrs["LFAC"], h5f.attrs["COFF"], h5f.attrs["LOFF"], h5f.attrs["NC"], h5f.attrs["NL"]
+    aex = _get_area_extent(h5f.attrs["CFAC"],
+                           h5f.attrs["LFAC"],
+                           h5f.attrs["COFF"],
+                           h5f.attrs["LOFF"],
+                           h5f.attrs["NC"],
+                           h5f.attrs["NL"])
+    h5f.close()
+    return aex
+
+
+def _get_palette(h5f, dsname):
+    try:
+        p = h5f[dsname].attrs['PALETTE']
+        return h5f[p].value
+    except KeyError:
+        return None
+
+# ------------------------------------------------------------------
+
+class MsgCloudMaskData(object):
+
+    """NWCSAF/MSG Cloud Mask data layer
+    """
+
+    def __init__(self):
+        self.data = None
+        self.scaling_factor = 1
+        self.offset = 0
+        self.num_of_lines = 0
+        self.num_of_columns = 0
+        self.product = ""
+        self.id = ""
+
+class MsgCloudMask(mpop.channel.GenericChannel):
+
+    """NWCSAF/MSG Cloud Mask data structure as retrieved from HDF5
+    file. Resolution sets the nominal resolution of the data.
+    """
+
+    def __init__(self, resolution=None):
+        mpop.channel.GenericChannel.__init__(self, "CloudMask")
+        self.filled = False
+        self.name = "CloudMask"
+        self.resolution = resolution
+        self.package = ""
+        self.saf = ""
+        self.product_name = ""
+        self.num_of_columns = 0
+        self.num_of_lines = 0
+        self.projection_name = ""
+        self.pcs_def = ""
+        self.xscale = 0
+        self.yscale = 0
+        self.ll_lon = 0.0
+        self.ll_lat = 0.0
+        self.ur_lon = 0.0
+        self.ur_lat = 0.0
+        self.region_name = ""
+        self.cfac = 0
+        self.lfac = 0
+        self.coff = 0
+        self.loff = 0
+        self.nb_param = 0
+        self.gp_sc_id = 0
+        self.image_acquisition_time = 0
+        self.spectral_channel_id = 0
+        self.nominal_product_time = 0
+        self.sgs_product_quality = 0
+        self.sgs_product_completeness = 0
+        self.product_algorithm_version = ""
+        self.CMa = None
+        self.CMa_DUST = None
+        self.CMa_QUALITY = None
+        self.CMa_VOLCANIC = None
+        self.shape = None
+        self.satid = ""
+        self.qc_straylight = -1
+
+    def __str__(self):
+        return ("'%s: shape %s, resolution %sm'" %
+                (self.name,
+                 self.CMa.shape,
+                 self.resolution))
+
+    def is_loaded(self):
+        """Tells if the channel contains loaded data.
+        """
+        return self.filled
+
+    def read(self, filename, calibrate=True):
+        """Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data.
+        """
+        import h5py
+
+        self.CMa = MsgCloudMaskData()
+        self.CMa_DUST = MsgCloudMaskData()
+        self.CMa_QUALITY = MsgCloudMaskData()
+        self.CMa_VOLCANIC = MsgCloudMaskData()
+
+        h5f = h5py.File(filename, 'r')
+        # pylint: disable-msg=W0212
+        self.package = h5f.attrs["PACKAGE"]
+        self.saf = h5f.attrs["SAF"]
+        self.product_name = h5f.attrs["PRODUCT_NAME"]
+        self.num_of_columns = h5f.attrs["NC"]
+        self.num_of_lines = h5f.attrs["NL"]
+        self.projection_name = h5f.attrs["PROJECTION_NAME"]
+        self.region_name = h5f.attrs["REGION_NAME"]
+        self.cfac = h5f.attrs["CFAC"]
+        self.lfac = h5f.attrs["LFAC"]
+        self.coff = h5f.attrs["COFF"]
+        self.loff = h5f.attrs["LOFF"]
+        self.nb_param = h5f.attrs["NB_PARAMETERS"]
+        self.gp_sc_id = h5f.attrs["GP_SC_ID"]
+        self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
+        self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
+        self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
+        self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
+        self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
+        self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
+        # pylint: enable-msg=W0212
+        # ------------------------
+
+        # The cloud mask data
+        print "... read cloud mask data"
+        h5d = h5f['CMa']
+        self.CMa.data = h5d[:, :]
+        self.CMa.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.CMa.offset = h5d.attrs["OFFSET"]
+        self.CMa.num_of_lines = h5d.attrs["N_LINES"]
+        self.CMa.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.CMa.num_of_lines,
+                      self.CMa.num_of_columns)
+        self.CMa.product = h5d.attrs["PRODUCT"]
+        self.CMa.id = h5d.attrs["ID"]
+        self.CMa_palette = _get_palette(h5f, 'CMa')
+        # ------------------------
+
+        # The cloud mask dust data
+        print "... read cloud mask dust data"
+        h5d = h5f['CMa_DUST']
+        self.CMa_DUST.data = h5d[:, :]
+        self.CMa_DUST.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.CMa_DUST.offset = h5d.attrs["OFFSET"]
+        self.CMa_DUST.num_of_lines = h5d.attrs["N_LINES"]
+        self.CMa_DUST.num_of_columns = h5d.attrs["N_COLS"]
+        self.CMa_DUST.product = h5d.attrs["PRODUCT"]
+        self.CMa_DUST.id = h5d.attrs["ID"]
+        self.CMa_DUST_palette = _get_palette(h5f, 'CMa_DUST')
+        # ------------------------
+
+        # The cloud mask quality
+        print "... read cloud mask quality"
+        h5d = h5f['CMa_QUALITY']
+        self.CMa_QUALITY.data = h5d[:, :]
+        self.CMa_QUALITY.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.CMa_QUALITY.offset = h5d.attrs["OFFSET"]
+        self.CMa_QUALITY.num_of_lines = h5d.attrs["N_LINES"]
+        self.CMa_QUALITY.num_of_columns = h5d.attrs["N_COLS"]
+        self.CMa_QUALITY.product = h5d.attrs["PRODUCT"]
+        self.CMa_QUALITY.id = h5d.attrs["ID"]
+        # no palette for QUALITY
+        # ------------------------
+
+        h5d = h5f['CMa_VOLCANIC']
+        print "... read volcanic dust mask"
+        self.CMa_VOLCANIC.data = h5d[:, :]
+        self.CMa_VOLCANIC.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.CMa_VOLCANIC.offset = h5d.attrs["OFFSET"]
+        self.CMa_VOLCANIC.num_of_lines = h5d.attrs["N_LINES"]
+        self.CMa_VOLCANIC.num_of_columns = h5d.attrs["N_COLS"]
+        self.CMa_VOLCANIC.product = h5d.attrs["PRODUCT"]
+        self.CMa_VOLCANIC.id = h5d.attrs["ID"]
+        self.CMa_VOLCANIC_palette = _get_palette(h5f, 'CMa_VOLCANIC')
+        # ------------------------
+
+        h5f.close()
+
+        self.CMa = self.CMa.data
+        self.CMa_DUST = self.CMa_DUST.data
+        self.CMa_QUALITY = self.CMa_QUALITY.data
+        self.CMa_VOLCANIC = self.CMa_VOLCANIC.data
+
+        self.area = get_area_from_file(filename)
+
+        self.filled = True
+
+    def save(self, filename):
+        """Save the current cloudtype object to hdf *filename*, in pps format.
+        """
+        import h5py
+        cma = self.convert2pps()
+        LOG.info("Saving CMa hdf file...")
+        cma.save(filename)
+        h5f = h5py.File(filename, mode="a")
+        h5f.attrs["straylight_contaminated"] = self.qc_straylight
+        h5f.close()
+        LOG.info("Saving CMa hdf file done !")
+
+    def project(self, coverage):
+        """Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on
+        area given by a pre-registered area-id. Faster version of msg_remap!
+        """
+        LOG.info("Projecting channel %s..." % (self.name))
+
+        region = coverage.out_area
+        dest_area = region.area_id
+
+        retv = MsgCloudMask()
+
+        retv.name = self.name
+        retv.package = self.package
+        retv.saf = self.saf
+        retv.product_name = self.product_name
+        retv.region_name = dest_area
+        retv.cfac = self.cfac
+        retv.lfac = self.lfac
+        retv.coff = self.coff
+        retv.loff = self.loff
+        retv.nb_param = self.nb_param
+        retv.gp_sc_id = self.gp_sc_id
+        retv.image_acquisition_time = self.image_acquisition_time
+        retv.spectral_channel_id = self.spectral_channel_id
+        retv.nominal_product_time = self.nominal_product_time
+        retv.sgs_product_quality = self.sgs_product_quality
+        retv.sgs_product_completeness = self.sgs_product_completeness
+        retv.product_algorithm_version = self.product_algorithm_version
+
+        retv.CMa = coverage.project_array(self.CMa)
+        retv.CMa_DUST = coverage.project_array(self.CMa_DUST)
+        retv.CMa_QUALITY = coverage.project_array(self.CMa_QUALITY)
+        retv.CMa_VOLCANIC = coverage.project_array(self.CMa_VOLCANIC)
+
+        retv.qc_straylight = self.qc_straylight
+        retv.region_name = dest_area
+        retv.area = region
+        retv.projection_name = region.proj_id
+
+        retv.pcs_def = pcs_def_from_region(region)
+
+        retv.num_of_columns = region.x_size
+        retv.num_of_lines = region.y_size
+        retv.xscale = region.pixel_size_x
+        retv.yscale = region.pixel_size_y
+
+        import pyproj
+        prj = pyproj.Proj(region.proj4_string)
+        aex = region.area_extent
+        lonur, latur = prj(aex[2], aex[3], inverse=True)
+        lonll, latll = prj(aex[0], aex[1], inverse=True)
+        retv.ll_lon = lonll
+        retv.ll_lat = latll
+        retv.ur_lon = lonur
+        retv.ur_lat = latur
+
+        self.shape = region.shape
+
+        retv.filled = True
+        retv.resolution = self.resolution
+
+        return retv
+
+    def convert2pps(self):
+        """Converts the NWCSAF/MSG Cloud Mask to the PPS format,
+        in order to have consistency in output format between PPS and MSG.
+        """
+        import epshdf
+        retv = PpsCloudMask()        
+        retv.region = epshdf.SafRegion()
+        retv.region.xsize = self.num_of_columns
+        retv.region.ysize = self.num_of_lines
+        retv.region.id = self.region_name
+        retv.region.pcs_id = self.projection_name
+
+        retv.region.pcs_def = pcs_def_from_region(self.area)
+        retv.region.area_extent = self.area.area_extent
+        retv.satellite_id = self.satid
+
+        retv.CMa_lut = pps_luts('CMa')
+        retv.CMa_DUST_lut = pps_luts('CMa_DUST')
+        retv.CMa_VOLCANIC_lut = pps_luts('CMa_VOLCANIC')
+
+        retv.CMa_des = "MSG SEVIRI Cloud Mask"
+        retv.CMa_DUST_des = 'MSG SEVIRI Cloud Mask DUST'
+        retv.CMa_QUALITY_des = 'MSG SEVIRI bitwise quality/processing flags'
+        retv.CMa_VOLCANIC_des = 'MSG SEVIRI Cloud Mask VOLCANIC'
+
+        retv.CMa = self.CMa.astype('B')
+        retv.CMa_DUST = self.CMa_DUST.astype('B')
+        retv.CMa_QUALITY = self.CMa_QUALITY.astype('B')
+        retv.CMa_VOLCANIC = self.CMa_VOLCANIC.astype('B')
+
+        return retv
+
    def convert2nordrad(self):
        """Wrap this channel in a NordRadCType for NORDRAD export.

        The actual conversion logic lives in NordRadCType; this method
        only hands over the channel instance.
        """
        return NordRadCType(self)
+
+#-----------------------------------------------------------------------
+
+# ------------------------------------------------------------------
+
class MsgNwcsafData(object):

    """NWCSAF/MSG data layer: one HDF5 dataset plus its scaling metadata.
    """

    def __init__(self):
        # Raw array; calibrated value = data * scaling_factor + offset.
        self.data = None
        self.scaling_factor, self.offset = 1, 0
        # Grid dimensions of the dataset.
        self.num_of_lines = self.num_of_columns = 0
        # Identification attributes copied from the HDF5 dataset.
        self.product = self.id = ""
+
+class MsgNwcsafClass(mpop.channel.GenericChannel):
+
+    """NWCSAF/MSG data structure as retrieved from HDF5
+    file. Resolution sets the nominal resolution of the data.
+    """
+
+    def __init__(self, product, resolution=None):
+        mpop.channel.GenericChannel.__init__(self, product)
+        self.filled = False
+        self.name = product
+        self.var_names = None
+        self.resolution = resolution
+        self.package = ""
+        self.saf = ""
+        self.product_name = ""
+        self.num_of_columns = 0
+        self.num_of_lines = 0
+        self.projection_name = ""
+        self.pcs_def = ""
+        self.xscale = 0
+        self.yscale = 0
+        self.ll_lon = 0.0
+        self.ll_lat = 0.0
+        self.ur_lon = 0.0
+        self.ur_lat = 0.0
+        self.region_name = ""
+        self.cfac = 0
+        self.lfac = 0
+        self.coff = 0
+        self.loff = 0
+        self.nb_param = 0
+        self.gp_sc_id = 0
+        self.image_acquisition_time = 0
+        self.spectral_channel_id = 0
+        self.nominal_product_time = 0
+        self.sgs_product_quality = 0
+        self.sgs_product_completeness = 0
+        self.product_algorithm_version = ""
+
+        if product == "CloudMask":
+            self.CMa = None
+            self.CMa_DUST = None
+            self.CMa_QUALITY = None
+            self.CMa_VOLCANIC = None
+        elif product == "CT":
+            self.CT = None
+            self.CT_PHASE = None
+            self.CT_QUALITY = None
+        elif product == "CTTH":
+            self.CTTH_TEMPER = None
+            self.CTTH_HEIGHT = None
+            self.CTTH_PRESS = None
+            self.CTTH_EFFECT = None
+            self.CTTH_QUALITY = None
+        elif product == "CRR":
+            self.crr = None
+            self.crr_accum = None
+            self.crr_intensity = None
+            self.crr_quality = None
+            self.processing_flags = None
+        elif product == "PC":
+            self.probability_1 = None
+            self.processing_flags = None
+        elif product == "SPhR":
+            self.sphr_bl = None
+            self.sphr_cape = None
+            self.sphr_diffbl = None
+            self.sphr_diffhl = None
+            self.sphr_diffki = None
+            self.sphr_diffli = None
+            self.sphr_diffml = None
+            self.sphr_diffshw = None
+            self.sphr_difftpw = None
+            self.sphr_hl = None
+            self.sphr_ki = None
+            self.sphr_li = None
+            self.sphr_ml = None
+            self.sphr_quality = None
+            self.sphr_sflag = None
+            self.sphr_shw = None
+            self.sphr_tpw = None
+        elif product == "PCPh":
+            self.pcph_pc = MNone
+            self.pcph_quality = None
+            self.pcph_dataflag = None
+            self.processing_flags = None
+        elif product =="CRPh":
+            self.crph_crr = None
+            self.crph_accum = None
+            self.crph_iqf = None
+            self.crph_quality = None
+            self.crph_dataflag = None
+            self.processing_flags = None
+        else:
+            print "*** ERROR in MsgNWCSAF (nwcsaf_msg.py)"
+            print "    unknown NWCSAF product: ", product
+            quit() 
+
+        self.shape = None
+        self.satid = ""
+        self.qc_straylight = -1
+
+    def __str__(self):
+        return ("'%s: shape %s, resolution %sm'" %
+                (self.name,
+                 self.shape,
+                 self.resolution))
+
+    def is_loaded(self):
+        """Tells if the channel contains loaded data.
+        """
+        return self.filled
+
+    def read(self, filename, calibrate=True):
+        """Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data.
+        """
+        import h5py
+
+        if self.name == "CTTH":
+            self.var_names = ('CTTH_TEMPER', 'CTTH_HEIGHT', 'CTTH_PRESS', 'CTTH_EFFECT', 'CTTH_QUALITY')
+        elif self.name == "CloudType":
+            self.var_names = ('CT', 'CT_PHASE', 'CT_QUALITY')
+        elif self.name == "CloudMask":
+            self.var_names = ('CMa', 'CMa_DUST', 'CMa_QUALITY', 'CMa_VOLCANIC')
+        elif self.name == "SPhR":
+            self.var_names = ('SPhR_BL','SPhR_CAPE','SPhR_HL','SPhR_KI','SPhR_LI','SPhR_ML','SPhR_QUALITY','SPhR_SHW','SPhR_TPW')  
+        else:
+            print "*** ERROR in MsgNWCSAF read (nwcsaf_msg.py)"
+            print "    unknown NWCSAF product: ", product
+            quit()
+            
+        h5f = h5py.File(filename, 'r')
+        # pylint: disable-msg=W0212
+        self.package = h5f.attrs["PACKAGE"]
+        self.saf = h5f.attrs["SAF"]
+        self.product_name = h5f.attrs["PRODUCT_NAME"]
+        self.num_of_columns = h5f.attrs["NC"]
+        self.num_of_lines = h5f.attrs["NL"]
+        self.projection_name = h5f.attrs["PROJECTION_NAME"]
+        self.region_name = h5f.attrs["REGION_NAME"]
+        self.cfac = h5f.attrs["CFAC"]
+        self.lfac = h5f.attrs["LFAC"]
+        self.coff = h5f.attrs["COFF"]
+        self.loff = h5f.attrs["LOFF"]
+        self.nb_param = h5f.attrs["NB_PARAMETERS"]
+        self.gp_sc_id = h5f.attrs["GP_SC_ID"]
+        self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
+        self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
+        self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
+        self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
+        self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
+        self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
+        # pylint: enable-msg=W0212
+        # ------------------------
+
+        for var_name in self.var_names:
+            print "... read hdf5 variable ", var_name
+            h5d = h5f[var_name]
+            var1=MsgNwcsafData()
+            var1.data = h5d[:, :]
+            var1.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+            var1.offset = h5d.attrs["OFFSET"]
+            var1.num_of_lines = h5d.attrs["N_LINES"]
+            var1.num_of_columns = h5d.attrs["N_COLS"]
+            self.shape = (var1.num_of_lines,
+                          var1.num_of_columns)
+            var1.product = h5d.attrs["PRODUCT"]
+            var1.id = h5d.attrs["ID"]
+
+            # copy temporal var1 to self.var_name
+            if calibrate:
+                print "... apply scaling_factor", var1.scaling_factor, " and offset ", var1.offset 
+                setattr(self, var_name,  var1.data*var1.scaling_factor
+                                        +var1.offset )
+            else:
+                setattr(self, var_name,  var1.data)
+
+            # !!! is there a check needed, if the palette exists? !!! 
+            # read 'product'_palette and copy it to self.'product'_palette
+            setattr(self, var_name+"_palette", _get_palette(h5f, var_name) )
+
+        h5f.close()
+
+        self.area = get_area_from_file(filename)
+
+        self.filled = True
+
+    def save(self, filename):
+        """Save the current cloudtype object to hdf *filename*, in pps format.
+        """
+        import h5py
+        cma = self.convert2pps()
+        LOG.info("Saving NWCSAF data as hdf file...")
+        cma.save(filename)
+        h5f = h5py.File(filename, mode="a")
+        h5f.attrs["straylight_contaminated"] = self.qc_straylight
+        h5f.close()
+        LOG.info("Saving NWCSAF data hdf file done !")
+
+    def project(self, coverage):
+        """Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on
+        area give by a pre-registered area-id. Faster version of msg_remap!
+        """
+        LOG.info("Projecting channel %s..." % (self.name))
+
+        region = coverage.out_area
+        dest_area = region.area_id
+
+        retv = MsgNwcsafClass(self.name)
+
+        retv.name = self.name
+        retv.package = self.package
+        retv.saf = self.saf
+        retv.product_name = self.product_name
+        retv.region_name = dest_area
+        retv.cfac = self.cfac
+        retv.lfac = self.lfac
+        retv.coff = self.coff
+        retv.loff = self.loff
+        retv.nb_param = self.nb_param
+        retv.gp_sc_id = self.gp_sc_id
+        retv.image_acquisition_time = self.image_acquisition_time
+        retv.spectral_channel_id = self.spectral_channel_id
+        retv.nominal_product_time = self.nominal_product_time
+        retv.sgs_product_quality = self.sgs_product_quality
+        retv.sgs_product_completeness = self.sgs_product_completeness
+        retv.product_algorithm_version = self.product_algorithm_version
+
+        # loop for reprojecting data, e.g. retv.CMa = coverage.project_array(self.CMa)
+        for var_name in self.var_names:
+            setattr(retv, var_name,  coverage.project_array(getattr(self, var_name)))         
+            # !!! BUG !!! copy palette is missing 
+
+        retv.qc_straylight = self.qc_straylight
+        retv.region_name = dest_area
+        retv.area = region
+        retv.projection_name = region.proj_id
+
+        retv.pcs_def = pcs_def_from_region(region)
+
+        retv.num_of_columns = region.x_size
+        retv.num_of_lines = region.y_size
+        retv.xscale = region.pixel_size_x
+        retv.yscale = region.pixel_size_y
+
+        import pyproj
+        prj = pyproj.Proj(region.proj4_string)
+        aex = region.area_extent
+        lonur, latur = prj(aex[2], aex[3], inverse=True)
+        lonll, latll = prj(aex[0], aex[1], inverse=True)
+        retv.ll_lon = lonll
+        retv.ll_lat = latll
+        retv.ur_lon = lonur
+        retv.ur_lat = latur
+
+        self.shape = region.shape
+
+        retv.filled = True
+        retv.resolution = self.resolution
+
+        return retv
+
+    def convert2pps(self):
+        """Converts the NWCSAF/MSG data set to the PPS format,
+        in order to have consistency in output format between PPS and MSG.
+        """
+        import epshdf
+        retv = PpsCloudMask()
+        retv.region = epshdf.SafRegion()
+        retv.region.xsize = self.num_of_columns
+        retv.region.ysize = self.num_of_lines
+        retv.region.id = self.region_name
+        retv.region.pcs_id = self.projection_name
+
+        retv.region.pcs_def = pcs_def_from_region(self.area)
+        retv.region.area_extent = self.area.area_extent
+        retv.satellite_id = self.satid
+
+        # !!! UH: THIS PART IS TO BE DONE BY SOMEBODY WHO USES PPS !!!
+        # loop for intersting variables
+        for var_name in self.var_names:
+            # get look-up tables, e.g. retv.CMa_lut = pps_luts('CMa')
+            setattr( retv, var_name+"_lut", pps_luts(var_name) )
+            # get describing strings, e.g. retv.CMa_des = "MSG SEVIRI Cloud Mask"
+            setattr( retv, var_name+"_des", pps_description(var_name) )
+
+            # if not processing flag, get astype, e.g. retv.cloudtype = self.cloudtype.astype('B')
+            if var_name.find("QUALITY") != -1 and var_name.find("flag") != -1:
+                setattr( retv, var_name, getattr(self, var_name).astype('B') )
+            elif var_name=="CT_QUALITY" or var_name=="qualityflag":
+                retv.qualityflag = ctype_procflags2pps(self.processing_flags)
+            elif var_name=="CTTH_QUALITY" or var_name=="processingflag":
+                retv.processingflag = ctth_procflags2pps(self.processing_flags)
+            elif var_name=="CMa_QUALITY" or var_name=="QUALITY":
+                print "*** WARNING, no conversion for CMA and SPhR products flags yet!"
+        # !!! UH: THIS PART IS TO BE DONE BY SOMEBODY WHO USES PPS !!!
+
+        return retv
+
+    def convert2nordrad(self):
+        return NordRadCType(self)
+
+#-----------------------------------------------------------------------
+
class MsgCloudTypeData(object):

    """NWCSAF/MSG Cloud Type data layer: one dataset plus its
    scaling metadata.
    """

    def __init__(self):
        # Raw array; calibrated value = data * scaling_factor + offset.
        self.data = None
        self.scaling_factor = 1
        self.offset = 0
        # Grid dimensions, zero until the dataset is read.
        for dim_attr in ("num_of_lines", "num_of_columns"):
            setattr(self, dim_attr, 0)
        # Identification attributes from the HDF5 dataset.
        for id_attr in ("product", "id"):
            setattr(self, id_attr, "")
+
+
class MsgCloudType(mpop.channel.GenericChannel):

    """NWCSAF/MSG Cloud Type data structure as retrieved from an HDF5
    file.  Resolution sets the nominal resolution of the data.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "CloudType")
        self.filled = False
        self.name = "CloudType"
        # --- HDF5 global header attributes, filled by read() ---
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        # Corner coordinates (lower-left / upper-right), set by project().
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        # Geostationary scan geometry coefficients.
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        # Product data arrays and palettes, set by read().
        self.cloudtype = None
        self.processing_flags = None
        self.cloudphase = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.cloudtype_palette = None
        self.cloudphase_palette = None

    def __str__(self):
        # NOTE(review): self.resolution is not set in __init__;
        # presumably inherited from GenericChannel -- confirm.
        return ("'%s: shape %s, resolution %sm'" %
                (self.name,
                 self.cloudtype.shape,
                 self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

# ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Reader for the NWCSAF/MSG cloudtype. Use *filename* to read data.

        The *calibrate* flag is accepted for interface consistency but
        not used: the cloud type datasets are copied as raw counts.
        """
        import h5py

        self.cloudtype = MsgCloudTypeData()
        self.processing_flags = MsgCloudTypeData()
        self.cloudphase = MsgCloudTypeData()

        LOG.debug("Filename = <" + str(filename) + ">")
        h5f = h5py.File(filename, 'r')
        # --- global header attributes ---
        # pylint: disable-msg=W0212
        self.package = h5f.attrs["PACKAGE"]
        self.saf = h5f.attrs["SAF"]
        self.product_name = h5f.attrs["PRODUCT_NAME"]
        self.num_of_columns = h5f.attrs["NC"]
        self.num_of_lines = h5f.attrs["NL"]
        self.projection_name = h5f.attrs["PROJECTION_NAME"]
        self.region_name = h5f.attrs["REGION_NAME"]
        self.cfac = h5f.attrs["CFAC"]
        self.lfac = h5f.attrs["LFAC"]
        self.coff = h5f.attrs["COFF"]
        self.loff = h5f.attrs["LOFF"]
        self.nb_param = h5f.attrs["NB_PARAMETERS"]
        self.gp_sc_id = h5f.attrs["GP_SC_ID"]
        self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
        self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
        self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
        self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
        self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
        self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
        # pylint: enable-msg=W0212
        # ------------------------

        # The cloudtype data
        h5d = h5f['CT']
        self.cloudtype.data = h5d[:, :]
        self.cloudtype.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.cloudtype.offset = h5d.attrs["OFFSET"]
        self.cloudtype.num_of_lines = h5d.attrs["N_LINES"]
        self.cloudtype.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.cloudtype.num_of_lines,
                      self.cloudtype.num_of_columns)
        self.cloudtype.product = h5d.attrs["PRODUCT"]
        self.cloudtype.id = h5d.attrs["ID"]
        self.cloudtype_palette = _get_palette(h5f, 'CT') / 255.0
        # ------------------------

        # The cloud phase data
        h5d = h5f['CT_PHASE']
        self.cloudphase.data = h5d[:, :]
        self.cloudphase.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.cloudphase.offset = h5d.attrs["OFFSET"]
        self.cloudphase.num_of_lines = h5d.attrs["N_LINES"]
        self.cloudphase.num_of_columns = h5d.attrs["N_COLS"]
        self.cloudphase.product = h5d.attrs["PRODUCT"]
        self.cloudphase.id = h5d.attrs["ID"]
        # NOTE(review): unlike the cloudtype palette this one is not
        # normalised by 255 -- confirm that is intentional.
        self.cloudphase_palette = _get_palette(h5f, 'CT_PHASE')

        # ------------------------

        # The cloudtype processing/quality flags
        h5d = h5f['CT_QUALITY']
        self.processing_flags.data = h5d[:, :]
        self.processing_flags.scaling_factor = \
            h5d.attrs["SCALING_FACTOR"]
        self.processing_flags.offset = h5d.attrs["OFFSET"]
        self.processing_flags.num_of_lines = h5d.attrs["N_LINES"]
        self.processing_flags.num_of_columns = h5d.attrs["N_COLS"]
        self.processing_flags.product = h5d.attrs["PRODUCT"]
        self.processing_flags.id = h5d.attrs["ID"]
        # ------------------------

        h5f.close()

        # Collapse the wrapper objects to the bare arrays.
        self.cloudtype = self.cloudtype.data
        self.cloudphase = self.cloudphase.data
        self.processing_flags = self.processing_flags.data

        self.area = get_area_from_file(filename)

        self.filled = True

    def save(self, filename):
        """Save the current cloudtype object to hdf *filename*, in pps format.
        """
        import h5py
        ctype = self.convert2pps()
        LOG.info("Saving CType hdf file...")
        ctype.save(filename)
        # Add the straylight QC flag as a file-level attribute.
        h5f = h5py.File(filename, mode="a")
        h5f.attrs["straylight_contaminated"] = self.qc_straylight
        h5f.close()
        LOG.info("Saving CType hdf file done !")

    def project(self, coverage):
        """Remaps the NWCSAF/MSG Cloud Type to cartographic map-projection on
        area give by a pre-registered area-id. Faster version of msg_remap!
        """
        LOG.info("Projecting channel %s..." % (self.name))

        region = coverage.out_area
        dest_area = region.area_id

        retv = MsgCloudType()

        retv.name = self.name
        retv.package = self.package
        retv.saf = self.saf
        retv.product_name = self.product_name
        retv.region_name = dest_area
        retv.cfac = self.cfac
        retv.lfac = self.lfac
        retv.coff = self.coff
        retv.loff = self.loff
        retv.nb_param = self.nb_param
        retv.gp_sc_id = self.gp_sc_id
        retv.image_acquisition_time = self.image_acquisition_time
        retv.spectral_channel_id = self.spectral_channel_id
        retv.nominal_product_time = self.nominal_product_time
        retv.sgs_product_quality = self.sgs_product_quality
        retv.sgs_product_completeness = self.sgs_product_completeness
        retv.product_algorithm_version = self.product_algorithm_version

        # Reproject the data arrays; palettes are carried over unchanged.
        retv.cloudtype = coverage.project_array(self.cloudtype)
        retv.cloudtype_palette = self.cloudtype_palette

        retv.cloudphase = coverage.project_array(self.cloudphase)
        retv.cloudphase_palette = self.cloudphase_palette

        retv.processing_flags = \
            coverage.project_array(self.processing_flags)

        retv.qc_straylight = self.qc_straylight
        retv.region_name = dest_area
        retv.area = region
        retv.projection_name = region.proj_id

        retv.pcs_def = pcs_def_from_region(region)

        retv.num_of_columns = region.x_size
        retv.num_of_lines = region.y_size
        retv.xscale = region.pixel_size_x
        retv.yscale = region.pixel_size_y

        # Corner lon/lat from the area extent (projection coordinates).
        import pyproj
        prj = pyproj.Proj(region.proj4_string)
        aex = region.area_extent
        lonur, latur = prj(aex[2], aex[3], inverse=True)
        lonll, latll = prj(aex[0], aex[1], inverse=True)
        retv.ll_lon = lonll
        retv.ll_lat = latll
        retv.ur_lon = lonur
        retv.ur_lat = latur

        # BUGFIX: the shape was set on self instead of the returned
        # channel, leaving retv.shape as None.
        retv.shape = region.shape

        retv.filled = True
        retv.resolution = self.resolution

        return retv
+
# NOTE(review): convert2nordrad was left out for MsgCloudType -- restore
# the commented method below if NORDRAD export of cloud type products is
# still required.

#    def convert2nordrad(self):
#        return NordRadCType(self)
+
+
class MsgCTTHData(object):

    """CTTH data object: one dataset plus its scaling metadata."""

    def __init__(self):
        # Raw array; calibrated value = data * scaling_factor + offset.
        self.data = None
        self.scaling_factor = 1
        self.offset = 0
        # Grid dimensions of the dataset.
        self.num_of_lines = self.num_of_columns = 0
        # Identification attributes from the HDF5 dataset.
        self.product = self.id = ""
+
+
class MsgCTTH(mpop.channel.GenericChannel):

    """CTTH (Cloud Top Temperature and Height) channel.

    *resolution* sets the nominal resolution of the data.
    """

    def __init__(self, resolution=None):
        mpop.channel.GenericChannel.__init__(self, "CTTH")
        self.filled = False
        self.name = "CTTH"
        self.resolution = resolution
        # --- HDF5 global header attributes, filled by read() ---
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.region_name = ""
        # Geostationary scan geometry coefficients.
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        self.cloudiness = None  # Effective cloudiness
        self.processing_flags = None
        self.height = None
        self.temperature = None
        self.pressure = None
        # ROBUSTNESS: initialise shape so __str__ works before read().
        self.shape = None
        self.satid = ""

    def __str__(self):
        return ("'%s: shape %s, resolution %sm'" %
                (self.name,
                 self.shape,
                 self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

    def read(self, filename, calibrate=True):
        """Read the CTTH product from the HDF5 file *filename*.

        With *calibrate* True, temperature/pressure/height counts are
        converted to physical values using each dataset's
        SCALING_FACTOR and OFFSET attributes.
        """
        import h5py

        self.cloudiness = MsgCTTHData()  # Effective cloudiness
        self.temperature = MsgCTTHData()
        self.height = MsgCTTHData()
        self.pressure = MsgCTTHData()
        self.processing_flags = MsgCTTHData()

        h5f = h5py.File(filename, 'r')

        # The header
        # pylint: disable-msg=W0212
        self.package = h5f.attrs["PACKAGE"]
        self.saf = h5f.attrs["SAF"]
        self.product_name = h5f.attrs["PRODUCT_NAME"]
        self.num_of_columns = h5f.attrs["NC"]
        self.num_of_lines = h5f.attrs["NL"]
        self.projection_name = h5f.attrs["PROJECTION_NAME"]
        self.region_name = h5f.attrs["REGION_NAME"]
        self.cfac = h5f.attrs["CFAC"]
        self.lfac = h5f.attrs["LFAC"]
        self.coff = h5f.attrs["COFF"]
        self.loff = h5f.attrs["LOFF"]
        self.nb_param = h5f.attrs["NB_PARAMETERS"]
        self.gp_sc_id = h5f.attrs["GP_SC_ID"]
        self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
        self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
        self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
        self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
        self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
        self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
        # pylint: enable-msg=W0212
        # ------------------------

        # The CTTH cloudiness data (kept as raw counts).
        h5d = h5f['CTTH_EFFECT']
        self.cloudiness.data = h5d[:, :]
        self.cloudiness.scaling_factor = \
            h5d.attrs["SCALING_FACTOR"]
        self.cloudiness.offset = h5d.attrs["OFFSET"]
        self.cloudiness.num_of_lines = h5d.attrs["N_LINES"]
        self.cloudiness.num_of_columns = h5d.attrs["N_COLS"]
        self.cloudiness.product = h5d.attrs["PRODUCT"]
        self.cloudiness.id = h5d.attrs["ID"]

#     self.cloudiness.data = np.ma.masked_equal(self.cloudiness.data, 255)
#      self.cloudiness.data = np.ma.masked_equal(self.cloudiness.data, 0)
        self.cloudiness_palette = _get_palette(h5f, 'CTTH_EFFECT') / 255.0

        # ------------------------

        # The CTTH temperature data
        h5d = h5f['CTTH_TEMPER']
        self.temperature.data = h5d[:, :]
        self.temperature.scaling_factor = \
            h5d.attrs["SCALING_FACTOR"]
        self.temperature.offset = h5d.attrs["OFFSET"]
        self.temperature.num_of_lines = h5d.attrs["N_LINES"]
        # BUGFIX: num_of_columns must be assigned before computing the
        # shape (it was previously still 0 at that point).
        self.temperature.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.temperature.num_of_lines,
                      self.temperature.num_of_columns)
        self.temperature.product = h5d.attrs["PRODUCT"]
        self.temperature.id = h5d.attrs["ID"]

#     self.temperature.data = np.ma.masked_equal(self.temperature.data, 0)
        if calibrate:
            self.temperature = (self.temperature.data *
                                self.temperature.scaling_factor +
                                self.temperature.offset)
        else:
            self.temperature = self.temperature.data
        self.temperature_palette = _get_palette(h5f, 'CTTH_TEMPER') / 255.0

        # ------------------------

        # The CTTH pressure data
        h5d = h5f['CTTH_PRESS']
        self.pressure.data = h5d[:, :]
        self.pressure.scaling_factor = \
            h5d.attrs["SCALING_FACTOR"]
        self.pressure.offset = h5d.attrs["OFFSET"]
        self.pressure.num_of_lines = h5d.attrs["N_LINES"]
        self.pressure.num_of_columns = h5d.attrs["N_COLS"]
        self.pressure.product = h5d.attrs["PRODUCT"]
        self.pressure.id = h5d.attrs["ID"]

#    self.pressure.data = np.ma.masked_equal(self.pressure.data, 255)
#     self.pressure.data = np.ma.masked_equal(self.pressure.data, 0)
        if calibrate:
            self.pressure = (self.pressure.data *
                             self.pressure.scaling_factor +
                             self.pressure.offset)
        else:
            self.pressure = self.pressure.data
        self.pressure_palette = _get_palette(h5f, 'CTTH_PRESS') / 255.0

        # ------------------------

        # The CTTH height data
        h5d = h5f['CTTH_HEIGHT']
        self.height.data = h5d[:, :]
        self.height.scaling_factor = \
            h5d.attrs["SCALING_FACTOR"]
        self.height.offset = h5d.attrs["OFFSET"]
        self.height.num_of_lines = h5d.attrs["N_LINES"]
        self.height.num_of_columns = h5d.attrs["N_COLS"]
        self.height.product = h5d.attrs["PRODUCT"]
        self.height.id = h5d.attrs["ID"]

#        self.height.data = np.ma.masked_equal(self.height.data, 255)
#        self.height.data = np.ma.masked_equal(self.height.data, 0)
        if calibrate:
            self.height = (self.height.data *
                           self.height.scaling_factor +
                           self.height.offset)
        else:
            self.height = self.height.data
        self.height_palette = _get_palette(h5f, 'CTTH_HEIGHT') / 255.0

        # ------------------------

        # The CTTH processing/quality flags
        h5d = h5f['CTTH_QUALITY']
        self.processing_flags.data = h5d[:, :]
        self.processing_flags.scaling_factor = \
            h5d.attrs["SCALING_FACTOR"]
        self.processing_flags.offset = h5d.attrs["OFFSET"]
        self.processing_flags.num_of_lines = \
            h5d.attrs["N_LINES"]
        self.processing_flags.num_of_columns = \
            h5d.attrs["N_COLS"]
        self.processing_flags.product = h5d.attrs["PRODUCT"]
        self.processing_flags.id = h5d.attrs["ID"]

        # Zero processing flags mean "no data": mask them out.
        self.processing_flags = \
            np.ma.masked_equal(self.processing_flags.data, 0)

        h5f.close()

        self.shape = self.height.shape

        self.area = get_area_from_file(filename)

        self.filled = True

    def save(self, filename):
        """Save the current CTTH channel to HDF5 format.
        """
        ctth = self.convert2pps()
        LOG.info("Saving CTTH hdf file...")
        ctth.save(filename)
        LOG.info("Saving CTTH hdf file done !")

    def project(self, coverage):
        """Project the current CTTH channel along the *coverage*
        and return the remapped channel.
        """
        dest_area = coverage.out_area
        dest_area_id = dest_area.area_id

        retv = MsgCTTH()

        # Reproject the data arrays; palettes are carried over unchanged.
        retv.temperature = coverage.project_array(self.temperature)
        retv.height = coverage.project_array(self.height)
        retv.pressure = coverage.project_array(self.pressure)
        #retv.cloudiness = coverage.project_array(self.cloudiness)
        retv.processing_flags = \
            coverage.project_array(self.processing_flags)

        retv.height_palette = self.height_palette
        retv.pressure_palette = self.pressure_palette
        retv.temperature_palette = self.temperature_palette

        retv.area = dest_area
        retv.region_name = dest_area_id
        retv.projection_name = dest_area.proj_id
        retv.num_of_columns = dest_area.x_size
        retv.num_of_lines = dest_area.y_size

        retv.shape = dest_area.shape

        retv.name = self.name
        retv.resolution = self.resolution
        retv.filled = True

        return retv
+
+# ----------------------------------------
+
+
class MsgPCData(object):

    """NWCSAF/MSG Precipitating Clouds data layer: one dataset plus
    its scaling metadata.
    """

    def __init__(self):
        # Raw array, calibration, grid size and dataset identification.
        self.__dict__.update(
            data=None,
            scaling_factor=1,
            offset=0,
            num_of_lines=0,
            num_of_columns=0,
            product="",
            id="",
        )
+
+
class MsgPC(mpop.channel.GenericChannel):

    """NWCSAF/MSG Precipitating Clouds (PC) product read from an HDF5 file.

    After a successful :meth:`read`, ``probability_1`` holds the
    precipitation probability field (calibrated to physical values when
    requested) and ``processing_flags`` a masked array of the product
    quality flags.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "PC")
        self.filled = False
        self.name = "PC"
        # Product / processing metadata copied from the HDF5 global attrs.
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        # Projection / navigation parameters.
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        # Data layers, filled by read().
        self.probability_1 = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1

    def __str__(self):
        return ("'%s: shape %s, resolution %sm'" %
                (self.name,
                 self.probability_1.shape,
                 self.resolution))

    def is_loaded(self):
        """Tell whether the channel contains loaded data.
        """
        return self.filled

    # ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Read the NWCSAF/MSG precipitating clouds product from *filename*.

        When *calibrate* is true, the raw counts are converted to
        physical values using the scaling factor and offset stored in
        the file.
        """
        import h5py

        self.probability_1 = MsgPCData()
        self.processing_flags = MsgPCData()

        # Context manager guarantees the file is closed even if an
        # attribute is missing (the previous code leaked the handle on
        # exceptions).
        with h5py.File(filename, 'r') as h5f:
            self.package = h5f.attrs["PACKAGE"]
            self.saf = h5f.attrs["SAF"]
            self.product_name = h5f.attrs["PRODUCT_NAME"]
            self.num_of_columns = h5f.attrs["NC"]
            self.num_of_lines = h5f.attrs["NL"]
            self.projection_name = h5f.attrs["PROJECTION_NAME"]
            self.region_name = h5f.attrs["REGION_NAME"]
            self.cfac = h5f.attrs["CFAC"]
            self.lfac = h5f.attrs["LFAC"]
            self.coff = h5f.attrs["COFF"]
            self.loff = h5f.attrs["LOFF"]
            self.nb_param = h5f.attrs["NB_PARAMETERS"]
            self.gp_sc_id = h5f.attrs["GP_SC_ID"]
            self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
            self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
            self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
            self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
            self.sgs_product_completeness = \
                h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
            self.product_algorithm_version = \
                h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
            # ------------------------

            # The precipitating clouds data.
            h5d = h5f['PC_PROB1']
            self.probability_1.data = h5d[:, :]
            self.probability_1.scaling_factor = h5d.attrs["SCALING_FACTOR"]
            self.probability_1.offset = h5d.attrs["OFFSET"]
            self.probability_1.num_of_lines = h5d.attrs["N_LINES"]
            self.probability_1.num_of_columns = h5d.attrs["N_COLS"]
            self.shape = (self.probability_1.num_of_lines,
                          self.probability_1.num_of_columns)
            self.probability_1.product = h5d.attrs["PRODUCT"]
            self.probability_1.id = h5d.attrs["ID"]
            if calibrate:
                # physical value = raw * scaling_factor + offset
                self.probability_1 = (self.probability_1.data *
                                      self.probability_1.scaling_factor +
                                      self.probability_1.offset)
            else:
                self.probability_1 = self.probability_1.data
            self.probability_1_palette = _get_palette(h5f, 'PC_PROB1') / 255.0

            # ------------------------

            # The PC processing/quality flags.  Zero counts are masked out.
            h5d = h5f['PC_QUALITY']
            self.processing_flags.data = h5d[:, :]
            self.processing_flags.scaling_factor = \
                h5d.attrs["SCALING_FACTOR"]
            self.processing_flags.offset = h5d.attrs["OFFSET"]
            self.processing_flags.num_of_lines = h5d.attrs["N_LINES"]
            self.processing_flags.num_of_columns = h5d.attrs["N_COLS"]
            self.processing_flags.product = h5d.attrs["PRODUCT"]
            self.processing_flags.id = h5d.attrs["ID"]
            self.processing_flags = np.ma.masked_equal(
                self.processing_flags.data, 0)

        self.area = get_area_from_file(filename)

        self.filled = True

    def project(self, coverage):
        """Resample the PC channel onto the area given by *coverage*.

        Returns a new, filled ``MsgPC``; *self* is left untouched.
        """
        dest_area = coverage.out_area
        dest_area_id = dest_area.area_id

        retv = MsgPC()

        retv.probability_1 = coverage.project_array(self.probability_1)
        retv.processing_flags = \
            coverage.project_array(self.processing_flags)

        # Palettes are projection independent.
        retv.probability_1_palette = self.probability_1_palette

        retv.area = dest_area
        retv.region_name = dest_area_id
        retv.projection_name = dest_area.proj_id
        retv.num_of_columns = dest_area.x_size
        retv.num_of_lines = dest_area.y_size

        retv.shape = dest_area.shape

        retv.name = self.name
        retv.resolution = self.resolution
        retv.filled = True

        return retv
+
+
+# ------------------------------------------------------------------
+
+
def get_bit_from_flags(arr, nbit):
    """Extract bit number *nbit* from every element of *arr*.

    Returns an int8 (``'b'``) array holding 0 or 1 per element.
    """
    return ((arr >> nbit) & 1).astype('b')
+
# NEW: section below added at the PyTroll workshop, Copenhagen 2014
+
class MsgCRRData(object):

    """Container for one NWCSAF/MSG Convective Rain Rate data layer."""

    def __init__(self):
        # Raw counts as read from the HDF5 dataset.
        self.data = None
        # Calibration: physical value = data * scaling_factor + offset.
        self.scaling_factor = 1
        self.offset = 0
        # Product identification strings taken from the file.
        self.product = ""
        self.id = ""
        # Grid dimensions of the layer.
        self.num_of_lines = 0
        self.num_of_columns = 0
+
+
class MsgCRR(mpop.channel.GenericChannel):

    """NWCSAF/MSG Convective Rain Rate (CRR) product read from an HDF5 file.

    Resolution sets the nominal resolution of the data.  After
    :meth:`read`, ``crr``, ``crr_accum``, ``crr_intensity`` and
    ``crr_quality`` hold the product arrays and the matching
    ``*_palette`` attributes their colour palettes.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "CRR")
        self.filled = False
        self.name = "CRR"
        # Product / processing metadata copied from the HDF5 global attrs.
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        # Projection / navigation parameters.
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        # Data layers and palettes, filled by read().
        self.crr = None
        self.crr_accum = None
        self.crr_intensity = None
        self.crr_quality = None
        self.crr_dataflag = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.crr_palette = None
        self.crr_accum_palette = None
        self.crr_intensity_palette = None
        self.crr_quality_palette = None

    def __str__(self):
        return ("'%s: shape %s, resolution %sm'" %
                (self.name,
                 self.crr.shape,
                 self.resolution))

    def is_loaded(self):
        """Tell whether the channel contains loaded data.
        """
        return self.filled

    @staticmethod
    def _read_layer(h5f, key, calibrate):
        """Read dataset *key* from the open HDF5 file *h5f*.

        Returns ``(data, dims)``: *data* is calibrated to physical
        values (``raw * scaling_factor + offset``) when *calibrate* is
        true, otherwise the raw counts; *dims* is the
        ``(N_LINES, N_COLS)`` pair recorded in the file.
        """
        h5d = h5f[key]
        raw = h5d[:, :]
        dims = (h5d.attrs["N_LINES"], h5d.attrs["N_COLS"])
        if calibrate:
            data = raw * h5d.attrs["SCALING_FACTOR"] + h5d.attrs["OFFSET"]
        else:
            data = raw
        return data, dims

    # ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Read the NWCSAF/MSG convective rain rate product from *filename*.

        When *calibrate* is true, raw counts are converted to physical
        values using the scaling factor and offset stored in the file.
        """
        import h5py

        self.processing_flags = MsgCRRData()

        LOG.debug("Filename = <" + str(filename) + ">")
        # Context manager guarantees the file is closed even if an
        # attribute is missing (the previous code leaked the handle on
        # exceptions).
        with h5py.File(filename, 'r') as h5f:
            self.package = h5f.attrs["PACKAGE"]
            self.saf = h5f.attrs["SAF"]
            self.product_name = h5f.attrs["PRODUCT_NAME"]
            self.num_of_columns = h5f.attrs["NC"]
            self.num_of_lines = h5f.attrs["NL"]
            self.projection_name = h5f.attrs["PROJECTION_NAME"]
            self.region_name = h5f.attrs["REGION_NAME"]
            self.cfac = h5f.attrs["CFAC"]
            self.lfac = h5f.attrs["LFAC"]
            self.coff = h5f.attrs["COFF"]
            self.loff = h5f.attrs["LOFF"]
            self.nb_param = h5f.attrs["NB_PARAMETERS"]
            self.gp_sc_id = h5f.attrs["GP_SC_ID"]
            self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
            self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
            self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
            self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
            self.sgs_product_completeness = \
                h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
            self.product_algorithm_version = \
                h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
            # ------------------------

            # The four CRR data layers and their palettes.  The channel
            # shape is taken from the main CRR layer.
            self.crr, self.shape = self._read_layer(h5f, 'CRR', calibrate)
            self.crr_palette = _get_palette(h5f, 'CRR') / 255.0

            self.crr_accum, _ = self._read_layer(h5f, 'CRR_ACCUM', calibrate)
            self.crr_accum_palette = _get_palette(h5f, 'CRR_ACCUM') / 255.0

            self.crr_intensity, _ = self._read_layer(
                h5f, 'CRR_INTENSITY', calibrate)
            self.crr_intensity_palette = \
                _get_palette(h5f, 'CRR_INTENSITY') / 255.0

            self.crr_quality, _ = self._read_layer(
                h5f, 'CRR_QUALITY', calibrate)
            # NOTE: the quality palette is kept in the raw 0-255 range
            # (no normalisation), unlike the other palettes.
            self.crr_quality_palette = _get_palette(h5f, 'CRR_QUALITY')

        self.area = get_area_from_file(filename)

        self.filled = True

    def save(self, filename):
        """Save the current CRR object to hdf *filename*, in pps format.
        """
        import h5py
        ctype = self.convert2pps()
        LOG.info("Saving CRR hdf file...")
        ctype.save(filename)
        # Re-open the written file to append the straylight quality flag.
        h5f = h5py.File(filename, mode="a")
        h5f.attrs["straylight_contaminated"] = self.qc_straylight
        h5f.close()
        LOG.info("Saving CRR hdf file done !")

    def project(self, coverage):
        """Remaps the NWCSAF/MSG CRR to cartographic map-projection on
        area give by a pre-registered area-id. Faster version of msg_remap!

        Returns a new, filled ``MsgCRR``; *self* is left untouched.
        """
        LOG.info("Projecting channel %s..." % (self.name))

        region = coverage.out_area
        dest_area = region.area_id

        retv = MsgCRR()

        # Metadata carried over unchanged from the source channel.
        retv.name = self.name
        retv.package = self.package
        retv.saf = self.saf
        retv.product_name = self.product_name
        retv.region_name = dest_area
        retv.cfac = self.cfac
        retv.lfac = self.lfac
        retv.coff = self.coff
        retv.loff = self.loff
        retv.nb_param = self.nb_param
        retv.gp_sc_id = self.gp_sc_id
        retv.image_acquisition_time = self.image_acquisition_time
        retv.spectral_channel_id = self.spectral_channel_id
        retv.nominal_product_time = self.nominal_product_time
        retv.sgs_product_quality = self.sgs_product_quality
        retv.sgs_product_completeness = self.sgs_product_completeness
        retv.product_algorithm_version = self.product_algorithm_version

        # Remap the data layers; palettes are projection independent.
        retv.crr = coverage.project_array(self.crr)
        retv.crr_palette = self.crr_palette

        retv.crr_accum = coverage.project_array(self.crr_accum)
        retv.crr_accum_palette = self.crr_accum_palette

        retv.crr_intensity = coverage.project_array(self.crr_intensity)
        retv.crr_intensity_palette = self.crr_intensity_palette

        retv.crr_quality = coverage.project_array(self.crr_quality)
        retv.crr_quality_palette = self.crr_quality_palette

        retv.qc_straylight = self.qc_straylight
        retv.area = region
        retv.projection_name = region.proj_id

        retv.pcs_def = pcs_def_from_region(region)

        retv.num_of_columns = region.x_size
        retv.num_of_lines = region.y_size
        retv.xscale = region.pixel_size_x
        retv.yscale = region.pixel_size_y

        # Corner coordinates in lon/lat, derived from the target extent.
        import pyproj
        prj = pyproj.Proj(region.proj4_string)
        aex = region.area_extent
        lonur, latur = prj(aex[2], aex[3], inverse=True)
        lonll, latll = prj(aex[0], aex[1], inverse=True)
        retv.ll_lon = lonll
        retv.ll_lat = latll
        retv.ur_lon = lonur
        retv.ur_lat = latur

        # BUGFIX: the shape belongs to the projected object.  The
        # original code set self.shape here, mutating the *source*
        # channel (compare MsgPC.project, which sets retv.shape).
        retv.shape = region.shape

        retv.filled = True
        retv.resolution = self.resolution

        return retv
+
+
+#    def convert2nordrad(self):
+#        return NordRadCType(self)
+
class MsgSPhRData(object):

    """Container for one NWCSAF/MSG SPhR (SEVIRI Physical Retrieval)
    data layer.

    The original docstring said "Convective Rain Rate" — a copy-paste
    error from ``MsgCRRData``; this class is used by ``MsgSPhR``.
    """

    def __init__(self):
        # Raw counts as read from the HDF5 dataset.
        self.data = None
        # Calibration: physical value = data * scaling_factor + offset.
        self.scaling_factor = 1
        self.offset = 0
        # Grid dimensions of the layer.
        self.num_of_lines = 0
        self.num_of_columns = 0
        # Product identification strings taken from the file.
        self.product = ""
        self.id = ""
+
+
+class MsgSPhR(mpop.channel.GenericChannel):
+
+    """NWCSAF/MSG SPhR data structure as retrieved from HDF5
+    file. Resolution sets the nominal resolution of the data.
+    Palette now missing
+    """
+
+    def __init__(self):
+        mpop.channel.GenericChannel.__init__(self, "SPhR")
+        self.filled = False
+        self.name = "SPhR"
+#       self.resolution = resolution
+        self.package = ""
+        self.saf = ""
+        self.product_name = ""
+        self.num_of_columns = 0
+        self.num_of_lines = 0
+        self.projection_name = ""
+        self.pcs_def = ""
+        self.xscale = 0
+        self.yscale = 0
+        self.ll_lon = 0.0
+        self.ll_lat = 0.0
+        self.ur_lon = 0.0
+        self.ur_lat = 0.0
+        self.region_name = ""
+        self.cfac = 0
+        self.lfac = 0
+        self.coff = 0
+        self.loff = 0
+        self.nb_param = 0
+        self.gp_sc_id = 0
+        self.image_acquisition_time = 0
+        self.spectral_channel_id = 0
+        self.nominal_product_time = 0
+        self.sgs_product_quality = 0
+        self.sgs_product_completeness = 0
+        self.product_algorithm_version = ""
+        self.sphr = None
+        self.sphr_bl = None
+        self.sphr_cape = None
+        self.sphr_diffbl = None
+        self.sphr_diffhl = None
+        self.sphr_diffki = None
+        self.sphr_diffli = None
+        self.sphr_diffml = None
+        self.sphr_diffshw = None
+        self.sphr_difftpw = None
+        self.sphr_hl = None
+        self.sphr_ki = None
+        self.sphr_li = None
+        self.sphr_ml = None
+        self.sphr_quality = None
+        self.sphr_sflag = None
+        self.sphr_shw = None
+        self.sphr_tpw = None
+        self.processing_flags = None
+        self.shape = None
+        self.satid = ""
+        self.qc_straylight = -1
+        self.sphr = None
+        self.sphr_bl_palette = None
+        self.sphr_cape_palette = None
+        self.sphr_diffbl_palette = None
+        self.sphr_diffhl_palette = None
+        self.sphr_diffki_palette = None
+        self.sphr_diffli_palette = None
+        self.sphr_diffml_palette = None
+        self.sphr_diffshw_palette = None
+        self.sphr_difftpw_palette = None
+        self.sphr_hl_palette = None
+        self.sphr_ki_palette = None
+        self.sphr_li_palette = None
+        self.sphr_ml_palette = None
+        self.sphr_quality_palette = None
+        self.sphr_sflag_palette = None
+        self.sphr_shw_palette = None
+        self.sphr_tpw_palette = None
+        
+    def __str__(self):
+        return ("'%s: shape %s, resolution %sm'" %
+                (self.name,
+                 self.sphr_bl.shape,
+                 self.resolution))
+
    def is_loaded(self):
        """Tell whether the channel contains loaded data
        (``filled`` is set by a successful read).
        """
        return self.filled
+
+# ------------------------------------------------------------------
+    def read(self, filename, calibrate=True):
+        """Reader for the . Use *filename* to read data.
+        """
+        import h5py
+
+# Erste Zeile notwendig?
+        self.sphr = MsgSPhRData()
+        self.sphr_bl = MsgSPhRData()
+        self.sphr_cape = MsgSPhRData()
+        self.sphr_diffbl = MsgSPhRData()
+        self.sphr_diffhl = MsgSPhRData()
+        self.sphr_diffki = MsgSPhRData()
+        self.sphr_diffli = MsgSPhRData()
+        self.sphr_diffml = MsgSPhRData()
+        self.sphr_diffshw = MsgSPhRData()
+        self.sphr_difftpw = MsgSPhRData()
+        self.sphr_hl = MsgSPhRData()
+        self.sphr_ki = MsgSPhRData()
+        self.sphr_li = MsgSPhRData()
+        self.sphr_ml = MsgSPhRData()
+        self.sphr_quality = MsgSPhRData()
+        self.sphr_sflag = MsgSPhRData()
+        self.sphr_shw = MsgSPhRData()
+        self.sphr_tpw = MsgSPhRData()
+
+        self.processing_flags = MsgSPhRData()
+
+        LOG.debug("Filename = <" + str(filename) + ">")
+        h5f = h5py.File(filename, 'r')
+        # pylint: disable-msg=W0212
+        self.package = h5f.attrs["PACKAGE"]
+        self.saf = h5f.attrs["SAF"]
+        self.product_name = h5f.attrs["PRODUCT_NAME"]
+        self.num_of_columns = h5f.attrs["NC"]
+        self.num_of_lines = h5f.attrs["NL"]
+        self.projection_name = h5f.attrs["PROJECTION_NAME"]
+        self.region_name = h5f.attrs["REGION_NAME"]
+        self.cfac = h5f.attrs["CFAC"]
+        self.lfac = h5f.attrs["LFAC"]
+        self.coff = h5f.attrs["COFF"]
+        self.loff = h5f.attrs["LOFF"]
+        self.nb_param = h5f.attrs["NB_PARAMETERS"]
+        self.gp_sc_id = h5f.attrs["GP_SC_ID"]
+        self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
+        self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
+        self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
+        self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
+        self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
+        self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
+        # pylint: enable-msg=W0212
+        # ------------------------
+
+        # The SPhR BL data
+        h5d = h5f['SPhR_BL']
+        self.sphr_bl.data = h5d[:, :]
+        self.sphr_bl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_bl.offset = h5d.attrs["OFFSET"]
+        self.sphr_bl.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_bl.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_bl.num_of_lines,
+                      self.sphr_bl.num_of_columns)
+        self.sphr_bl.product = h5d.attrs["PRODUCT"]
+        self.sphr_bl.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_bl.data ) * ( self.sphr_bl.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_bl = mask * (self.sphr_bl.data *
+                                   self.sphr_bl.scaling_factor +
+                                   self.sphr_bl.offset)
+        else:
+            self.sphr_bl = self.sphr_bl.data
+        self.sphr_bl_palette = _get_palette(h5f, 'SPhR_BL') / 255.0
+
+        # The SPhR Cape data
+        h5d = h5f['SPhR_CAPE']
+        self.sphr_cape.data = h5d[:, :]
+        self.sphr_cape.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_cape.offset = h5d.attrs["OFFSET"]
+        self.sphr_cape.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_cape.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_cape.num_of_lines,
+                      self.sphr_cape.num_of_columns)
+        self.sphr_cape.product = h5d.attrs["PRODUCT"]
+
+        self.sphr_cape.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 128 < self.sphr_cape.data )
+            # apply scaling factor and offset
+            self.sphr_cape = mask * (self.sphr_cape.data *
+                                     self.sphr_cape.scaling_factor +
+                                     self.sphr_cape.offset)
+        else:
+            self.sphr_cape = self.sphr_cape.data
+        #self.sphr_cape_palette = _get_palette(h5f, 'SPhR_CAPE') / 255.0
+
+        # The SPhR DIFFBL data
+        h5d = h5f['SPhR_DIFFBL']
+        self.sphr_diffbl.data = h5d[:, :]
+        self.sphr_diffbl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_diffbl.offset = h5d.attrs["OFFSET"]
+        self.sphr_diffbl.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_diffbl.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_diffbl.num_of_lines,
+                      self.sphr_diffbl.num_of_columns)
+        self.sphr_diffbl.product = h5d.attrs["PRODUCT"]
+        self.sphr_diffbl.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_diffbl.data ) * ( self.sphr_diffbl.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_diffbl = mask * (self.sphr_diffbl.data *
+                                       self.sphr_diffbl.scaling_factor +
+                                       self.sphr_diffbl.offset)
+        else:
+            self.sphr_diffbl = self.sphr_diffbl.data
+        self.sphr_diffbl_palette = _get_palette(h5f, 'SPhR_DIFFBL') / 255.0
+
+        # The SPhR DIFFHL data
+        h5d = h5f['SPhR_DIFFHL']
+        self.sphr_diffhl.data = h5d[:, :]
+        self.sphr_diffhl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_diffhl.offset = h5d.attrs["OFFSET"]
+        self.sphr_diffhl.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_diffhl.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_diffhl.num_of_lines,
+                      self.sphr_diffhl.num_of_columns)
+        self.sphr_diffhl.product = h5d.attrs["PRODUCT"]
+        self.sphr_diffhl.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_diffhl.data ) * ( self.sphr_diffhl.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_diffhl = mask * (self.sphr_diffhl.data *
+                                       self.sphr_diffhl.scaling_factor +
+                                       self.sphr_diffhl.offset)
+        else:
+            self.sphr_diffhl = self.sphr_diffhl.data
+        self.sphr_diffhl_palette = _get_palette(h5f, 'SPhR_DIFFHL') / 255.0
+
+        # The SPhR DIFFKI data
+        h5d = h5f['SPhR_DIFFKI']
+        self.sphr_diffki.data = h5d[:, :]
+        self.sphr_diffki.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_diffki.offset = h5d.attrs["OFFSET"]
+        self.sphr_diffki.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_diffki.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_diffki.num_of_lines,
+                      self.sphr_diffki.num_of_columns)
+        self.sphr_diffki.product = h5d.attrs["PRODUCT"]
+        self.sphr_diffki.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_diffki.data ) * ( self.sphr_diffki.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_diffki = mask * (self.sphr_diffki.data *
+                                       self.sphr_diffki.scaling_factor +
+                                       self.sphr_diffki.offset)
+        else:
+            self.sphr_diffki = self.sphr_diffki.data
+        self.sphr_diffki_palette = _get_palette(h5f, 'SPhR_DIFFKI') / 255.0
+
+        # The SPhR DIFFLI data
+        h5d = h5f['SPhR_DIFFLI']
+        self.sphr_diffli.data = h5d[:, :]
+        self.sphr_diffli.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_diffli.offset = h5d.attrs["OFFSET"]
+        self.sphr_diffli.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_diffli.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_diffli.num_of_lines,
+                      self.sphr_diffli.num_of_columns)
+        self.sphr_diffli.product = h5d.attrs["PRODUCT"]
+        self.sphr_diffli.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_diffli.data ) * ( self.sphr_diffli.data <= 128 ) 
+            # apply scaling factor and offset
+            self.sphr_diffli = mask * (self.sphr_diffli.data *
+                                       self.sphr_diffli.scaling_factor +
+                                       self.sphr_diffli.offset)
+        else:
+            self.sphr_diffli= self.sphr_diffli.data
+        self.sphr_diffli_palette = _get_palette(h5f, 'SPhR_DIFFLI') / 255.0
+
+        # The SPhR DIFFML data
+        h5d = h5f['SPhR_DIFFML']
+        self.sphr_diffml.data = h5d[:, :]
+        self.sphr_diffml.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_diffml.offset = h5d.attrs["OFFSET"]
+        self.sphr_diffml.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_diffml.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_diffml.num_of_lines,
+                      self.sphr_diffml.num_of_columns)
+        self.sphr_diffml.product = h5d.attrs["PRODUCT"]
+        self.sphr_diffml.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_diffml.data ) * ( self.sphr_diffml.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_diffml = mask * (self.sphr_diffml.data *
+                                       self.sphr_diffml.scaling_factor +
+                                       self.sphr_diffml.offset)
+        else:
+            self.sphr_diffml = self.sphr_diffml.data
+        self.sphr_diffml_palette = _get_palette(h5f, 'SPhR_DIFFML') / 255.0
+
+        # The SPhR DIFFSHW data
+        h5d = h5f['SPhR_DIFFSHW']
+        self.sphr_diffshw.data = h5d[:, :]
+        self.sphr_diffshw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_diffshw.offset = h5d.attrs["OFFSET"]
+        self.sphr_diffshw.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_diffshw.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_diffshw.num_of_lines,
+                      self.sphr_diffshw.num_of_columns)
+        self.sphr_diffshw.product = h5d.attrs["PRODUCT"]
+        self.sphr_diffshw.id = h5d.attrs["ID"]
+        if calibrate:
+            mask =  ( 8 <= self.sphr_diffshw.data ) * ( self.sphr_diffshw.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_diffshw = mask * (self.sphr_diffshw.data *
+                                        self.sphr_diffshw.scaling_factor +
+                                        self.sphr_diffshw.offset)
+        else:
+            self.sphr_diffshw = self.sphr_diffshw.data
+        self.sphr_diffshw_palette = _get_palette(h5f, 'SPhR_DIFFSHW') / 255.0
+
+        # The SPhR DIFFTPW data
+        h5d = h5f['SPhR_DIFFTPW']
+        self.sphr_difftpw.data = h5d[:, :]
+        self.sphr_difftpw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_difftpw.offset = h5d.attrs["OFFSET"]
+        self.sphr_difftpw.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_difftpw.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_difftpw.num_of_lines,
+                      self.sphr_difftpw.num_of_columns)
+        self.sphr_difftpw.product = h5d.attrs["PRODUCT"]
+        self.sphr_difftpw.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_difftpw.data ) * ( self.sphr_difftpw.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_difftpw = mask * (self.sphr_difftpw.data *
+                                        self.sphr_difftpw.scaling_factor +
+                                        self.sphr_difftpw.offset)
+        else:
+            self.sphr_difftpw = self.sphr_difftpw.data
+        self.sphr_difftpw_palette = _get_palette(h5f, 'SPhR_DIFFTPW') / 255.0
+
+        # The SPhR HL data
+        h5d = h5f['SPhR_HL']
+        self.sphr_hl.data = h5d[:, :]
+        self.sphr_hl.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_hl.offset = h5d.attrs["OFFSET"]
+        self.sphr_hl.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_hl.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_hl.num_of_lines,
+                      self.sphr_hl.num_of_columns)
+        self.sphr_hl.product = h5d.attrs["PRODUCT"]
+        self.sphr_hl.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_hl.data ) * ( self.sphr_hl.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_hl = mask * (self.sphr_hl.data *
+                                   self.sphr_hl.scaling_factor +
+                                   self.sphr_hl.offset)
+        else:
+            self.sphr_hl = self.sphr_hl.data
+        self.sphr_hl_palette = _get_palette(h5f, 'SPhR_HL') / 255.0
+
+        # The SPhR KI data
+        h5d = h5f['SPhR_KI']
+        self.sphr_ki.data = h5d[:, :]
+        self.sphr_ki.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_ki.offset = h5d.attrs["OFFSET"]
+        self.sphr_ki.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_ki.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_ki.num_of_lines,
+                      self.sphr_ki.num_of_columns)
+        self.sphr_ki.product = h5d.attrs["PRODUCT"]
+        self.sphr_ki.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_ki.data ) * ( self.sphr_ki.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_ki = mask * (self.sphr_ki.data *
+                                   self.sphr_ki.scaling_factor +
+                                   self.sphr_ki.offset)
+        else:
+            self.sphr_ki = self.sphr_ki.data
+        self.sphr_ki_palette = _get_palette(h5f, 'SPhR_KI') / 255.0
+
+        # The SPhR LI data
+        h5d = h5f['SPhR_LI']
+        self.sphr_li.data = h5d[:, :]
+        self.sphr_li.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_li.offset = h5d.attrs["OFFSET"]
+        self.sphr_li.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_li.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_li.num_of_lines,
+                      self.sphr_li.num_of_columns)
+        self.sphr_li.product = h5d.attrs["PRODUCT"]
+        self.sphr_li.id = h5d.attrs["ID"]
+        if calibrate:
+            mask =  ( 8 <= self.sphr_li.data ) * ( self.sphr_li.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_li = mask * (self.sphr_li.data *
+                                   self.sphr_li.scaling_factor +
+                                   self.sphr_li.offset)
+        else:
+            self.sphr_li = self.sphr_li.data
+        self.sphr_li_palette = _get_palette(h5f, 'SPhR_LI') / 255.0
+
+        # The SPhR ML data
+        h5d = h5f['SPhR_ML']
+        self.sphr_ml.data = h5d[:, :]
+        self.sphr_ml.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_ml.offset = h5d.attrs["OFFSET"]
+        self.sphr_ml.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_ml.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_ml.num_of_lines,
+                      self.sphr_ml.num_of_columns)
+        self.sphr_ml.product = h5d.attrs["PRODUCT"]
+        self.sphr_ml.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_ml.data ) * ( self.sphr_ml.data <= 128 )
+            # apply scaling factor and offset
+            self.sphr_ml = mask * (self.sphr_ml.data *
+                                   self.sphr_ml.scaling_factor +
+                                   self.sphr_ml.offset)
+        else:
+            self.sphr_ml = self.sphr_ml.data
+        self.sphr_ml_palette = _get_palette(h5f, 'SPhR_ML') / 255.0
+
+        # The SPhR QUALITY data
+        h5d = h5f['SPhR_QUALITY']
+        self.sphr_quality.data = h5d[:, :]
+        self.sphr_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_quality.offset = h5d.attrs["OFFSET"]
+        self.sphr_quality.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_quality.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_quality.num_of_lines,
+                      self.sphr_quality.num_of_columns)
+        self.sphr_quality.product = h5d.attrs["PRODUCT"]
+        self.sphr_quality.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = (self.sphr_quality.data != 0 )
+            # apply scaling factor and offset
+            self.sphr_quality = mask * (self.sphr_quality.data *
+                                        self.sphr_quality.scaling_factor +
+                                        self.sphr_quality.offset)
+        else:
+            self.sphr_quality = self.sphr_quality.data
+
+        # The SPhR SFLAG data
+        h5d = h5f['SPhR_SFLAG']
+        self.sphr_sflag.data = h5d[:, :]
+        self.sphr_sflag.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_sflag.offset = h5d.attrs["OFFSET"]
+        self.sphr_sflag.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_sflag.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_sflag.num_of_lines,
+                      self.sphr_sflag.num_of_columns)
+        self.sphr_sflag.product = h5d.attrs["PRODUCT"]
+        self.sphr_sflag.id = h5d.attrs["ID"]
+        self.sphr_sflag = self.sphr_sflag.data
+
+        # The SPhR SHW data
+        h5d = h5f['SPhR_SHW']
+        self.sphr_shw.data = h5d[:, :]
+        self.sphr_shw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_shw.offset = h5d.attrs["OFFSET"]
+        self.sphr_shw.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_shw.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_shw.num_of_lines,
+                      self.sphr_shw.num_of_columns)
+        self.sphr_shw.product = h5d.attrs["PRODUCT"]
+        self.sphr_shw.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_shw.data ) * ( self.sphr_shw.data <= 128 ) 
+            # apply scaling factor and offset
+            self.sphr_shw = mask * (self.sphr_shw.data *
+                                    self.sphr_shw.scaling_factor +
+                                    self.sphr_shw.offset)
+        else:
+            self.sphr_shw = self.sphr_shw.data
+        self.sphr_shw_palette = _get_palette(h5f, 'SPhR_SHW') / 255.0
+
+        # The SPhR TPW data
+        h5d = h5f['SPhR_TPW']
+        self.sphr_tpw.data = h5d[:, :]
+        self.sphr_tpw.scaling_factor = h5d.attrs["SCALING_FACTOR"]
+        self.sphr_tpw.offset = h5d.attrs["OFFSET"]
+        self.sphr_tpw.num_of_lines = h5d.attrs["N_LINES"]
+        self.sphr_tpw.num_of_columns = h5d.attrs["N_COLS"]
+        self.shape = (self.sphr_tpw.num_of_lines,
+                      self.sphr_tpw.num_of_columns)
+        self.sphr_tpw.product = h5d.attrs["PRODUCT"]
+        self.sphr_tpw.id = h5d.attrs["ID"]
+        if calibrate:
+            mask = ( 8 <= self.sphr_tpw.data ) * ( self.sphr_tpw.data <= 128 ) 
+            # apply scaling factor and offset
+            self.sphr_tpw = mask * (self.sphr_tpw.data *
+                                    self.sphr_tpw.scaling_factor +
+                                    self.sphr_tpw.offset)
+            print self.sphr_tpw.min(), self.sphr_tpw.max()
+        else:
+            self.sphr_tpw = self.sphr_tpw.data
+
+        self.sphr_tpw_palette = _get_palette(h5f, 'SPhR_TPW') / 255.0
+
+        # ------------------------
+
+        h5f.close()
+
+        #self.sphr = self.sphr.data
+        #self.sphr_bl = self.sphr_bl.data
+        #self.sphr_cape = self.sphr_cape.data
+        #self.sphr_diffbl = self.sphr_diffbl.data
+        #self.sphr_diffhl = self.sphr_diffhl.data
+        #self.sphr_diffki = self.sphr_diffki.data
+        #self.sphr_diffli = self.sphr_diffli.data
+        #self.sphr_diffml = self.sphr_diffml.data
+        #self.sphr_diffshw = self.sphr_diffshw.data
+        #self.sphr_difftpw = self.sphr_difftpw.data
+        #self.sphr_hl = self.sphr_hl.data
+        #self.sphr_ki = self.sphr_ki.data
+        #self.sphr_li = self.sphr_li.data
+        #self.sphr_ml = self.sphr_ml.data
+        #self.sphr_quality = self.sphr_quality.data
+        #self.sphr_sflag = self.sphr_sflag.data
+        #self.sphr_shw = self.sphr_shw.data
+        #self.sphr_tpw = self.sphr_tpw.data
+
+
+        self.processing_flags = self.processing_flags.data
+
+        self.area = get_area_from_file(filename)
+
+        self.filled = True
+
+    def project(self, coverage):
+        """Remaps the NWCSAF/MSG CRR to cartographic map-projection on
+        area give by a pre-registered area-id. Faster version of msg_remap!
+        """
+        LOG.info("Projecting channel %s..." % (self.name))
+
+        region = coverage.out_area
+        dest_area = region.area_id
+
+        retv = MsgSPhR()
+
+        retv.name = self.name
+        retv.package = self.package
+        retv.saf = self.saf
+        retv.product_name = self.product_name
+        retv.region_name = dest_area
+        retv.cfac = self.cfac
+        retv.lfac = self.lfac
+        retv.coff = self.coff
+        retv.loff = self.loff
+        retv.nb_param = self.nb_param
+        retv.gp_sc_id = self.gp_sc_id
+        retv.image_acquisition_time = self.image_acquisition_time
+        retv.spectral_channel_id = self.spectral_channel_id
+        retv.nominal_product_time = self.nominal_product_time
+        retv.sgs_product_quality = self.sgs_product_quality
+        retv.sgs_product_completeness = self.sgs_product_completeness
+        retv.product_algorithm_version = self.product_algorithm_version
+
+        retv.sphr_bl = coverage.project_array(self.sphr_bl)
+        retv.sphr_bl_palette = self.sphr_bl_palette
+        retv.sphr_ml = coverage.project_array(self.sphr_ml)
+        retv.sphr_ml_palette = self.sphr_ml_palette
+        retv.sphr_hl = coverage.project_array(self.sphr_hl)
+        retv.sphr_hl_palette = self.sphr_hl_palette
+        retv.sphr_ki = coverage.project_array(self.sphr_ki)
+        retv.sphr_ki_palette = self.sphr_ki_palette
+        retv.sphr_li = coverage.project_array(self.sphr_li)
+        retv.sphr_li_palette = self.sphr_li_palette
+        retv.sphr_tpw = coverage.project_array(self.sphr_tpw)
+        retv.sphr_tpw_palette = self.sphr_tpw_palette
+        retv.sphr_cape = coverage.project_array(self.sphr_cape)
+        # no sphr_cape_palette 
+        retv.sphr_quality = coverage.project_array(self.sphr_quality)
+        # no sphr_quality_palette 
+        retv.sphr_sflag = coverage.project_array(self.sphr_sflag)
+        # no sphr_sflag_palette
+        retv.sphr_shw = coverage.project_array(self.sphr_shw)
+        retv.sphr_shw_palette = self.sphr_shw_palette
+        retv.sphr_diffbl = coverage.project_array(self.sphr_diffbl)
+        retv.sphr_diffbl_palette = self.sphr_diffbl_palette
+        retv.sphr_diffml = coverage.project_array(self.sphr_diffml)
+        retv.sphr_diffml_palette = self.sphr_diffml_palette
+        retv.sphr_diffhl = coverage.project_array(self.sphr_diffhl)
+        retv.sphr_diffhl_palette = self.sphr_diffhl_palette
+        retv.sphr_diffki = coverage.project_array(self.sphr_diffki)
+        retv.sphr_diffki_palette = self.sphr_diffki_palette
+        retv.sphr_diffli = coverage.project_array(self.sphr_diffli)
+        retv.sphr_diffli_palette = self.sphr_diffli_palette
+        retv.sphr_difftpw = coverage.project_array(self.sphr_difftpw)
+        retv.sphr_difftpw_palette = self.sphr_difftpw_palette
+        retv.sphr_diffshw = coverage.project_array(self.sphr_diffshw)
+        retv.sphr_diffshw_palette = self.sphr_diffshw_palette
+
+
+
+#        retv.processing_flags = \
+#            coverage.project_array(self.processing_flags)
+
+        retv.qc_straylight = self.qc_straylight
+        retv.region_name = dest_area
+        retv.area = region
+        retv.projection_name = region.proj_id
+
+        retv.pcs_def = pcs_def_from_region(region)
+
+        retv.num_of_columns = region.x_size
+        retv.num_of_lines = region.y_size
+        retv.xscale = region.pixel_size_x
+        retv.yscale = region.pixel_size_y
+
+        import pyproj
+        prj = pyproj.Proj(region.proj4_string)
+        aex = region.area_extent
+        lonur, latur = prj(aex[2], aex[3], inverse=True)
+        lonll, latll = prj(aex[0], aex[1], inverse=True)
+        retv.ll_lon = lonll
+        retv.ll_lat = latll
+        retv.ur_lon = lonur
+        retv.ur_lat = latur
+
+        self.shape = region.shape
+
+        retv.filled = True
+        retv.resolution = self.resolution
+
+        return retv
+   
+
+ 
class MsgPCPhData(object):

    """Container for a single NWCSAF/MSG PCPh dataset layer, holding the
    raw array plus the HDF5 calibration and identification attributes.
    """

    def __init__(self):
        # Raw array (filled from the HDF5 dataset by the reader).
        self.data = None
        # Calibration: physical = data * scaling_factor + offset.
        self.scaling_factor, self.offset = 1, 0
        # Grid dimensions.
        self.num_of_lines = self.num_of_columns = 0
        # Identification attributes.
        self.product = self.id = ""
+
+
class MsgPCPh(mpop.channel.GenericChannel):

    """NWCSAF/MSG PCPh data structure as retrieved from an HDF5 file.

    ``read()`` fills the data layers from the file, ``project()`` remaps
    them onto a registered area.  The quality palette is currently
    missing from the product files.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "PCPh")
        self.filled = False
        self.name = "PCPh"
#       self.resolution = resolution
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        # Data layers, filled by read().
        # BUGFIX: self.pcph used to be assigned twice in this method.
        self.pcph = None
        self.pcph_pc = None
        self.pcph_quality = None
        self.pcph_dataflag = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.pcph_pc_palette = None
        self.pcph_quality_palette = None
        self.pcph_sflag_palette = None
        # NOTE(review): self.resolution is used by __str__ and project()
        # but is not set here -- presumably provided by the base class or
        # by the caller; confirm before relying on it.

    def __str__(self):
        """Short summary using the PC layer's shape and the resolution."""
        return ("'%s: shape %s, resolution %sm'" %
                (self.name,
                 self.pcph_pc.shape,
                 self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

# ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Read the NWCSAF/MSG PCPh product from the HDF5 file *filename*.

        When *calibrate* is True the PC layer is converted to physical
        units via its SCALING_FACTOR/OFFSET attributes; otherwise the raw
        counts are kept.
        """
        import h5py

# TODO(review): is this first container (self.pcph) actually needed?
        self.pcph = MsgPCPhData()
        self.pcph_pc = MsgPCPhData()
        self.pcph_quality = MsgPCPhData()
        self.pcph_dataflag = MsgPCPhData()

        self.processing_flags = MsgPCPhData()

        LOG.debug("Filename = <" + str(filename) + ">")
        h5f = h5py.File(filename, 'r')
        # pylint: disable-msg=W0212
        self.package = h5f.attrs["PACKAGE"]
        self.saf = h5f.attrs["SAF"]
        self.product_name = h5f.attrs["PRODUCT_NAME"]
        self.num_of_columns = h5f.attrs["NC"]
        self.num_of_lines = h5f.attrs["NL"]
        self.projection_name = h5f.attrs["PROJECTION_NAME"]
        self.region_name = h5f.attrs["REGION_NAME"]
        self.cfac = h5f.attrs["CFAC"]
        self.lfac = h5f.attrs["LFAC"]
        self.coff = h5f.attrs["COFF"]
        self.loff = h5f.attrs["LOFF"]
        self.nb_param = h5f.attrs["NB_PARAMETERS"]
        self.gp_sc_id = h5f.attrs["GP_SC_ID"]
        self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
        self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
        self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
        self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
        self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
        self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
        # pylint: enable-msg=W0212
        # ------------------------

        # The PPh PC data
        h5d = h5f['PCPh_PC']
        self.pcph_pc.data = h5d[:, :]
        self.pcph_pc.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.pcph_pc.offset = h5d.attrs["OFFSET"]
        self.pcph_pc.num_of_lines = h5d.attrs["N_LINES"]
        self.pcph_pc.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.pcph_pc.num_of_lines,
                      self.pcph_pc.num_of_columns)
        self.pcph_pc.product = h5d.attrs["PRODUCT"]
        self.pcph_pc.id = h5d.attrs["ID"]
        if calibrate:
            # Replace the container with the calibrated array.
            self.pcph_pc = (self.pcph_pc.data *
                            self.pcph_pc.scaling_factor +
                            self.pcph_pc.offset)
        else:
            self.pcph_pc = self.pcph_pc.data
        self.pcph_pc_palette = _get_palette(h5f, 'PCPh_PC') / 255.0

        # The PPh QUALITY data
        # NOTE(review): quality and dataflag layers are kept as
        # MsgPCPhData containers (never calibrated or collapsed to bare
        # arrays) and are not remapped by project() -- confirm intended.
        h5d = h5f['PCPh_QUALITY']
        self.pcph_quality.data = h5d[:, :]
        self.pcph_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.pcph_quality.offset = h5d.attrs["OFFSET"]
        self.pcph_quality.num_of_lines = h5d.attrs["N_LINES"]
        self.pcph_quality.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.pcph_quality.num_of_lines,
                      self.pcph_quality.num_of_columns)
        self.pcph_quality.product = h5d.attrs["PRODUCT"]
        self.pcph_quality.id = h5d.attrs["ID"]

        # The PPh DATA FLAG data
        h5d = h5f['PCPh_DATAFLAG']
        self.pcph_dataflag.data = h5d[:, :]
        self.pcph_dataflag.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.pcph_dataflag.offset = h5d.attrs["OFFSET"]
        self.pcph_dataflag.num_of_lines = h5d.attrs["N_LINES"]
        self.pcph_dataflag.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.pcph_dataflag.num_of_lines,
                      self.pcph_dataflag.num_of_columns)
        self.pcph_dataflag.product = h5d.attrs["PRODUCT"]
        self.pcph_dataflag.id = h5d.attrs["ID"]

        # ------------------------

        h5f.close()

        # processing_flags.data is never filled above, so this ends up None.
        self.processing_flags = self.processing_flags.data

        self.area = get_area_from_file(filename)

        self.filled = True

    def project(self, coverage):
        """Remaps the NWCSAF/MSG PCPh to cartographic map-projection on
        area give by a pre-registered area-id. Faster version of msg_remap!
        """
        LOG.info("Projecting channel %s..." % (self.name))

        region = coverage.out_area
        dest_area = region.area_id

        retv = MsgPCPh()

        retv.name = self.name
        retv.package = self.package
        retv.saf = self.saf
        retv.product_name = self.product_name
        retv.cfac = self.cfac
        retv.lfac = self.lfac
        retv.coff = self.coff
        retv.loff = self.loff
        retv.nb_param = self.nb_param
        retv.gp_sc_id = self.gp_sc_id
        retv.image_acquisition_time = self.image_acquisition_time
        retv.spectral_channel_id = self.spectral_channel_id
        retv.nominal_product_time = self.nominal_product_time
        retv.sgs_product_quality = self.sgs_product_quality
        retv.sgs_product_completeness = self.sgs_product_completeness
        retv.product_algorithm_version = self.product_algorithm_version

        retv.pcph_pc = coverage.project_array(self.pcph_pc)
        retv.pcph_pc_palette = self.pcph_pc_palette

        #retv.processing_flags = \
         #   coverage.project_array(self.processing_flags)

        retv.qc_straylight = self.qc_straylight
        # BUGFIX: region_name used to be assigned twice; once is enough.
        retv.region_name = dest_area
        retv.area = region
        retv.projection_name = region.proj_id

        retv.pcs_def = pcs_def_from_region(region)

        retv.num_of_columns = region.x_size
        retv.num_of_lines = region.y_size
        retv.xscale = region.pixel_size_x
        retv.yscale = region.pixel_size_y

        # Lower-left / upper-right corner coordinates in lon/lat degrees.
        import pyproj
        prj = pyproj.Proj(region.proj4_string)
        aex = region.area_extent
        lonur, latur = prj(aex[2], aex[3], inverse=True)
        lonll, latll = prj(aex[0], aex[1], inverse=True)
        retv.ll_lon = lonll
        retv.ll_lat = latll
        retv.ur_lon = lonur
        retv.ur_lat = latur

        # BUGFIX: record the new shape on the *returned* channel as well;
        # previously only self.shape was updated, leaving retv.shape None.
        retv.shape = region.shape
        self.shape = region.shape

        retv.filled = True
        retv.resolution = self.resolution

        return retv
+
+
class MsgCRPhData(object):

    """Container for a single NWCSAF/MSG CRPh dataset layer, holding the
    raw array plus the HDF5 calibration and identification attributes.
    """

    def __init__(self):
        # Raw array (filled from the HDF5 dataset by the reader).
        self.data = None
        # Calibration: physical = data * scaling_factor + offset.
        self.scaling_factor, self.offset = 1, 0
        # Grid dimensions.
        self.num_of_lines = self.num_of_columns = 0
        # Identification attributes.
        self.product = self.id = ""
+
+
class MsgCRPh(mpop.channel.GenericChannel):

    """NWCSAF/MSG CRPh data structure as retrieved from an HDF5 file.

    ``read()`` fills the data layers from the file, ``project()`` remaps
    them onto a registered area.  The quality palette is currently
    missing from the product files.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self, "CRPh")
        self.filled = False
        self.name = "CRPh"
#       self.resolution = resolution
        self.package = ""
        self.saf = ""
        self.product_name = ""
        self.num_of_columns = 0
        self.num_of_lines = 0
        self.projection_name = ""
        self.pcs_def = ""
        self.xscale = 0
        self.yscale = 0
        self.ll_lon = 0.0
        self.ll_lat = 0.0
        self.ur_lon = 0.0
        self.ur_lat = 0.0
        self.region_name = ""
        self.cfac = 0
        self.lfac = 0
        self.coff = 0
        self.loff = 0
        self.nb_param = 0
        self.gp_sc_id = 0
        self.image_acquisition_time = 0
        self.spectral_channel_id = 0
        self.nominal_product_time = 0
        self.sgs_product_quality = 0
        self.sgs_product_completeness = 0
        self.product_algorithm_version = ""
        # Data layers, filled by read().
        # BUGFIX: self.crph used to be assigned twice in this method.
        self.crph = None
        self.crph_crr = None
        self.crph_accum = None
        # BUGFIX: read() populates self.crph_iqf (lower case); the old
        # declaration self.crph_IQF was never filled.  Both spellings are
        # kept so any external reader of crph_IQF keeps working.
        self.crph_iqf = None
        self.crph_IQF = None
        self.crph_quality = None
        self.crph_dataflag = None
        self.processing_flags = None
        self.shape = None
        self.satid = ""
        self.qc_straylight = -1
        self.crph_pc_palette = None
        self.crph_quality_palette = None
        self.crph_sflag_palette = None
        # NOTE(review): self.resolution is used by __str__ and project()
        # but is not set here -- presumably provided by the base class or
        # by the caller; confirm before relying on it.

    def __str__(self):
        """Short summary using the CRR layer's shape and the resolution."""
        return ("'%s: shape %s, resolution %sm'" %
                (self.name,
                 self.crph_crr.shape,
                 self.resolution))

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return self.filled

# ------------------------------------------------------------------
    def read(self, filename, calibrate=True):
        """Read the NWCSAF/MSG CRPh product from the HDF5 file *filename*.

        When *calibrate* is True the CRR and ACUM layers are converted to
        physical units via their SCALING_FACTOR/OFFSET attributes;
        otherwise the raw counts are kept.
        """
        import h5py

# TODO(review): is this first container (self.crph) actually needed?
        self.crph = MsgCRPhData()
        self.crph_crr = MsgCRPhData()
        self.crph_accum = MsgCRPhData()
        self.crph_iqf = MsgCRPhData()
        self.crph_quality = MsgCRPhData()
        self.crph_dataflag = MsgCRPhData()

        self.processing_flags = MsgCRPhData()

        LOG.debug("Filename = <" + str(filename) + ">")
        h5f = h5py.File(filename, 'r')
        # pylint: disable-msg=W0212
        self.package = h5f.attrs["PACKAGE"]
        self.saf = h5f.attrs["SAF"]
        self.product_name = h5f.attrs["PRODUCT_NAME"]
        self.num_of_columns = h5f.attrs["NC"]
        self.num_of_lines = h5f.attrs["NL"]
        self.projection_name = h5f.attrs["PROJECTION_NAME"]
        self.region_name = h5f.attrs["REGION_NAME"]
        self.cfac = h5f.attrs["CFAC"]
        self.lfac = h5f.attrs["LFAC"]
        self.coff = h5f.attrs["COFF"]
        self.loff = h5f.attrs["LOFF"]
        self.nb_param = h5f.attrs["NB_PARAMETERS"]
        self.gp_sc_id = h5f.attrs["GP_SC_ID"]
        self.image_acquisition_time = h5f.attrs["IMAGE_ACQUISITION_TIME"]
        self.spectral_channel_id = h5f.attrs["SPECTRAL_CHANNEL_ID"]
        self.nominal_product_time = h5f.attrs["NOMINAL_PRODUCT_TIME"]
        self.sgs_product_quality = h5f.attrs["SGS_PRODUCT_QUALITY"]
        self.sgs_product_completeness = h5f.attrs["SGS_PRODUCT_COMPLETENESS"]
        self.product_algorithm_version = h5f.attrs["PRODUCT_ALGORITHM_VERSION"]
        # pylint: enable-msg=W0212
        # ------------------------

        # The CRPh CRR data
        h5d = h5f['CRPh_CRR']
        self.crph_crr.data = h5d[:, :]
        self.crph_crr.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.crph_crr.offset = h5d.attrs["OFFSET"]
        self.crph_crr.num_of_lines = h5d.attrs["N_LINES"]
        self.crph_crr.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.crph_crr.num_of_lines,
                      self.crph_crr.num_of_columns)
        self.crph_crr.product = h5d.attrs["PRODUCT"]
        self.crph_crr.id = h5d.attrs["ID"]
        if calibrate:
            # Replace the container with the calibrated array.
            self.crph_crr = (self.crph_crr.data *
                             self.crph_crr.scaling_factor +
                             self.crph_crr.offset)
        else:
            self.crph_crr = self.crph_crr.data
        self.crph_crr_palette = _get_palette(h5f, 'CRPh_CRR') / 255.0

        # The CRPh ACCUM data
        h5d = h5f['CRPh_ACUM']
        self.crph_accum.data = h5d[:, :]
        self.crph_accum.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.crph_accum.offset = h5d.attrs["OFFSET"]
        self.crph_accum.num_of_lines = h5d.attrs["N_LINES"]
        self.crph_accum.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.crph_accum.num_of_lines,
                      self.crph_accum.num_of_columns)
        self.crph_accum.product = h5d.attrs["PRODUCT"]
        self.crph_accum.id = h5d.attrs["ID"]
        if calibrate:
            self.crph_accum = (self.crph_accum.data *
                               self.crph_accum.scaling_factor +
                               self.crph_accum.offset)
        else:
            self.crph_accum = self.crph_accum.data
        self.crph_accum_palette = _get_palette(h5f, 'CRPh_ACUM') / 255.0

        # The CRPH IQF data
        # NOTE(review): iqf, quality and dataflag layers are kept as
        # MsgCRPhData containers (never calibrated or collapsed to bare
        # arrays) and are not remapped by project() -- confirm intended.
        h5d = h5f['CRPh_IQF']
        self.crph_iqf.data = h5d[:, :]
        self.crph_iqf.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.crph_iqf.offset = h5d.attrs["OFFSET"]
        self.crph_iqf.num_of_lines = h5d.attrs["N_LINES"]
        self.crph_iqf.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.crph_iqf.num_of_lines,
                      self.crph_iqf.num_of_columns)
        self.crph_iqf.product = h5d.attrs["PRODUCT"]
        self.crph_iqf.id = h5d.attrs["ID"]

        # The CRPh QUALITY data
        h5d = h5f['CRPh_QUALITY']
        self.crph_quality.data = h5d[:, :]
        self.crph_quality.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.crph_quality.offset = h5d.attrs["OFFSET"]
        self.crph_quality.num_of_lines = h5d.attrs["N_LINES"]
        self.crph_quality.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.crph_quality.num_of_lines,
                      self.crph_quality.num_of_columns)
        self.crph_quality.product = h5d.attrs["PRODUCT"]
        self.crph_quality.id = h5d.attrs["ID"]

        # The CRPh DATA FLAG data
        h5d = h5f['CRPh_DATAFLAG']
        self.crph_dataflag.data = h5d[:, :]
        self.crph_dataflag.scaling_factor = h5d.attrs["SCALING_FACTOR"]
        self.crph_dataflag.offset = h5d.attrs["OFFSET"]
        self.crph_dataflag.num_of_lines = h5d.attrs["N_LINES"]
        self.crph_dataflag.num_of_columns = h5d.attrs["N_COLS"]
        self.shape = (self.crph_dataflag.num_of_lines,
                      self.crph_dataflag.num_of_columns)
        self.crph_dataflag.product = h5d.attrs["PRODUCT"]
        self.crph_dataflag.id = h5d.attrs["ID"]

        # ------------------------

        h5f.close()

        # processing_flags.data is never filled above, so this ends up None.
        self.processing_flags = self.processing_flags.data

        self.area = get_area_from_file(filename)

        self.filled = True

    def project(self, coverage):
        """Remaps the NWCSAF/MSG CRPh to cartographic map-projection on
        area give by a pre-registered area-id. Faster version of msg_remap!
        """
        LOG.info("Projecting channel %s..." % (self.name))

        region = coverage.out_area
        dest_area = region.area_id

        retv = MsgCRPh()

        retv.name = self.name
        retv.package = self.package
        retv.saf = self.saf
        retv.product_name = self.product_name
        retv.cfac = self.cfac
        retv.lfac = self.lfac
        retv.coff = self.coff
        retv.loff = self.loff
        retv.nb_param = self.nb_param
        retv.gp_sc_id = self.gp_sc_id
        retv.image_acquisition_time = self.image_acquisition_time
        retv.spectral_channel_id = self.spectral_channel_id
        retv.nominal_product_time = self.nominal_product_time
        retv.sgs_product_quality = self.sgs_product_quality
        retv.sgs_product_completeness = self.sgs_product_completeness
        retv.product_algorithm_version = self.product_algorithm_version

        retv.crph_crr = coverage.project_array(self.crph_crr)
        retv.crph_crr_palette = self.crph_crr_palette
        retv.crph_accum = coverage.project_array(self.crph_accum)
        retv.crph_accum_palette = self.crph_accum_palette
#        retv.processing_flags = \
#            coverage.project_array(self.processing_flags)

        retv.qc_straylight = self.qc_straylight
        # BUGFIX: region_name used to be assigned twice; once is enough.
        retv.region_name = dest_area
        retv.area = region
        retv.projection_name = region.proj_id

        retv.pcs_def = pcs_def_from_region(region)

        retv.num_of_columns = region.x_size
        retv.num_of_lines = region.y_size
        retv.xscale = region.pixel_size_x
        retv.yscale = region.pixel_size_y

        # Lower-left / upper-right corner coordinates in lon/lat degrees.
        import pyproj
        prj = pyproj.Proj(region.proj4_string)
        aex = region.area_extent
        lonur, latur = prj(aex[2], aex[3], inverse=True)
        lonll, latll = prj(aex[0], aex[1], inverse=True)
        retv.ll_lon = lonll
        retv.ll_lat = latll
        retv.ur_lon = lonur
        retv.ur_lat = latur

        # BUGFIX: record the new shape on the *returned* channel as well;
        # previously only self.shape was updated, leaving retv.shape None.
        retv.shape = region.shape
        self.shape = region.shape

        retv.filled = True
        retv.resolution = self.resolution

        return retv
+
# (German marker left by the author: "NEU ENDE" means "new [section] ends".)
""" NEU ENDE """

# File-name extensions tried, in order of preference, when globbing for an
# MSG PGE product: parallax-corrected variants first, plain HDF5 last.
MSG_PGE_EXTENTIONS = ["PLAX.CTTH.0.h5", "PLAX.CLIM.0.h5", "h5"]
+
+
def get_best_product(filename, area_extent):
    """Get the best of the available products for the *filename* template.

    Tries each known PGE extension in order and returns the first file
    whose area extent matches *area_extent* (or simply the first match
    when no area is given).  Returns None when nothing fits.
    """
    for ext in MSG_PGE_EXTENTIONS:
        match_str = filename + "." + ext
        LOG.debug("glob-string for filename: " + str(match_str))
        candidates = glob.glob(match_str)
        if not candidates:
            LOG.warning("No matching %s.%s input MSG file."
                        % (filename, ext))
            continue
        # At least one file found for this extension.
        if area_extent is None:
            LOG.warning("Didn't specify an area, taking " + candidates[0])
            return candidates[0]
        for candidate in candidates:
            aex = get_area_extent(candidate)
            deviation = np.abs(np.array(aex) - np.array(area_extent))
            # Accept the file when every corner is within 1 km.
            if np.all(np.max(deviation) < 1000):
                LOG.info("MSG file found: %s" % candidate)
                return candidate
        LOG.info("Did not find any MSG file for specified area")
+
+
def get_best_products(filename, area_extent):
    """Get the best of the available products for the *filename* template.

    For each known PGE extension, collects the files whose area extent
    matches *area_extent* within 1 km (or the first glob match when no
    area is given) and returns the sorted list of file names.
    """

    filenames = []

    for ext in MSG_PGE_EXTENTIONS:
        match_str = filename + "." + ext
        LOG.debug('Match string = ' + str(match_str))
        flist = glob.glob(match_str)
        if len(flist) == 0:
            LOG.warning("No matching %s.%s input MSG file."
                        % (filename, ext))
        else:
            # File found:
            if area_extent is None:
                LOG.warning("Didn't specify an area, taking " + flist[0])
                filenames.append(flist[0])
            else:
                found = False
                for fname in flist:
                    aex = get_area_extent(fname)
                    if np.all(np.max(np.abs(np.array(aex) -
                                            np.array(area_extent))) < 1000):
                        found = True
                        LOG.info("MSG file found: %s" % fname)
                        filenames.append(fname)
                # BUGFIX: this check used to sit inside the loop above,
                # logging a spurious "not found" message for every
                # non-matching candidate even when a later file matched.
                if not found:
                    LOG.info(
                        "Did not find any MSG file for specified area")
    LOG.debug("Sorted filenames: %s", str(sorted(filenames)))
    return sorted(filenames)
+
+
+def get_area_from_file(filename):
+    """Get the area from the h5 file.
+    """
+    from pyresample.geometry import AreaDefinition
+    import h5py
+
+    aex = get_area_extent(filename)
+    h5f = h5py.File(filename, 'r')
+    pname = h5f.attrs["PROJECTION_NAME"]
+    proj = {}
+    if pname.startswith("GEOS"):
+        proj["proj"] = "geos"
+        proj["a"] = "6378169.0"
+        proj["b"] = "6356583.8"
+        proj["h"] = "35785831.0"
+        proj["lon_0"] = str(float(pname.split("<")[1][:-1]))
+    else:
+        raise NotImplementedError("Only geos projection supported yet.")
+
+    #h5f.attrs["REGION_NAME"]  # <type 'numpy.string_'> alps
+    #pname                     # <type 'numpy.string_'> GEOS<+009.5>
+    #proj                      # <type 'dict'> {'a': '6378169.0', 'h': '35785831.0', 'b': '6356583.8', 'lon_0': '9.5', 'proj': 'geos'}               
+    #int(h5f.attrs["NC"])      # <type 'int'>  349
+    #int(h5f.attrs["NL"])      # <type 'int'> 151
+    #aex                       # <type 'tuple'> (-613578.17189778585, 4094060.208733994, 433553.97518292483, 4547101.2335793395)
+
+    area_def = AreaDefinition(h5f.attrs["REGION_NAME"],
+                              h5f.attrs["REGION_NAME"],
+                              pname,
+                              proj,
+                              int(h5f.attrs["NC"]),
+                              int(h5f.attrs["NL"]),
+                              aex)
+    h5f.close()
+    return area_def
+
+
+def load(scene, **kwargs):
+    """Load data into the *channels*. *Channels* is a list or a tuple
+    containing channels we will load data into. If None, all channels are
+    loaded.
+    """
+
+    print "*** read NWC-SAF data with nwcsaf_msg.py", scene.channels_to_load
+
+    area_extent = kwargs.get("area_extent")
+    calibrate = kwargs.get("calibrate", True)
+
+    conf = ConfigParser.ConfigParser()
+    conf.read(os.path.join(CONFIG_PATH, scene.fullname + ".cfg"))
+    directory = conf.get(scene.instrument_name + "-level3", "dir",      raw=True)
+    filename_raw  = conf.get(scene.instrument_name + "-level3", "filename", raw=True)
+    pathname      = os.path.join(directory, filename_raw)
+
+    LOG.debug("Inside load: " + str(scene.channels_to_load))
+
+    if "CloudMask" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "01",
+                       "product": "CMa__"})
+        filename = get_best_product(filename_wildcards, area_extent)
+        if filename != None:
+            ct_chan = MsgCloudMask() 
+            ct_chan.read(filename,calibrate)
+            ct_chan.satid = (scene.satname.capitalize() +
+                             str(scene.sat_nr()).rjust(2))
+            ct_chan.resolution = ct_chan.area.pixel_size_x
+            scene.channels.append(ct_chan)
+
+    if "CloudType" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "02",
+                       "product": "CT___"})
+        filenames = get_best_products(filename_wildcards, area_extent)
+        if len(filenames) > 0:
+            filename = filenames[-1]
+        else:
+            LOG.info("Did not find any MSG file for specified area")
+            return
+        ct_chan = MsgCloudType()
+        ct_chan.read(filenames[-1])
+        LOG.debug("Uncorrected file: %s", filename)
+        ct_chan.name = "CloudType"
+        ct_chan.satid = (scene.satname.capitalize() +
+                         str(scene.sat_nr()).rjust(2))
+        ct_chan.resolution = ct_chan.area.pixel_size_x
+        scene.channels.append(ct_chan)
+
+    if "CloudType_plax" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "02",
+                       "product": "CT___"})
+        filenames = get_best_products(filename_wildcards, area_extent)
+        if len(filenames) > 0:
+            filename = filenames[0]
+        else:
+            LOG.info("Did not find any MSG file for specified area")
+            return
+        ct_chan_plax = MsgCloudType()
+        if filename != None:
+            LOG.debug("Parallax corrected file: %s", filename)
+            ct_chan_plax.read(filename)
+            ct_chan_plax.name = "CloudType_plax"
+            ct_chan_plax.satid = (scene.satname.capitalize() +
+                                  str(scene.sat_nr()).rjust(2))
+            ct_chan_plax.resolution = ct_chan_plax.area.pixel_size_x
+            scene.channels.append(ct_chan_plax)
+
+    print "*** hallo world***"
+
+    if "CTTH" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "03",
+                       "product": "CTTH_"})
+        filename = get_best_product(filename_wildcards, area_extent)
+        if filename != None:
+            ct_chan = MsgCTTH()
+            ct_chan.read(filename,calibrate)
+            print "CCC", scene.sat_nr()
+            ct_chan.satid = (scene.satname[0:8].capitalize() +
+                             str(scene.sat_nr()).rjust(2))
+            print "bullshit (nwcsat_msg.py) ", ct_chan.satid   # "Meteosat 9"
+            ct_chan.resolution = ct_chan.area.pixel_size_x
+            scene.channels.append(ct_chan)
+        
+    if "CRR" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "05",
+                       "product": "CRR__"})
+        filename = get_best_product(filename_wildcards, area_extent)
+        if filename != None:
+            ct_chan = MsgCRR()
+            ct_chan.read(filename,calibrate)
+            ct_chan.name = "CRR_"          # !!!!! changed as we create another channel named 'CRR' when transforming the format
+            ct_chan.satid = (scene.satname.capitalize() +
+                             str(scene.sat_nr()).rjust(2))
+            ct_chan.resolution = ct_chan.area.pixel_size_x
+            scene.channels.append(ct_chan)
+
+    if "PC" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "04",
+                       "product": "PC___"})
+        filename = get_best_product(filename_wildcards, area_extent)
+        if filename != None:
+            ct_chan = MsgPC()
+            ct_chan.read(filename,calibrate)
+            ct_chan.name = "PC"
+            ct_chan.satid = (scene.satname.capitalize() +
+                             str(scene.sat_nr()).rjust(2))
+            ct_chan.resolution = ct_chan.area.pixel_size_x
+            scene.channels.append(ct_chan)
+
+    if "SPhR" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "13",
+                       "product": "SPhR_"})
+        filename = get_best_product(filename_wildcards, area_extent)
+        if filename != None:
+            ct_chan = MsgSPhR()
+            ct_chan.read(filename,calibrate)
+            ct_chan.name = "SPhR"
+            ct_chan.satid = (scene.satname.capitalize() +
+                             str(scene.sat_nr()).rjust(2))
+            ct_chan.resolution = ct_chan.area.pixel_size_x
+            scene.channels.append(ct_chan)
+
+    if "PCPh" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "14",
+                       "product": "PCPh_"})
+        filename = get_best_product(filename_wildcards, area_extent)
+        if filename != None:
+            ct_chan = MsgPCPh()
+            ct_chan.read(filename,calibrate)
+            ct_chan.name = "PCPh_"
+            ct_chan.satid = (scene.satname.capitalize() +
+                             str(scene.sat_nr()).rjust(2))
+            ct_chan.resolution = ct_chan.area.pixel_size_x
+            scene.channels.append(ct_chan)
+
+    if "CRPh" in scene.channels_to_load:
+        filename_wildcards = (scene.time_slot.strftime(pathname)
+                    % {"number": "14",
+                       "product": "CRPh_"})
+        filename = get_best_product(filename_wildcards, area_extent)
+        if filename != None:
+            ct_chan = MsgCRPh()
+            ct_chan.read(filename,calibrate)
+            ct_chan.name = "CRPh_"
+            ct_chan.satid = (scene.satname.capitalize() +
+                             str(scene.sat_nr()).rjust(2))
+            ct_chan.resolution = ct_chan.area.pixel_size_x
+            scene.channels.append(ct_chan)
+
+    if 'filename' in locals() and filename != None:
+        # print "nwcsaf_msg", len(filename), filename
+        if len(filename) > 12:
+            sat_nr= int(basename(filename)[10:11])+7
+            if int(scene.sat_nr()) != int(sat_nr):
+                print "*** Warning, change Meteosat number to "+str(sat_nr)+" (input: "+scene.sat_nr()+")"
+                #scene.number = str(sat_nr).zfill(2)
+                # !!! update number !!!
+                scene.number = str(sat_nr)
+ 
+
+    LOG.info("Loading channels done.")
diff --git a/mpop/satin/odyssey_radar.py b/mpop/satin/odyssey_radar.py
new file mode 100644
index 0000000..d71775b
--- /dev/null
+++ b/mpop/satin/odyssey_radar.py
@@ -0,0 +1,222 @@
+import Image
+import glob
+import os
+from ConfigParser import ConfigParser
+import numpy as np
+import numpy.ma as ma
+from mpop import CONFIG_PATH
+
+import pyresample
+import logging
+
+import h5py
+
+LOG = logging.getLogger(__name__)
+
+ODIM_H5_FIELD_NAMES = {
+   'TH': 'total_power',      # uncorrected reflectivity, horizontal
+   'TV': 'total_power',      # uncorrected reflectivity, vertical
+   'DBZH': 'reflectivity',    # corrected reflectivity, horizontal
+   'DBZV': 'reflectivity',    # corrected reflectivity, vertical
+   'ZDR': 'differential_reflectivity',    # differential reflectivity
+   'RHOHV': 'cross_correlation_ratio',
+   'LDR': 'linear_polarization_ratio',
+   'PHIDP': 'differential_phase',
+   'KDP': 'specific_differential_phase',
+   'SQI': 'normalized_coherent_power',
+   'SNR': 'signal_to_noise_ratio',
+   'VRAD': 'velocity',
+   'WRAD': 'spectrum_width',
+   'QIND': 'quality_index',
+   'RATE': 'precip',         # precip
+   'ACRR': 'accu_precip',      # 1 hour ACCU
+}
+
+
+def load(satscene, *args, **kwargs):
+   """Loads the *channels* into the satellite *scene*.
+   """
+   #
+   # Dataset information
+   #
+   # Read config file content
+   conf = ConfigParser()
+   conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
+
+   values = {"orbit": satscene.orbit,
+          "satname": satscene.satname,
+          "number": satscene.number,
+          "instrument": satscene.instrument_name,
+          "satellite": satscene.fullname
+          }
+
+   # projection info
+   projectionName = conf.get("radar-level2", "projection")
+   projection = pyresample.utils.load_area(os.path.join(CONFIG_PATH, "areas.def"), projectionName)
+   satscene.area = projection
+   
+   for chn_name in satscene.channels_to_load:
+      filename = os.path.join(
+         satscene.time_slot.strftime(conf.get("radar-level2", "dir", raw=True)) % values,
+         satscene.time_slot.strftime(conf.get(chn_name,  "filename", raw=True)) % values )
+
+      # Load data from the h5 file
+      LOG.debug("filename: "+filename)
+      filenames=glob.glob(str(filename))
+
+      if len(filenames) == 0:
+         LOG.debug("no input file found: "+filename)
+         print "no input file found:"+filename
+         quit()
+      else:
+         filename = glob.glob(str(filename))[0]
+      
+      # open the file
+      hfile = h5py.File(filename, 'r')
+      odim_object = hfile['what'].attrs['object']
+      if odim_object != 'COMP':
+         raise NotImplementedError('object: %s not implemented.' % (odim_object))
+      else:
+         # File structure
+         
+         #>>> hfile.keys()
+         #[u'dataset1', u'dataset2', u'how', u'what', u'where']
+
+
+         #>>> for f in hfile['what'].attrs.keys():
+         #...  print "hfile['what'].attrs['",f,"']=", hfile['what'].attrs[f]
+         #
+         #hfile['what'].attrs[' object ']= COMP
+         #hfile['what'].attrs[' version ']= H5rad 2.0
+         #hfile['what'].attrs[' date ']= 20151201
+         #hfile['what'].attrs[' time ']= 060000
+         #hfile['what'].attrs[' source ']= ORG:247
+
+         #>>> for f in hfile['where'].attrs.keys():
+         #...  print "hfile['where'].attrs['",f,"']=", hfile['where'].attrs[f]
+         #
+         #hfile['where'].attrs[' projdef ']= +proj=laea +lat_0=55.0 +lon_0=10.0 +x_0=1950000.0 +y_0=-2100000.0 +units=m +ellps=WGS84
+         #hfile['where'].attrs[' xsize ']= 1900
+         #hfile['where'].attrs[' ysize ']= 2200
+         #hfile['where'].attrs[' xscale ']= 2000.0
+         #hfile['where'].attrs[' yscale ']= 2000.0
+         #hfile['where'].attrs[' LL_lon ']= -10.4345768386
+         #hfile['where'].attrs[' LL_lat ']= 31.7462153193
+         #hfile['where'].attrs[' UL_lon ']= -39.5357864125
+         #hfile['where'].attrs[' UL_lat ']= 67.0228327583
+         #hfile['where'].attrs[' UR_lon ']= 57.8119647501
+         #hfile['where'].attrs[' UR_lat ']= 67.6210371028
+         #hfile['where'].attrs[' LR_lon ']= 29.4210386356
+         #hfile['where'].attrs[' LR_lat ']= 31.9876502779
+
+         # hfile['how'].attrs['nodes'] 
+         # list of radar in composite
+
+         #>>> for f in hfile['dataset1']['what'].attrs.keys():
+         #...  print "hfile['dataset1'][what].attrs['",f,"']=", hfile['dataset1']['what'].attrs[f]
+         #
+         #hfile['dataset1'][what].attrs[' product ']= COMP
+         #hfile['dataset1'][what].attrs[' startdate ']= 20151201
+         #hfile['dataset1'][what].attrs[' starttime ']= 055000
+         #hfile['dataset1'][what].attrs[' enddate ']= 20151201
+         #hfile['dataset1'][what].attrs[' endtime ']= 060500
+         #hfile['dataset1'][what].attrs[' quantity ']= RATE
+         #hfile['dataset1'][what].attrs[' gain ']= 1.0
+         #hfile['dataset1'][what].attrs[' offset ']= 0.0
+         #hfile['dataset1'][what].attrs[' nodata ']= -9999000.0
+         #hfile['dataset1'][what].attrs[' undetect ']= -8888000.0
+         #>>> for f in hfile['dataset2']['what'].attrs.keys():
+         #...  print "hfile['dataset2'][what].attrs['",f,"']=", hfile['dataset2']['what'].attrs[f]
+         #
+         #hfile['dataset2'][what].attrs[' product ']= COMP
+         #hfile['dataset2'][what].attrs[' startdate ']= 20151201
+         #hfile['dataset2'][what].attrs[' starttime ']= 055000
+         #hfile['dataset2'][what].attrs[' enddate ']= 20151201
+         #hfile['dataset2'][what].attrs[' endtime ']= 060500
+         #hfile['dataset2'][what].attrs[' quantity ']= QIND
+         #hfile['dataset2'][what].attrs[' gain ']= 1.0
+         #hfile['dataset2'][what].attrs[' offset ']= 0.0
+         #hfile['dataset2'][what].attrs[' nodata ']= -9999000.0
+         #hfile['dataset2'][what].attrs[' undetect ']= -8888000.0
+
+         _xsize = hfile['where'].attrs['xsize']
+         _ysize = hfile['where'].attrs['ysize']
+         #nbins= _xsize * _ysize
+
+         #projection = hfile['where'].attrs['projdef']
+         
+         datasets = [k for k in hfile if k.startswith('dataset')]
+         datasets.sort()
+         nsweeps = len(datasets)
+         
+         try:
+            ds1_what = hfile[datasets[0]]['what'].attrs
+         except KeyError:
+            # if no how group exists mock it with an empty dictionary
+            ds1_what = {}
+         
+         _type = ''
+         if 'product' in ds1_what:
+            LOG.debug("product: "+ds1_what['product'])
+            if ds1_what['product'] == 'COMP':
+               if 'quantity' in ds1_what:
+                  _type = ds1_what['quantity']
+                  LOG.debug("product_type: "+_type)
+
+                  #for chn_name in satscene.channels_to_load:
+                  #   if chn_name == _type:
+
+                  raw_data = hfile[datasets[0]]['data1']['data'][:]
+                  raw_data = raw_data.reshape(_ysize,_xsize)
+         
+                  # flag no data
+                  if 'nodata' in ds1_what:
+                     nodata = ds1_what['nodata']
+                     data = np.ma.masked_equal(raw_data, nodata)
+                  else:
+                     data = np.ma.masked_array(raw_data)
+         
+                  mask = np.ma.masked_array( raw_data == nodata )
+                  mask = np.ma.masked_equal( mask, False)
+            
+                  # flag undetect data 
+                  if 'undetect' in ds1_what:
+                     undetect = ds1_what['undetect']
+                     data[data == undetect] = np.ma.masked
+                        
+                  #from trollimage.image import Image as trollimage
+                  #img = trollimage(mask, mode="L", fill_value=[1,1,1]) # [0,0,0] [1,1,1]
+                  #from trollimage.colormap import rainbow
+                  #img.colorize(rainbow)
+                  #img.show()
+                  #quit()
+
+                  # gain/offset adjustment
+                  if 'offset' in ds1_what:
+                     offset = ds1_what['offset']
+                  else:
+                     offset = 0.0
+                     
+                  if 'gain' in ds1_what:
+                     gain = ds1_what['gain']
+                  else:
+                     gain = 1.0
+
+                  data *= gain + offset
+                  
+                  satscene[chn_name] = data
+                  satscene[chn_name+'-MASK'] = mask
+
+                  LOG.debug(" *** channel:"+chn_name)
+                  
+                  if _type == 'DBZH':
+                     units = 'dBZ'
+                  
+                  if _type == 'RATE':
+                     units = 'mm/h'
+
+                  if _type == 'ACRR':
+                     units = 'mm'
+                     
+                  satscene[chn_name].info["units"] = units
+                  LOG.debug("channel:"+chn_name+" units:"+units)
diff --git a/mpop/satin/s2_msi.py b/mpop/satin/s2_msi.py
new file mode 100644
index 0000000..ac69940
--- /dev/null
+++ b/mpop/satin/s2_msi.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+"""Loader for s2, jpeg2000 format.
+"""
+#Matias Takala FMI 2016
+
+import glob
+import os
+import pickle
+import re
+from ConfigParser import ConfigParser
+
+import numpy.ma as ma
+from pyresample import utils
+
+import glymur
+from mpop import CONFIG_PATH
+from mpop.satellites import GenericFactory
+
+#in this version Q_V is hardcoded but could be read from metadata
+QUANTIFICATION_VALUE = 10000
+
+
+def parse_tile(file):
+    tile = re.findall('T(\d{2}\w{3})_', file)
+    f = open('s2tiles.pickle', 'r')
+    s2tiles = pickle.load(f)
+    f.close()
+    return [tile[0], s2tiles[tile[0]]]
+
+
+def read_jp2_data(file):
+    jp2 = glymur.Jp2k(file)
+    data = jp2[:] / (QUANTIFICATION_VALUE + 0.0)
+    return data
+
+
+def open_s2_tile(fname):
+    data = read_jp2_data(fname)
+    size = data.shape
+    params = parse_tile(fname)
+    areadef = utils.get_area_def(
+        params[0], "Sentinel 2 tile " + params[0], 'PROJ EPSG:' + params[1][0],
+        'init=epsg:' + params[1][0], size[0], size[1], params[1][1])
+    return ([data, areadef])
+
+
+def load(satscene):
+    """Load jpeg2000 data.
+    """
+
+    # Read config file content
+    conf = ConfigParser()
+    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
+
+    for chn_name in satscene.channels_to_load:
+        values = {"orbit": satscene.orbit,
+                  "satname": satscene.satname.upper(),
+                  "number": satscene.number,
+                  "instrument": satscene.instrument_name.upper(),
+                  "satellite": satscene.fullname.upper(),
+                  "band": chn_name}
+        filename = os.path.join(
+            conf.get("msi-level2", "dir"),
+            satscene.time_slot.strftime(conf.get(
+                "msi-level2", "filename", raw=True)) % values)
+        filelist = glob.glob(filename)
+        data_area = open_s2_tile(filelist[0])
+        satscene[chn_name] = ma.masked_array(data_area[0])
+        satscene[chn_name].area = data_area[1]
diff --git a/mpop/satin/viirs_compact.py b/mpop/satin/viirs_compact.py
index 4e30464..92905b9 100644
--- a/mpop/satin/viirs_compact.py
+++ b/mpop/satin/viirs_compact.py
@@ -53,167 +53,176 @@ def load(satscene, *args, **kwargs):
     files_to_load = []
     files_to_delete = []
 
-    filename = kwargs.get("filename")
-    logger.debug("reading %s", str(filename))
-    if filename is not None:
-        if isinstance(filename, (list, set, tuple)):
-            files = filename
-        else:
-            files = [filename]
-        files_to_load = []
-        for filename in files:
-            pathname, ext = os.path.splitext(filename)
-            if ext == ".bz2":
-                zipfile = bz2.BZ2File(filename)
-                newname = os.path.join("/tmp", os.path.basename(pathname))
-                if not os.path.exists(newname):
-                    with open(newname, "wb") as fp_:
-                        fp_.write(zipfile.read())
-                zipfile.close()
-                files_to_load.append(newname)
-                files_to_delete.append(newname)
+    try:
+        filename = kwargs.get("filename")
+        logger.debug("reading %s", str(filename))
+        if filename is not None:
+            if isinstance(filename, (list, set, tuple)):
+                files = filename
             else:
-                files_to_load.append(filename)
-    else:
-        time_start, time_end = kwargs.get("time_interval",
-                                          (satscene.time_slot, None))
-
-        conf = ConfigParser()
-        conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
-        options = {}
-        for option, value in conf.items(satscene.instrument_name + "-level2",
-                                        raw=True):
-            options[option] = value
+                files = [filename]
+            files_to_load = []
+            for filename in files:
+                pathname, ext = os.path.splitext(filename)
+                if ext == ".bz2":
+                    zipfile = bz2.BZ2File(filename)
+                    newname = os.path.join("/tmp", os.path.basename(pathname))
+                    if not os.path.exists(newname):
+                        with open(newname, "wb") as fp_:
+                            fp_.write(zipfile.read())
+                    zipfile.close()
+                    files_to_load.append(newname)
+                    files_to_delete.append(newname)
+                else:
+                    files_to_load.append(filename)
+        else:
+            time_start, time_end = kwargs.get("time_interval",
+                                              (satscene.time_slot, None))
 
-        template = os.path.join(options["dir"], options["filename"])
+            conf = ConfigParser()
+            conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
+            options = {}
+            for option, value in conf.items(satscene.instrument_name + "-level2",
+                                            raw=True):
+                options[option] = value
 
-        second = timedelta(seconds=1)
-        files_to_load = []
+            template = os.path.join(options["dir"], options["filename"])
 
-        if time_end is not None:
-            time = time_start - second * 85
+            second = timedelta(seconds=1)
             files_to_load = []
-            while time <= time_end:
-                fname = time.strftime(template)
-                flist = glob.glob(fname)
+
+            if time_end is not None:
+                time = time_start - second * 85
+                files_to_load = []
+                while time <= time_end:
+                    fname = time.strftime(template)
+                    flist = glob.glob(fname)
+                    try:
+                        files_to_load.append(flist[0])
+                        time += second * 80
+                    except IndexError:
+                        pass
+                    time += second
+
+            else:
+                files_to_load = glob.glob(time_start.strftime(template))
+
+        chan_dict = {"M01": "M1",
+                     "M02": "M2",
+                     "M03": "M3",
+                     "M04": "M4",
+                     "M05": "M5",
+                     "M06": "M6",
+                     "M07": "M7",
+                     "M08": "M8",
+                     "M09": "M9",
+                     "M10": "M10",
+                     "M11": "M11",
+                     "M12": "M12",
+                     "M13": "M13",
+                     "M14": "M14",
+                     "M15": "M15",
+                     "M16": "M16",
+                     "DNB": "DNB"}
+
+        channels = [(chn, chan_dict[chn])
+                    for chn in satscene.channels_to_load
+                    if chn in chan_dict]
+        try:
+            channels_to_load, chans = zip(*channels)
+        except ValueError:
+            return
+
+        m_chans = []
+        dnb_chan = []
+        for chn in chans:
+            if chn.startswith('M'):
+                m_chans.append(chn)
+            elif chn.startswith('DNB'):
+                dnb_chan.append(chn)
+            else:
+                raise ValueError("Reading of channel %s not implemented", chn)
+
+        m_datas = []
+        m_lonlats = []
+        dnb_datas = []
+        dnb_lonlats = []
+
+        for fname in files_to_load:
+            is_dnb = os.path.basename(fname).startswith('SVDNBC')
+            logger.debug("Reading %s", fname)
+            if is_dnb:
+                if tables:
+                    h5f = tables.open_file(fname, "r")
+                else:
+                    logger.warning("DNB data could not be read from %s, "
+                                   "PyTables not available.", fname)
+                    continue
+            else:
+                h5f = h5py.File(fname, "r")
+            if m_chans and not is_dnb:
                 try:
-                    files_to_load.append(flist[0])
-                    time += second * 80
-                except IndexError:
+                    arr, m_units = read_m(h5f, m_chans)
+                    m_datas.append(arr)
+                    m_lonlats.append(navigate_m(h5f, m_chans[0]))
+                except KeyError:
                     pass
-                time += second
-
-        else:
-            files_to_load = glob.glob(time_start.strftime(template))
-
-    chan_dict = {"M01": "M1",
-                 "M02": "M2",
-                 "M03": "M3",
-                 "M04": "M4",
-                 "M05": "M5",
-                 "M06": "M6",
-                 "M07": "M7",
-                 "M08": "M8",
-                 "M09": "M9",
-                 "M10": "M10",
-                 "M11": "M11",
-                 "M12": "M12",
-                 "M13": "M13",
-                 "M14": "M14",
-                 "M15": "M15",
-                 "M16": "M16",
-                 "DNB": "DNB"}
-
-    channels = [(chn, chan_dict[chn])
-                for chn in satscene.channels_to_load
-                if chn in chan_dict]
-    try:
-        channels_to_load, chans = zip(*channels)
-    except ValueError:
-        return
-
-    m_chans = []
-    dnb_chan = []
-    for chn in chans:
-        if chn.startswith('M'):
-            m_chans.append(chn)
-        elif chn.startswith('DNB'):
-            dnb_chan.append(chn)
+            if dnb_chan and is_dnb and tables:
+                try:
+                    arr, dnb_units = read_dnb(h5f)
+                    dnb_datas.append(arr)
+                    dnb_lonlats.append(navigate_dnb(h5f))
+                except KeyError:
+                    pass
+            h5f.close()
+
+        if len(m_lonlats) > 0:
+            m_lons = np.ma.vstack([lonlat[0] for lonlat in m_lonlats])
+            m_lats = np.ma.vstack([lonlat[1] for lonlat in m_lonlats])
+        if len(dnb_lonlats) > 0:
+            dnb_lons = np.ma.vstack([lonlat[0] for lonlat in dnb_lonlats])
+            dnb_lats = np.ma.vstack([lonlat[1] for lonlat in dnb_lonlats])
+
+        m_i = 0
+        dnb_i = 0
+        for chn in channels_to_load:
+            if m_datas and chn.startswith('M'):
+                m_data = np.ma.vstack([dat[m_i] for dat in m_datas])
+                satscene[chn] = m_data
+                satscene[chn].info["units"] = m_units[m_i]
+                m_i += 1
+            if dnb_datas and chn.startswith('DNB'):
+                dnb_data = np.ma.vstack([dat[dnb_i] for dat in dnb_datas])
+                satscene[chn] = dnb_data
+                satscene[chn].info["units"] = dnb_units[dnb_i]
+                dnb_i += 1
+
+        if m_datas:
+            m_area_def = SwathDefinition(np.ma.masked_where(m_data.mask, m_lons),
+                                         np.ma.masked_where(m_data.mask, m_lats))
         else:
-            raise ValueError("Reading of channel %s not implemented", chn)
-
-    m_datas = []
-    m_lonlats = []
-    dnb_datas = []
-    dnb_lonlats = []
-
-    for fname in files_to_load:
-        logger.debug("Reading %s", fname)
-        if 'SVDNBC' in fname:
-            if tables:
-                h5f = tables.open_file(fname, "r")
-            else:
-                logger.warning("DNB data could not be read from %f, "
-                               "PyTables not available.", fname)
-                continue
+            logger.warning("No M channel data available.")
+
+        if dnb_datas:
+            dnb_area_def = SwathDefinition(np.ma.masked_where(dnb_data.mask,
+                                                              dnb_lons),
+                                           np.ma.masked_where(dnb_data.mask,
+                                                              dnb_lats))
         else:
-            h5f = h5py.File(fname, "r")
-        if m_chans and "SVDNBC" not in os.path.split(fname)[-1]:
-            try:
-                arr, m_units = read_m(h5f, m_chans)
-                m_datas.append(arr)
-                m_lonlats.append(navigate_m(h5f, m_chans[0]))
-            except KeyError:
-                pass
-        if dnb_chan and "SVDNBC" in os.path.split(fname)[-1]:
-            try:
-                arr, dnb_units = read_dnb(h5f)
-                dnb_datas.append(arr)
-                dnb_lonlats.append(navigate_dnb(h5f))
-            except KeyError:
-                pass
-        h5f.close()
-
-    if m_chans:
-        m_lons = np.ma.vstack([lonlat[0] for lonlat in m_lonlats])
-        m_lats = np.ma.vstack([lonlat[1] for lonlat in m_lonlats])
-    if dnb_chan:
-        dnb_lons = np.ma.vstack([lonlat[0] for lonlat in dnb_lonlats])
-        dnb_lats = np.ma.vstack([lonlat[1] for lonlat in dnb_lonlats])
-
-    m_i = 0
-    dnb_i = 0
-    for chn in channels_to_load:
-        if chn.startswith('M'):
-            m_data = np.ma.vstack([dat[m_i] for dat in m_datas])
-            satscene[chn] = m_data
-            satscene[chn].info["units"] = m_units[m_i]
-            m_i += 1
-        if chn.startswith('DNB'):
-            dnb_data = np.ma.vstack([dat[dnb_i] for dat in dnb_datas])
-            satscene[chn] = dnb_data
-            satscene[chn].info["units"] = dnb_units[dnb_i]
-            dnb_i += 1
-
-    if m_chans:
-        m_area_def = SwathDefinition(np.ma.masked_where(m_data.mask, m_lons),
-                                     np.ma.masked_where(m_data.mask, m_lats))
-    if dnb_chan:
-        dnb_area_def = SwathDefinition(np.ma.masked_where(dnb_data.mask,
-                                                          dnb_lons),
-                                       np.ma.masked_where(dnb_data.mask,
-                                                          dnb_lats))
-
-    for chn in channels_to_load:
-        if "DNB" not in chn:
-            satscene[chn].area = m_area_def
-
-    for chn in dnb_chan:
-        satscene[chn].area = dnb_area_def
-
-    for fname in files_to_delete:
-        if os.path.exists(fname):
-            os.remove(fname)
+            logger.warning("No DNB data available.")
+
+        for chn in channels_to_load:
+            if "DNB" not in chn and m_datas:
+                satscene[chn].area = m_area_def
+
+        if dnb_datas:
+            for chn in dnb_chan:
+                satscene[chn].area = dnb_area_def
+
+    finally:
+        for fname in files_to_delete:
+            if os.path.exists(fname):
+                os.remove(fname)
 
 
 def read_m(h5f, channels, calibrate=1):
@@ -225,17 +234,24 @@ def read_m(h5f, channels, calibrate=1):
     scans = h5f["All_Data"]["NumberOfScans"][0]
     res = []
     units = []
+    arr_mask = np.ma.nomask
 
     for channel in channels:
         rads = h5f["All_Data"][chan_dict[channel]]["Radiance"]
-        arr = np.ma.masked_greater(rads[:scans * 16, :].astype(np.float32),
-                                   65526)
+        if channel in ("M9",):
+            arr = rads[:scans * 16, :].astype(np.float32)
+            arr[arr > 65526] = np.nan
+            arr = np.ma.masked_array(arr, mask=arr_mask)
+        else:
+            arr = np.ma.masked_greater(rads[:scans * 16, :].astype(np.float32),
+                                       65526)
         try:
             arr = np.ma.where(arr <= rads.attrs['Threshold'],
                               arr * rads.attrs['RadianceScaleLow'] +
                               rads.attrs['RadianceOffsetLow'],
                               arr * rads.attrs['RadianceScaleHigh'] + \
                               rads.attrs['RadianceOffsetHigh'],)
+            arr_mask = arr.mask
         except KeyError:
             print "KeyError"
             pass
diff --git a/mpop/satin/viirs_sdr.py b/mpop/satin/viirs_sdr.py
index 58b664d..e10bf65 100644
--- a/mpop/satin/viirs_sdr.py
+++ b/mpop/satin/viirs_sdr.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2011, 2012, 2013, 2014, 2015.
+# Copyright (c) 2011, 2012, 2013, 2014, 2015, 2016.
 
 # Author(s):
 
@@ -31,16 +31,18 @@ Format documentation:
 http://npp.gsfc.nasa.gov/science/sciencedocuments/082012/474-00001-03_CDFCBVolIII_RevC.pdf
 
 """
+import hashlib
+import logging
 import os.path
 from ConfigParser import ConfigParser
 from datetime import datetime, timedelta
 
-import numpy as np
 import h5py
-import hashlib
-import logging
+import numpy as np
 
 from mpop import CONFIG_PATH
+# ------------------------------------------------------------------------------
+from mpop.plugin_base import Reader
 from mpop.utils import strftime
 
 NO_DATE = datetime(1958, 1, 1)
@@ -510,19 +512,89 @@ class ViirsBandData(object):
             geofilepaths = [os.path.join(geodir, geofilepath)
                             for geofilepath in self.geo_filenames]
 
+        geofilepaths = sorted(geofilepaths)
         logger.debug("Geo-files = " + str(geofilepaths))
         self.geolocation = ViirsGeolocationData(self.data.shape,
                                                 geofilepaths).read()
 
-# ------------------------------------------------------------------------------
-from mpop.plugin_base import Reader
-
 
 class ViirsSDRReader(Reader):
     pformat = "viirs_sdr"
 
     def __init__(self, *args, **kwargs):
         Reader.__init__(self, *args, **kwargs)
+        self.geofiles = []
+        self.shape = None
+
+    def get_sunsat_angles(self, **kwargs):
+        """Get sun-satellite viewing geometry for a given band type (M, I, or
+        DNB)
+        Optional arguments:
+            bandtype = 'M', 'I', or 'DNB'
+        Return
+            sun-zenith, sun-azimuth, sat-zenith, sat-azimuth
+
+        """
+
+        if 'bandtype' in kwargs:
+            bandtype = kwargs['bandtype']
+        else:
+            bandtype = 'M'
+
+        if bandtype.startswith('M'):
+            geofilenames = [geofile for geofile in self.geofiles
+                            if os.path.basename(geofile).startswith('GMTCO')]
+            if len(geofilenames) == 0:
+                # Try the geoid instead:
+                geofilenames = [geofile for geofile in self.geofiles
+                                if os.path.basename(geofile).startswith('GMODO')]
+        elif bandtype.startswith('I'):
+            geofilenames = [geofile for geofile in self.geofiles
+                            if os.path.basename(geofile).startswith('GITCO')]
+            if len(geofilenames) == 0:
+                # Try the geoid instead:
+                geofilenames = [geofile for geofile in self.geofiles
+                                if os.path.basename(geofile).startswith('GIMGO')]
+        elif bandtype.startswith('DNB'):
+            geofilenames = [geofile for geofile in self.geofiles
+                            if os.path.basename(geofile).startswith('GDNBO')]
+
+        else:
+            logger.error("Band type %s not supported", bandtype)
+            return None
+
+        geofilenames = sorted(geofilenames)
+
+        data = {}
+        mask = {}
+        h5names = ['SolarZenithAngle', 'SolarAzimuthAngle',
+                   'SatelliteZenithAngle', 'SatelliteAzimuthAngle']
+        local_names = ['sunz', 'sun_azi',
+                       'satz', 'sat_azi']
+        for item in local_names:
+            data[item] = np.empty(self.shape,
+                                  dtype=np.float32)
+            mask[item] = np.zeros(self.shape,
+                                  dtype=np.bool)
+
+        granule_length = self.shape[0] / len(geofilenames)
+
+        for index, filename in enumerate(geofilenames):
+
+            swath_index = index * granule_length
+            y0_ = swath_index
+            y1_ = swath_index + granule_length
+
+            for angle, param_name in zip(h5names, local_names):
+                get_viewing_angle_into(filename,
+                                       data[param_name][y0_:y1_, :],
+                                       mask[param_name][y0_:y1_, :], angle)
+
+        for item in local_names:
+            data[item] = np.ma.array(data[item], mask=mask[item], copy=False)
+
+        return (data['sunz'], data['sun_azi'],
+                data['satz'], data['sat_azi'])
 
     def load(self, satscene, calibrate=1, time_interval=None,
              area=None, filename=None, **kwargs):
@@ -662,6 +734,8 @@ class ViirsSDRReader(Reader):
 
         glob_info = {}
 
+        self.geofiles = geofile_list
+
         logger.debug("Channels to load: " + str(satscene.channels_to_load))
         for chn in satscene.channels_to_load:
             # Take only those files in the list matching the band:
@@ -740,6 +814,9 @@ class ViirsSDRReader(Reader):
             satscene[chn].info['band_id'] = band.band_id
             satscene[chn].info['start_time'] = band.begin_time
             satscene[chn].info['end_time'] = band.end_time
+            if chn in ['M01', 'M02', 'M03', 'M04', 'M05', 'M06', 'M07', 'M08', 'M09', 'M10', 'M11',
+                       'I01', 'I02', 'I03']:
+                satscene[chn].info['sun_zen_correction_applied'] = True
 
             # We assume the same geolocation should apply to all M-bands!
             # ...and the same to all I-bands:
@@ -757,8 +834,13 @@ class ViirsSDRReader(Reader):
                          str(satscene.time_slot) + "_"
                          + str(satscene[chn].data.shape) + "_" +
                          band.band_uid)
+
             satscene[chn].area.area_id = area_name
             satscene[chn].area_id = area_name
+
+            if self.shape is None:
+                self.shape = band.data.shape
+
             # except ImportError:
             #    satscene[chn].area = None
             #    satscene[chn].lat = np.ma.array(band.latitude, mask=band.data.mask)
@@ -812,6 +894,27 @@ def get_lonlat_into(filename, out_lons, out_lats, out_mask):
     h5f.close()
 
 
+def get_viewing_angle_into(filename, out_val, out_mask, param):
+    """Read a sun-sat viewing angle from hdf5 file"""
+    logger.debug("Sun-Sat viewing geometry = " + filename)
+
+    if param not in ['SolarZenithAngle',
+                     'SolarAzimuthAngle',
+                     'SatelliteZenithAngle',
+                     'SatelliteAzimuthAngle']:
+        logger.warning('Viewing geometry parameter %s not supported!', param)
+        return None
+
+    md = HDF5MetaData(filename).read()
+
+    h5f = h5py.File(filename, 'r')
+    for key in md.get_data_keys():
+        if key.endswith('/' + param):
+            h5f[key].read_direct(out_val)
+            out_mask[:] = out_val < -999
+    h5f.close()
+
+
 def globify(filename):
     filename = filename.replace("%Y", "????")
     filename = filename.replace("%m", "??")
diff --git a/mpop/satout/cfscene.py b/mpop/satout/cfscene.py
index e3c46ee..cebde37 100644
--- a/mpop/satout/cfscene.py
+++ b/mpop/satout/cfscene.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2010, 2011, 2012, 2014.
+# Copyright (c) 2010, 2011, 2012, 2014, 2016.
 
 # Author(s):
 
@@ -59,10 +59,16 @@ class CFScene(object):
     with the *scene* to transform as argument.
     """
 
-    def __init__(self, scene, dtype=np.int16, band_axis=2):
+    def __init__(self, scene, dtype=np.int16, band_axis=2,
+                 area_aggregation=True,
+                 time_dimension=False):
         if not issubclass(dtype, np.integer):
             raise TypeError('Only integer saving allowed for CF data')
 
+        time_axis = -1
+        if time_dimension:
+            time_axis = 0
+
         self.info = scene.info.copy()
         if "time" in self.info:
             del self.info["time"]
@@ -70,7 +76,8 @@ class CFScene(object):
 
         # Other global attributes
         self.info["Conventions"] = "CF-1.5"
-        self.info["platform"] = scene.satname + "-" + str(scene.number)
+        #self.info["platform"] = scene.satname + "-" + str(scene.number)
+        self.info["platform"] = scene.fullname
         self.info["instrument"] = scene.instrument_name
         if scene.variant:
             self.info["service"] = scene.variant
@@ -80,9 +87,14 @@ class CFScene(object):
         self.time = InfoObject()
         self.time.data = date2num(scene.time_slot,
                                   TIME_UNITS)
+        if time_dimension:
+            var_dim_names = ("time", )
+        else:
+            var_dim_names = ()
+
         self.time.info = {"var_name": "time",
                           "var_data": self.time.data,
-                          "var_dim_names": (),
+                          "var_dim_names": var_dim_names,
                           "long_name": "Nominal time of the image",
                           "standard_name": "time",
                           "units": TIME_UNITS}
@@ -105,6 +117,7 @@ class CFScene(object):
                 continue
 
             fill_value = np.iinfo(CF_DATA_TYPE).min
+
             if ma.count_masked(chn.data) == chn.data.size:
                 # All data is masked
                 data = np.ones(chn.data.shape, dtype=CF_DATA_TYPE) * fill_value
@@ -132,7 +145,10 @@ class CFScene(object):
                 else:
                     data = ((chn.data - offset) / scale).astype(CF_DATA_TYPE)
 
-            data = np.ma.expand_dims(data, band_axis)
+            if time_dimension:
+                data = np.ma.expand_dims(data, time_axis)
+            elif area_aggregation:
+                data = np.ma.expand_dims(data, band_axis)
 
             # it's a grid mapping
             try:
@@ -231,6 +247,7 @@ class CFScene(object):
                                  "var_data": lons.data,
                                  "var_dim_names": ("y" + str_arc,
                                                    "x" + str_arc),
+                                 "_FillValue": lons.data.fill_value,
                                  "units": "degrees east",
                                  "long_name": "longitude coordinate",
                                  "standard_name": "longitude"}
@@ -247,6 +264,7 @@ class CFScene(object):
                                  "var_data": lats.data,
                                  "var_dim_names": ("y" + str_arc,
                                                    "x" + str_arc),
+                                 "_FillValue": lats.data.fill_value,
                                  "units": "degrees north",
                                  "long_name": "latitude coordinate",
                                  "standard_name": "latitude"}
@@ -258,7 +276,9 @@ class CFScene(object):
                                        lons.info["var_name"])
                 xy_names = ["y" + str_arc, "x" + str_arc]
 
-            if (chn.area, chn.info['units']) in area_units:
+            if (area_aggregation and not time_dimension and
+                    (chn.area, chn.info['units']) in area_units):
+
                 str_cnt = str(area_units.index((chn.area, chn.info['units'])))
                 # area has been used before
                 band = getattr(self, "band" + str_cnt)
@@ -307,7 +327,11 @@ class CFScene(object):
                 band = InfoObject()
                 band.data = data
                 dim_names = xy_names
-                dim_names.insert(band_axis, 'band' + str_cnt)
+                if time_dimension:
+                    dim_names.insert(time_axis, 'time')
+                elif area_aggregation:
+                    dim_names.insert(band_axis, 'band' + str_cnt)
+
                 band.info = {"var_name": "Image" + str_cnt,
                              "var_data": band.data,
                              'var_dim_names': dim_names,
@@ -318,11 +342,12 @@ class CFScene(object):
 
                 # bandname
 
+                var_dim_names = ("band" + str_cnt,)
                 bandname = InfoObject()
                 bandname.data = np.array([chn.name], 'O')
                 bandname.info = {"var_name": "band" + str_cnt,
                                  "var_data": bandname.data,
-                                 "var_dim_names": ("band" + str_cnt,),
+                                 "var_dim_names": var_dim_names,
                                  "standard_name": "band_name"}
                 setattr(self, "bandname" + str_cnt, bandname)
 
@@ -360,13 +385,22 @@ class CFScene(object):
                 else:
                     band.info["coordinates"] = coordinates
 
+                # Add other (custom) attributes:
+                # Only scalar attributes!
+                for key in chn.info.keys():
+                    if key not in band.info.keys():
+                        if (type(chn.info[key]) == str or type(chn.info[key]) == int or
+                                type(chn.info[key]) == float):
+                            band.info[key] = chn.info[key]
+
                 setattr(self, "band" + str_cnt, band)
 
         for i, area_unit in enumerate(area_units):
             # compute data reduction
             fill_value = np.iinfo(CF_DATA_TYPE).min
             band = getattr(self, "band" + str(i))
-            # band.info["valid_range"] = np.array([valid_min, valid_max]),
+            valid_min, valid_max = band.data.min(), band.data.max()
+            band.info["valid_range"] = np.array([valid_min, valid_max]),
 
     def save(self, filename, *args, **kwargs):
         return netcdf_cf_writer(filename, self, kwargs.get("compression", True))
diff --git a/mpop/satout/netcdf4.py b/mpop/satout/netcdf4.py
index 5058da2..ea0b978 100644
--- a/mpop/satout/netcdf4.py
+++ b/mpop/satout/netcdf4.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2010, 2011, 2012, 2014.
+# Copyright (c) 2010, 2011, 2012, 2014, 2016.
 
 # Author(s):
 
@@ -35,16 +35,29 @@ import logging
 logger = logging.getLogger(__name__)
 
 
-def save(scene, filename, compression=True, dtype=np.int16, band_axis=2):
+def save(scene, filename, compression=True, dtype=np.int16, band_axis=2,
+         area_aggregation=True, time_dimension=False):
     """Saves the scene as a NetCDF4 file, with CF conventions.
 
-    *band_axis* gives the which axis to use for the band dimension. For
+    *band_axis* gives which axis to use for the band dimension. For
      example, use band_axis=0 to get dimensions like (band, y, x).
+
+    *area_aggregation* determines if bands on the same area should be gathered
+     together or not. Default is True, meaning aggregation. If
+     *area_aggregation* is False, the band_axis becomes obsolete. Area aggregation
+     is currently not possible when using *time_dimension*.
+
+    *time_dimension* is a boolean and if True a time axis (dimension=1) is
+    added in front, like (time, y, x), and the band_axis is omitted. Thus each
+    data/band go in a separate dataset.
+
     """
     from mpop.satout.cfscene import CFScene
 
     scene.add_to_history("Saved as netcdf4/cf by pytroll/mpop.")
-    return netcdf_cf_writer(filename, CFScene(scene, dtype, band_axis),
+    return netcdf_cf_writer(filename,
+                            CFScene(scene, dtype, band_axis, area_aggregation,
+                                    time_dimension),
                             compression=compression)
 
 
@@ -228,6 +241,7 @@ def netcdf_cf_writer(filename, root_object, compression=True):
             # in the case of arrays containing strings:
             if str(vtype) == "object":
                 vtype = str
+
             nc_vars.append(rootgrp.createVariable(
                 name, vtype, dim_name,
                 zlib=compression,
diff --git a/mpop/scene.py b/mpop/scene.py
index 9d993ef..49cc802 100644
--- a/mpop/scene.py
+++ b/mpop/scene.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015.
+# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2016.
 
 # Author(s):
 
@@ -30,19 +30,19 @@ also for a given area.
 import ConfigParser
 import copy
 import datetime
+import imp
+import logging
 import os.path
+import sys
 import types
 import weakref
-import sys
-import logging
-import imp
 
 import numpy as np
 
+import mpop.satin
 from mpop import CONFIG_PATH
 from mpop.channel import Channel, NotLoadedError
 from mpop.utils import OrderedConfigParser
-import mpop.satin
 
 LOG = logging.getLogger(__name__)
 
@@ -83,6 +83,14 @@ class Satellite(object):
         """
         return self.variant + self.satname + self.number
 
+    def sat_nr(self, string=False):
+        import re
+        sat_nr = re.findall(r'\d+', self.fullname)[0]
+        if string:
+            return sat_nr
+        else:
+            return int(sat_nr)
+
     @classmethod
     def remove_attribute(cls, name):
         """Remove an attribute from the class.
@@ -113,7 +121,7 @@ class SatelliteScene(Satellite):
 
         Satellite.__init__(self, satellite)
 
-        #if(time_slot is not None and
+        # if(time_slot is not None and
         #   not isinstance(time_slot, datetime.datetime)):
         #    raise TypeError("Time_slot must be a datetime.datetime instance.")
 
@@ -204,6 +212,10 @@ class SatelliteInstrumentScene(SatelliteScene):
 
         self.channels = []
 
+        self.end_time = None
+        if isinstance(self.time_slot, (tuple, list)):
+            self.time_slot, self.end_time = self.time_slot
+
         try:
             conf = OrderedConfigParser()
             conf.read(os.path.join(CONFIG_PATH, self.fullname + ".cfg"))
@@ -461,6 +473,13 @@ class SatelliteInstrumentScene(SatelliteScene):
             if len(self.channels_to_load) == 0:
                 return
 
+            if "reader_level" in kwargs.keys():
+                if kwargs["reader_level"] != None:
+                    LOG.debug(
+                        "Using explecit definition of reader level: " + kwargs["reader_level"])
+                    if kwargs["reader_level"] != level:
+                        continue
+
             LOG.debug("Looking for sources in section " + level)
             reader_name = conf.get(level, 'format')
             try:
@@ -550,6 +569,181 @@ class SatelliteInstrumentScene(SatelliteScene):
         """
         return set([chan for chan in self.channels if chan.is_loaded()])
 
+    def get_orbital(self):
+        from pyorbital.orbital import Orbital
+        from pyorbital import tlefile
+
+        from pyorbital.tlefile import get_norad_line
+        sat_line = get_norad_line(self.satname, self.number)
+        self.orbital = Orbital(sat_line)
+
+        return self.orbital
+
+    def estimate_cth(self, cth_atm="best", time_slot=None):
+        """
+        General purpose
+        ===============
+           Estimation of the cloud top height using the 10.8 micron channel
+           limitations: this is the most simple approach
+           a simple fit of the ir108 to the temperature profile
+                * no correction for water vapour or any other trace gas
+                * no viewing angle dependency
+                * no correction for semi-transparent clouds
+                * no special treatment of temperature inversions
+        Example call
+        ============
+           data.estimate_cth(cth_atm="best")
+        input arguments
+        ===============
+          cth_atm    * using temperature profile to estimate the cloud top height
+                       possible choices are (see estimate_cth in mpop/tools.py):
+                       "standard", "tropics", "midlatitude summer", "midlatitude winter", "subarctic summer", "subarctic winter"
+                       this will choose the corresponding atmospheric AFGL temperature profile
+                     * new choice: "best" -> choose according to central (lon,lat) and time from:
+                       "tropics", "midlatitude summer", "midlatitude winter", "subarctic summer", "subarctic winter"
+          time_slot  current observation time as (datetime.datetime() object)
+                     time_slot option can be omitted, the function tries to use self.time_slot
+        """
+
+        print "*** Simple estimation of Cloud Top Height with IR_108 channel"
+
+        # check if IR_108 is loaded
+        loaded_channels = [chn.name for chn in self.loaded_channels()]
+        if "IR_108" not in loaded_channels:
+            print "*** Error in estimate_cth (mpop/scene.py)"
+            print "    IR_108 is required to estimate CTH, but not loaded"
+            quit()
+        else:
+            ir108 = self["IR_108"].data
+
+        # choose atmosphere
+        if cth_atm.lower() == "best":
+            # get central lon/lat coordinates
+            (yc, xc) = ir108.shape
+            (lon, lat) = self.area.get_lonlat(yc / 2, xc / 2)
+
+            if time_slot == None:
+                if hasattr(self, 'time_slot'):
+                    time_slot = self.time_slot
+                else:
+                    print "*** Error, in estimate_cth (mpop/channel.py)"
+                    print "    when using cth_atm=\"best\" also the time_slot information is required!"
+                    quit()
+
+            # automatic choice of temperature profile
+            doy = time_slot.timetuple().tm_yday
+            print "... automatic choise of temperature profile lon=", lon, " lat=", lat, ", time=", str(time_slot), ", doy=", doy
+            if abs(lat) <= 30.0:
+                cth_atm = "tropics"
+            elif doy < 80 or doy <= 264:
+                # northern summer
+                if lat < -60.0:
+                    cth_atm = "subarctic winter"
+                elif -60.0 <= lat and lat < -30.0:
+                    cth_atm = "midlatitude winter"
+                elif 30.0 < lat and lat <= 60.0:
+                    cth_atm = "midlatitude summer"
+                elif 60.0 < lat:
+                    cth_atm = "subarctic summer"
+            else:
+                # northern winter
+                if lat < -60.0:
+                    cth_atm = "subarctic summer"
+                elif -60.0 <= lat and lat < -30.0:
+                    cth_atm = "midlatitude summer"
+                elif 30.0 < lat and lat <= 60.0:
+                    cth_atm = "midlatitude winter"
+                elif 60 < lat:
+                    cth_atm = "subarctic winter"
+            print "    choosing temperature profile for ", cth_atm
+
+        # estimate cloud top height by searching first fit of ir108 with
+        # temperature profile
+        from mpop.tools import estimate_cth
+        cth = estimate_cth(ir108, cth_atm=cth_atm)
+
+        # create new channel named "CTH"
+        self.channels.append(Channel(name="CTH",
+                                     wavelength_range=[0., 0., 0.],
+                                     resolution=self["IR_108"].resolution,
+                                     data=cth,
+                                     calibration_unit="m"))
+
+        # copy additional information from IR_108
+        self["CTH"].info = self["IR_108"].info
+        self["CTH"].info['units'] = 'm'
+        self["CTH"].area = self["IR_108"].area
+        self["CTH"].area_id = self["IR_108"].area_id
+        self["CTH"].area_def = self["IR_108"].area_def
+        self["CTH"].resolution = self["IR_108"].resolution
+
+        return cth
+
+    def parallax_corr(self, fill="False", estimate_cth=False, cth_atm='best', replace=False):
+        """
+        perform the CTH parallax correction for all loaded channels
+        """
+
+        loaded_channels = [chn.name for chn in self.loaded_channels()]
+        if len(loaded_channels) == 0:
+            return
+
+        # loop over channels and check, if one is a normal radiance channel
+        # having the method to calculate the viewing geometry
+        for chn in self.loaded_channels():
+            if hasattr(chn, 'get_viewing_geometry'):
+                # calculate the viewing geometry of the SEVIRI sensor
+                print "... calculate viewing geometry using ", chn.name
+                (azi, ele) = chn.get_viewing_geometry(
+                    self.get_orbital(), self.time_slot)
+                break
+
+        # choose best way to get CTH for parallax correction
+        if not estimate_cth:
+            if "CTTH" in loaded_channels:
+                # make a copy of CTH, as it might get replace by its parallax
+                # corrected version
+                cth = copy.deepcopy(self["CTTH"].height)
+            else:
+                print "*** Error in parallax_corr (mpop.scene.py)"
+                print "    parallax correction needs some cloud top height information"
+                print "    please load the NWC-SAF CTTH product (recommended) or"
+                print "    activate the option data.parallax_corr( estimate_cth=True )"
+                quit()
+        else:
+            if "IR_108" in loaded_channels:
+                # try to estimate CTH with IR_108
+                self.estimate_cth()
+                cth = self["CTH"].data
+            else:
+                print "*** Error in parallax_corr (mpop.scene.py)"
+                print "    parallax correction needs some cloud top height information"
+                print "    you specified the estimation of CTH with the IR_108, but "
+                print "    this channel is not loaded"
+                quit()
+
+        # perform parallax correction for each loaded channel
+        for chn in self.loaded_channels():
+            if hasattr(chn, 'parallax_corr'):
+                print "... perform parallax correction for ", chn.name
+                if replace:
+                    chn_name_PC = chn.name
+                    print "    replace channel ", chn_name_PC
+                else:
+                    chn_name_PC = chn.name + "_PC"
+                    print "    create channel ", chn_name_PC
+
+                # take care of the parallax correction
+                self[chn_name_PC] = chn.parallax_corr(
+                    cth=cth, azi=azi, ele=ele, fill=fill)
+            else:
+                LOG.warning("Channel " + str(chn.name) + " has no attribute parallax_corr,"
+                            "thus parallax effect wont be corrected.")
+                print "Channel " + str(chn.name) + " has no attribute parallax_corr,"
+                print "thus parallax effect wont be corrected."
+
+        return self
+
     def project(self, dest_area, channels=None, precompute=False, mode=None,
                 radius=None, nprocs=1):
         """Make a copy of the current snapshot projected onto the
diff --git a/mpop/tests/test_geo_image.py b/mpop/tests/test_geo_image.py
index dc54cee..e7b5b89 100644
--- a/mpop/tests/test_geo_image.py
+++ b/mpop/tests/test_geo_image.py
@@ -41,6 +41,8 @@ sys.modules['osgeo'] = MagicMock()
 sys.modules['pyresample'] = MagicMock()
 
 import mpop.imageo.geo_image as geo_image
+import mpop.imageo.formats.writer_options as writer_opts
+
 
 class TestGeoImage(unittest.TestCase):
     """Class for testing pp.geo_image.
@@ -62,19 +64,49 @@ class TestGeoImage(unittest.TestCase):
         """
         
         self.img.save("test.tif", compression=0)
-        mock_save.assert_called_once_with("test.tif", 0, None, None, 256)
+        mock_save.assert_called_once_with("test.tif", 0, {}, None, 256,
+                                          writer_options={'blocksize': 256,
+                                                          'compression': 0})
         mock_save.reset_mock()
         self.img.save("test.tif", compression=9)
-        mock_save.assert_called_once_with("test.tif", 9, None, None, 256)
+        mock_save.assert_called_once_with("test.tif", 9, {}, None, 256,
+                                          writer_options={'blocksize': 256,
+                                                          'compression': 9})
         mock_save.reset_mock()
         self.img.save("test.tif", compression=9, floating_point=True)
-        mock_save.assert_called_once_with("test.tif", 9, None, None, 256,
-                                          floating_point=True)
+        mock_save.assert_called_once_with("test.tif", 9, {}, None, 256,
+                                          floating_point=True,
+                                          writer_options={'blocksize': 256,
+                                                          'compression': 9})
 
         mock_save.reset_mock()
         self.img.save("test.tif", compression=9, tags={"NBITS": 20})
         mock_save.assert_called_once_with("test.tif", 9, {"NBITS": 20},
-                                          None, 256)
+                                          None, 256,
+                                          writer_options={'blocksize': 256,
+                                                          'nbits': 20,
+                                                          'compression': 9})
+        mock_save.reset_mock()
+        self.img.save("test.tif", writer_options={"compression":9})
+        mock_save.assert_called_once_with("test.tif", 9, {}, None, 256,
+                                          writer_options={'blocksize': 256,
+                                                          'compression': 9})
+
+        mock_save.reset_mock()
+        self.img.save("test.tif", writer_options={"compression":9, "nbits":16})
+        mock_save.assert_called_once_with("test.tif", 9, {"NBITS": 16},
+                                          None, 256,
+                                          writer_options={'blocksize': 256,
+                                                          'nbits': 16,
+                                                          'compression': 9})
+
+        mock_save.reset_mock()
+        self.img.save("test.tif", writer_options={"fill_value_subst": 1})
+        mock_save.assert_called_once_with("test.tif", 6, {}, None, 256,
+                                          writer_options={'blocksize': 256,
+                                                          'compression': 6,
+                                                          'fill_value_subst': 1})
+
 
         with patch.object(geo_image.Image, 'save') as mock_isave:
             self.img.save("test.png")
@@ -432,6 +464,108 @@ class TestGeoImage(unittest.TestCase):
         dst_ds.SetMetadata.assert_called_once_with(time_tag, '')
 
 
+    @patch('osgeo.osr.SpatialReference')
+    @patch('mpop.projector.get_area_def')
+    @patch('osgeo.gdal.GDT_Float64')
+    @patch('osgeo.gdal.GDT_Byte')
+    @patch('osgeo.gdal.GDT_UInt16')
+    @patch('osgeo.gdal.GDT_UInt32')
+    @patch('osgeo.gdal.GetDriverByName')
+    @patch.object(geo_image.GeoImage, '_gdal_write_channels')
+    def test_save_geotiff_fill_value(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref):
+        """Save to geotiff format.
+        """
+        
+        # source image data, masked data but only zeros
+        self.data = np.ma.zeros((512, 512), dtype=np.uint8)
+        self.data.mask = np.zeros(self.data .shape, dtype=bool)
+        self.data.mask[0,0] = True
+
+        self.img = geo_image.GeoImage(self.data,
+                                      area="euro",
+                                      time_slot=self.time_slot)
+        self.img.fill_value = [0]
+
+        raster = gtbn.return_value
+        
+        self.img.geotiff_save("test.tif", 0, None, {"BLA": "09"}, 256)
+        gtbn.assert_called_once_with("GTiff")
+
+        raster.Create.assert_called_once_with("test.tif",
+                                              self.data.shape[0],
+                                              self.data.shape[1],
+                                              1,
+                                              gby,
+                                              ["BLA=09",
+                                               'TILED=YES',
+                                               'BLOCKXSIZE=256',
+                                               'BLOCKYSIZE=256'])
+        dst_ds = raster.Create.return_value
+
+        self.assertEquals(mock_write_channels.call_count, 1)
+        self.assertEquals(mock_write_channels.call_args[0][0], dst_ds)
+        self.assertEquals(mock_write_channels.call_args[0][2], 255)
+        self.assertTrue(mock_write_channels.call_args[0][3], self.img.fill_value)
+        self.assertTrue(np.all(mock_write_channels.call_args[0][1]
+                               == self.data))
+
+
+    @patch('osgeo.osr.SpatialReference')
+    @patch('mpop.projector.get_area_def')
+    @patch('osgeo.gdal.GDT_Float64')
+    @patch('osgeo.gdal.GDT_Byte')
+    @patch('osgeo.gdal.GDT_UInt16')
+    @patch('osgeo.gdal.GDT_UInt32')
+    @patch('osgeo.gdal.GetDriverByName')
+    @patch.object(geo_image.GeoImage, '_gdal_write_channels')
+    def test_save_geotiff_fill_value_subst(self, mock_write_channels, gtbn, gui32, gui16, gby, gf, gad, spaceref):
+        """Save to geotiff format.
+        """
+
+        # source image data, masked data but only zeros
+        self.data = np.ma.zeros((512, 512), dtype=np.uint8)
+        self.data.mask = np.zeros(self.data .shape, dtype=bool)
+        self.data.mask[0,0] = True
+
+        self.img = geo_image.GeoImage(self.data,
+                                      area="euro",
+                                      time_slot=self.time_slot)
+        self.img.fill_value = [0]
+        
+        # zeros that are not masked should be replaced by ones
+        fill_value_substitution = 1
+        
+        data_with_subst = np.ma.copy(self.data)
+        np.place(data_with_subst, self.data == self.img.fill_value[0], 1)
+
+        raster = gtbn.return_value
+
+        self.img.geotiff_save("test.tif", 0, None, {"BLA": "09"}, 256,
+                              writer_options={writer_opts.WR_OPT_FILL_VALUE_SUBST: fill_value_substitution})
+
+        gtbn.assert_called_once_with("GTiff")
+
+        raster.Create.assert_called_once_with("test.tif",
+                                              self.data.shape[0],
+                                              self.data.shape[1],
+                                              1,
+                                              gby,
+                                              ["BLA=09",
+                                               'TILED=YES',
+                                               'BLOCKXSIZE=256',
+                                               'BLOCKYSIZE=256'])
+        dst_ds = raster.Create.return_value
+
+        self.assertEquals(mock_write_channels.call_count, 1)
+        self.assertEquals(mock_write_channels.call_args[0][0], dst_ds)
+        self.assertEquals(mock_write_channels.call_args[0][2], 255)
+        self.assertTrue(mock_write_channels.call_args[0][3], self.img.fill_value)
+        
+        # all zeros  should be replaced by ones
+        self.assertTrue(np.all(mock_write_channels.call_args[0][1]
+                               == data_with_subst))
+
+
 def suite():
     """The test suite for test_geo_image.
     """
diff --git a/mpop/tests/test_projector.py b/mpop/tests/test_projector.py
index 65dc703..bd27611 100644
--- a/mpop/tests/test_projector.py
+++ b/mpop/tests/test_projector.py
@@ -74,8 +74,9 @@ class TestProjector(unittest.TestCase):
 
         self.proj = Projector(in_area_id, out_area_id)
         self.assertEquals(utils.parse_area_file.call_count, 2)
-        utils.parse_area_file.assert_any_call('', in_area_id)
-        utils.parse_area_file.assert_any_call('', out_area_id)
+        area_file = mpop.projector.get_area_file()
+        utils.parse_area_file.assert_any_call(area_file, in_area_id)
+        utils.parse_area_file.assert_any_call(area_file, out_area_id)
 
 
 
diff --git a/mpop/tools.py b/mpop/tools.py
index a31fd91..d04aa93 100644
--- a/mpop/tools.py
+++ b/mpop/tools.py
@@ -46,3 +46,177 @@ def sunzen_corr_cos(data, cos_zen, limit=80.):
     data[lim_y, lim_x] /= cos_limit
 
     return data
+
+
+def estimate_cth(IR_108, cth_atm="standard"):
+
+    '''
+    Estimation of the cloud top height using the 10.8 micron channel
+    limitations: this is the most simple approach
+                 a simple fit of the ir108 to the temperature profile
+                 * no correction for water vapour or any other trace gas
+                 * no viewing angle dependency
+                 * no correction for semi-transparent clouds
+
+    optional input:
+      cth_atm    * "standard", "tropics", "midlatitude summer", "midlatitude winter", "subarctic summer", "subarctic winter"
+                  Matching the 10.8 micron temperature with atmosphere profile
+                  (s)  AFGL atmospheric constituent profile. U.S. standard atmosphere 1976. (AFGL-TR-86-0110) 
+                  (t)  AFGL atmospheric constituent profile. tropical.                      (AFGL-TR-86-0110)
+                  (ms) AFGL atmospheric constituent profile. midlatitude summer.            (AFGL-TR-86-0110) 
+                  (ws) AFGL atmospheric constituent profile. midlatitude winter.            (AFGL-TR-86-0110)
+                  (ss) AFGL atmospheric constituent profile. subarctic summer.              (AFGL-TR-86-0110) 
+                  (sw) AFGL atmospheric constituent profile. subarctic winter.              (AFGL-TR-86-0110)
+                  Ulrich Hamann (MeteoSwiss)
+                * "tropopause"
+                  Assuming a fixed tropopause height and a fixed temperature gradient
+                  Richard Mueller (DWD)
+    output: 
+      height     estimated cloud top height in meters, returned as a masked
+                 array; pixels warmer than the lowest profile level (clear
+                 sky) and other non-cloud pixels are masked.
+
+    Versions: 05.07.2016 initial version
+              Ulrich Hamann (MeteoSwiss), Richard Mueller (DWD)
+    '''
+
+    print "*** estimating CTH using the 10.8 micro meter brightness temperature "
+
+    if cth_atm.lower() != "tropopause":
+
+        # define atmospheric temperature profile    
+        import os
+        from numpy import loadtxt, zeros, where, logical_and
+        import mpop 
+
+        mpop_dir = os.path.dirname(mpop.__file__)
+        afgl_file = mpop_dir+"/afgl.dat"
+        print "... assume ", cth_atm, " atmosphere for temperature profile"
+
+        if cth_atm.lower()=="standard" or cth_atm.lower()=="s":
+            z, T = loadtxt(afgl_file, usecols=(0, 1), unpack=True, comments="#")
+        elif cth_atm.lower()=="tropics" or cth_atm.lower()=="t":
+            z, T = loadtxt(afgl_file, usecols=(0, 2), unpack=True, comments="#")
+        elif cth_atm.lower()=="midlatitude summer" or cth_atm.lower()=="ms":
+            z, T = loadtxt(afgl_file, usecols=(0, 3), unpack=True, comments="#")
+        elif cth_atm.lower()=="midlatitude winter" or cth_atm.lower()=="ws":
+            z, T = loadtxt(afgl_file, usecols=(0, 4), unpack=True, comments="#")
+        elif cth_atm.lower()=="subarctic summer" or cth_atm.lower()=="ss":
+            z, T = loadtxt(afgl_file, usecols=(0, 5), unpack=True, comments="#")
+        elif cth_atm.lower()=="subarctic winter" or cth_atm.lower()=="ss":
+            z, T = loadtxt(afgl_file, usecols=(0, 6), unpack=True, comments="#")
+        else:
+            print "*** Error in estimate_cth (mpop/tools.py)"
+            print "unknown temperature profiel for CTH estimation: cth_atm = ", cth_atm
+            quit()
+
+        height = zeros(IR_108.shape)
+        # warmer than lowest level -> clear sky 
+        height[where(IR_108 > T[-1])] = -1.
+        print "     z0(km)   z1(km)   T0(K)   T1(K)  number of pixels"
+        print "------------------------------------------------------"
+        for i in range(z.size)[::-1]:
+
+            # search for temperatures between layer i-1 and i
+            ind =  np.where( logical_and( T[i-1]< IR_108, IR_108 < T[i]) )
+            # interpolate CTH according to ir108 temperature
+            height[ind] = z[i] + (IR_108[ind]-T[i])/(T[i-1]-T[i]) * (z[i-1]-z[i])
+            # verbose output
+            print " {0:8.1f} {1:8.1f} {2:8.1f} {3:8.1f} {4:8d}".format(z[i], z[i-1], T[i], T[i-1], len(ind[0]))
+
+            # if temperature increases above 8km -> tropopause detected
+            if z[i]>=8. and T[i] <= T[i-1]:
+                # no cloud above tropopause
+                break
+            # no cloud heights above 20km
+            if z[i]>=20.:
+                break
+
+        # if height is still 0 -> cloud colder than tropopause -> cth == tropopause height
+        height[np.where( height == 0 )] = z[i]
+        
+    else:
+
+        Htropo=11.0 # km
+        # this is an assumption it should be optimized 
+        # by making it dependent on region and season. 
+        # It might be good to include the ITC in the  
+        # region of interest, that would make a fixed Htropo 
+        # value more reliable. 
+        Tmin = np.amin(IR_108) 
+        # for Tmin it might be better to use the 5th or 10th percentile 
+        # else overshooting tops induce further uncertainties  
+        # in the calculation of the cloud height. 
+        # However numpy provides weird results for 5th percentile. 
+        # Hence, for the working version the minimum is used 
+
+        print "... assume tropopause height ", Htropo, ", tropopause temperature ", Tmin, "K (", Tmin-273.16, "deg C)"
+        print "    and constant temperature gradient 6.5 K/km"
+
+        height = -(IR_108 - Tmin)/6.5 + Htropo 
+        # calculation of the height, the temperature gradient 
+        # 6.5 K/km is an assumption  
+        # derived from USS and MPI standard profiles. It 
+        # has to be improved as well 
+
+    # convert to masked array
+    # convert form km to meter
+    height = np.ma.masked_where(height <= 0, height, copy=False) * 1000.
+
+    if False:
+        from trollimage.image import Image as trollimage
+        from trollimage.colormap import rainbow
+        from copy import deepcopy 
+        # cloud top height
+        prop = height
+        min_data = prop.min()
+        max_data = prop.max()
+        print " estimated CTH(meter) (min/max): ", min_data, max_data
+        min_data =     0
+        max_data = 12000    
+        colormap = deepcopy(rainbow)
+        colormap.set_range(min_data, max_data)
+        img = trollimage(prop, mode="L") #, fill_value=[0,0,0]
+        img.colorize(colormap)
+        img.show()
+
+    # return cloud top height in meter
+    return height
+
+
+def viewzen_corr(data, view_zen):
+    """Apply atmospheric correction on the given *data* using the
+    specified satellite zenith angles (*view_zen*). Both input data
+    are given as 2-dimensional Numpy (masked) arrays, and they should
+    have equal shapes.
+    The *data* array will be changed in place and has to be copied before.
+    """
+    def ratio(value, v_null, v_ref):
+        return (value - v_null) / (v_ref - v_null)
+
+    def tau0(t):
+        T_0 = 210.0
+        T_REF = 320.0
+        TAU_REF = 9.85
+        return (1 + TAU_REF)**ratio(t, T_0, T_REF) - 1
+
+    def tau(t):
+        T_0 = 170.0
+        T_REF = 295.0
+        TAU_REF = 1.0
+        M = 4
+        return TAU_REF * ratio(t, T_0, T_REF)**M
+
+    def delta(z):
+        Z_0 = 0.0
+        Z_REF = 70.0
+        DELTA_REF = 6.2
+        return (1 + DELTA_REF)**ratio(z, Z_0, Z_REF) - 1
+
+    y0, x0 = np.ma.where(view_zen == 0)
+    data[y0, x0] += tau0(data[y0, x0])
+
+    y, x = np.ma.where((view_zen > 0) & (view_zen < 90) & (~data.mask))
+    data[y, x] += tau(data[y, x]) * delta(view_zen[y, x])
diff --git a/mpop/version.py b/mpop/version.py
index 9b533c1..6da6d0b 100644
--- a/mpop/version.py
+++ b/mpop/version.py
@@ -23,4 +23,4 @@
 """Version file.
 """
 
-__version__ = "v1.2.1"
+__version__ = "v1.3.0"
diff --git a/setup.py b/setup.py
index 6a2b50a..99d0d5d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright (c) 2009-2015.
+# Copyright (c) 2009-2016.
 
 # SMHI,
 # Folkborgsvägen 1,
@@ -69,7 +69,7 @@ setup(name=NAME,
                     os.path.join('etc', 'eps_avhrrl1b_6.5.xml')])],
       zip_safe=False,
       install_requires=requires,
-      test_requires=["mock"],
+      tests_require=["mock", 'pyorbital >= v0.2.3'],
       extras_require={'xRIT': ['mipp >= 0.6.0'],
                       'hdf_eos': ['pyhdf'],
                       'viirs': ['h5py'],
diff --git a/utils/get_tile_def.py b/utils/get_tile_def.py
new file mode 100644
index 0000000..3ffc7ed
--- /dev/null
+++ b/utils/get_tile_def.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+
+import xml.etree.ElementTree as ET
+from pyresample import utils
+import pickle
+import urllib2
+
+length=109800
+
+#https://sentinel.esa.int/documents/247904/1955685/S2A_OPER_GIP_TILPAR_20150622T000000_21000101T000000_ZZ_0001
+FNAME="S2A_OPER_GIP_TILPAR_20150622T000000_21000101T000000_ZZ_0001"
+
+TILPAR_URL="https://sentinel.esa.int/documents/247904/1955685/"+FNAME
+
+FNAME=FNAME+".kml"
+
+tiles = urllib2.urlopen(TILPAR_URL)
+with open(FNAME,'wb') as output:
+  output.write(tiles.read())
+
+tiles.close()
+
+tree = ET.parse(FNAME)
+root = tree.getroot()
+
+s2tiles={}
+
+for pm in root.iter('{http://www.opengis.net/kml/2.2}Placemark'):
+	tilename=None
+	epsg=None
+	utm_ul_x=None
+	utm_ul_y=None
+
+	for name in pm.iter('{http://www.opengis.net/kml/2.2}name'):
+		tilename=name.text
+	for simple in pm.iter('{http://www.opengis.net/kml/2.2}SimpleData'):
+		if (simple.attrib['name']=='epsg'):
+			epsg=simple.text
+		if(simple.attrib['name']=='utm_ul_x'):
+			utm_ul_x=simple.text
+		if(simple.attrib['name']=='utm_ul_y'):
+			utm_ul_y=simple.text
+
+	extent=(float(utm_ul_x),float(utm_ul_y)-length,float(utm_ul_x)+length,float(utm_ul_y))
+
+	s2tiles[tilename]=[epsg,extent]
+
+f=open('s2tiles.pickle','w')
+pickle.dump(s2tiles,f)
+f.close()
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-grass/python-mpop.git



More information about the Pkg-grass-devel mailing list