[med-svn] [snakemake] 01/06: New upstream version 4.3.0

chrysn chrysn-guest at moszumanska.debian.org
Mon Dec 4 21:27:34 UTC 2017


This is an automated email from the git hooks/post-receive script.

chrysn-guest pushed a commit to branch master
in repository snakemake.

commit 06602fee740739c8a5d63e9754e69fedc75b3df7
Author: chrysn <chrysn at fsfe.org>
Date:   Fri Nov 3 11:33:23 2017 +0100

    New upstream version 4.3.0
---
 CHANGELOG.md                                       |  51 ++
 Dockerfile                                         |  14 +-
 docs/_static/sphinx-argparse.css                   |   6 +
 docs/conf.py                                       |   6 +-
 docs/executable.rst                                | 123 +++-
 docs/getting_started/installation.rst              |   4 +-
 docs/index.rst                                     |  26 +-
 docs/project_info/faq.rst                          |   6 +-
 docs/project_info/more_resources.rst               |   6 +-
 docs/requirements.txt                              |   1 +
 docs/snakefiles/deployment.rst                     |  29 +
 docs/snakefiles/modularization.rst                 |   8 +-
 docs/snakefiles/remote_files.rst                   | 108 +++-
 docs/snakefiles/rules.rst                          |  34 +-
 docs/tutorial/tutorial.rst                         |   2 +-
 environment.yml                                    |  15 +-
 setup.py                                           |  16 +-
 snakemake/__init__.py                              | 359 ++++++++++--
 snakemake/common.py                                |   9 +
 snakemake/conda.py                                 |  23 +-
 snakemake/dag.py                                   | 162 ++++--
 snakemake/exceptions.py                            |   4 +
 snakemake/executors.py                             | 619 +++++++++++++++++----
 snakemake/io.py                                    | 100 +++-
 snakemake/jobs.py                                  |  44 +-
 snakemake/logging.py                               |  53 +-
 snakemake/parser.py                                |  28 +-
 snakemake/persistence.py                           | 222 ++++----
 snakemake/remote/FTP.py                            |   3 +
 snakemake/remote/GS.py                             | 128 ++++-
 snakemake/remote/HTTP.py                           |   3 +
 snakemake/remote/NCBI.py                           |   6 +-
 snakemake/remote/S3.py                             | 225 ++------
 snakemake/remote/S3Mocked.py                       |  13 +-
 snakemake/remote/__init__.py                       |  38 +-
 snakemake/remote/dropbox.py                        |   2 +-
 snakemake/remote/gfal.py                           | 137 +++++
 snakemake/remote/gridftp.py                        |  71 +++
 snakemake/remote/webdav.py                         | 167 ++++++
 snakemake/rules.py                                 | 104 ++--
 snakemake/scheduler.py                             | 167 ++++--
 snakemake/script.py                                |  12 +-
 snakemake/shell.py                                 |  18 +-
 snakemake/singularity.py                           |  73 +++
 snakemake/version.py                               |   4 +-
 snakemake/workflow.py                              | 123 +++-
 snakemake/wrapper.py                               |  20 +-
 test-environment.yml                               |  19 +-
 tests/test_issue612/Snakefile                      |  11 +
 .../expected-results/.gitignore}                   |   0
 tests/test_kubernetes/README.md                    |  11 +
 tests/test_kubernetes/Snakefile                    |  29 +
 tests/test_kubernetes/envs/gzip.yaml               |   4 +
 tests/test_profile/Snakefile                       |   3 +
 tests/test_profile/Snakefile.internal              |   5 +
 tests/test_profile/config.yaml                     |   1 +
 .../expected-results/test.out}                     |   0
 tests/test_profile/workflow-config.yaml            |   1 +
 tests/test_remote_gs/Snakefile                     |  15 +
 .../expected-results/landsat-data.txt              | 218 ++++++++
 tests/test_remote_gs/landsat-data.txt              | 218 ++++++++
 tests/test_remote_log/Snakefile                    |  17 +
 tests/test_remote_log/expected-results/motoState.p | Bin 0 -> 848 bytes
 .../.done => test_remote_log/test.txt}             |   0
 tests/test_remote_ncbi/Snakefile                   |   5 +-
 tests/test_remote_ncbi/expected-results/sizes.txt  |   8 +-
 tests/test_remote_ncbi_simple/Snakefile            |   4 +-
 .../expected-results/sizes.txt                     |   2 +-
 tests/test_restartable_job_cmd_exit_1/Snakefile    |  27 +-
 .../expected-results/.done                         |   1 +
 tests/test_script/expected-results/test.html       |  85 +--
 tests/test_singularity/Snakefile                   |   7 +
 tests/test_singularity/expected-results/test.out   |   2 +
 tests/test_static_remote/S3MockedForStaticTest.py  |   7 +-
 tests/test_wrapper/Snakefile                       |   2 +-
 tests/tests.py                                     | 103 ++--
 wercker.yml                                        |   6 +-
 77 files changed, 3359 insertions(+), 844 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80419e9..e096e4f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,60 @@
 # Change Log
 
+## [4.3.0] - 2017-10-27
+### Added
+- GridFTP remote provider. This is a specialization of the GFAL remote provider that uses globus-url-copy to download or upload files.
+### Changed
+- Scheduling and execution mechanisms have undergone a major revision that removes several potential (but rare) deadlocks.
+- Several bugs and corner cases of the singularity support have been fixed.
+- Snakemake now requires at least singularity 2.4.
+
+## [4.2.0] - 2017-10-10
+### Added
+- Support for executing jobs in per-rule singularity images. This is meant as an alternative to the conda directive (see docs), providing even more guarantees for reproducibility.
+### Changed
+- In cluster mode, jobs that are still running after Snakemake has been killed are automatically resumed.
+- Various fixes to GFAL remote provider.
+- Fixed --summary and --list-code-changes.
+- Many other small bug fixes.
+
+## [4.1.0] - 2017-09-26
+### Added
+- Support for configuration profiles. Profiles allow specifying default options, e.g., a cluster
+  submission command. They can be used via 'snakemake --profile myprofile'. See the docs for details.
+- GFAL remote provider. This allows using GridFTP, SRM, and any other protocol supported by GFAL for remote input and output files.
+- Added --cluster-status flag that allows specifying a command that returns the job status.
+### Changed
+- The scheduler now tries to get rid of the largest temp files first.
+- The Docker image used for kubernetes support can now be configured at the command line.
+- Rate-limiting for cluster interaction has been unified.
+- S3 remote provider uses boto3.
+- Resource functions can now use an additional `attempt` parameter that contains the number of times this job has already been tried.
+- Various minor fixes.
+
+## [4.0.0] - 2017-07-24
+### Added
+- Cloud computing support via Kubernetes. Snakemake workflows can be executed transparently
+  in the cloud, while storing input and output files within the cloud storage
+  (e.g. S3 or Google Storage). I.e., this feature does not need a shared filesystem
+  between the cloud nodes, and thereby makes the setup really simple.
+- WebDAV remote file support: Snakemake can now read and write from WebDAV. Hence,
+  it can now, e.g., interact with Nextcloud or Owncloud.
+- Support for default remote providers: define a remote provider to implicitly
+  use for all input and output files.
+- Added an option to only create conda environments instead of executing the workflow.
+### Changed
+- The number of files used for the metadata tracking of Snakemake (e.g., code, params, input changes) in the .snakemake directory has been reduced by a factor of 10, which should help with NFS and IO bottlenecks. This is a breaking change in the sense that Snakemake 4.x won't see the metadata of workflows executed with Snakemake 3.x. However, old metadata won't be overwritten, so that you can always go back and check things by installing an older version of Snakemake again.
+- The google storage (GS) remote provider has been changed to use the google SDK.
+  This is a breaking change, since the remote provider invocation has been simplified (see docs).
+- Due to WebDAV support (which uses asyncio), Snakemake now requires at least Python 3.5.
+- Various minor bug fixes (e.g. for dynamic output files).
+
+
 ## [3.13.3] - 2017-06-23
 ### Changed
 - Fix a followup bug in Namedlist where a single item was not returned as string.
 
+
 ## [3.13.2] - 2017-06-20
 ### Changed
 - The --wrapper-prefix flag now also affects where the corresponding environment definition is fetched from.
diff --git a/Dockerfile b/Dockerfile
index 5407fc8..1d93c06 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,12 +1,12 @@
-FROM bitnami/minideb:jessie
+FROM bitnami/minideb:stretch
 MAINTAINER Johannes Köster <johannes.koester at tu-dortmund.de>
+ENV SINGULARITY_VERSION=2.3.2
 ADD . /tmp/repo
-# taken from condaforge/linux-anvil 
-#RUN apt-get update && \
-#    apt-get install -y wget bzip2 && \
-#    rm -rf /var/lib/apt/lists/*
-RUN install_packages wget bzip2
-RUN wget --no-check-certificate https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
+RUN install_packages wget bzip2 ca-certificates gnupg2
+RUN wget -O- http://neuro.debian.net/lists/xenial.us-ca.full > /etc/apt/sources.list.d/neurodebian.sources.list
+RUN wget -O- http://neuro.debian.net/_static/neuro.debian.net.asc | apt-key add -
+RUN install_packages singularity-container
+RUN wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
     bash Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda && \
     rm Miniconda3-latest-Linux-x86_64.sh
 ENV PATH /opt/conda/bin:${PATH}
diff --git a/docs/_static/sphinx-argparse.css b/docs/_static/sphinx-argparse.css
new file mode 100644
index 0000000..70ce1ab
--- /dev/null
+++ b/docs/_static/sphinx-argparse.css
@@ -0,0 +1,6 @@
+.wy-table-responsive table td {
+    white-space: normal !important;
+}
+.wy-table-responsive {
+    overflow: visible !important;
+}
diff --git a/docs/conf.py b/docs/conf.py
index 5296dfd..66004f4 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -33,7 +33,8 @@ extensions = [
     'sphinx.ext.autodoc',
     'sphinx.ext.mathjax',
     'sphinx.ext.viewcode',
-    'sphinxcontrib.napoleon'
+    'sphinxcontrib.napoleon',
+    'sphinxarg.ext'
 ]
 
 # Add any paths that contain templates here, relative to this directory.
@@ -263,3 +264,6 @@ texinfo_documents = [
 
 # If true, do not generate a @detailmenu in the "Top" node's menu.
 #texinfo_no_detailmenu = False
+
+def setup(app):
+    app.add_stylesheet('sphinx-argparse.css')
diff --git a/docs/executable.rst b/docs/executable.rst
index 07e89b6..16e3976 100644
--- a/docs/executable.rst
+++ b/docs/executable.rst
@@ -1,8 +1,8 @@
-.. user_manual-snakemake_executable:
+.. _executable:
 
-========================
-The Snakemake Executable
-========================
+===================
+Executing Snakemake
+===================
 
 This part of the documentation describes the ``snakemake`` executable.  Snakemake
 is primarily a command-line tool, so the ``snakemake`` executable is the primary way
@@ -47,6 +47,83 @@ By specifying the number of available cores, i.e.
 one can tell Snakemake to use up to 4 cores and solve a binary knapsack problem to optimize the scheduling of jobs.
 If the number is omitted (i.e., only ``-j`` is given), the number of used cores is determined as the number of available CPU cores in the machine.
 
+
+-------------
+Cloud Support
+-------------
+
+Snakemake 4.0 and later supports experimental execution in the cloud via Kubernetes.
+This is independent of the cloud provider, but we provide the setup steps for GCE below.
+
+Google cloud engine
+~~~~~~~~~~~~~~~~~~~
+
+First, install the `Google Cloud SDK <https://cloud.google.com/sdk/docs/quickstarts>`_.
+Then, run
+
+.. code-block:: console
+
+    $ gcloud init
+
+to set up your access.
+Then, you can create a new kubernetes cluster via
+
+.. code-block:: console
+
+    $ gcloud container clusters create $CLUSTER_NAME --num-nodes=$NODES --scopes storage-rw
+
+with ``$CLUSTER_NAME`` being the cluster name and ``$NODES`` being the number of cluster
+nodes. If you intend to use Google storage, make sure that ``--scopes storage-rw`` is set.
+This enables Snakemake to write to the google storage from within the cloud nodes.
+Next, you configure Kubernetes to use the new cluster via
+
+.. code-block:: console
+
+    $ gcloud container clusters get-credentials $CLUSTER_NAME
+
+
+Now, Snakemake is ready to use your cluster.
+
+**Important:** After finishing your work, do not forget to delete the cluster with
+
+.. code-block:: console
+
+    $ gcloud container clusters delete $CLUSTER_NAME
+
+in order to avoid unnecessary charges.
+
+
+.. _kubernetes:
+
+Executing a Snakemake workflow via kubernetes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Assuming that kubernetes has been properly configured (see above), you can
+execute a workflow via:
+
+.. code-block:: console
+
+    snakemake --kubernetes --use-conda --default-remote-provider $REMOTE --default-remote-prefix $PREFIX
+
+In this mode, Snakemake will assume all input and output files to be stored in a given
+remote location, configured by setting ``$REMOTE`` to your provider of choice
+(e.g. ``GS`` for Google cloud storage or ``S3`` for Amazon S3) and ``$PREFIX``
+to a bucket name or subfolder within that remote storage.
+After successful execution, you find your results in the specified remote storage.
+Of course, if any input or output already defines a different remote location, the latter will be used instead.
+Importantly, this means that Snakemake does **not** require a shared network
+filesystem to work in the cloud.
+
+It is further possible to forward arbitrary environment variables to the kubernetes
+jobs via the flag ``--kubernetes-env`` (see ``snakemake --help``).
+
+When executing, Snakemake will make use of the defined resources and threads
+to schedule jobs to the correct nodes. In particular, it will forward memory requirements
+defined as ``mem_mb`` to kubernetes. Further, it will propagate the number of threads
+a job intends to use, such that kubernetes can allocate it to the correct cloud
+computing node.
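+
+For example, a rule might declare its requirements like this (a minimal sketch;
+the rule and file names are illustrative):
+
+.. code-block:: python
+
+    rule map_reads:
+        input:
+            "data/{sample}.fastq"
+        output:
+            "mapped/{sample}.bam"
+        threads: 4
+        resources:
+            mem_mb=2000
+        shell:
+            "..."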
+
+
 -----------------
 Cluster Execution
 -----------------
@@ -122,8 +199,34 @@ When executing a workflow on a cluster using the ``--cluster`` parameter (see be
     os.system("qsub -t {threads} {script}".format(threads=threads, script=jobscript))
 
 
-.. _getting_started-all_options:
+--------
+Profiles
+--------
 
+Adapting Snakemake to a particular environment can entail many flags and options.
+Therefore, since Snakemake 4.1, it is possible to specify a configuration profile
+to be used to obtain default options:
+
+.. code-block:: console
+
+   $ snakemake --profile myprofile
+
+Here, a folder ``myprofile`` is searched for in the per-user and global configuration directories (on Linux, these are ``$HOME/.config/snakemake`` and ``/etc/xdg/snakemake``; the directories for your system are listed in ``snakemake --help``).
+Alternatively, an absolute or relative path to the folder can be given.
+The profile folder is expected to contain a file ``config.yaml`` that defines default values for the Snakemake command line arguments.
+For example, the file
+
+.. code-block:: yaml
+
+    cluster: qsub
+    jobs: 100
+
+would set up Snakemake to always submit to the cluster via the ``qsub`` command, and to never use more than 100 parallel jobs in total (equivalent to invoking ``snakemake --cluster qsub --jobs 100``).
+Under https://github.com/snakemake-profiles/doc, you can find publicly available profiles.
+Feel free to contribute your own.
+
+The profile folder can additionally contain auxiliary files, e.g., jobscripts, or any kind of wrappers.
+See https://github.com/snakemake-profiles/doc for examples.
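+
+For instance, a profile directory could look like this (a hypothetical layout;
+the jobscript name is illustrative):
+
+.. code-block:: console
+
+    $ ls myprofile/
+    config.yaml  jobscript.sh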
 
 .. _getting_started-visualization:
 
@@ -153,11 +256,19 @@ To visualize the whole DAG regardless of the eventual presence of files, the ``f
 
 Of course the visual appearance can be modified by providing further command line arguments to ``dot``.
 
+
+.. _all_options:
+
 -----------
 All Options
 -----------
 
-All command line options can be printed by calling ``snakemake -h``.
+.. argparse::
+   :module: snakemake
+   :func: get_argument_parser
+   :prog: snakemake
+
+   All command line options can be printed by calling ``snakemake -h``.
 
 .. _getting_started-bash_completion:
 
diff --git a/docs/getting_started/installation.rst b/docs/getting_started/installation.rst
index 4b3ea6c..6f19cff 100644
--- a/docs/getting_started/installation.rst
+++ b/docs/getting_started/installation.rst
@@ -10,7 +10,7 @@ You can use one of the following ways for installing Snakemake.
 Installation via Conda
 ======================
 
-On **Linux** and **MacOSX**, this is the recommended way to install Snakemake,
+This is the recommended way to install Snakemake,
 because it also enables Snakemake to :ref:`handle software dependencies of your
 workflow <integrated_package_management>`.
 
@@ -32,7 +32,7 @@ from the `Bioconda <https://bioconda.github.io>`_ channel.
 Global Installation
 ===================
 
-With a working Python 3 setup, installation of Snakemake can be performed by issuing
+With a working Python ``>=3.5`` setup, installation of Snakemake can be performed by issuing
 
 .. code-block:: console
 
diff --git a/docs/index.rst b/docs/index.rst
index e311f6d..c073c18 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -22,9 +22,11 @@ Welcome to Snakemake's documentation!
 .. image:: https://img.shields.io/badge/stack-overflow-orange.svg
     :target: http://stackoverflow.com/questions/tagged/snakemake
 
-Snakemake is an MIT-licensed workflow management system that aims to reduce the complexity of creating workflows by providing a fast and comfortable execution environment, together with a clean and modern specification language in python style.
-Snakemake workflows are essentially Python scripts extended by declarative code to define **rules**.
-Rules describe how to create **output files** from **input files**.
+
+The Snakemake workflow management system is a tool to create reproducible and scalable data analyses.
+Workflows are described via a human readable, Python based language.
+They can be seamlessly scaled to server, cluster, grid and cloud environments, without the need to modify the workflow definition.
+Finally, Snakemake workflows can include a description of required software, which will be automatically deployed to any execution environment.
 
 
 .. _manual-quick_example:
@@ -33,6 +35,9 @@ Rules describe how to create **output files** from **input files**.
 Quick Example
 -------------
 
+Snakemake workflows are essentially Python scripts extended by declarative code to define **rules**.
+Rules describe how to create **output files** from **input files**.
+
 .. code-block:: python
 
     rule targets:
@@ -54,8 +59,9 @@ Quick Example
 * Snakemake determines the rule dependencies by matching file names.
 * Input and output files can contain multiple named wildcards.
 * Rules can either use shell commands, plain Python code or external Python or R scripts to create output files from input files.
-* Snakemake workflows can be executed on workstations and clusters without modification. The job scheduling can be constrained by arbitrary resources like e.g. available CPU cores, memory or GPUs.
-* Snakemake can use Amazon S3, Google Storage, Dropbox, FTP and SFTP to access input or output files and further access input files via HTTP and HTTPS.
+* Snakemake workflows can be easily executed on **workstations**, **clusters**, **the grid**, and **in the cloud** without modification. The job scheduling can be constrained by arbitrary resources, e.g. available CPU cores, memory or GPUs.
+* Snakemake can automatically deploy required software dependencies of a workflow using `Conda <https://conda.io>`_ or `Singularity <http://singularity.lbl.gov/>`_.
+* Snakemake can use Amazon S3, Google Storage, Dropbox, FTP, WebDAV and SFTP to access input or output files and further access input files via HTTP and HTTPS.
 
 .. _main-getting-started:
 
@@ -84,15 +90,15 @@ Citation
 
 See :doc:`Citations <project_info/citations>` for more information.
 
-----------------
-Related Projects
-----------------
+---------
+Resources
+---------
 
 `Snakemake Wrappers Repository <https://snakemake-wrappers.readthedocs.org>`_
     The Snakemake Wrapper Repository is a collection of reusable wrappers that allow to quickly use popular command line tools from Snakemake rules and workflows.
 
-`Snakemake Workflow Repository <https://bitbucket.org/snakemake/snakemake-workflows>`_
-    This repository provides a collection of high quality modularized and re-usable rules and workflows.
+`Snakemake Workflows Project <https://github.com/snakemake-workflows/docs>`_
+    This project provides a collection of high quality modularized and re-usable workflows.
     The provided code should also serve as a best-practices of how to build production ready workflows with Snakemake.
     Everybody is invited to contribute.
 
diff --git a/docs/project_info/faq.rst b/docs/project_info/faq.rst
index 2eff5c5..0f358e4 100644
--- a/docs/project_info/faq.rst
+++ b/docs/project_info/faq.rst
@@ -212,9 +212,7 @@ You can copy that file to ``$HOME/.vim/syntax`` directory and add
 .. code-block:: vim
 
     au BufNewFile,BufRead Snakefile set syntax=snakemake
-    au BufNewFile,BufRead *.rules set syntax=snakemake
-    au BufNewFile,BufRead *.snakefile set syntax=snakemake
-    au BufNewFile,BufRead *.snake set syntax=snakemake
+    au BufNewFile,BufRead *.smk set syntax=snakemake
 
 to your ``$HOME/.vimrc`` file. Highlighting can be forced in a vim session with ``:set syntax=snakemake``.
 
@@ -422,7 +420,7 @@ To avoid confusion we therefore disallow the conda directive together with the r
 It is recommended to use the script directive instead (see :ref:`snakefiles-external_scripts`).
 
 
-My workflow is very large, how to I stop Snakemake from printing all this rule/job information in a dry-run?
+My workflow is very large, how do I stop Snakemake from printing all this rule/job information in a dry-run?
 ------------------------------------------------------------------------------------------------------------
 
 Indeed, the information for each individual job can slow down a dryrun if there are tens of thousands of jobs.
diff --git a/docs/project_info/more_resources.rst b/docs/project_info/more_resources.rst
index 1e69171..bc3259b 100644
--- a/docs/project_info/more_resources.rst
+++ b/docs/project_info/more_resources.rst
@@ -10,9 +10,9 @@ More Resources
 Talks and Posters
 -----------------
 
-* `Poster at ECCB 2016, The Hague, Netherlands. <http://johanneskoester.bitbucket.org/posters/snakemake+bioconda-2016.pdf>`_
-* `Invited talk by Johannes Köster at the Broad Institute, Boston 2015. <http://slides.com/johanneskoester/snakemake-broad-2015>`_
-* `Introduction to Snakemake. Tutorial Slides presented by Johannes Köster at the GCB 2015, Dortmund, Germany. <http://slides.com/johanneskoester/deck-1>`_
+* `Poster at ECCB 2016, The Hague, Netherlands. <https://johanneskoester.bitbucket.io/posters/snakemake+bioconda-2016.pdf>`_
+* `Invited talk by Johannes Köster at the Broad Institute, Boston 2015. <https://slides.com/johanneskoester/snakemake-broad-2015>`_
+* `Introduction to Snakemake. Tutorial Slides presented by Johannes Köster at the GCB 2015, Dortmund, Germany. <https://slides.com/johanneskoester/deck-1>`_
 * `Invited talk by Johannes Köster at the DTL Focus Meeting: "NGS Production Pipelines", Dutch Techcentre for Life Sciences, Utrecht 2014. <https://speakerdeck.com/johanneskoester/workflow-management-with-snakemake>`_
 * `Taming Snakemake by Jeremy Leipzig, Bioinformatics software developer at Children's Hospital of Philadelphia, 2014. <http://de.slideshare.net/jermdemo/taming-snakemake>`_
 * `"Snakemake makes ... snakes?" - An Introduction by Marcel Martin from SciLifeLab, Stockholm 2015 <http://marcelm.net/talks/2015/snakemake>`_
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 1b7c01e..d4c0a05 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,4 +1,5 @@
 sphinx
 sphinxcontrib-napoleon
+sphinx-argparse
 sphinx_rtd_theme
 docutils==0.12
diff --git a/docs/snakefiles/deployment.rst b/docs/snakefiles/deployment.rst
index 505e7a1..c251b14 100644
--- a/docs/snakefiles/deployment.rst
+++ b/docs/snakefiles/deployment.rst
@@ -81,6 +81,35 @@ Snakemake will store the environment persistently in ``.snakemake/conda/$hash``
 Note that you need to clean up environments manually for now. However, in many cases they are lightweight and consist of symlinks to your central conda installation.
 
 
+--------------------------
+Running jobs in containers
+--------------------------
+
+As an alternative to using Conda (see above), it is possible to define, for each rule, a docker or singularity container to use, e.g.,
+
+.. code-block:: python
+
+    rule NAME:
+        input:
+            "table.txt"
+        output:
+            "plots/myplot.pdf"
+        singularity:
+            "docker://joseespinosa/docker-r-ggplot2"
+        script:
+            "scripts/plot-stuff.R"
+
+When executing Snakemake with
+
+.. code-block:: bash
+
+    snakemake --use-singularity
+
+it will execute the job within a singularity container that is spawned from the given image.
+Allowed image URLs include everything supported by singularity (e.g., ``shub://`` and ``docker://``).
+When ``--use-singularity`` is combined with ``--kubernetes`` (see :ref:`kubernetes`), cloud jobs will be automatically configured to run in privileged mode, because this is a current requirement of the singularity executable.
+Importantly, though, those privileges are not shared by the actual code that is executed in the singularity container.
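+
+For example, both modes can be combined on the command line as follows (a sketch;
+the remote provider settings are placeholders):
+
+.. code-block:: bash
+
+    snakemake --kubernetes --use-singularity \
+        --default-remote-provider GS --default-remote-prefix my-bucket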
+
 --------------------------------------
 Sustainable and reproducible archiving
 --------------------------------------
diff --git a/docs/snakefiles/modularization.rst b/docs/snakefiles/modularization.rst
index ea15d7a..1fb32d4 100644
--- a/docs/snakefiles/modularization.rst
+++ b/docs/snakefiles/modularization.rst
@@ -4,7 +4,7 @@
 Modularization
 ==============
 
-Modularization in Snakemake comes at different levels. 
+Modularization in Snakemake comes at different levels.
 
 1. The most fine-grained level are wrappers. They are available and can be published at the `Snakemake Wrapper Repository <https://snakemake-wrappers.readthedocs.io>`_. These wrappers can then be composed and customized according to your needs, by copying skeleton rules into your workflow. In combination with conda integration, wrappers also automatically deploy the needed software dependencies into isolated environments.
 2. For larger, reusable parts that shall be integrated into a common workflow, it is recommended to write small Snakefiles and include them into a master Snakefile via the include statement. In such a setup, all rules share a common config file.
@@ -17,8 +17,8 @@ Modularization in Snakemake comes at different levels.
 Wrappers
 --------
 
-With Snakemake 3.5.5, the wrapper directive is introduced (experimental).
-This directive allows to have re-usable wrapper scripts around e.g. command line tools. In contrast to modularization strategies like ``include`` or subworkflows, the wrapper directive allows to re-wire the DAG of jobs.
+The wrapper directive allows using re-usable wrapper scripts around, e.g., command line tools.
+In contrast to modularization strategies like ``include`` or subworkflows, the wrapper directive allows re-wiring the DAG of jobs.
 For example
 
 .. code-block:: python
@@ -94,5 +94,3 @@ This function automatically determines the absolute path to the file (here ``../
 When executing, snakemake first tries to create (or update, if necessary) ``test.txt`` (and all other possibly mentioned dependencies) by executing the subworkflow.
 Then the current workflow is executed.
 This can also happen recursively, since the subworkflow may have its own subworkflows as well.
-
-
diff --git a/docs/snakefiles/remote_files.rst b/docs/snakefiles/remote_files.rst
index 1314acb..a7dbfd1 100644
--- a/docs/snakefiles/remote_files.rst
+++ b/docs/snakefiles/remote_files.rst
@@ -20,6 +20,9 @@ Snakemake includes the following remote providers, supported by the correspondin
 * Dropbox: ``snakemake.remote.dropbox``
 * XRootD: ``snakemake.remote.XRootD``
 * GenBank / NCBI Entrez: ``snakemake.remote.NCBI``
+* WebDAV: ``snakemake.remote.webdav``
+* GFAL: ``snakemake.remote.gfal``
+* GridFTP: ``snakemake.remote.gridftp``
 
 
 Amazon Simple Storage Service (S3)
@@ -110,15 +113,20 @@ The remote provider also supports a new ``glob_wildcards()`` (see :ref:`glob-wil
 Google Cloud Storage (GS)
 =========================
 
-Using Google Cloud Storage (GS) is a simple import change, though since GS support it is based on boto, GS must be accessed via Google's "`interoperable <https://cloud.google.com/storage/docs/interoperability>`_" credentials.
 Usage of the GS provider is the same as the S3 provider.
-You may specify credentials as environment variables in the file ``=/.aws/credentials``, prefixed with ``AWS_*``, as with a standard `boto config <http://boto.readthedocs.org/en/latest/boto_config_tut.html>`_, or explicitly in the ``Snakefile``.
+For authentication, one simply needs to log in via the ``gcloud`` tool before
+executing Snakemake, i.e.:
 
+.. code-block:: console
+
+    $ gcloud auth application-default login
+
+In the Snakefile, no additional authentication information has to be provided:
 
 .. code-block:: python
 
     from snakemake.remote.GS import RemoteProvider as GSRemoteProvider
-    GS = GSRemoteProvider(access_key_id="MYACCESSKEY", secret_access_key="MYSECRET")
+    GS = GSRemoteProvider()
 
     rule all:
         input:
@@ -463,8 +471,7 @@ Snakemake can directly source input files from `GenBank <https://www.ncbi.nlm.ni
         run:
             shell("wc -c {input} > {output}")
 
-The output format and source database of a record retrieved from GenBank is inferred from the file extension specified. For example, ``NCBI.RemoteProvider().remote("KY785484.1.fasta", db="nuccore")`` will download a FASTA file while ``NCBI.RemoteProvider().remote("KY785484.1.gb", db="nuccore")`` will download a GenBank-format file. If the options are ambiguous, Snakemake will raise an exception and inform the user of possible format choices. To see available formats, consult the 
-in a variety of `Entrez EFetch documentation <https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly>`_. To view the valid file extensions for these formats, access ``NCBI.RemoteProvider()._gb.valid_extensions``, or instantiate an ``NCBI.NCBIHelper()`` and access ``NCBI.NCBIHelper().valid_extensions`` (this is a property).
+The output format and source database of a record retrieved from GenBank is inferred from the file extension specified. For example, ``NCBI.RemoteProvider().remote("KY785484.1.fasta", db="nuccore")`` will download a FASTA file while ``NCBI.RemoteProvider().remote("KY785484.1.gb", db="nuccore")`` will download a GenBank-format file. If the options are ambiguous, Snakemake will raise an exception and inform the user of possible format choices. To see available formats, consult the `Entrez  [...]
 
 When used in conjunction with ``NCBI.RemoteProvider().search()``, Snakemake and ``NCBI.RemoteProvider().remote()`` can be used to find accessions by query and download them:
 
@@ -478,8 +485,8 @@ When used in conjunction with ``NCBI.RemoteProvider().search()``, Snakemake and
     query = '"Zika virus"[Organism] AND (("9000"[SLEN] : "20000"[SLEN]) AND ("2017/03/20"[PDAT] : "2017/03/24"[PDAT])) '
     accessions = NCBI.search(query, retmax=3)
 
-    # give the accessions a file extension to help the RemoteProvider determine the 
-    # proper output type. 
+    # give the accessions a file extension to help the RemoteProvider determine the
+    # proper output type.
     input_files = expand("{acc}.fasta", acc=accessions)
 
     rule all:
@@ -500,6 +507,91 @@ When used in conjunction with ``NCBI.RemoteProvider().search()``, Snakemake and
 
 Normally, all accessions for a query are returned from ``NCBI.RemoteProvider.search()``. To truncate the results, specify ``retmax=<desired_number>``. Standard Entrez `fetch query options <https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EFetch>`_ are supported as kwargs, and may be passed in to ``NCBI.RemoteProvider.remote()`` and ``NCBI.RemoteProvider.search()``.
 
+WebDAV
+======
+
+WebDAV support is currently ``experimental`` and available in Snakemake 4.0 and later.
+
+Snakemake supports reading and writing WebDAV remote files. The protocol defaults to ``https://``, but insecure connections
+can be used by specifying ``protocol="http://"``. Similarly, the port defaults to 443, and can be overridden by specifying ``port=##`` or by including the port as part of the file address.
+
+.. code-block:: python
+
+    from snakemake.remote import webdav
+
+    webdav = webdav.RemoteProvider(username="test", password="test", protocol="http://")
+
+    rule a:
+        input:
+            webdav.remote("example.com:8888/path/to/input_file.csv"),
+        shell:
+            # do something
+
+
+GFAL
+====
+
+GFAL support is available in Snakemake 4.1 and later.
+
+Snakemake supports reading and writing remote files via the `GFAL <https://dmc.web.cern.ch/projects/gfal-2/home>`_ command line client (gfal-* commands).
+By this, it supports various grid storage protocols like `GridFTP <https://en.wikipedia.org/wiki/GridFTP>`_.
+In general, if you are able to use the ``gfal-*`` commands directly, Snakemake support for GFAL will work as well.
+
+.. code-block:: python
+
+    from snakemake.remote import gfal
+
+    gfal = gfal.RemoteProvider(retry=5)
+
+    rule a:
+        input:
+            gfal.remote("gridftp.grid.sara.nl:2811/path/to/infile.txt")
+        output:
+            gfal.remote("gridftp.grid.sara.nl:2811/path/to/outfile.txt")
+        shell:
+            # do something
+
+Authentication has to be set up in the system, e.g. via certificates in the ``.globus`` directory.
+Usually, this is already the case and no action has to be taken.
+The ``retry`` keyword argument to the remote provider allows setting the number of retries (10 by default) in case of failed commands (the grid is usually relatively unreliable).
+The latter may be unsupported depending on the system configuration.
+
+Note that GFAL support used together with the flags ``--no-shared-fs`` and ``--default-remote-provider`` enables you
+to transparently use Snakemake in a grid computing environment without a shared network filesystem.
+For an example see the `surfsara-grid configuration profile <https://github.com/Snakemake-Profiles/surfsara-grid>`_.
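+
+A possible invocation in such an environment could look like this (a sketch; the
+prefix is a placeholder, and the provider name is assumed to follow the module
+name ``gfal``):
+
+.. code-block:: console
+
+    $ snakemake --no-shared-fs --default-remote-provider gfal \
+          --default-remote-prefix gridftp.grid.sara.nl:2811/projects/myproject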
+
+GridFTP
+=======
+
+GridFTP support is available in Snakemake 4.3.0 and later.
+
+As a more specialized alternative to the GFAL remote provider, Snakemake provides a `GridFTP <https://en.wikipedia.org/wiki/GridFTP>`_ remote provider.
+This provider only supports the GridFTP protocol. Internally, it uses the `globus-url-copy <http://toolkit.globus.org/toolkit/docs/latest-stable/gridftp/user/#globus-url-copy>`_ command for downloads and uploads, while all other tasks are delegated to the GFAL remote provider.
+
+.. code-block:: python
+
+    from snakemake.remote import gridftp
+
+    gridftp = gridftp.RemoteProvider(retry=5)
+
+    rule a:
+        input:
+            gridftp.remote("gridftp.grid.sara.nl:2811/path/to/infile.txt")
+        output:
+            gridftp.remote("gridftp.grid.sara.nl:2811/path/to/outfile.txt")
+        shell:
+            # do something
+
+Authentication has to be set up in the system, e.g. via certificates in the ``.globus`` directory.
+Usually, this is already the case and no action has to be taken.
+The ``retry`` keyword argument to the remote provider allows setting the number of retries (10 by default) in case of failed commands (the grid is usually relatively unreliable).
+The latter may be unsupported depending on the system configuration.
+
+Note that GridFTP support used together with the flags ``--no-shared-fs`` and ``--default-remote-provider`` enables you
+to transparently use Snakemake in a grid computing environment without a shared network filesystem.
+For an example see the `surfsara-grid configuration profile <https://github.com/Snakemake-Profiles/surfsara-grid>`_.
+
+
 Remote cross-provider transfers
 ===============================
 
@@ -523,4 +615,4 @@ It is possible to use Snakemake to transfer files between remote providers (usin
         output:
             GS.remote( expand("destination-bucket/{file}.bam", file=fileList) )
         run:
-            shell("cp -R source-bucket/ destination-bucket/")
+            shell("cp {input} {output}")
diff --git a/docs/snakefiles/rules.rst b/docs/snakefiles/rules.rst
index c184a3e..a89f846 100644
--- a/docs/snakefiles/rules.rst
+++ b/docs/snakefiles/rules.rst
@@ -208,7 +208,7 @@ Further, a rule can be given a number of threads to use, i.e.
 Snakemake can alter the number of cores available based on command line options. Therefore it is useful to propagate it via the built in variable ``threads`` rather than hardcoding it into the shell command.
 In particular, it should be noted that the specified threads have to be seen as a maximum. When Snakemake is executed with fewer cores, the number of threads will be adjusted, i.e. ``threads = min(threads, cores)`` with ``cores`` being the number of cores specified at the command line (option ``--cores``). On a cluster node, Snakemake uses as many cores as available on that node. Hence, the number of threads used by a rule never exceeds the number of physically available cores on the nod [...]
 
-Starting from version 3.7, threads can also be a callable that returns an ``int`` value. The signature of the callable should be ``callable(wildcards, [input])`` (input is an optional parameter).  It is also possible to refer to a predefined variable (e.g, ``threads: threads_max``) so that the number of cores for a set of rules can be changed with one change only by altering the value of the variable ``threads_max``.
+Starting from version 3.7, threads can also be a callable that returns an ``int`` value. The signature of the callable should be ``callable(wildcards[, input])`` (``input`` is an optional parameter). It is also possible to refer to a predefined variable (e.g., ``threads: threads_max``) so that the number of cores for a set of rules can be changed in one place, by altering the value of the variable ``threads_max``.
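+
+For example (a minimal sketch; rule and variable names are illustrative):
+
+.. code-block:: python
+
+    threads_max = 8
+
+    rule c:
+        input:
+            "data/{sample}.txt"
+        output:
+            "processed/{sample}.txt"
+        threads: lambda wildcards: threads_max
+        shell:
+            "..."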
 
 
 .. _snakefiles-resources:
@@ -223,22 +223,46 @@ In addition to threads, a rule can use arbitrary user-defined resources by speci
     rule:
         input:     ...
         output:    ...
-        resources: gpu=1
-        shell: "..."
+        resources:
+            mem_mb=100
+        shell:
+            "..."
 
 If limits for the resources are given via the command line, e.g.
 
 .. code-block:: console
 
-    $ snakemake --resources gpu=2
+    $ snakemake --resources mem_mb=100
 
 the scheduler will ensure that the given resources are not exceeded by running jobs.
 If no limits are given, the resources are ignored.
 Apart from making Snakemake aware of hybrid-computing architectures (e.g. with a limited number of additional devices like GPUs) this allows to control scheduling in various ways, e.g. to limit IO-heavy jobs by assigning an artificial IO-resource to them and limiting it via the ``--resources`` flag.
 Resources must be ``int`` values.
+Note that you are free to choose any names for the given resources.
+When defining memory constraints, however, it is advised to use ``mem_mb``, because there are
+Snakemake execution modes that make use of this information (e.g., when using :ref:`kubernetes`).
 
 Resources can also be callables that return ``int`` values.
-The signature of the callable should be ``callable(wildcards, [input])`` (input is an optional parameter).
+The signature of the callable has to be ``callable(wildcards [, input] [, threads] [, attempt])`` (``input``, ``threads``, and ``attempt`` are optional parameters).
+
+The parameter ``attempt`` allows adjusting resources based on how often the job has been restarted (see :ref:`all_options`, option ``--restart-times``).
+This is handy when executing a Snakemake workflow in a cluster environment, where jobs can fail, e.g., because of insufficient resources.
+When Snakemake is executed with ``--restart-times 3``, it will try to restart a failed job 3 times before it gives up.
+In that case, the parameter ``attempt`` will contain the current attempt number (starting from ``1``).
+This can be used to adjust the required memory as follows
+
+.. code-block:: python
+
+    rule:
+        input:    ...
+        output:   ...
+        resources:
+            mem_mb=lambda wildcards, attempt: attempt * 100
+        shell:
+            "..."
+
+Here, the first attempt will require 100 MB of memory, the second attempt 200 MB, and so on.
+When passing the memory requirement on to the cluster engine, this lets you automatically request larger nodes if that turns out to be necessary.
 
 Messages
 --------
diff --git a/docs/tutorial/tutorial.rst b/docs/tutorial/tutorial.rst
index f74f9d0..dd6c7fb 100644
--- a/docs/tutorial/tutorial.rst
+++ b/docs/tutorial/tutorial.rst
@@ -18,7 +18,7 @@ Hooking into the Python interpreter, Snakemake offers a definition language that
 This allows to combine the flexibility of a plain scripting language with a pythonic workflow definition.
 The Python language is known to be concise yet readable and can appear almost like pseudo-code.
 The syntactic extensions provided by Snakemake maintain this property for the definition of the workflow.
-Further, Snakemakes scheduling algorithm can be constrained by priorities, provided cores and customizable resources and it provides a generic support for distributed computing (e.g., cluster or batch systems).
+Further, Snakemake's scheduling algorithm can be constrained by priorities, provided cores, and customizable resources, and it provides generic support for distributed computing (e.g., cluster or batch systems).
 Hence, a Snakemake workflow scales without modification from single core workstations and multi-core servers to cluster or batch systems.
 
 The examples presented in this tutorial come from Bioinformatics.
diff --git a/environment.yml b/environment.yml
index b4f0d94..b83090d 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,14 +1,13 @@
 channels:
   - bioconda
-  - r
-  - anaconda
   - conda-forge
+  - defaults
 dependencies:
-  - python >=3.3
-  - boto
-  - moto
+  - python >=3.5
+  - boto3
+  - moto >=1.0.1
   - httpretty
-  - filechunkio
+  - wrapt
   - pyyaml
   - ftputil
   - pysftp
@@ -20,3 +19,7 @@ dependencies:
   - psutil
   - pandas
   - nomkl
+  - google-cloud-storage
+  - ratelimiter
+  - configargparse
+  - appdirs
diff --git a/setup.py b/setup.py
index 5661c6a..235c3ed 100644
--- a/setup.py
+++ b/setup.py
@@ -13,8 +13,8 @@ import sys
 exec(open("snakemake/version.py").read())
 
 
-if sys.version_info < (3, 3):
-    print("At least Python 3.3 is required.\n", file=sys.stderr)
+if sys.version_info < (3, 5):
+    print("At least Python 3.5 is required.\n", file=sys.stderr)
     exit(1)
 
 
@@ -61,14 +61,18 @@ setup(
          "snakemake-bash-completion = snakemake:bash_completion"]
     },
     package_data={'': ['*.css', '*.sh', '*.html']},
-    install_requires=['wrapt', 'requests'],
-    tests_require=['pytools', 'rpy2', 'httpretty==0.8.10', 'docutils', 'nose>=1.3', 'boto>=2.38.0', 'filechunkio>=1.6',
-                   'moto>=0.4.14', 'ftputil>=3.2', 'pysftp>=0.2.8', 'requests>=2.8.1', 'dropbox>=5.2', 'pyyaml'],
+    install_requires=['wrapt', 'requests', 'ratelimiter', 'pyyaml',
+                      'configargparse', 'appdirs'],
+    tests_require=['pytools', 'rpy2', 'httpretty', 'docutils',
+                   'nose>=1.3', 'boto3',
+                   'moto>=0.4.14', 'ftputil>=3.2', 'pysftp>=0.2.8',
+                   'requests>=2.8.1', 'dropbox>=5.2', 'pyyaml',
+                   'google-cloud-storage', 'ratelimiter'],
     test_suite='all',
     cmdclass={'test': NoseTestCommand},
     classifiers=
     ["Development Status :: 5 - Production/Stable", "Environment :: Console",
      "Intended Audience :: Science/Research",
      "License :: OSI Approved :: MIT License", "Natural Language :: English",
-     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.5",
      "Topic :: Scientific/Engineering :: Bio-Informatics"])
diff --git a/snakemake/__init__.py b/snakemake/__init__.py
index 5888e51..12f7562 100644
--- a/snakemake/__init__.py
+++ b/snakemake/__init__.py
@@ -6,7 +6,6 @@ __license__ = "MIT"
 import os
 import subprocess
 import glob
-import argparse
 from argparse import ArgumentError
 import logging as _logging
 import re
@@ -18,7 +17,7 @@ from functools import partial
 import importlib
 
 from snakemake.workflow import Workflow
-from snakemake.exceptions import print_exception
+from snakemake.exceptions import print_exception, WorkflowError
 from snakemake.logging import setup_logger, logger
 from snakemake.version import __version__
 from snakemake.io import load_configfile
@@ -26,6 +25,7 @@ from snakemake.shell import shell
 from snakemake.utils import update_config, available_cpu_count
 from snakemake.common import Mode
 
+
 def snakemake(snakefile,
               listrules=False,
               list_target_rules=False,
@@ -99,15 +99,26 @@ def snakemake(snakefile,
               log_handler=None,
               keep_logger=False,
               max_jobs_per_second=None,
+              max_status_checks_per_second=100,
               restart_times=0,
+              attempt=1,
               verbose=False,
               force_use_threads=False,
               use_conda=False,
+              use_singularity=False,
+              singularity_args="",
               conda_prefix=None,
+              singularity_prefix=None,
+              create_envs_only=False,
               mode=Mode.default,
               wrapper_prefix=None,
+              kubernetes=None,
+              kubernetes_envvars=None,
+              container_image=None,
               default_remote_provider=None,
-              default_remote_prefix=""):
+              default_remote_prefix="",
+              assume_shared_fs=True,
+              cluster_status=None):
     """Run snakemake on a given snakefile.
 
     This function provides access to the whole snakemake functionality. It is not thread-safe.
@@ -180,14 +191,24 @@ def snakemake(snakefile,
         updated_files(list):        a list that will be filled with the files that are updated or created during the workflow execution
         verbose (bool):             show additional debug output (default False)
         max_jobs_per_second (int):  maximal number of cluster/drmaa jobs per second, None to impose no limit (default None)
-        restart_times (int):        number of times to restart failing jobs (default 1)
+        restart_times (int):        number of times to restart failing jobs (default 0)
+        attempt (int):              initial value of Job.attempt. This is intended for internal use only (default 1).
         force_use_threads:          whether to force use of threads over processes. helpful if shared memory is full or unavailable (default False)
         use_conda (bool):           create conda environments for each job (defined with conda directive of rules)
-        conda_prefix (str):         the directories in which conda environments will be created (default None)
+        use_singularity (bool):     run jobs in singularity containers (if defined with singularity directive)
+        singularity_args (str):     additional arguments to pass to singularity
+        conda_prefix (str):         the directory in which conda environments will be created (default None)
+        singularity_prefix (str):   the directory to which singularity images will be pulled (default None)
+        create_envs_only (bool):   If specified, only builds the conda environments specified for each job, then exits.
         mode (snakemake.common.Mode): Execution mode
         wrapper_prefix (str):       Prefix for wrapper script URLs (default None)
-        default_remote_provider (str): Default remote provider to use instead of local files (S3, GS)
+        kubernetes (str):           Submit jobs to kubernetes, using the given namespace.
+        kubernetes_envvars (list):  Environment variables that shall be passed to kubernetes jobs.
+        container_image (str):      Docker image to use, e.g., for kubernetes.
+        default_remote_provider (str): Default remote provider to use instead of local files (e.g. S3, GS)
         default_remote_prefix (str): Prefix for default remote provider (e.g. name of the bucket).
+        assume_shared_fs (bool):    Assume that cluster nodes share a common filesystem (default true).
+        cluster_status (str):       Status command for cluster execution. If None, Snakemake will rely on flag files. Otherwise, it expects the command to return "success", "failure" or "running" when executing with a cluster jobid as single argument.
         log_handler (function):     redirect snakemake output to this custom log handler, a function that takes a log message dictionary (see below) as its only argument (default None). The log message dictionary for the log handler has to following entries:
 
             :level:
@@ -252,11 +273,11 @@ def snakemake(snakefile,
         configs = [load_configfile(f) for f in cluster_config]
         # Merge in the order as specified, overriding earlier values with
         # later ones
-        cluster_config = configs[0]
+        cluster_config_content = configs[0]
         for other in configs[1:]:
-            update_config(cluster_config, other)
+            update_config(cluster_config_content, other)
     else:
-        cluster_config = dict()
+        cluster_config_content = dict()
 
     # force thread use for any kind of cluster
     use_threads = force_use_threads or (os.name != "posix") or cluster or cluster_sync or drmaa
@@ -305,6 +326,8 @@ def snakemake(snakefile,
         configfile = os.path.abspath(configfile)
     if config:
         overwrite_config.update(config)
+        if config_args is None:
+            config_args = unparse_config(config)
 
     if workdir:
         olddir = os.getcwd()
@@ -315,39 +338,43 @@ def snakemake(snakefile,
         workdir = os.path.abspath(workdir)
         os.chdir(workdir)
 
-    # handle default remote provider
-    _default_remote_provider = None
-    if default_remote_provider is not None:
-        try:
-            rmt = importlib.import_module("snakemake.remote." +
-                                          default_remote_provider)
-        except ImportError as e:
-            raise WorkflowError("Unknown default remote provider.")
-        if rmt.RemoteProvider.supports_default:
-            _default_remote_provider = rmt.RemoteProvider()
-        else:
-            raise WorkflowError("Remote provider {} does not (yet) support to "
-                                "be used as default provider.")
+    try:
+        # handle default remote provider
+        _default_remote_provider = None
+        if default_remote_provider is not None:
+            try:
+                rmt = importlib.import_module("snakemake.remote." +
+                                              default_remote_provider)
+            except ImportError as e:
+                raise WorkflowError("Unknown default remote provider.")
+            if rmt.RemoteProvider.supports_default:
+                _default_remote_provider = rmt.RemoteProvider()
+            else:
+                raise WorkflowError("Remote provider {} does not (yet) support "
+                                    "being used as default provider.".format(
+                                        default_remote_provider))
 
-    workflow = Workflow(snakefile=snakefile,
+        workflow = Workflow(snakefile=snakefile,
                         jobscript=jobscript,
                         overwrite_shellcmd=overwrite_shellcmd,
                         overwrite_config=overwrite_config,
                         overwrite_workdir=workdir,
                         overwrite_configfile=configfile,
-                        overwrite_clusterconfig=cluster_config,
+                        overwrite_clusterconfig=cluster_config_content,
                         config_args=config_args,
                         debug=debug,
                         use_conda=use_conda,
+                        use_singularity=use_singularity,
                         conda_prefix=conda_prefix,
+                        singularity_prefix=singularity_prefix,
+                        singularity_args=singularity_args,
                         mode=mode,
                         wrapper_prefix=wrapper_prefix,
                         printshellcmds=printshellcmds,
                         restart_times=restart_times,
+                        attempt=attempt,
                         default_remote_provider=_default_remote_provider,
                         default_remote_prefix=default_remote_prefix)
-    success = True
-    try:
+        success = True
         workflow.include(snakefile,
                          overwrite_first_rule=True,
                          print_compilation=print_compilation)
@@ -384,6 +411,8 @@ def snakemake(snakefile,
                                        immediate_submit=immediate_submit,
                                        standalone=standalone,
                                        ignore_ambiguity=ignore_ambiguity,
+                                       restart_times=restart_times,
+                                       attempt=attempt,
                                        lock=lock,
                                        unlock=unlock,
                                        cleanup_metadata=cleanup_metadata,
@@ -407,9 +436,20 @@ def snakemake(snakefile,
                                        keep_shadow=True,
                                        force_use_threads=use_threads,
                                        use_conda=use_conda,
+                                       use_singularity=use_singularity,
                                        conda_prefix=conda_prefix,
+                                       singularity_prefix=singularity_prefix,
+                                       singularity_args=singularity_args,
+                                       kubernetes=kubernetes,
+                                       kubernetes_envvars=kubernetes_envvars,
+                                       container_image=container_image,
+                                       create_envs_only=create_envs_only,
                                        default_remote_provider=default_remote_provider,
-                                       default_remote_prefix=default_remote_prefix)
+                                       default_remote_prefix=default_remote_prefix,
+                                       assume_shared_fs=assume_shared_fs,
+                                       cluster_status=cluster_status,
+                                       max_jobs_per_second=max_jobs_per_second,
+                                       max_status_checks_per_second=max_status_checks_per_second)
 
                 success = workflow.execute(
                     targets=targets,
@@ -435,7 +475,11 @@ def snakemake(snakefile,
                     jobname=jobname,
                     drmaa=drmaa,
                     drmaa_log_dir=drmaa_log_dir,
+                    kubernetes=kubernetes,
+                    kubernetes_envvars=kubernetes_envvars,
+                    container_image=container_image,
                     max_jobs_per_second=max_jobs_per_second,
+                    max_status_checks_per_second=max_status_checks_per_second,
                     printd3dag=printd3dag,
                     immediate_submit=immediate_submit,
                     ignore_ambiguity=ignore_ambiguity,
@@ -466,7 +510,10 @@ def snakemake(snakefile,
                     allowed_rules=allowed_rules,
                     greediness=greediness,
                     no_hooks=no_hooks,
-                    force_use_threads=use_threads)
+                    force_use_threads=use_threads,
+                    create_envs_only=create_envs_only,
+                    assume_shared_fs=assume_shared_fs,
+                    cluster_status=cluster_status)
 
     except BrokenPipeError:
         # ignore this exception and stop. It occurs if snakemake output is piped into less and less quits before reading the whole output.
@@ -475,6 +522,7 @@ def snakemake(snakefile,
     except (Exception, BaseException) as ex:
         print_exception(ex, workflow.linemaps)
         success = False
+
     if workdir:
         os.chdir(olddir)
     if workflow.persistence:
@@ -539,16 +587,98 @@ def parse_config(args):
     return config
 
 
-def get_argument_parser():
+def unparse_config(config):
+    if not isinstance(config, dict):
+        raise ValueError("config is not a dict")
+    items = []
+    for key, value in config.items():
+        if isinstance(value, dict):
+            raise ValueError("config may only be a flat dict")
+        encoded = "'{}'".format(value) if isinstance(value, str) else value
+        items.append("{}={}".format(key, encoded))
+    return items
+
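A quick usage sketch of the helper above (the config keys and values are made up for illustration; the order of the returned items follows dict iteration order):

    from snakemake import unparse_config

    config = {"samples": "samples.tsv", "threads": 8}
    print(unparse_config(config))
    # -> ["samples='samples.tsv'", 'threads=8']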
+
+APPDIRS = None
+
+
+def get_appdirs():
+    global APPDIRS
+    if APPDIRS is None:
+        from appdirs import AppDirs
+        APPDIRS = AppDirs("snakemake", "snakemake")
+    return APPDIRS
+
+
+def get_profile_file(profile, file, return_default=False):
+    dirs = get_appdirs()
+    if os.path.isabs(profile):
+        search_dirs = [os.path.dirname(profile)]
+        profile = os.path.basename(profile)
+    else:
+        search_dirs = [os.getcwd(),
+                       dirs.user_config_dir,
+                       dirs.site_config_dir]
+    get_path = lambda d: os.path.join(d, profile, file)
+    for d in search_dirs:
+        p = get_path(d)
+        if os.path.exists(p):
+            return p
+
+    if return_default:
+        return file
+    return None
+
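A usage sketch of the profile lookup just defined, assuming a hypothetical profile named "myprofile" (the directory locations mentioned in the comments are the typical appdirs defaults on Linux):

    from snakemake import get_profile_file

    # For a non-absolute profile name the search order is: the current
    # working directory, then the per-user config dir, then the site
    # config dir (typically ~/.config/snakemake and /etc/xdg/snakemake).
    config = get_profile_file("myprofile", "config.yaml")   # first match, or None
    script = get_profile_file("myprofile", "jobscript.sh",
                              return_default=True)          # falls back to "jobscript.sh"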
+
+def get_argument_parser(profile=None):
     """Generate and return argument parser."""
-    parser = argparse.ArgumentParser(
+    import configargparse
+    from configargparse import YAMLConfigFileParser
+
+    dirs = get_appdirs()
+    config_files = []
+    if profile:
+        if profile == "":
+            print("Error: invalid profile name.", file=sys.stderr)
+            exit(1)
+
+        config_file = get_profile_file(profile, "config.yaml")
+        if config_file is None:
+            print("Error: profile given but no config.yaml found. "
+                  "Profile has to be given as either absolute path, relative "
+                  "path or name of a directory available in either "
+                  "{site} or {user}.".format(
+                      site=dirs.site_config_dir,
+                      user=dirs.user_config_dir), file=sys.stderr)
+            exit(1)
+        config_files = [config_file]
+
+    parser = configargparse.ArgumentParser(
         description="Snakemake is a Python based language and execution "
-        "environment for GNU Make-like workflows.")
+        "environment for GNU Make-like workflows.",
+        default_config_files=config_files,
+        config_file_parser_class=YAMLConfigFileParser)
 
     parser.add_argument("target",
                         nargs="*",
                         default=None,
                         help="Targets to build. May be rules or files.")
+
+    parser.add_argument("--profile",
+                        help="""
+                        Name of profile to use for configuring
+                        Snakemake. Snakemake will search for a corresponding
+                        folder in {} and {}. Alternatively, this can be an
+                        absolute or relative path.
+                        The profile folder has to contain a file 'config.yaml'.
+                        This file can be used to set default values for command
+                        line options in YAML format. For example,
+                        '--cluster qsub' becomes 'cluster: qsub' in the YAML
+                        file. Profiles can be obtained from
+                        https://github.com/snakemake-profiles.
+                        """.format(dirs.site_config_dir,
+                                   dirs.user_config_dir))
+
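As the help text above states, the keys in a profile's config.yaml are simply the long option names without their leading dashes. A hypothetical example (location and values chosen for illustration only):

    # ~/.config/snakemake/myprofile/config.yaml
    cluster: qsub
    jobs: 100
    latency-wait: 60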
     parser.add_argument("--snakefile", "-s",
                         metavar="FILE",
                         default="Snakefile",
@@ -558,9 +688,14 @@ def get_argument_parser():
         nargs="?",
         const="8000",
         metavar="PORT",
-        type=int,
-        help="Serve an HTML based user interface to the given port "
-        "(default: 8000). If possible, a browser window is opened.")
+        type=str,
+        help="Serve an HTML based user interface to the given network and "
+        "port e.g. 168.129.10.15:8000. By default Snakemake is only "
+        "available in the local network (default port: 8000). To make "
+        "Snakemake listen to all ip addresses add the special host address "
+        "0.0.0.0 to the url (0.0.0.0:8000). This is important if Snakemake "
+        "is used in a virtualised environment like Docker. If possible, a "
+        "browser window is opened.")
     parser.add_argument(
         "--cores", "--jobs", "-j",
         action="store",
@@ -831,6 +966,40 @@ def get_argument_parser():
         help="Provide a custom name for the jobscript that is submitted to the "
         "cluster (see --cluster). NAME is \"snakejob.{rulename}.{jobid}.sh\" "
         "per default. The wildcard {jobid} has to be present in the name.")
+    parser.add_argument(
+        "--cluster-status",
+        help="Status command for cluster execution. This is only considered "
+        "in combination with the --cluster flag. If provided, Snakemake will "
+        "use the status command to determine if a job has finished successfully "
+        "or failed. For this it is necessary that the submit command provided "
+        "to --cluster returns the cluster job id. Then, the status command "
+        "will be invoked with the job id. Snakemake expects it to return "
+        "'success' if the job was successfull, 'failed' if the job failed and "
+        "'running' if the job still runs."
+    )
+
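The protocol described above (the status command is called with the external job id and must print 'running', 'success' or 'failed') is easy to satisfy with a small wrapper. A minimal sketch in Python, assuming a hypothetical 'mycluster-state JOBID' query command whose output states are made up for illustration:

    #!/usr/bin/env python3
    # status.py JOBID -- print one of: running, success, failed
    import subprocess
    import sys

    jobid = sys.argv[1]
    # 'mycluster-state' stands in for your scheduler's own query command.
    state = subprocess.run(["mycluster-state", jobid],
                           stdout=subprocess.PIPE,
                           universal_newlines=True).stdout.strip()

    if state in ("PENDING", "RUNNING"):
        print("running")
    elif state == "COMPLETED":
        print("success")
    else:
        print("failed")

Such a script would be passed via --cluster-status together with --cluster; Snakemake appends the job id when invoking it.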
+    parser.add_argument(
+        "--kubernetes", metavar="NAMESPACE",
+        nargs="?", const="default",
+        help="Execute workflow in a kubernetes cluster (in the cloud). "
+        "NAMESPACE is the namespace you want to use for your job (if nothing "
+        "specified: 'default'). "
+        "Usually, this requires --default-remote-provider and "
+        "--default-remote-prefix to be set to a S3 or GS bucket where your . "
+        "data shall be stored. It is further advisable to activate conda "
+        "integration via --use-conda.")
+    parser.add_argument(
+        "--kubernetes-env", nargs="+", metavar="ENVVAR", default=[],
+        help="Specify environment variables to pass to the kubernetes job.")
+    parser.add_argument(
+        "--container-image", metavar="IMAGE", help=
+        "Docker image to use, e.g., when submitting jobs to kubernetes. "
+        "By default, this is 'quay.io/snakemake/snakemake', tagged with "
+        "the same version as the currently running Snakemake instance. "
+        "Note that overwriting this value is up to your responsibility. "
+        "Any used image has to contain a working snakemake installation "
+        "that is compatible with (or ideally the same as) the currently "
+        "running version.")
     parser.add_argument("--reason", "-r",
                         action="store_true",
                         help="Print the reason for each executed rule.")
@@ -941,13 +1110,23 @@ def get_argument_parser():
         "used. Note that this is intended primarily for internal use and may "
         "lead to unexpected results otherwise.")
     parser.add_argument(
-        "--max-jobs-per-second", default=None, type=float,
+        "--max-jobs-per-second", default=10, type=float,
+        help=
+        "Maximal number of cluster/drmaa jobs per second, default is 10, "
+        "fractions allowed.")
+    parser.add_argument(
+        "--max-status-checks-per-second", default=10, type=float,
         help=
-        "Maximal number of cluster/drmaa jobs per second, default is no limit")
+        "Maximal number of job status checks per second, default is 10, "
+        "fractions allowed.")
     parser.add_argument(
         "--restart-times", default=0, type=int,
         help=
         "Number of times to restart failing jobs (defaults to 0).")
+    parser.add_argument(
+        "--attempt", default=1, type=int,
+        help="Internal use only: define the initial value of the attempt "
+        "parameter (default: 1).")
     parser.add_argument('--timestamp', '-T',
                         action='store_true',
                         help='Add a timestamp to all logging output')
@@ -980,7 +1159,7 @@ def get_argument_parser():
                         help="Allow to debug rules with e.g. PDB. This flag "
                         "allows to set breakpoints in run blocks.")
     parser.add_argument(
-        "--profile",
+        "--runtime-profile",
         metavar="FILE",
         help=
         "Profile Snakemake and write the output to FILE. This requires yappi "
@@ -1002,7 +1181,7 @@ def get_argument_parser():
     parser.add_argument(
         "--use-conda",
         action="store_true",
-        help="If defined in the rule, create job specific conda environments. "
+        help="If defined in the rule, run job in a conda environment. "
         "If this flag is not set, the conda directive is ignored.")
     parser.add_argument(
         "--conda-prefix",
@@ -1014,6 +1193,31 @@ def get_argument_parser():
         "If supplied, the `--use-conda` flag must also be set. The value may "
         "be given as a relative path, which will be extrapolated to the "
         "invocation directory, or as an absolute path.")
+    parser.add_argument("--create-envs-only",
+                        action="store_true",
+                        help="If specified, only creates the job-specific "
+                        "conda environments then exits. The `--use-conda` "
+                        "flag must also be set.")
+    parser.add_argument(
+        "--use-singularity",
+        action="store_true",
+        help="If defined in the rule, run job within a singularity container. "
+        "If this flag is not set, the singularity directive is ignored."
+    )
+    parser.add_argument(
+        "--singularity-prefix",
+        metavar="DIR",
+        help="Specify a directory in which singularity images will be stored."
+        "If not supplied, the value is set "
+        "to the '.snakemake' directory relative to the invocation directory. "
+        "If supplied, the `--use-singularity` flag must also be set. The value "
+        "may be given as a relative path, which will be extrapolated to the "
+        "invocation directory, or as an absolute path.")
+    parser.add_argument(
+        "--singularity-args",
+        default="",
+        metavar="ARGS",
+        help="Pass additional args to singularity.")
     parser.add_argument(
         "--wrapper-prefix",
         default="https://bitbucket.org/snakemake/snakemake-wrappers/raw/",
@@ -1022,7 +1226,7 @@ def get_argument_parser():
         "a different URL to use your fork or a local clone of the repository."
     )
     parser.add_argument("--default-remote-provider",
-                        choices=["S3", "GS", "SFTP", "S3Mocked"],
+                        choices=["S3", "GS", "FTP", "SFTP", "S3Mocked", "gfal", "gridftp"],
                         help="Specify default remote provider to be used for "
                         "all input and output files that don't yet specify "
                         "one.")
@@ -1030,6 +1234,20 @@ def get_argument_parser():
                         default="",
                         help="Specify prefix for default remote provider. E.g. "
                         "a bucket name.")
+    parser.add_argument("--no-shared-fs",
+                        action="store_true",
+                        help="Do not assume that jobs share a common file "
+                        "system. When this flag is activated, Snakemake will "
+                        "assume that the filesystem on a cluster node is not "
+                        "shared with other nodes. For example, this will lead "
+                        "to downloading remote files on each cluster node "
+                        "separately. Further, it won't take special measures "
+                        "to deal with filesystem latency issues. This option "
+                        "will in most cases only make sense in combination with "
+                        "--default-remote-provider. Further, when using --cluster "
+                        "you will have to also provide --cluster-status. "
+                        "Only activate this if you "
+                        "know what you are doing.")
     parser.add_argument("--version", "-v",
                         action="version",
                         version=__version__)
@@ -1041,6 +1259,27 @@ def main(argv=None):
     parser = get_argument_parser()
     args = parser.parse_args(argv)
 
+    if args.profile:
+        # reparse args while inferring config file from profile
+        parser = get_argument_parser(args.profile)
+        args = parser.parse_args(argv)
+        def adjust_path(f):
+            if os.path.exists(f) or os.path.isabs(f):
+                return f
+            else:
+                return get_profile_file(args.profile, f, return_default=True)
+
+        # update file paths to be relative to the profile
+        # (if they do not exist relative to CWD)
+        if args.jobscript:
+            args.jobscript = adjust_path(args.jobscript)
+        if args.cluster:
+            args.cluster = adjust_path(args.cluster)
+        if args.cluster_sync:
+            args.cluster_sync = adjust_path(args.cluster_sync)
+        if args.cluster_status:
+            args.cluster_status = adjust_path(args.cluster_status)
+
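For example, if a profile's config.yaml sets 'cluster-status: status.py' (a hypothetical file name) and no status.py exists in the current working directory, the bare name is resolved against the profile directory via get_profile_file.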
     if args.bash_completion:
         cmd = b"complete -o bashdefault -C snakemake-bash-completion snakemake"
         sys.stdout.buffer.write(cmd)
@@ -1072,7 +1311,7 @@ def main(argv=None):
         if not os.path.isabs(args.drmaa_log_dir):
             args.drmaa_log_dir = os.path.abspath(os.path.expanduser(args.drmaa_log_dir))
 
-    if args.profile:
+    if args.runtime_profile:
         import yappi
         yappi.start()
 
@@ -1083,12 +1322,18 @@ def main(argv=None):
             file=sys.stderr)
         sys.exit(1)
 
-    if args.conda_prefix and not args.use_conda:
+    if (args.conda_prefix or args.create_envs_only) and not args.use_conda:
         print(
-            "Error: --use-conda must be set if --conda-prefix is set.",
+            "Error: --use-conda must be set if --conda-prefix or "
+            "--create-envs-only is set.",
             file=sys.stderr)
         sys.exit(1)
 
+    if args.singularity_prefix and not args.use_singularity:
+        print("Error: --use_singularity must be set if --singularity-prefix "
+              "is set.", file=sys.stderr)
+        sys.exit(1)
+
     if args.gui is not None:
         try:
             import snakemake.gui as gui
@@ -1102,7 +1347,14 @@ def main(argv=None):
 
         _snakemake = partial(snakemake, os.path.abspath(args.snakefile))
         gui.register(_snakemake, args)
-        url = "http://127.0.0.1:{}".format(args.gui)
+
+        if ":" in args.gui:
+            host, port = args.gui.split(":")
+        else:
+            port = args.gui
+            host = "127.0.0.1"
+
+        url = "http://{}:{}".format(host, port)
         print("Listening on {}.".format(url), file=sys.stderr)
 
         def open_browser():
@@ -1115,8 +1367,10 @@ def main(argv=None):
               file=sys.stderr)
         threading.Timer(0.5, open_browser).start()
         success = True
+
         try:
-            gui.app.run(debug=False, threaded=True, port=args.gui)
+            gui.app.run(debug=False, threaded=True, port=int(port), host=host)
+
         except (KeyboardInterrupt, SystemExit):
             # silently close
             pass
@@ -1156,6 +1410,9 @@ def main(argv=None):
                             cluster_sync=args.cluster_sync,
                             drmaa=args.drmaa,
                             drmaa_log_dir=args.drmaa_log_dir,
+                            kubernetes=args.kubernetes,
+                            kubernetes_envvars=args.kubernetes_env,
+                            container_image=args.container_image,
                             jobname=args.jobname,
                             immediate_submit=args.immediate_submit,
                             standalone=True,
@@ -1189,17 +1446,25 @@ def main(argv=None):
                             keep_shadow=args.keep_shadow,
                             allowed_rules=args.allowed_rules,
                             max_jobs_per_second=args.max_jobs_per_second,
+                            max_status_checks_per_second=args.max_status_checks_per_second,
                             restart_times=args.restart_times,
+                            attempt=args.attempt,
                             force_use_threads=args.force_use_threads,
                             use_conda=args.use_conda,
                             conda_prefix=args.conda_prefix,
+                            use_singularity=args.use_singularity,
+                            singularity_prefix=args.singularity_prefix,
+                            singularity_args=args.singularity_args,
+                            create_envs_only=args.create_envs_only,
                             mode=args.mode,
                             wrapper_prefix=args.wrapper_prefix,
                             default_remote_provider=args.default_remote_provider,
-                            default_remote_prefix=args.default_remote_prefix)
+                            default_remote_prefix=args.default_remote_prefix,
+                            assume_shared_fs=not args.no_shared_fs,
+                            cluster_status=args.cluster_status)
 
-    if args.profile:
-        with open(args.profile, "w") as out:
+    if args.runtime_profile:
+        with open(args.runtime_profile, "w") as out:
             profile = yappi.get_func_stats()
             profile.sort("totaltime")
             profile.print_all(out=out)
diff --git a/snakemake/common.py b/snakemake/common.py
index 0feefd8..3bb5179 100644
--- a/snakemake/common.py
+++ b/snakemake/common.py
@@ -4,6 +4,8 @@ __email__ = "johannes.koester at protonmail.com"
 __license__ = "MIT"
 
 from functools import update_wrapper
+import inspect
+
 
 DYNAMIC_FILL = "__snakemake_dynamic__"
 
@@ -39,3 +41,10 @@ def strip_prefix(text, prefix):
     if text.startswith(prefix):
         return text[len(prefix):]
     return text
+
+
+def log_location(msg):
+    # local import, since snakemake.logging is not imported at module level
+    from snakemake.logging import logger
+    callerframerecord = inspect.stack()[1]
+    frame = callerframerecord[0]
+    info = inspect.getframeinfo(frame)
+    logger.debug("{}: {info.filename}, {info.function}, {info.lineno}".format(msg, info=info))
diff --git a/snakemake/conda.py b/snakemake/conda.py
index b9eadea..a839be8 100644
--- a/snakemake/conda.py
+++ b/snakemake/conda.py
@@ -15,6 +15,17 @@ from snakemake.common import strip_prefix
 from snakemake import utils
 
 
+def content(env_file):
+    if urlparse(env_file).scheme:
+        return urlopen(env_file).read()
+    else:
+        if not os.path.exists(env_file):
+            raise WorkflowError("Conda env file does not "
+                                "exist: {}".format(env_file))
+        with open(env_file, 'rb') as f:
+            return f.read()
+
+
 class Env:
 
     """Conda environment from a given specification file."""
@@ -33,13 +44,7 @@ class Env:
     @property
     def content(self):
         if self._content is None:
-            env_file = self.file
-            if urlparse(env_file).scheme:
-                content = urlopen(env_file).read()
-            else:
-                with open(env_file, 'rb') as f:
-                    content = f.read()
-            self._content = content
+            self._content = content(self.file)
         return self._content
 
     @property
@@ -171,6 +176,10 @@ class Env:
         return env_path
 
 
+def shellcmd(env_path):
+    return "source activate {};".format(env_path)
+
+
 def check_conda():
     if shutil.which("conda") is None:
         raise CreateCondaEnvironmentException("The 'conda' command is not available in $PATH.")
diff --git a/snakemake/dag.py b/snakemake/dag.py
index 6acf6ec..50c5a95 100644
--- a/snakemake/dag.py
+++ b/snakemake/dag.py
@@ -28,7 +28,8 @@ from snakemake.exceptions import UnexpectedOutputException, InputFunctionExcepti
 from snakemake.logging import logger
 from snakemake.output_index import OutputIndex
 from snakemake.common import DYNAMIC_FILL
-from snakemake import conda
+from snakemake import conda, singularity
+from snakemake import utils
 
 # Workaround for Py <3.5 prior to existence of RecursionError
 try:
@@ -65,7 +66,6 @@ class DAG:
         self._needrun = set()
         self._priority = dict()
         self._downstream_size = dict()
-        self._temp_input_count = dict()
         self._reason = defaultdict(Reason)
         self._finished = set()
         self._dynamic = set()
@@ -85,6 +85,7 @@ class DAG:
         self._jobid = dict()
         self.job_cache = dict()
         self.conda_envs = dict()
+        self._progress = 0
 
         self.forcerules = set()
         self.forcefiles = set()
@@ -117,14 +118,14 @@ class DAG:
 
         self.update_output_index()
 
-    def init(self):
+    def init(self, progress=False):
         """ Initialise the DAG. """
         for job in map(self.rule2job, self.targetrules):
-            job = self.update([job])
+            job = self.update([job], progress=progress)
             self.targetjobs.add(job)
 
         for file in self.targetfiles:
-            job = self.update(self.file2jobs(file), file=file)
+            job = self.update(self.file2jobs(file), file=file, progress=progress)
             self.targetjobs.add(job)
 
         self.cleanup()
@@ -153,23 +154,35 @@ class DAG:
             except KeyError:
                 pass
 
-    def create_conda_envs(self, dryrun=False):
+    def create_conda_envs(self, dryrun=False, forceall=False):
         conda.check_conda()
         # First deduplicate based on job.conda_env_file
-        env_set = {job.conda_env_file for job in self.needrun_jobs
+        jobs = self.jobs if forceall else self.needrun_jobs
+        env_set = {job.conda_env_file for job in jobs
                    if job.conda_env_file}
         # Then based on md5sum values
-        env_file_map = dict()
+        self.conda_envs = dict()
         hash_set = set()
         for env_file in env_set:
             env = conda.Env(env_file, self)
             hash = env.hash
-            env_file_map[env_file] = env
+            self.conda_envs[env_file] = env
             if hash not in hash_set:
                 env.create(dryrun)
                 hash_set.add(hash)
 
-        self.conda_envs = env_file_map
+    def pull_singularity_imgs(self, dryrun=False, forceall=False):
+        # First deduplicate based on job.conda_env_file
+        jobs = self.jobs if forceall else self.needrun_jobs
+        img_set = {job.singularity_img_url for job in jobs
+                   if job.singularity_img_url}
+
+        self.singularity_imgs = dict()
+        for img_url in img_set:
+            img = singularity.Image(img_url, self)
+            img.pull(dryrun)
+            self.singularity_imgs[img_url] = img
+
 
     def update_output_index(self):
         """Update the OutputIndex."""
@@ -188,6 +201,24 @@ class DAG:
                 else:
                     raise IncompleteFilesException(incomplete)
 
+    def incomplete_external_jobid(self, job):
+        """Return the external jobid of the job if it is marked as incomplete.
+
+        Returns None, if job is not incomplete, or if no external jobid has been
+        registered or if force_incomplete is True.
+        """
+        if self.force_incomplete:
+            return None
+        jobids = self.workflow.persistence.external_jobids(job)
+        if len(jobids) == 1:
+            return jobids[0]
+        else:
+            raise WorkflowError(
+                "Multiple different external jobids registered "
+                "for output files of incomplete job {} ({}). This job "
+                "cannot be resumed. Execute Snakemake with --rerun-incomplete "
+                "to fix this issue.".format(job.jobid, jobids))
+
     def check_dynamic(self):
         """Check dynamic output and update downstream rules if necessary."""
         for job in filter(
@@ -249,10 +280,6 @@ class DAG:
         """Return the number of downstream jobs of a given job."""
         return self._downstream_size[job]
 
-    def temp_input_count(self, job):
-        """Return number of temporary input files of given job."""
-        return self._temp_input_count[job]
-
     def noneedrun_finished(self, job):
         """
         Return whether a given job is finished or was not
@@ -307,15 +334,22 @@ class DAG:
                 return True
         return False
 
-    def check_and_touch_output(self, job, wait=3, ignore_missing_output=False):
+    def check_and_touch_output(self,
+                               job,
+                               wait=3,
+                               ignore_missing_output=False,
+                               no_touch=False,
+                               force_stay_on_remote=False):
         """ Raise exception if output files of job are missing. """
         expanded_output = [job.shadowed_path(path) for path in job.expanded_output]
         if job.benchmark:
             expanded_output.append(job.benchmark)
 
-        if ignore_missing_output is False:
+        if not ignore_missing_output:
             try:
-                wait_for_files(expanded_output, latency_wait=wait)
+                wait_for_files(expanded_output,
+                               latency_wait=wait,
+                               force_stay_on_remote=force_stay_on_remote)
             except IOError as e:
                 raise MissingOutputException(str(e) + "\nThis might be due to "
                 "filesystem latency. If that is the case, consider to increase the "
@@ -328,10 +362,11 @@ class DAG:
         #Note that if the input files somehow have a future date then this will
         #not currently be spotted and the job will always be re-run.
         #Also, don't touch directories, as we can't guarantee they were removed.
-        for f in expanded_output:
-            #This will neither create missing files nor touch directories
-            if os.path.isfile(f):
-                f.touch()
+        if not no_touch:
+            for f in expanded_output:
+                #This will neither create missing files nor touch directories
+                if os.path.isfile(f):
+                    f.touch()
 
     def unshadow_output(self, job):
         """ Move files from shadow directory to real output paths. """
@@ -389,34 +424,60 @@ class DAG:
             for f in filter(job_.temp_output.__contains__, files):
                 yield f
 
+    def temp_size(self, job):
+        """Return the total size of temporary input files of the job.
+        If none, return 0.
+        """
+        return sum(f.size for f in self.temp_input(job))
+
     def handle_temp(self, job):
-        """ Remove temp files if they are no longer needed. """
+        """ Remove temp files if they are no longer needed. Update temp_mtimes. """
         if self.notemp:
             return
 
+        is_temp = lambda f: is_flagged(f, "temp")
+
+        # handle temp input
+
         needed = lambda job_, f: any(
             f in files for j, files in self.depending[job_].items()
             if not self.finished(j) and self.needrun(j) and j != job)
 
         def unneeded_files():
+            # temp input
             for job_, files in self.dependencies[job].items():
-                yield from filterfalse(partial(needed, job_), job_.temp_output & files)
-            if job not in self.targetjobs:
-                yield from filterfalse(partial(needed, job), job.temp_output)
+                tempfiles = set(f for f in job_.expanded_output if is_temp(f))
+                yield from filterfalse(partial(needed, job_), tempfiles & files)
+
+            # temp output
+            if job not in self.targetjobs and not job.dynamic_output:
+                tempfiles = (f for f in job.expanded_output if is_temp(f))
+                yield from filterfalse(partial(needed, job), tempfiles)
 
         for f in unneeded_files():
             logger.info("Removing temporary output file {}.".format(f))
             f.remove(remove_non_empty_dir=True)
 
+    def handle_log(self, job, upload_remote=True):
+        for f in job.log:
+            if not f.exists_local:
+                # If log file was not created during job, create an empty one.
+                f.touch_or_create()
+            if upload_remote and f.is_remote and not f.should_stay_on_remote:
+                f.upload_to_remote()
+                if not f.exists_remote:
+                    raise RemoteFileException(
+                        "The file upload was attempted, but it does not "
+                        "exist on remote. Check that your credentials have "
+                        "read AND write permissions.")
+
     def handle_remote(self, job, upload=True):
-        """ Remove local files if they are no longer needed, and upload to S3. """
+        """ Remove local files if they are no longer needed and upload. """
         if upload:
             # handle output files
             files = list(job.expanded_output)
             if job.benchmark:
                 files.append(job.benchmark)
-            if job.log:
-                files.extend(job.log)
             for f in files:
                 if f.is_remote and not f.should_stay_on_remote:
                     f.upload_to_remote()
@@ -446,9 +507,12 @@ class DAG:
                     for f in filter(putative, files):
                         if not needed(job_, f):
                             yield f
-                for f in filter(putative, job.output):
-                    if not needed(job, f) and not f in self.targetfiles:
-                        for f_ in job.expand_dynamic(f):
+                for f, f_ in zip(job.output, job.rule.output):
+                    if putative(f) and not needed(job, f) and not f in self.targetfiles:
+                        if f in job.dynamic_output:
+                            for f_ in job.expand_dynamic(f_):
+                                yield f_
+                        else:
                             yield f
                 for f in filter(putative, job.input):
                     # TODO what about remote inputs that are used by multiple jobs?
@@ -466,7 +530,7 @@ class DAG:
         """Return job id of given job."""
         return self._jobid[job]
 
-    def update(self, jobs, file=None, visited=None, skip_until_dynamic=False):
+    def update(self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False):
         """ Update the DAG by adding given jobs and their dependencies. """
         if visited is None:
             visited = set()
@@ -488,7 +552,8 @@ class DAG:
                 self.check_periodic_wildcards(job)
                 self.update_(job,
                              visited=set(visited),
-                             skip_until_dynamic=skip_until_dynamic)
+                             skip_until_dynamic=skip_until_dynamic,
+                             progress=progress)
                 # TODO this might fail if a rule discarded here is needed
                 # elsewhere
                 if producer:
@@ -520,9 +585,14 @@ class DAG:
 
         logger.dag_debug(dict(status="selected", job=job))
 
+        n = len(self.dependencies)
+        if progress and n % 1000 == 0 and n and self._progress != n:
+            logger.info("Processed {} potential jobs.".format(n))
+            self._progress = n
+
         return producer
 
-    def update_(self, job, visited=None, skip_until_dynamic=False):
+    def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
         """ Update the DAG by adding the given job and its dependencies. """
         if job in self.dependencies:
             return
@@ -545,7 +615,8 @@ class DAG:
                     file=file,
                     visited=visited,
                     skip_until_dynamic=skip_until_dynamic or file in
-                    job.dynamic_input)
+                    job.dynamic_input,
+                    progress=progress)
                 producer[file] = selected_job
             except (MissingInputException, CyclicGraphException,
                     PeriodicWildcardError) as ex:
@@ -712,11 +783,6 @@ class DAG:
                                   job,
                                   stop=self.noneedrun_finished)) - 1
 
-    def update_temp_input_count(self):
-        """For each job update the number of temporary input files."""
-        for job in self.needrun_jobs:
-            self._temp_input_count[job] = sum(1 for _ in self.temp_input(job))
-
     def close_remote_objects(self):
         """Close all remote objects."""
         for job in self.jobs:
@@ -731,7 +797,6 @@ class DAG:
         self.update_priority()
         self.update_ready()
         self.update_downstream_size()
-        self.update_temp_input_count()
         self.close_remote_objects()
 
     def _ready(self, job):
@@ -1162,6 +1227,8 @@ class DAG:
         if os.path.exists(path):
             raise WorkflowError("Archive already exists:\n" + path)
 
+        self.create_conda_envs(forceall=True)
+
         try:
             workdir = Path(os.path.abspath(os.getcwd()))
             with tarfile.open(path, mode=mode, dereference=True) as archive:
@@ -1178,14 +1245,10 @@ class DAG:
                             archived.add(f)
                             logger.info("archived " + f)
 
-                logger.info("Archiving files under version control...")
-                try:
-                    out = subprocess.check_output(["git", "ls-files", "."])
-                    for f in out.decode().split("\n"):
-                        if f:
-                            add(f)
-                except subprocess.CalledProcessError as e:
-                    raise WorkflowError("Error executing git.")
+                logger.info("Archiving snakefiles, scripts and files under "
+                            "version control...")
+                for f in self.workflow.get_sources():
+                    add(f)
 
                 logger.info("Archiving external input files...")
                 for job in self.jobs:
@@ -1199,7 +1262,6 @@ class DAG:
                 envs = set()
                 for job in self.jobs:
                     if job.conda_env_file:
-                        job.create_conda_env()
                         env_archive = job.archive_conda_env()
                         envs.add(env_archive)
                 for env in envs:
diff --git a/snakemake/exceptions.py b/snakemake/exceptions.py
index 8bcda51..e93bdc1 100644
--- a/snakemake/exceptions.py
+++ b/snakemake/exceptions.py
@@ -325,6 +325,10 @@ class NCBIFileException(RuleException):
     def __init__(self, msg, lineno=None, snakefile=None):
         super().__init__(msg, lineno=lineno, snakefile=snakefile)
 
+class WebDAVFileException(RuleException):
+    def __init__(self, msg, lineno=None, snakefile=None):
+        super().__init__(msg, lineno=lineno, snakefile=snakefile)
+
 class ClusterJobException(RuleException):
     def __init__(self, job_info, jobid):
         super().__init__(
diff --git a/snakemake/executors.py b/snakemake/executors.py
index 58e3f58..9673646 100644
--- a/snakemake/executors.py
+++ b/snakemake/executors.py
@@ -13,6 +13,7 @@ import json
 import textwrap
 import stat
 import shutil
+import shlex
 import threading
 import concurrent.futures
 import subprocess
@@ -21,6 +22,9 @@ from functools import partial
 from itertools import chain
 from collections import namedtuple
 from tempfile import mkdtemp
+import random
+import base64
+import uuid
 
 from snakemake.jobs import Job
 from snakemake.shell import shell
@@ -32,6 +36,15 @@ from snakemake.exceptions import print_exception, get_exception_origin
 from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
 from snakemake.exceptions import ClusterJobException, ProtectedOutputException, WorkflowError, ImproperShadowException, SpawnedJobError
 from snakemake.common import Mode
+from snakemake.version import __version__
+
+
+def format_files(job, io, dynamicio):
+    for f in io:
+        if f in dynamicio:
+            yield "{} (dynamic)".format(f.format_dynamic())
+        else:
+            yield f
 
 
 class AbstractExecutor:
@@ -54,7 +67,7 @@ class AbstractExecutor:
     def get_default_remote_provider_args(self):
         if self.workflow.default_remote_provider:
             return (
-                "--default-remote-provider {} "
+                " --default-remote-provider {} "
                 "--default-remote-prefix {} ").format(
                     self.workflow.default_remote_provider.__module__.split(".")[-1],
                     self.workflow.default_remote_prefix)
@@ -85,13 +98,6 @@ class AbstractExecutor:
         if self.dag.dynamic(job):
             return
 
-        def format_files(job, io, dynamicio):
-            for f in io:
-                if f in dynamicio:
-                    yield "{} (dynamic)".format(f.format_dynamic())
-                else:
-                    yield f
-
         priority = self.dag.priority(job)
         logger.job_info(jobid=self.dag.jobid(job),
                         msg=job.message,
@@ -114,9 +120,15 @@ class AbstractExecutor:
             logger.info("Subsequent jobs will be added dynamically "
                         "depending on the output of this rule")
 
-    def print_job_error(self, job):
-        logger.error("Error in job {} while creating output file{} {}.".format(
-            job, "s" if len(job.output) > 1 else "", ", ".join(job.output)))
+    def print_job_error(self, job, msg=None, **kwargs):
+        logger.job_error(name=job.rule.name,
+                         jobid=self.dag.jobid(job),
+                         output=list(format_files(job, job.output,
+                                                  job.dynamic_output)),
+                         log=list(job.log),
+                         aux=kwargs)
+        if msg is not None:
+            logger.error(msg)
 
     def handle_job_success(self, job):
         pass
@@ -137,20 +149,26 @@ class RealExecutor(AbstractExecutor):
                  quiet=False,
                  printshellcmds=False,
                  latency_wait=3,
-                 benchmark_repeats=1):
+                 benchmark_repeats=1,
+                 assume_shared_fs=True):
         super().__init__(workflow, dag,
                          printreason=printreason,
                          quiet=quiet,
                          printshellcmds=printshellcmds,
                          latency_wait=latency_wait,
                          benchmark_repeats=benchmark_repeats)
+        self.assume_shared_fs = assume_shared_fs
         self.stats = Stats()
+        self.snakefile = workflow.snakefile
+
+    def register_job(self, job):
+        self.workflow.persistence.started(job)
 
     def _run(self, job, callback=None, error_callback=None):
         super()._run(job)
         self.stats.report_job_start(job)
         try:
-            self.workflow.persistence.started(job)
+            self.register_job(job)
         except IOError as e:
             logger.info(
                 "Failed to set marker file for job started ({}). "
@@ -160,16 +178,24 @@ class RealExecutor(AbstractExecutor):
                 "directory {}".format(e, self.workflow.persistence.path))
 
     def handle_job_success(self, job, upload_remote=True, ignore_missing_output=False):
-        self.dag.handle_touch(job)
-        self.dag.check_and_touch_output(
-            job,
-            wait=self.latency_wait,
-            ignore_missing_output=ignore_missing_output)
-        self.dag.unshadow_output(job)
-        self.dag.handle_remote(job, upload=upload_remote)
-        self.dag.handle_protected(job)
+        if self.assume_shared_fs:
+            self.dag.handle_touch(job)
+            self.dag.handle_log(job)
+            self.dag.check_and_touch_output(
+                job,
+                wait=self.latency_wait,
+                ignore_missing_output=ignore_missing_output)
+            self.dag.unshadow_output(job)
+            self.dag.handle_remote(job, upload=upload_remote)
+            self.dag.handle_protected(job)
+            job.close_remote()
+        else:
+            self.dag.check_and_touch_output(
+                job,
+                wait=self.latency_wait,
+                no_touch=True,
+                force_stay_on_remote=True)
         self.dag.handle_temp(job)
-        job.close_remote()
 
         self.stats.report_job_end(job)
         try:
@@ -180,8 +206,10 @@ class RealExecutor(AbstractExecutor):
                         "directory {}".format(e,
                                               self.workflow.persistence.path))
 
-    def handle_job_error(self, job):
-        job.close_remote()
+    def handle_job_error(self, job, upload_remote=True):
+        if self.assume_shared_fs:
+            self.dag.handle_log(job, upload_remote=upload_remote)
+            job.close_remote()
 
     def format_job_pattern(self, pattern, job=None, **kwargs):
         overwrite_workdir = []
@@ -203,10 +231,12 @@ class RealExecutor(AbstractExecutor):
 
         return format(pattern,
                       job=job,
+                      attempt=job.attempt,
                       overwrite_workdir=overwrite_workdir,
                       overwrite_config=overwrite_config,
                       printshellcmds=printshellcmds,
                       workflow=self.workflow,
+                      snakefile=self.snakefile,
                       cores=self.cores,
                       benchmark_repeats=self.benchmark_repeats,
                       target=target,
@@ -257,18 +287,25 @@ class CPUExecutor(RealExecutor):
 
         self.exec_job = '\\\n'.join((
             'cd {workflow.workdir_init} && ',
-            '{sys.executable} -m snakemake {target} --snakefile {workflow.snakefile} ',
+            '{sys.executable} -m snakemake {target} --snakefile {snakefile} ',
             '--force -j{cores} --keep-target-files --keep-shadow --keep-remote ',
-            '--benchmark-repeats {benchmark_repeats} ',
+            '--benchmark-repeats {benchmark_repeats} --attempt {attempt} ',
             '--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ',
+            '--latency-wait {latency_wait} ',
             self.get_default_remote_provider_args(),
-            '{overwrite_workdir} {overwrite_config} {printshellcmds} ',
+            '{overwrite_workdir} {overwrite_config} ',
             '--notemp --quiet --no-hooks --nolock --mode {} '.format(Mode.subprocess)))
 
         if self.workflow.use_conda:
             self.exec_job += " --use-conda "
             if self.workflow.conda_prefix:
-                self.exec_job += " --conda-prefix " + self.workflow.conda_prefix + " "
+                self.exec_job += " --conda-prefix {} ".format(
+                    self.workflow.conda_prefix)
+        if self.workflow.use_singularity:
+            self.exec_job += " --use-singularity "
+            if self.workflow.singularity_prefix:
+                self.exec_job += " --singularity-prefix {} ".format(
+                    self.workflow.singularity_prefix)
 
         self.use_threads = use_threads
         self.cores = cores
@@ -284,7 +321,10 @@ class CPUExecutor(RealExecutor):
             job.prepare()
             conda_env = None
             if self.workflow.use_conda:
-                conda_env = job.conda_env
+                conda_env = job.conda_env_path
+            singularity_img = None
+            if self.workflow.use_singularity:
+                singularity_img = job.singularity_img_path
 
             benchmark = None
             if job.benchmark is not None:
@@ -293,7 +333,8 @@ class CPUExecutor(RealExecutor):
                 run_wrapper, job.rule, job.input.plainstrings(),
                 job.output.plainstrings(), job.params, job.wildcards, job.threads,
                 job.resources, job.log.plainstrings(), benchmark,
-                self.benchmark_repeats, conda_env,
+                self.benchmark_repeats, conda_env, singularity_img,
+                self.workflow.singularity_args,
                 self.workflow.linemaps, self.workflow.debug,
                 shadow_dir=job.shadow_dir)
         else:
@@ -307,7 +348,9 @@ class CPUExecutor(RealExecutor):
         exec_job = self.exec_job
         if not job.rule.is_branched:
             exec_job += " --allowed-rules {}".format(job.rule)
-        cmd = self.format_job_pattern(exec_job, job=job, _quote_all=True)
+        cmd = self.format_job_pattern(exec_job, job=job,
+                                      _quote_all=True,
+                                      latency_wait=self.latency_wait)
         try:
             subprocess.check_call(cmd, shell=True)
         except subprocess.CalledProcessError:
@@ -340,6 +383,7 @@ class CPUExecutor(RealExecutor):
         super().handle_job_success(job)
 
     def handle_job_error(self, job):
+        super().handle_job_error(job)
         job.cleanup()
         self.workflow.persistence.cleanup(job)
 
@@ -357,15 +401,24 @@ class ClusterExecutor(RealExecutor):
                  benchmark_repeats=1,
                  cluster_config=None,
                  local_input=None,
-                 max_jobs_per_second=None,
-                 restart_times=None):
+                 restart_times=None,
+                 exec_job=None,
+                 assume_shared_fs=True,
+                 max_status_checks_per_second=1):
+        from ratelimiter import RateLimiter
+
         local_input = local_input or []
         super().__init__(workflow, dag,
                          printreason=printreason,
                          quiet=quiet,
                          printshellcmds=printshellcmds,
                          latency_wait=latency_wait,
-                         benchmark_repeats=benchmark_repeats)
+                         benchmark_repeats=benchmark_repeats,
+                         assume_shared_fs=assume_shared_fs)
+
+        if not self.assume_shared_fs:
+            # use relative path to Snakefile
+            self.snakefile = os.path.relpath(workflow.snakefile)
 
         jobscript = workflow.jobscript
         if jobscript is None:
@@ -381,23 +434,34 @@ class ClusterExecutor(RealExecutor):
             raise WorkflowError(
                 "Defined jobname (\"{}\") has to contain the wildcard {jobid}.")
 
-        self.exec_job = '\\\n'.join((
-            'cd {workflow.workdir_init} && ',
-            '{sys.executable} -m snakemake {target} --snakefile {workflow.snakefile} ',
-            '--force -j{cores} --keep-target-files --keep-shadow --keep-remote ',
-            '--wait-for-files {wait_for_files} --latency-wait {latency_wait} ',
-            '--benchmark-repeats {benchmark_repeats} ',
-            self.get_default_remote_provider_args(),
-            '--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ',
-            '{overwrite_workdir} {overwrite_config} {printshellcmds} --nocolor ',
-            '--notemp --quiet --no-hooks --nolock'))
+        if exec_job is None:
+            self.exec_job = '\\\n'.join((
+                'cd {workflow.workdir_init} && ' if assume_shared_fs else '',
+                '{sys.executable} ' if assume_shared_fs else 'python ',
+                '-m snakemake {target} --snakefile {snakefile} ',
+                '--force -j{cores} --keep-target-files --keep-shadow --keep-remote ',
+                '--wait-for-files {wait_for_files} --latency-wait {latency_wait} ',
+                '--benchmark-repeats {benchmark_repeats} --attempt {attempt} ',
+                '--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ',
+                '{overwrite_workdir} {overwrite_config} {printshellcmds} --nocolor ',
+                '--notemp --no-hooks --nolock --timestamp '))
+        else:
+            self.exec_job = exec_job
 
         if printshellcmds:
             self.exec_job += " --printshellcmds "
         if self.workflow.use_conda:
             self.exec_job += " --use-conda "
             if self.workflow.conda_prefix:
-                self.exec_job += " --conda-prefix " + self.workflow.conda_prefix + " "
+                self.exec_job += " --conda-prefix {} ".format(
+                    self.workflow.conda_prefix)
+        if self.workflow.use_singularity:
+            self.exec_job += " --use-singularity "
+            if self.workflow.singularity_prefix:
+                self.exec_job += " --singularity-prefix {} ".format(
+                    self.workflow.singularity_prefix)
+
+        self.exec_job += self.get_default_remote_provider_args()
 
         # force threading.Lock() for cluster jobs
         self.exec_job += " --force-use-threads "
@@ -410,12 +474,7 @@ class ClusterExecutor(RealExecutor):
         self.cores = cores if cores else ""
         self.cluster_config = cluster_config if cluster_config else dict()
 
-        self.max_jobs_per_second = max_jobs_per_second
         self.restart_times = restart_times
-        if self.max_jobs_per_second:
-            self.rate_lock = threading.RLock()
-            self.rate_interval = 1 / self.max_jobs_per_second
-            self.rate_last_called = 0
 
         self.active_jobs = list()
         self.lock = threading.Lock()
@@ -424,6 +483,12 @@ class ClusterExecutor(RealExecutor):
         self.wait_thread.daemon = True
         self.wait_thread.start()
 
+        self.max_status_checks_per_second = max_status_checks_per_second
+
+        self.status_rate_limiter = RateLimiter(
+            max_calls=self.max_status_checks_per_second,
+            period=1)
+
     def shutdown(self):
         with self.lock:
             self.wait = False
@@ -433,20 +498,10 @@ class ClusterExecutor(RealExecutor):
     def cancel(self):
         self.shutdown()
 
-    def _limit_rate(self):
-        """Called in ``_run()`` for rate-limiting"""
-        with self.rate_lock:
-            elapsed = time.clock() - self.rate_last_called
-            wait = self.rate_interval - elapsed
-            if wait > 0:
-                time.sleep(wait)
-            self.rate_last_called = time.clock()
-
     def _run(self, job, callback=None, error_callback=None):
-        if self.max_jobs_per_second:
-            self._limit_rate()
-        job.remove_existing_output()
-        job.download_remote_input()
+        if self.assume_shared_fs:
+            job.remove_existing_output()
+            job.download_remote_input()
         super()._run(job, callback=callback, error_callback=error_callback)
         logger.shellcmd(job.shellcmd)
 
@@ -468,16 +523,18 @@ class ClusterExecutor(RealExecutor):
 
         return os.path.join(self.tmpdir, f)
 
-    def spawn_jobscript(self, job, jobscript, **kwargs):
-        wait_for_files = [self.tmpdir]
-        wait_for_files.extend(job.local_input)
-        wait_for_files.extend(f.local_file()
-                              for f in job.remote_input if not f.stay_on_remote)
+    def format_job(self, pattern, job, **kwargs):
+        wait_for_files = []
+        if self.assume_shared_fs:
+            wait_for_files.append(self.tmpdir)
+            wait_for_files.extend(job.local_input)
+            wait_for_files.extend(f.local_file()
+                                  for f in job.remote_input if not f.stay_on_remote)
 
-        if job.shadow_dir:
-            wait_for_files.append(job.shadow_dir)
-        if self.workflow.use_conda and job.conda_env:
-            wait_for_files.append(job.conda_env)
+            if job.shadow_dir:
+                wait_for_files.append(job.shadow_dir)
+            if self.workflow.use_conda and job.conda_env:
+                wait_for_files.append(job.conda_env_path)
 
         format_p = partial(self.format_job_pattern,
                            job=job,
@@ -488,13 +545,23 @@ class ClusterExecutor(RealExecutor):
                            **kwargs)
         exec_job = self.exec_job
         try:
-            exec_job = format_p(exec_job, _quote_all=True)
-            with open(jobscript, "w") as f:
-                print(format_p(self.jobscript, exec_job=exec_job), file=f)
+            return format_p(pattern)
         except KeyError as e:
             raise WorkflowError(
                 "Error formatting jobscript: {} not found\n"
                 "Make sure that your custom jobscript is up to date.".format(e))
+
+    def write_jobscript(self, job, jobscript, **kwargs):
+        exec_job = self.format_job(self.exec_job,
+                                   job,
+                                   _quote_all=True,
+                                   **kwargs)
+        content = self.format_job(self.jobscript,
+                                  job,
+                                  exec_job=exec_job,
+                                  **kwargs)
+        with open(jobscript, "w") as f:
+            print(content, file=f)
         os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR)
 
     def cluster_params(self, job):
@@ -518,7 +585,11 @@ class ClusterExecutor(RealExecutor):
     def handle_job_error(self, job):
         # TODO what about removing empty remote dirs?? This cannot be decided
         # on the cluster node.
-        super().handle_job_error(job)
+        super().handle_job_error(job, upload_remote=False)
+        if not self.assume_shared_fs:
+            logger.debug("Cleanup job metadata.")
+            # if no shared fs, we have to remove metadata here as well
+            self.workflow.persistence.cleanup(job)
 
 
 GenericClusterJob = namedtuple("GenericClusterJob", "job jobid callback error_callback jobscript jobfinished jobfailed")
@@ -527,6 +598,7 @@ GenericClusterJob = namedtuple("GenericClusterJob", "job jobid callback error_ca
 class GenericClusterExecutor(ClusterExecutor):
     def __init__(self, workflow, dag, cores,
                  submitcmd="qsub",
+                 statuscmd=None,
                  cluster_config=None,
                  jobname="snakejob.{rulename}.{jobid}.sh",
                  printreason=False,
@@ -534,8 +606,18 @@ class GenericClusterExecutor(ClusterExecutor):
                  printshellcmds=False,
                  latency_wait=3,
                  benchmark_repeats=1,
-                 max_jobs_per_second=None,
-                 restart_times=0):
+                 restart_times=0,
+                 assume_shared_fs=True,
+                 max_status_checks_per_second=1):
+
+        self.submitcmd = submitcmd
+        if not assume_shared_fs and statuscmd is None:
+            raise WorkflowError("When no shared filesystem can be assumed, a "
+                "status command must be given.")
+
+        self.statuscmd = statuscmd
+        self.external_jobid = dict()
+
         super().__init__(workflow, dag, cores,
                          jobname=jobname,
                          printreason=printreason,
@@ -544,18 +626,26 @@ class GenericClusterExecutor(ClusterExecutor):
                          latency_wait=latency_wait,
                          benchmark_repeats=benchmark_repeats,
                          cluster_config=cluster_config,
-                         max_jobs_per_second=max_jobs_per_second,
-                         restart_times=restart_times)
-        self.submitcmd = submitcmd
-        self.external_jobid = dict()
-        # TODO wrap with watch and touch {jobrunning}
-        # check modification date of {jobrunning} in the wait_for_job method
-        self.exec_job += ' && touch "{jobfinished}" || (touch "{jobfailed}"; exit 1)'
+                         restart_times=restart_times,
+                         assume_shared_fs=assume_shared_fs,
+                         max_status_checks_per_second=max_status_checks_per_second)
+
+        if assume_shared_fs:
+            # TODO wrap with watch and touch {jobrunning}
+            # check modification date of {jobrunning} in the wait_for_job method
+            self.exec_job += ' && touch "{jobfinished}" || (touch "{jobfailed}"; exit 1)'
+        else:
+            self.exec_job += ' && exit 0 || exit 1'
 
     def cancel(self):
         logger.info("Will exit after finishing currently running jobs.")
         self.shutdown()
 
+    def register_job(self, job):
+        # Do not register job here.
+        # Instead do it manually once the jobid is known.
+        pass
+
     def run(self, job,
             callback=None,
             submit_callback=None,
@@ -567,10 +657,30 @@ class GenericClusterExecutor(ClusterExecutor):
         jobscript = self.get_jobscript(job)
         jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
         jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
-        self.spawn_jobscript(job, jobscript,
+        self.write_jobscript(job, jobscript,
                              jobfinished=jobfinished,
                              jobfailed=jobfailed)
 
+        if self.statuscmd:
+            ext_jobid = self.dag.incomplete_external_jobid(job)
+            if ext_jobid:
+                # Job is incomplete and still running.
+                # We simply register it and wait for completion or failure.
+                logger.info(
+                    "Resume incomplete job {} with external jobid '{}'.".format(
+                    jobid, ext_jobid))
+                submit_callback(job)
+                with self.lock:
+                    self.active_jobs.append(
+                        GenericClusterJob(job,
+                                          ext_jobid,
+                                          callback,
+                                          error_callback,
+                                          jobscript,
+                                          jobfinished,
+                                          jobfailed))
+                return
+
         deps = " ".join(self.external_jobid[f] for f in job.input
                         if f in self.external_jobid)
         try:
@@ -593,35 +703,73 @@ class GenericClusterExecutor(ClusterExecutor):
         if ext_jobid and ext_jobid[0]:
             ext_jobid = ext_jobid[0]
             self.external_jobid.update((f, ext_jobid) for f in job.output)
-            logger.debug("Submitted job {} with external jobid {}.".format(
+            logger.info("Submitted job {} with external jobid '{}'.".format(
                 jobid, ext_jobid))
+            self.workflow.persistence.started(
+                job, external_jobid=ext_jobid)
 
         submit_callback(job)
+
         with self.lock:
             self.active_jobs.append(GenericClusterJob(job, ext_jobid, callback, error_callback, jobscript, jobfinished, jobfailed))
 
     def _wait_for_jobs(self):
+        #logger.debug("Setup rate limiter")
+        #status_rate_limiter = RateLimiter(
+        #    max_calls=self.max_status_checks_per_second,
+        #    period=1)
+        #logger.debug("Done setup rate limiter")
+        success = "success"
+        failed = "failed"
+        running = "running"
+        if self.statuscmd is not None:
+            def job_status(job):
+                try:
+                    # this command shall return "success", "failed" or "running"
+                    return subprocess.check_output(
+                        '{statuscmd} {jobid}'.format(jobid=job.jobid,
+                                                     statuscmd=self.statuscmd),
+                        shell=True).decode().split("\n")[0]
+                except subprocess.CalledProcessError as e:
+                    raise WorkflowError("Failed to obtain job status. "
+                                        "See above for error message.")
+        else:
+            def job_status(job):
+                if os.path.exists(job.jobfinished):
+                    os.remove(job.jobfinished)
+                    os.remove(job.jobscript)
+                    return success
+                if os.path.exists(job.jobfailed):
+                    os.remove(job.jobfailed)
+                    os.remove(job.jobscript)
+                    return failed
+                return running
+
         while True:
             with self.lock:
                 if not self.wait:
                     return
                 active_jobs = self.active_jobs
                 self.active_jobs = list()
-                for active_job in active_jobs:
-                    if os.path.exists(active_job.jobfinished):
-                        os.remove(active_job.jobfinished)
-                        os.remove(active_job.jobscript)
+                still_running = list()
+            logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
+            for active_job in active_jobs:
+                with self.status_rate_limiter:
+                    status = job_status(active_job)
+
+                    if status == success:
                         active_job.callback(active_job.job)
-                    elif os.path.exists(active_job.jobfailed):
-                        os.remove(active_job.jobfailed)
-                        os.remove(active_job.jobscript)
-                        self.print_job_error(active_job.job)
-                        print_exception(ClusterJobException(active_job, self.dag.jobid(active_job.job)),
-                                        self.workflow.linemaps)
+                    elif status == failed:
+                        self.print_job_error(
+                            active_job.job,
+                            cluster_jobid=active_job.jobid if active_job.jobid else "unknown",
+                        )
                         active_job.error_callback(active_job.job)
                     else:
-                        self.active_jobs.append(active_job)
-            time.sleep(1)
+                        still_running.append(active_job)
+            with self.lock:
+                self.active_jobs.extend(still_running)
+            time.sleep(10)
 
 
 SynchronousClusterJob = namedtuple("SynchronousClusterJob", "job jobid callback error_callback jobscript process")
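
The job_status() helper above also defines the contract for the new status command: it is invoked with the external job id and must print exactly one of "success", "failed" or "running". A hedged sketch of such a status script for a Slurm cluster (the sacct call and the state mapping are assumptions; adapt them to the scheduler at hand), presumably wired up through the matching --cluster-status option:

    #!/usr/bin/env python3
    # Sketch of a cluster status script: print "success", "failed" or "running"
    # for the external job id given as the first argument.
    import subprocess
    import sys

    jobid = sys.argv[1]
    out = subprocess.run(
        ["sacct", "-j", jobid, "--format=State", "--noheader", "--parsable2"],
        stdout=subprocess.PIPE, universal_newlines=True).stdout
    state = out.split("\n")[0].strip()

    if state.startswith("COMPLETED"):
        print("success")
    elif state in ("", "PENDING", "RUNNING", "CONFIGURING", "COMPLETING"):
        # no accounting record yet, or still in flight
        print("running")
    else:
        print("failed")
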
@@ -643,8 +791,8 @@ class SynchronousClusterExecutor(ClusterExecutor):
                  printshellcmds=False,
                  latency_wait=3,
                  benchmark_repeats=1,
-                 max_jobs_per_second=None,
-                 restart_times=0):
+                 restart_times=0,
+                 assume_shared_fs=True):
         super().__init__(workflow, dag, cores,
                          jobname=jobname,
                          printreason=printreason,
@@ -653,8 +801,9 @@ class SynchronousClusterExecutor(ClusterExecutor):
                          latency_wait=latency_wait,
                          benchmark_repeats=benchmark_repeats,
                          cluster_config=cluster_config,
-                         max_jobs_per_second=max_jobs_per_second,
-                         restart_times=restart_times)
+                         restart_times=restart_times,
+                         assume_shared_fs=assume_shared_fs,
+                         max_status_checks_per_second=10)
         self.submitcmd = submitcmd
         self.external_jobid = dict()
 
@@ -671,7 +820,7 @@ class SynchronousClusterExecutor(ClusterExecutor):
         jobid = self.dag.jobid(job)
 
         jobscript = self.get_jobscript(job)
-        self.spawn_jobscript(job, jobscript)
+        self.write_jobscript(job, jobscript)
 
         deps = " ".join(self.external_jobid[f] for f in job.input
                         if f in self.external_jobid)
@@ -697,11 +846,13 @@ class SynchronousClusterExecutor(ClusterExecutor):
                     return
                 active_jobs = self.active_jobs
                 self.active_jobs = list()
-                for active_job in active_jobs:
+                still_running = list()
+            for active_job in active_jobs:
+                with self.status_rate_limiter:
                     exitcode = active_job.process.poll()
                     if exitcode is None:
                         # job not yet finished
-                        self.active_jobs.append(active_job)
+                        still_running.append(active_job)
                     elif exitcode == 0:
                         # job finished successfully
                         os.remove(active_job.jobscript)
@@ -710,10 +861,14 @@ class SynchronousClusterExecutor(ClusterExecutor):
                         # job failed
                         os.remove(active_job.jobscript)
                         self.print_job_error(active_job.job)
-                        print_exception(ClusterJobException(active_job, self.dag.jobid(active_job.job)),
-                                        self.workflow.linemaps)
+                        print_exception(
+                            ClusterJobException(
+                                active_job, self.dag.jobid(active_job.job)),
+                            self.workflow.linemaps)
                         active_job.error_callback(active_job.job)
-            time.sleep(1)
+            with self.lock:
+                self.active_jobs.extend(still_running)
+            time.sleep(10)
 
 
 DRMAAClusterJob = namedtuple("DRMAAClusterJob", "job jobid callback error_callback jobscript")
@@ -730,8 +885,9 @@ class DRMAAExecutor(ClusterExecutor):
                  latency_wait=3,
                  benchmark_repeats=1,
                  cluster_config=None,
-                 max_jobs_per_second=None,
-                 restart_times=0):
+                 restart_times=0,
+                 assume_shared_fs=True,
+                 max_status_checks_per_second=1):
         super().__init__(workflow, dag, cores,
                          jobname=jobname,
                          printreason=printreason,
@@ -740,8 +896,9 @@ class DRMAAExecutor(ClusterExecutor):
                          latency_wait=latency_wait,
                          benchmark_repeats=benchmark_repeats,
                          cluster_config=cluster_config,
-                         max_jobs_per_second=max_jobs_per_second,
-                         restart_times=restart_times)
+                         restart_times=restart_times,
+                         assume_shared_fs=assume_shared_fs,
+                         max_status_checks_per_second=max_status_checks_per_second)
         try:
             import drmaa
         except ImportError:
@@ -773,7 +930,7 @@ class DRMAAExecutor(ClusterExecutor):
             error_callback=None):
         super()._run(job)
         jobscript = self.get_jobscript(job)
-        self.spawn_jobscript(job, jobscript)
+        self.write_jobscript(job, jobscript)
 
         try:
             drmaa_args = job.format_wildcards(
@@ -824,13 +981,15 @@ class DRMAAExecutor(ClusterExecutor):
                     return
                 active_jobs = self.active_jobs
                 self.active_jobs = list()
-                for active_job in active_jobs:
+                still_running = list()
+            for active_job in active_jobs:
+                with self.status_rate_limiter:
                     try:
                         retval = self.session.wait(active_job.jobid,
                                                    drmaa.Session.TIMEOUT_NO_WAIT)
                     except drmaa.ExitTimeoutException as e:
                         # job still active
-                        self.active_jobs.append(active_job)
+                        still_running.append(active_job)
                         continue
                     except (drmaa.InternalException, Exception) as e:
                         print_exception(WorkflowError("DRMAA Error: {}".format(e)),
@@ -848,7 +1007,9 @@ class DRMAAExecutor(ClusterExecutor):
                             ClusterJobException(active_job, self.dag.jobid(active_job.job)),
                             self.workflow.linemaps)
                         active_job.error_callback(active_job.job)
-            time.sleep(1)
+            with self.lock:
+                self.active_jobs.extend(still_running)
+            time.sleep(10)
 
 
 @contextlib.contextmanager
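
All three cluster executors now share the same polling shape: take the active jobs out of the shared list while holding the lock, query each job through self.status_rate_limiter, and re-append the still-running ones under the lock before sleeping. A stripped-down sketch of that loop, assuming the RateLimiter context manager from the ratelimiter package; the job handles and poll() below are placeholders:

    import threading
    import time

    from ratelimiter import RateLimiter

    lock = threading.Lock()
    active_jobs = ["job-1", "job-2"]                 # placeholder job handles
    status_rate_limiter = RateLimiter(max_calls=1, period=1)

    def poll(job):
        # placeholder; a real executor asks the scheduler or filesystem here
        return "running"

    def wait_for_jobs():
        while True:
            with lock:
                jobs = active_jobs[:]
                del active_jobs[:]
                still_running = []
            for job in jobs:
                with status_rate_limiter:
                    if poll(job) == "running":
                        still_running.append(job)
            with lock:
                active_jobs.extend(still_running)
            time.sleep(10)
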
@@ -866,9 +1027,220 @@ def change_working_directory(directory=None):
         yield
 
 
+KubernetesJob = namedtuple("KubernetesJob", "job jobid callback error_callback kubejob jobscript")
+
+
+class KubernetesExecutor(ClusterExecutor):
+    def __init__(self, workflow, dag, namespace, envvars,
+                 container_image=None,
+                 jobname="{rulename}.{jobid}",
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 latency_wait=3,
+                 benchmark_repeats=1,
+                 cluster_config=None,
+                 local_input=None,
+                 restart_times=None):
+
+        exec_job = (
+            'snakemake {target} --snakefile {snakefile} '
+            '--force -j{cores} --keep-target-files --keep-shadow --keep-remote '
+            '--latency-wait 0 '
+            '--benchmark-repeats {benchmark_repeats} --attempt {attempt} '
+            '--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} '
+            '{overwrite_config} {printshellcmds} --nocolor '
+            '--notemp --no-hooks --nolock ')
+
+        super().__init__(workflow, dag, None,
+                         jobname=jobname,
+                         printreason=printreason,
+                         quiet=quiet,
+                         printshellcmds=printshellcmds,
+                         latency_wait=latency_wait,
+                         benchmark_repeats=benchmark_repeats,
+                         cluster_config=cluster_config,
+                         local_input=local_input,
+                         restart_times=restart_times,
+                         exec_job=exec_job,
+                         assume_shared_fs=False,
+                         max_status_checks_per_second=10)
+        # use relative path to Snakefile
+        self.snakefile = os.path.relpath(workflow.snakefile)
+
+        from kubernetes import config
+        config.load_kube_config()
+
+        import kubernetes.client
+        self.kubeapi = kubernetes.client.CoreV1Api()
+        self.batchapi = kubernetes.client.BatchV1Api()
+        self.namespace = namespace
+        self.envvars = envvars
+        self.secret_files = {}
+        self.run_namespace = str(uuid.uuid4())
+        self.secret_envvars = {}
+        self.register_secret()
+        self.container_image = (
+            container_image or
+            "quay.io/snakemake/snakemake:{}".format(__version__))
+
+    def register_secret(self):
+        import kubernetes.client
+
+        secret = kubernetes.client.V1Secret()
+        secret.metadata = kubernetes.client.V1ObjectMeta()
+        # create a random uuid
+        secret.metadata.name = self.run_namespace
+        secret.type = "Opaque"
+        secret.data = {}
+        for i, f in enumerate(self.workflow.get_sources()):
+            if f.startswith(".."):
+                logger.warning("Ignoring source file {}. Only files relative "
+                               "to the working directory are allowed.".format(f))
+                continue
+            with open(f, "br") as content:
+                key = "f{}".format(i)
+                self.secret_files[key] = f
+                secret.data[key] = base64.b64encode(content.read()).decode()
+        for e in self.envvars:
+            try:
+                key = e.lower()
+                secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
+                self.secret_envvars[key] = e
+            except KeyError:
+                continue
+        self.kubeapi.create_namespaced_secret(self.namespace, secret)
+
+    def shutdown(self):
+        super().shutdown()
+
+    def cancel(self):
+        import kubernetes.client
+        body = kubernetes.client.V1DeleteOptions()
+        with self.lock:
+            for j in self.active_jobs:
+                self.kubeapi.delete_namespaced_pod(
+                    j.jobid, self.namespace, body)
+        self.shutdown()
+
+    def run(self, job,
+            callback=None,
+            submit_callback=None,
+            error_callback=None):
+        import kubernetes.client
+
+        super()._run(job)
+        exec_job = self.format_job(self.exec_job, job, _quote_all=True)
+        jobid = "snakejob-{}-{}".format(self.run_namespace, self.dag.jobid(job))
+
+        body = kubernetes.client.V1Pod()
+        body.metadata = kubernetes.client.V1ObjectMeta()
+        body.metadata.name = jobid
+
+        body.spec = kubernetes.client.V1PodSpec()
+        # fail on first error
+        body.spec.restart_policy = "Never"
+
+        # container
+        container = kubernetes.client.V1Container()
+        container.image = self.container_image
+        container.command = shlex.split(exec_job)
+        container.name = jobid
+        container.working_dir = "/workdir"
+        container.volume_mounts = [kubernetes.client.V1VolumeMount(
+            name="workdir", mount_path="/workdir")]
+        body.spec.containers = [container]
+
+        # source files
+        secret_volume = kubernetes.client.V1Volume()
+        secret_volume.name = "workdir"
+        secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
+        secret_volume.secret.secret_name = self.run_namespace
+        secret_volume.secret.items = [
+            kubernetes.client.V1KeyToPath(key=key, path=path)
+            for key, path in self.secret_files.items()
+        ]
+        body.spec.volumes = [secret_volume]
+
+        # env vars
+        container.env = []
+        for key, e in self.secret_envvars.items():
+            envvar = kubernetes.client.V1EnvVar(name=e)
+            envvar.value_from = kubernetes.client.V1EnvVarSource()
+            envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
+                key=key, name=self.run_namespace)
+            container.env.append(envvar)
+
+        # request resources
+        container.resources = kubernetes.client.V1ResourceRequirements()
+        container.resources.requests = {}
+        # Subtract 1 from the requested number of cores:
+        # Kubernetes needs some cycles for node maintenance, but not a full
+        # core, so requesting one core less lets the job saturate the node
+        # without oversubscribing it.
+        container.resources.requests["cpu"] = job.resources["_cores"] - 1
+        if "mem_mb" in job.resources:
+            container.resources.requests["memory"] = "{}M".format(
+                job.resources["mem_mb"])
+
+        # capabilities
+        if job.singularity_img and self.workflow.use_singularity:
+            # TODO this should work, but it doesn't currently because of
+            # missing loop devices
+            # singularity inside docker requires SYS_ADMIN capabilities
+            # see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
+            # container.capabilities = kubernetes.client.V1Capabilities()
+            # container.capabilities.add = ["SYS_ADMIN",
+            #                               "DAC_OVERRIDE",
+            #                               "SETUID",
+            #                               "SETGID",
+            #                               "SYS_CHROOT"]
+
+            # Running in privileged mode always works
+            container.security_context = kubernetes.client.V1SecurityContext(
+                privileged=True)
+
+        pod = self.kubeapi.create_namespaced_pod(self.namespace, body)
+        logger.info("Get status with:\n"
+                    "kubectl describe pod {jobid}\n"
+                    "kubectl logs {jobid}".format(jobid=jobid))
+        self.active_jobs.append(KubernetesJob(
+            job, jobid, callback, error_callback, pod, None))
+
+    def _wait_for_jobs(self):
+        while True:
+            with self.lock:
+                if not self.wait:
+                    return
+                active_jobs = self.active_jobs
+                self.active_jobs = list()
+                still_running = list()
+            for j in active_jobs:
+                with self.status_rate_limiter:
+                    res = self.kubeapi.read_namespaced_pod_status(
+                        j.jobid, self.namespace)
+                    if res.status.phase == "Failed":
+                        msg = ("For details, please issue:\n"
+                               "kubectl describe pod {jobid}\n"
+                               "kubectl logs {jobid}").format(jobid=j.jobid)
+                        # failed
+                        self.print_job_error(j.job, msg=msg, jobid=j.jobid)
+                        j.error_callback(j.job)
+                    elif res.status.phase == "Succeeded":
+                        # finished
+                        j.callback(j.job)
+                    else:
+                        # still active
+                        still_running.append(j)
+            with self.lock:
+                self.active_jobs.extend(still_running)
+            time.sleep(10)
+
+
 def run_wrapper(job_rule, input, output, params, wildcards, threads, resources, log,
-                benchmark, benchmark_repeats, conda_env, linemaps, debug=False,
-                shadow_dir=None):
+                benchmark, benchmark_repeats, conda_env, singularity_img,
+                singularity_args, linemaps, debug=False, shadow_dir=None):
     """
     Wrapper around the run method that handles exceptions and benchmarking.
 
@@ -908,19 +1280,22 @@ def run_wrapper(job_rule, input, output, params, wildcards, threads, resources,
                         # etc, as the child PID is available there.
                         bench_record = BenchmarkRecord()
                         run(input, output, params, wildcards, threads, resources,
-                            log, version, rule, conda_env, bench_record)
+                            log, version, rule, conda_env, singularity_img,
+                            singularity_args, bench_record)
                     else:
                         # The benchmarking is started here as we have a run section
                         # and the generated Python function is executed in this
                         # process' thread.
                         with benchmarked() as bench_record:
                             run(input, output, params, wildcards, threads, resources,
-                                log, version, rule, conda_env, bench_record)
+                                log, version, rule, conda_env, singularity_img,
+                                singularity_args, bench_record)
                     # Store benchmark record for this iteration
                     bench_records.append(bench_record)
             else:
                 run(input, output, params, wildcards, threads, resources,
-                    log, version, rule, conda_env, None)
+                    log, version, rule, conda_env, singularity_img,
+                    singularity_args, None)
     except (KeyboardInterrupt, SystemExit) as e:
         # Re-raise the keyboard interrupt in order to record an error in the
         # scheduler but ignore it
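
One practical note on the KubernetesExecutor above: pods are named snakejob-<run-uuid>-<jobid> and, as far as the code above goes, finished pods are left in place so that kubectl describe pod and kubectl logs keep working. A hedged sketch of cleaning them up afterwards with the same kubernetes client calls the executor uses (namespace and name prefix are assumptions):

    # Hedged cleanup sketch for leftover Snakemake pods; "default" and the
    # "snakejob-" prefix are assumptions based on the executor above.
    import kubernetes.client
    from kubernetes import config

    config.load_kube_config()
    kubeapi = kubernetes.client.CoreV1Api()
    namespace = "default"

    for pod in kubeapi.list_namespaced_pod(namespace).items:
        name = pod.metadata.name
        if name.startswith("snakejob-") and pod.status.phase in ("Succeeded", "Failed"):
            print("Deleting pod {} ({})".format(name, pod.status.phase))
            kubeapi.delete_namespaced_pod(
                name, namespace, body=kubernetes.client.V1DeleteOptions())
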
diff --git a/snakemake/io.py b/snakemake/io.py
index 53e4aaa..c561ee1 100644
--- a/snakemake/io.py
+++ b/snakemake/io.py
@@ -62,7 +62,25 @@ def lchmod(f, mode):
              follow_symlinks=os.chmod not in os.supports_follow_symlinks)
 
 
+class IOCache:
+    def __init__(self):
+        self.mtime = dict()
+        self.exists = dict()
+        self.size = dict()
+        self.active = True
+
+    def clear(self):
+        self.mtime.clear()
+        self.exists.clear()
+        self.size.clear()
+
+    def deactivate(self):
+        self.clear()
+        self.active = False
+
+
 def IOFile(file, rule=None):
+    assert rule is not None
     f = _IOFile(file)
     f.rule = rule
     return f
@@ -84,8 +102,25 @@ class _IOFile(str):
         obj.rule = None
         obj._regex = None
 
+        if obj.is_remote:
+            obj.remote_object._iofile = obj
+
         return obj
 
+    def iocache(func):
+        @functools.wraps(func)
+        def wrapper(self, *args, **kwargs):
+            if self.rule.workflow.iocache.active:
+                cache = getattr(self.rule.workflow.iocache, func.__name__)
+                if self in cache:
+                    return cache[self]
+                v = func(self, *args, **kwargs)
+                cache[self] = v
+                return v
+            else:
+                return func(self, *args, **kwargs)
+        return wrapper
+
     def _refer_to_remote(func):
         """
             A decorator so that if the file is remote and has a version
@@ -95,7 +130,6 @@ class _IOFile(str):
         @functools.wraps(func)
         def wrapper(self, *args, **kwargs):
             if self.is_remote:
-                self.update_remote_filepath()
                 if hasattr(self.remote_object, func.__name__):
                     return getattr(self.remote_object, func.__name__)(*args, **
                                                                       kwargs)
@@ -128,7 +162,6 @@ class _IOFile(str):
 
     @property
     def remote_object(self):
-        self.update_remote_filepath()
         return get_flag_value(self._file, "remote_object")
 
     @property
@@ -141,20 +174,30 @@ class _IOFile(str):
                              "may not be used directly.")
 
     def check(self):
+        hint = (
+            "It can also lead to inconsistent results of the file-matching "
+            "approach used by Snakemake."
+        )
         if self._file.startswith("./"):
             logger.warning("Relative file path '{}' starts with './'. This is redundant "
-                           "and strongly discouraged. It can also lead to "
-                           "inconsistent results of the file-matching approach "
-                           "used by Snakemake. You can simply omit the './' "
-                           "for relative file paths.".format(self._file))
+                           "and strongly discouraged. {} You can simply omit the './' "
+                           "for relative file paths.".format(self._file, hint))
         if self._file.startswith(" "):
-            logger.warning("File path '{}' starts with whitespace. This is likely unintended.")
+            logger.warning("File path '{}' starts with whitespace. "
+                "This is likely unintended. {}".format(self._file, hint))
         if self._file.endswith(" "):
-            logger.warning("File path '{}' ends with whitespace. This is likely unintended.")
+            logger.warning("File path '{}' ends with whitespace. "
+                "This is likely unintended. {}".format(self._file, hint))
         if "\n" in self._file:
-            logger.warning("File path '{}' contains line break. This is likely unintended.")
+            logger.warning("File path '{}' contains line break. "
+                "This is likely unintended. {}".format(self._file, hint))
+        if _double_slash_regex.search(self._file) is not None:
+            logger.warning("File path {} contains double '{}'. "
+                "This is likely unintended. {}".format(
+                    self._file, os.path.sep, hint))
 
     @property
+    @iocache
     @_refer_to_remote
     def exists(self):
         return self.exists_local
@@ -172,6 +215,7 @@ class _IOFile(str):
         return self.exists_local and not os.access(self.file, os.W_OK)
 
     @property
+    @iocache
     @_refer_to_remote
     def mtime(self):
         return self.mtime_local
@@ -186,6 +230,7 @@ class _IOFile(str):
         return getattr(self._file, "flags", {})
 
     @property
+    @iocache
     @_refer_to_remote
     def size(self):
         return self.size_local
@@ -220,6 +265,7 @@ class _IOFile(str):
             if not self.should_stay_on_remote:
                 logger.info("Downloading from remote: {}".format(self.file))
                 self.remote_object.download()
+                logger.info("Finished download.")
         else:
             raise RemoteFileException(
                 "The file to be downloaded does not seem to exist remotely.")
@@ -228,6 +274,7 @@ class _IOFile(str):
         if self.is_remote:
             logger.info("Uploading to remote: {}".format(self.file))
             self.remote_object.upload()
+            logger.info("Finished upload.")
 
     def prepare(self):
         path_until_wildcard = re.split(DYNAMIC_FILL, self.file)[0]
@@ -332,6 +379,7 @@ class _IOFile(str):
             if "remote_object" in self._file.flags:
                 self._file.flags['remote_object'] = copy.copy(
                     self._file.flags['remote_object'])
+                self.update_remote_filepath()
 
     def set_flags(self, flags):
         if isinstance(self._file, str):
@@ -346,6 +394,11 @@ class _IOFile(str):
         return self._file.__hash__()
 
 
+_double_slash_regex = (re.compile(r"([^:]//|^//)")
+                       if os.path.sep == "/"
+                       else re.compile(r"\\\\"))
+
+
 _wildcard_regex = re.compile(
     r"""
     \{
@@ -362,13 +415,18 @@ _wildcard_regex = re.compile(
     """, re.VERBOSE)
 
 
-def wait_for_files(files, latency_wait=3):
+def wait_for_files(files, latency_wait=3, force_stay_on_remote=False):
     """Wait for given files to be present in filesystem."""
     files = list(files)
-    get_missing = lambda: [
-        f for f in files if not
-        (f.exists_remote if (isinstance(f, _IOFile) and f.is_remote and f.should_stay_on_remote) else os.path.exists(f))
-    ]
+    def get_missing():
+        return [
+            f for f in files
+            if not (f.exists_remote
+                    if (isinstance(f, _IOFile) and
+                       f.is_remote and
+                       (force_stay_on_remote or f.should_stay_on_remote))
+                    else os.path.exists(f))]
+
     missing = get_missing()
     if missing:
         logger.info("Waiting at most {} seconds for missing files.".format(
@@ -572,6 +630,15 @@ def unpack(value):
     return flag(value, "unpack")
 
 
+def local(value):
+    """Mark a file as a local file. This disables the application of a default
+    remote provider.
+    """
+    if is_flagged(value, "remote"):
+        raise SyntaxError("Remote and local flags are mutually exclusive.")
+    return flag(value, "local")
+
+
 def expand(*args, **wildcards):
     """
     Expand wildcards in given filepatterns.
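
To illustrate the new local() flag: when a default remote provider is configured on the command line (e.g. via --default-remote-provider and --default-remote-prefix, both assumed here), local() exempts individual files from it. A hedged Snakefile sketch with placeholder file names; local is imported explicitly in case it is not auto-exposed in the Snakefile namespace:

    from snakemake.io import local

    rule summarize:
        input:
            "data/sample1.txt",                # routed through the default remote provider
            cfg=local("config/settings.yaml")  # explicitly kept on the local filesystem
        output:
            "results/summary.txt"
        shell:
            "cat {input} > {output}"
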
@@ -879,10 +946,11 @@ def load_configfile(configpath):
 
 
 class PeriodicityDetector:
-    def __init__(self, min_repeat=50, max_repeat=100):
+    def __init__(self, min_repeat=20, max_repeat=100):
         """
         Args:
-            max_len (int): The maximum length of the periodic substring.
+            max_repeat (int): The maximum number of repeats of the periodic substring.
+            min_repeat (int): The minimum number of repeats of the periodic substring.
         """
         self.regex = re.compile(
             "((?P<value>.+)(?P=value){{{min_repeat},{max_repeat}}})$".format(
diff --git a/snakemake/jobs.py b/snakemake/jobs.py
index 266abed..66d1f08 100644
--- a/snakemake/jobs.py
+++ b/snakemake/jobs.py
@@ -37,9 +37,9 @@ class Job:
                  "_format_wildcards", "input", "dependencies", "output",
                  "_params", "_log", "_benchmark", "_resources",
                  "_conda_env_file", "_conda_env", "shadow_dir", "_inputsize",
-                 "restart_times", "dynamic_output", "dynamic_input",
+                 "dynamic_output", "dynamic_input",
                  "temp_output", "protected_output", "touch_output",
-                 "subworkflow_input", "_hash"]
+                 "subworkflow_input", "_hash", "_attempt"]
 
     def __init__(self, rule, dag, wildcards_dict=None, format_wildcards=None):
         self.rule = rule
@@ -63,7 +63,7 @@ class Job:
         self.shadow_dir = None
         self._inputsize = None
 
-        self.restart_times = self.rule.restart_times
+        self._attempt = self.dag.workflow.attempt
 
         self.dynamic_output, self.dynamic_input = set(), set()
         self.temp_output, self.protected_output = set(), set()
@@ -94,7 +94,7 @@ class Job:
     def is_valid(self):
         """Check if job is valid"""
         # these properties have to work in dry-run as well. Hence we check them here:
-        resources = self.rule.expand_resources(self.wildcards_dict, self.input)
+        resources = self.rule.expand_resources(self.wildcards_dict, self.input, self.attempt)
         self.rule.expand_params(self.wildcards_dict, self.input, self.output, resources)
         self.rule.expand_benchmark(self.wildcards_dict)
         self.rule.expand_log(self.wildcards_dict)
@@ -136,10 +136,21 @@ class Job:
         return self._benchmark
 
     @property
+    def attempt(self):
+        return self._attempt
+
+    @attempt.setter
+    def attempt(self, attempt):
+        # reset resources
+        self._resources = None
+        self._attempt = attempt
+
+    @property
     def resources(self):
         if self._resources is None:
             self._resources = self.rule.expand_resources(self.wildcards_dict,
-                                                         self.input)
+                                                         self.input,
+                                                         self.attempt)
         return self._resources
 
     @property
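
The new attempt property is what lets resources depend on how often a job has been restarted: on a retry, _resources is reset and the callables are re-evaluated with the incremented attempt. A hedged Snakefile sketch (the wildcards/attempt lambda signature follows the documented pattern; the tool and numbers are made up), meant to be combined with a restart option such as --restart-times:

    rule assemble:
        input:
            "reads/{sample}.fq"
        output:
            "assembly/{sample}.fa"
        resources:
            mem_mb=lambda wildcards, attempt: attempt * 4000   # 4 GB, then 8 GB, ...
        shell:
            "assembler --mem {resources.mem_mb} {input} > {output}"
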
@@ -162,9 +173,13 @@ class Job:
             logger.debug("Accessing conda environment {}.".format(self._conda_env))
             if self._conda_env is None:
                 raise ValueError("Conda environment {} not found in DAG.".format(self.conda_env_file))
-            return self._conda_env.path
+            return self._conda_env
         return None
 
+    @property
+    def conda_env_path(self):
+        return self.conda_env.path if self.conda_env else None
+
     def archive_conda_env(self):
         """Archive a conda environment into a custom local channel."""
         if self.conda_env_file:
@@ -172,6 +187,20 @@ class Job:
         return None
 
     @property
+    def singularity_img_url(self):
+        return self.rule.singularity_img
+
+    @property
+    def singularity_img(self):
+        if self.singularity_img_url:
+            return self.dag.singularity_imgs[self.singularity_img_url]
+        return None
+
+    @property
+    def singularity_img_path(self):
+        return self.singularity_img.path if self.singularity_img else None
+
+    @property
     def is_shadow(self):
         return self.rule.shadow_depth is not None
 
@@ -418,7 +447,8 @@ class Job:
 
         for f in self.input:
             if f.is_remote:
-                if not f.exists_local and f.exists_remote:
+                if (not f.exists_local and f.exists_remote) and (
+                    not self.rule.norun or f.remote_object.keep_local):
                     toDownload.add(f)
 
         toDownload = toDownload | self.remote_input_newer_than_local
diff --git a/snakemake/logging.py b/snakemake/logging.py
index 79fef1a..5711b9e 100644
--- a/snakemake/logging.py
+++ b/snakemake/logging.py
@@ -6,12 +6,14 @@ __license__ = "MIT"
 import logging as _logging
 import platform
 import time
+import datetime
 import sys
 import os
 import json
 import threading
 import tempfile
 from functools import partial
+import inspect
 
 from snakemake.common import DYNAMIC_FILL
 from snakemake.common import Mode
@@ -69,9 +71,11 @@ class ColorizingStreamHandler(_logging.StreamHandler):
                 self.handleError(record)
 
     def decorate(self, record):
-        message = [record.message]
-        if self.timestamp:
-            message.insert(0, "[{}] ".format(time.asctime()))
+        message = record.message
+        if self.timestamp and message:
+            stamp = "[{}] {{}}".format(time.asctime()).format
+            message = "".join(map(stamp, message.splitlines(True)))
+        message = [message]
         if not self.nocolor and record.levelname in self.colors:
             message.insert(0, self.COLOR_SEQ %
                            (30 + self.colors[record.levelname]))
@@ -93,17 +97,19 @@ class Logger:
 
     def setup(self):
         # logfile output is done always
-        self.logfile_fd, self.logfile = tempfile.mkstemp(
-            prefix="",
-            suffix=".snakemake.log")
+        os.makedirs(os.path.join(".snakemake", "log"), exist_ok=True)
+        self.logfile = os.path.abspath(os.path.join(".snakemake",
+                                    "log",
+                                    datetime.datetime.now().isoformat()
+                                                           .replace(":", "") +
+                                    ".snakemake.log"))
+
         self.logfile_handler = _logging.FileHandler(self.logfile)
         self.logger.addHandler(self.logfile_handler)
 
     def cleanup(self):
         self.logger.removeHandler(self.logfile_handler)
         self.logfile_handler.close()
-        os.close(self.logfile_fd)
-        os.remove(self.logfile)
         self.log_handler = [self.text_handler]
 
     def get_logfile(self):
@@ -111,6 +117,10 @@ class Logger:
             self.logfile_handler.flush()
         return self.logfile
 
+    def remove_logfile(self):
+        self.logfile_handler.close()
+        os.remove(self.logfile)
+
     def handler(self, msg):
         for handler in self.log_handler:
             handler(msg)
@@ -124,6 +134,19 @@ class Logger:
     def set_level(self, level):
         self.logger.setLevel(level)
 
+    def logfile_hint(self):
+        logfile = os.path.relpath(self.get_logfile())
+        if logfile.startswith(".."):
+            # a relative path leaving the working directory is hard to read,
+            # so use the absolute path instead
+            logfile = self.get_logfile()
+        self.info("Complete log: {}".format(logfile))
+
+    def location(self, msg):
+        callerframerecord = inspect.stack()[1]
+        frame = callerframerecord[0]
+        info = inspect.getframeinfo(frame)
+        self.debug("{}: {info.filename}, {info.function}, {info.lineno}".format(msg, info=info))
+
     def info(self, msg):
         self.handler(dict(level="info", msg=msg))
 
@@ -149,6 +172,10 @@ class Logger:
         msg["level"] = "job_info"
         self.handler(msg)
 
+    def job_error(self, **msg):
+        msg["level"] = "job_error"
+        self.handler(msg)
+
     def dag_debug(self, msg):
         self.handler(dict(level="dag_debug", **msg))
 
@@ -224,6 +251,16 @@ class Logger:
             self.logger.info("")
 
             self.last_msg_was_job_info = True
+        elif level == "job_error":
+            self.logger.error("Error in rule {}:".format(msg["name"]))
+            self.logger.error("    jobid: {}".format(msg["jobid"]))
+            if msg["output"]:
+                self.logger.error("    output: {}".format(", ".join(msg["output"])))
+            if msg["log"]:
+                self.logger.error("    log: {}".format(", ".join(msg["log"])))
+            for item in msg["aux"].items():
+                self.logger.error("    {}: {}".format(*item))
+            self.logger.error("")
         else:
             if level == "info" and not self.quiet:
                 self.logger.warning(msg["msg"])
diff --git a/snakemake/parser.py b/snakemake/parser.py
index cf4784d..0deb86e 100644
--- a/snakemake/parser.py
+++ b/snakemake/parser.py
@@ -401,6 +401,9 @@ class Benchmark(RuleKeywordState):
 class Conda(RuleKeywordState):
     pass
 
+class Singularity(RuleKeywordState):
+    pass
+
 
 class WildcardConstraints(RuleKeywordState):
     @property
@@ -418,20 +421,28 @@ class Run(RuleKeywordState):
                          dedent=dedent,
                          root=root)
         self.rulename = rulename
+        self.content = 0
 
     def start(self):
         yield "@workflow.run"
         yield "\n"
         yield ("def __rule_{rulename}(input, output, params, wildcards, threads, "
-               "resources, log, version, rule, conda_env, bench_record):".format(
-                   rulename=self.rulename if self.rulename is not None else self.snakefile.rulecount))
+               "resources, log, version, rule, conda_env, singularity_img, "
+               "singularity_args, bench_record):".format(
+                   rulename=self.rulename
+                            if self.rulename is not None
+                            else self.snakefile.rulecount))
 
     def end(self):
         yield ""
 
+    def block_content(self, token):
+        self.content += 1
+        yield token.string, token
+
     def is_block_end(self, token):
-        return (self.line and self.was_indented and self.indent <= 0
-                ) or is_eof(token)
+        return (self.content and self.line
+                and self.indent <= 0) or is_eof(token)
 
 
 class AbstractCmd(Run):
@@ -522,7 +533,9 @@ class Script(AbstractCmd):
         yield ', "{}"'.format(
             os.path.abspath(os.path.dirname(self.snakefile.path)))
         # other args
-        yield ", input, output, params, wildcards, threads, resources, log, config, rule, conda_env, bench_record"
+        yield (", input, output, params, wildcards, threads, resources, log, "
+               "config, rule, conda_env, singularity_img, singularity_args, "
+               "bench_record")
 
 
 class Wrapper(Script):
@@ -530,7 +543,9 @@ class Wrapper(Script):
     end_func = "wrapper"
 
     def args(self):
-        yield ", input, output, params, wildcards, threads, resources, log, config, rule, conda_env, bench_record, workflow.wrapper_prefix"
+        yield (", input, output, params, wildcards, threads, resources, log, "
+               "config, rule, conda_env, singularity_img, singularity_args, "
+               "bench_record, workflow.wrapper_prefix")
 
 
 class Rule(GlobalKeywordState):
@@ -545,6 +560,7 @@ class Rule(GlobalKeywordState):
                        message=Message,
                        benchmark=Benchmark,
                        conda=Conda,
+                       singularity=Singularity,
                        wildcard_constraints=WildcardConstraints,
                        shadow=Shadow,
                        run=Run,
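
The Singularity keyword registered above backs a new per-rule singularity: directive. A hedged Snakefile sketch of the intended usage (image URL and command are placeholders), to be run together with --use-singularity:

    rule map_reads:
        input:
            "reads/{sample}.fq"
        output:
            "mapped/{sample}.bam"
        singularity:
            "docker://continuumio/miniconda3:4.3.27"
        shell:
            "run_mapper {input} > {output}"
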
diff --git a/snakemake/persistence.py b/snakemake/persistence.py
index bddb57b..3990d62 100644
--- a/snakemake/persistence.py
+++ b/snakemake/persistence.py
@@ -8,7 +8,8 @@ import shutil
 import signal
 import marshal
 import pickle
-from base64 import urlsafe_b64encode
+import json
+from base64 import urlsafe_b64encode, b64encode
 from functools import lru_cache, partial
 from itertools import filterfalse, count
 
@@ -18,7 +19,12 @@ from snakemake.utils import listfiles
 
 
 class Persistence:
-    def __init__(self, nolock=False, dag=None, conda_prefix=None, warn_only=False):
+    def __init__(self,
+                 nolock=False,
+                 dag=None,
+                 conda_prefix=None,
+                 singularity_prefix=None,
+                 warn_only=False):
         self.path = os.path.abspath(".snakemake")
         if not os.path.exists(self.path):
             os.mkdir(self.path)
@@ -29,29 +35,27 @@ class Persistence:
         self.dag = dag
         self._lockfile = dict()
 
-        self._incomplete_path = os.path.join(self.path, "incomplete_files")
-        self._version_path = os.path.join(self.path, "version_tracking")
-        self._code_path = os.path.join(self.path, "code_tracking")
-        self._rule_path = os.path.join(self.path, "rule_tracking")
-        self._input_path = os.path.join(self.path, "input_tracking")
-        self._log_path = os.path.join(self.path, "log_tracking")
-        self._params_path = os.path.join(self.path, "params_tracking")
-        self._shellcmd_path = os.path.join(self.path, "shellcmd_tracking")
+        self._metadata_path = os.path.join(self.path, "metadata")
+
         self.shadow_path = os.path.join(self.path, "shadow")
         self.conda_env_archive_path = os.path.join(self.path, "conda-archive")
 
-        for d in (self._incomplete_path, self._version_path, self._code_path,
-                  self._rule_path, self._input_path, self._log_path, self._params_path,
-                  self._shellcmd_path, self.shadow_path, self.conda_env_archive_path):
-            if not os.path.exists(d):
-                os.mkdir(d)
+        for d in (self._metadata_path, self.shadow_path, self.conda_env_archive_path):
+            os.makedirs(d, exist_ok=True)
 
         if conda_prefix is None:
             self.conda_env_path = os.path.join(self.path, "conda")
         else:
-            self.conda_env_path = os.path.abspath(os.path.expanduser(conda_prefix))
+            self.conda_env_path = os.path.abspath(
+                os.path.expanduser(conda_prefix))
+        if singularity_prefix is None:
+            self.singularity_img_path = os.path.join(self.path, "singularity")
+        else:
+            self.singularity_img_path = os.path.abspath(
+                os.path.expanduser(singularity_prefix))
 
         os.makedirs(self.conda_env_path, exist_ok=True)
+        os.makedirs(self.singularity_img_path, exist_ok=True)
 
         if nolock:
             self.lock = self.noop
@@ -60,6 +64,8 @@ class Persistence:
             self.lock = self.lock_warn_only
             self.unlock = self.noop
 
+        self._read_record = self._read_record_cached
+
     @property
     def files(self):
         if self._files is None:
@@ -114,23 +120,19 @@ class Persistence:
         shutil.rmtree(self._lockdir)
 
     def cleanup_metadata(self, path):
-        self._delete_record(self._incomplete_path, path)
-        self._delete_record(self._version_path, path)
-        self._delete_record(self._code_path, path)
-        self._delete_record(self._rule_path, path)
-        self._delete_record(self._input_path, path)
-        self._delete_record(self._log_path, path)
-        self._delete_record(self._params_path, path)
-        self._delete_record(self._shellcmd_path, path)
+        self._delete_record(self._metadata_path, path)
 
     def cleanup_shadow(self):
         if os.path.exists(self.shadow_path):
             shutil.rmtree(self.shadow_path)
             os.mkdir(self.shadow_path)
 
-    def started(self, job):
+    def started(self, job, external_jobid=None):
         for f in job.output:
-            self._record(self._incomplete_path, "", f)
+            self._record(self._metadata_path, {
+                             "incomplete": True,
+                             "external_jobid": external_jobid
+                         }, f)
 
     def finished(self, job):
         version = str(
@@ -139,85 +141,88 @@ class Persistence:
         input = self._input(job)
         log = self._log(job)
         params = self._params(job)
-        shellcmd = self._shellcmd(job)
+        shellcmd = job.shellcmd
         for f in job.expanded_output:
-            self._delete_record(self._incomplete_path, f)
-            self._record(self._version_path, version, f)
-            self._record(self._code_path, code, f, bin=True)
-            self._record(self._rule_path, job.rule.name, f)
-            self._record(self._input_path, input, f)
-            self._record(self._log_path, log, f)
-            self._record(self._params_path, params, f)
-            self._record(self._shellcmd_path, shellcmd, f)
+            self._record(self._metadata_path, {
+                "version": version,
+                "code": code,
+                "rule": job.rule.name,
+                "input": input,
+                "log": log,
+                "params": params,
+                "shellcmd": shellcmd,
+                "incomplete": False
+            }, f)
 
     def cleanup(self, job):
         for f in job.expanded_output:
-            self._delete_record(self._incomplete_path, f)
-            self._delete_record(self._version_path, f)
-            self._delete_record(self._code_path, f)
-            self._delete_record(self._rule_path, f)
-            self._delete_record(self._input_path, f)
-            self._delete_record(self._log_path, f)
-            self._delete_record(self._params_path, f)
-            self._delete_record(self._shellcmd_path, f)
+            self._delete_record(self._metadata_path, f)
 
     def incomplete(self, job):
-        marked_incomplete = partial(self._exists_record, self._incomplete_path)
+        def marked_incomplete(f):
+            return self._read_record(self._metadata_path, f).get("incomplete", False)
+
         return any(
             map(lambda f: f.exists and marked_incomplete(f), job.output))
 
+    def external_jobids(self, job):
+        return list(set(
+            self._read_record(self._metadata_path, f)
+                .get("external_jobid", None)
+            for f in job.output))
+
     def version(self, path):
-        return self._read_record(self._version_path, path)
+        return self._read_record(self._metadata_path, path).get("version")
 
     def rule(self, path):
-        return self._read_record(self._rule_path, path)
+        return self._read_record(self._metadata_path, path).get("rule")
 
     def input(self, path):
-        files = self._read_record(self._input_path, path)
-        if files is not None:
-            return files.split("\n")
-        return None
+        return self._read_record(self._metadata_path, path).get("input")
 
     def log(self, path):
-        files = self._read_record(self._log_path, path)
-        if files is not None:
-            return files.split("\n")
-        return None
+        return self._read_record(self._metadata_path, path).get("log")
 
     def shellcmd(self, path):
-        return self._read_record(self._shellcmd_path, path)
+        return self._read_record(self._metadata_path, path).get("shellcmd")
+
+    def params(self, path):
+        return self._read_record(self._metadata_path, path).get("params")
+
+    def code(self, path):
+        return self._read_record(self._metadata_path, path).get("code")
 
     def version_changed(self, job, file=None):
-        cr = partial(self._changed_records, self._version_path,
-                     job.rule.version)
-        if file is None:
-            return cr(*job.output)
-        else:
-            return bool(list(cr(file)))
+        """Yields output files with changed versions, or a bool if a file is given."""
+        return _bool_or_gen(self._version_changed, job, file=file)
 
     def code_changed(self, job, file=None):
-        cr = partial(self._changed_records, self._code_path,
-                     self._code(job.rule),
-                     bin=True)
-        if file is None:
-            return cr(*job.output)
-        else:
-            return bool(list(cr(file)))
+        """Yields output files with changed code, or a bool if a file is given."""
+        return _bool_or_gen(self._code_changed, job, file=file)
 
     def input_changed(self, job, file=None):
-        cr = partial(self._changed_records, self._input_path, self._input(job))
-        if file is None:
-            return cr(*job.output)
-        else:
-            return bool(list(cr(file)))
+        """Yields output files with changed input, or a bool if a file is given."""
+        return _bool_or_gen(self._input_changed, job, file=file)
 
     def params_changed(self, job, file=None):
-        cr = partial(self._changed_records, self._params_path,
-                     self._params(job))
-        if file is None:
-            return cr(*job.output)
-        else:
-            return bool(list(cr(file)))
+        """Yields output files with changed params, or a bool if a file is given."""
+        return _bool_or_gen(self._params_changed, job, file=file)
+
+    def _version_changed(self, job, file=None):
+        assert file is not None
+        return self.version(file) != job.rule.version
+
+    def _code_changed(self, job, file=None):
+        assert file is not None
+        return self.code(file) != self._code(job.rule)
+
+    def _input_changed(self, job, file=None):
+        assert file is not None
+        return self.input(file) != self._input(job)
+
+    def _params_changed(self, job, file=None):
+        assert file is not None
+        return self.params(file) != self._params(job)
 
     def noop(self, *args):
         pass
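
With this change, all provenance for an output file lives in a single JSON record under .snakemake/metadata (one file per output, named after its base64-encoded path) instead of eight parallel tracking directories. A sketch of such a record and of reading it back with plain json; the keys mirror the dict written by finished(), the values are made up:

    import json

    record = {
        "version": "1.0",
        "code": "<base64 of the pickled run code>",
        "rule": "assemble",
        "input": ["reads/sample1.fq"],
        "log": ["logs/assemble/sample1.log"],
        "params": ["'--careful'"],
        "shellcmd": "assembler reads/sample1.fq > assembly/sample1.fa",
        "incomplete": False,
    }

    with open("example_record.json", "w") as f:
        json.dump(record, f)

    with open("example_record.json") as f:
        loaded = json.load(f)

    print(loaded.get("rule"), loaded.get("incomplete"))
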
@@ -228,37 +233,29 @@ class Persistence:
     @lru_cache()
     def _code(self, rule):
         code = rule.run_func.__code__
-        return pickle_code(code)
+        return b64encode(pickle_code(code)).decode()
 
     @lru_cache()
     def _input(self, job):
-        return "\n".join(sorted(job.input))
+        return sorted(job.input)
 
     @lru_cache()
     def _log(self, job):
-        return "\n".join(sorted(job.log))
+        return sorted(job.log)
 
     @lru_cache()
     def _params(self, job):
-        return "\n".join(sorted(map(repr, job.params)))
+        return sorted(map(repr, job.params))
 
     @lru_cache()
     def _output(self, job):
         return sorted(job.output)
 
-    @lru_cache()
-    def _shellcmd(self, job):
-        return job.shellcmd
-
-    def _record(self, subject, value, id, bin=False):
+    def _record(self, subject, json_value, id):
         recpath = self._record_path(subject, id)
-        if value is not None:
-            os.makedirs(os.path.dirname(recpath), exist_ok=True)
-            with open(recpath, "wb" if bin else "w") as f:
-                f.write(value)
-        else:
-            if os.path.exists(recpath):
-                os.remove(recpath)
+        os.makedirs(os.path.dirname(recpath), exist_ok=True)
+        with open(recpath, "w") as f:
+            json.dump(json_value, f)
 
     def _delete_record(self, subject, id):
         try:
@@ -271,20 +268,15 @@ class Persistence:
             if e.errno != 2:  # not missing
                 raise e
 
-    def _read_record(self, subject, id, bin=False):
-        if not self._exists_record(subject, id):
-            return None
-        with open(self._record_path(subject, id), "rb" if bin else "r") as f:
-            return f.read()
-
-    def _changed_records(self, subject, value, *ids, bin=False):
-        equals = partial(self._equals_record, subject, value, bin=bin)
-        return filter(
-            lambda id: self._exists_record(subject, id) and not equals(id),
-            ids)
+    @lru_cache()
+    def _read_record_cached(self, subject, id):
+        return self._read_record_uncached(subject, id)
 
-    def _equals_record(self, subject, value, id, bin=False):
-        return self._read_record(subject, id, bin=bin) == value
+    def _read_record_uncached(self, subject, id):
+        if not self._exists_record(subject, id):
+            return dict()
+        with open(self._record_path(subject, id), "r") as f:
+            return json.load(f)
 
     def _exists_record(self, subject, id):
         return os.path.exists(self._record_path(subject, id))
@@ -308,6 +300,9 @@ class Persistence:
         max_len = os.pathconf(
             subject,
             "PC_NAME_MAX") if os.name == "posix" else 255  # maximum NTFS and FAT32 filename length
+        if max_len == 0:
+            max_len = 255
+
         b64id = self._b64id(id)
         # split into chunks of proper length
         b64id = [b64id[i:i + max_len - 1]
@@ -325,6 +320,17 @@ class Persistence:
         # we consider all input files, also of not running jobs
         return jobfiles(self.dag.jobs, "input")
 
+    def deactivate_cache(self):
+        self._read_record_cached.cache_clear()
+        self._read_record = self._read_record_uncached
+
+
+def _bool_or_gen(func, job, file=None):
+    if file is None:
+        return (f for f in job.expanded_output if func(job, file=f))
+    else:
+        return func(job, file=file)
+
 
 def pickle_code(code):
     consts = [(pickle_code(const) if type(const) == type(code) else const)
diff --git a/snakemake/remote/FTP.py b/snakemake/remote/FTP.py
index 2826952..3aba6b2 100644
--- a/snakemake/remote/FTP.py
+++ b/snakemake/remote/FTP.py
@@ -24,6 +24,9 @@ except ImportError as e:
 
 
 class RemoteProvider(AbstractRemoteProvider):
+
+    supports_default = True
+
     def __init__(self, *args, stay_on_remote=False, immediate_close=False, **kwargs):
         super(RemoteProvider, self).__init__(*args, stay_on_remote=stay_on_remote, **kwargs)
 
diff --git a/snakemake/remote/GS.py b/snakemake/remote/GS.py
index 99ac049..2865ebc 100644
--- a/snakemake/remote/GS.py
+++ b/snakemake/remote/GS.py
@@ -1,23 +1,133 @@
-__author__ = "Christopher Tomkins-Tinch"
-__copyright__ = "Copyright 2015, Christopher Tomkins-Tinch"
-__email__ = "tomkinsc at broadinstitute.org"
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2017, Johannes Köster"
+__email__ = "johannes.koester at tu-dortmund.de"
 __license__ = "MIT"
 
-# module-specific
-from snakemake.remote.S3 import RemoteObject, RemoteProvider as S3RemoteProvider
+import os
+import re
 
+from snakemake.remote import AbstractRemoteObject, AbstractRemoteProvider
+from snakemake.exceptions import WorkflowError
+from snakemake.common import lazy_property
+
+try:
+    import google.cloud
+    from google.cloud import storage
+except ImportError as e:
+    raise WorkflowError("The Python 3 package 'google-cloud-sdk' "
+        "needs to be installed to use GS remote() file functionality. %s" % e.msg)
+
+
+class RemoteProvider(AbstractRemoteProvider):
+
+    supports_default = True
 
-class RemoteProvider(S3RemoteProvider):
     def __init__(self, *args, stay_on_remote=False, **kwargs):
-        kwargs["host"] = "storage.googleapis.com"
         super(RemoteProvider, self).__init__(*args, stay_on_remote=stay_on_remote, **kwargs)
 
+        self.client = storage.Client(*args, **kwargs)
+
+    def remote_interface(self):
+        return self.client
+
     @property
     def default_protocol(self):
         """The protocol that is prepended to the path when no protocol is specified."""
-        return 'gs://'
+        return "gs://"
 
     @property
     def available_protocols(self):
         """List of valid protocols for this remote provider."""
-        return ['s3://', 'gs://']
+        return ["gs://"]
+
+
+class RemoteObject(AbstractRemoteObject):
+    def __init__(self, *args, keep_local=False, provider=None, **kwargs):
+        super(RemoteObject, self).__init__(*args, keep_local=keep_local, provider=provider, **kwargs)
+
+        if provider:
+            self.client = provider.remote_interface()
+        else:
+            self.client = storage.Client(*args, **kwargs)
+
+        self._key = None
+        self._bucket_name = None
+        self._bucket = None
+        self._blob = None
+
+    # === Implementations of abstract class members ===
+
+    def exists(self):
+        return self.blob.exists()
+
+    def mtime(self):
+        if self.exists():
+            self.update_blob()
+            t = self.blob.updated
+            return t.timestamp()
+        else:
+            raise WorkflowError("The file does not seem to exist remotely: %s" % self.local_file())
+
+    def size(self):
+        if self.exists():
+            self.update_blob()
+            return self.blob.size // 1024
+        else:
+            return self._iofile.size_local
+
+    def download(self):
+        if self.exists():
+            os.makedirs(os.path.dirname(self.local_file()), exist_ok=True)
+            self.blob.download_to_filename(self.local_file())
+            os.sync()
+            return self.local_file()
+        return None
+
+    def upload(self):
+        try:
+            if not self.bucket.exists():
+                self.bucket.create()
+                self.update_blob()
+            self.blob.upload_from_filename(self.local_file())
+        except google.cloud.exceptions.Forbidden as e:
+            raise WorkflowError(e,
+                "When running locally, make sure that you are authenticated "
+                "via gcloud (see Snakemake documentation). When running in a "
+                "kubernetes cluster, make sure that storage-rw is added to "
+                "--scopes (see Snakemake documentation).")
+
+    @property
+    def name(self):
+        return self.key
+
+    @property
+    def list(self):
+        return [k.name for k in self.bucket.list_blobs()]
+
+    # ========= Helpers ===============
+
+    def update_blob(self):
+        self._blob = self.bucket.get_blob(self.key)
+
+    @lazy_property
+    def bucket(self):
+        return self.client.bucket(self.bucket_name)
+
+    @lazy_property
+    def blob(self):
+        return self.bucket.blob(self.key)
+
+    @lazy_property
+    def bucket_name(self):
+        return self.parse().group("bucket")
+
+    @lazy_property
+    def key(self):
+        return self.parse().group("key")
+
+    def parse(self):
+        m = re.search("(?P<bucket>[^/]*)/(?P<key>.*)", self.local_file())
+        if m is None:
+            raise WorkflowError("GS remote file {} does not have the form "
+                "<bucket>/<key>.".format(self.local_file()))
+        return m
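The GS provider now talks to Google Cloud Storage directly via the google-cloud-storage client instead of going through the S3 code path. A minimal Snakefile sketch, assuming a hypothetical bucket and an environment that is already authenticated via gcloud (or the storage-rw scope on kubernetes, as the upload() error message above notes):

    from snakemake.remote.GS import RemoteProvider as GSRemoteProvider

    GS = GSRemoteProvider()

    rule all:
        input:
            # remote paths take the form <bucket>/<key>, as enforced by parse()
            GS.remote("my-bucket/data/sample.txt")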
diff --git a/snakemake/remote/HTTP.py b/snakemake/remote/HTTP.py
index c084c5d..dc622b8 100644
--- a/snakemake/remote/HTTP.py
+++ b/snakemake/remote/HTTP.py
@@ -12,6 +12,8 @@ from contextlib import contextmanager
 # module-specific
 from snakemake.remote import AbstractRemoteProvider, DomainObject
 from snakemake.exceptions import HTTPFileException, WorkflowError
+from snakemake.logging import logger
+
 
 try:
     # third-party modules
@@ -128,6 +130,7 @@ class RemoteObject(DomainObject):
             with self.httpr(verb="HEAD") as httpr:
 
                 file_mtime = self.get_header_item(httpr, "last-modified", default=0)
+                logger.debug("HTTP mtime: {}".format(file_mtime))
 
                 modified_tuple = email.utils.parsedate_tz(file_mtime)
                 epochTime = email.utils.mktime_tz(modified_tuple)
diff --git a/snakemake/remote/NCBI.py b/snakemake/remote/NCBI.py
index 989b73d..8c461d1 100644
--- a/snakemake/remote/NCBI.py
+++ b/snakemake/remote/NCBI.py
@@ -60,7 +60,7 @@ class RemoteObject(AbstractRemoteObject):
             raise NCBIFileException("DB specified is not valid. Options include: {dbs}".format(dbs=", ".join(self._ncbi.valid_dbs)))
         else:
             self.db = db
-        
+
         self.rettype = rettype
         self.retmode = retmode
         self.kwargs  = kwargs
@@ -99,7 +99,7 @@ class RemoteObject(AbstractRemoteObject):
     @property
     def list(self):
         raise NCBIFileException("The NCBI Remote Provider does not currently support list-based operations like glob_wildcards().")
-  
+
     @property
     def accession(self):
         accession, version, file_ext = self._ncbi.parse_accession_str(self.local_file())
@@ -357,7 +357,7 @@ class NCBIHelper(object):
             This tries to match an NCBI accession as defined here:
                 http://www.ncbi.nlm.nih.gov/Sequin/acc.html
         '''
-        m = re.search( r"(?P<accession>(?:[a-zA-Z]{1,6}|NC_|NM_|NR_)\d{1,10})(?:\.(?P<version>\d+))?(?:\.(?P<file_ext>\S+))?.*", id_str )
+        m = re.search( r"(?P<accession>(?:[a-zA-Z]{1,6}|NW_|NC_|NM_|NR_)\d{1,10})(?:\.(?P<version>\d+))?(?:\.(?P<file_ext>\S+))?.*", id_str )
         accession, version, file_ext = ("","","")
         if m:
             accession = m.group("accession")
diff --git a/snakemake/remote/S3.py b/snakemake/remote/S3.py
index c935c27..4f14818 100644
--- a/snakemake/remote/S3.py
+++ b/snakemake/remote/S3.py
@@ -7,7 +7,6 @@ __license__ = "MIT"
 import os
 import re
 import math
-import email.utils
 import functools
 import concurrent.futures
 
@@ -17,12 +16,11 @@ from snakemake.exceptions import WorkflowError, S3FileException
 
 try:
     # third-party modules
-    import boto
-    from boto.s3.key import Key
-    from filechunkio import FileChunkIO
+    import boto3
+    import botocore
 except ImportError as e:
-    raise WorkflowError("The Python 3 packages 'boto' and 'filechunkio' " +
-        "need to be installed to use S3 remote() file functionality. %s" % e.msg)
+    raise WorkflowError("The Python 3 package 'boto3' "
+        "needs to be installed to use S3 remote() file functionality. %s" % e.msg)
 
 
 class RemoteProvider(AbstractRemoteProvider):
@@ -85,14 +83,11 @@ class RemoteObject(AbstractRemoteObject):
         os.sync() # ensure flush to disk
 
     def upload(self):
-        if self.size() > 10 * 1024 * 1024: # S3 complains if multipart uploads are <10MB
-            self._s3c.upload_to_s3_multipart(self.s3_bucket, self.local_file(), self.s3_key, encrypt_key=self.kwargs.get("encrypt_key", None))
-        else:
-            self._s3c.upload_to_s3(self.s3_bucket, self.local_file(), self.s3_key, encrypt_key=self.kwargs.get("encrypt_key", None))
+        self._s3c.upload_to_s3(self.s3_bucket, self.local_file(), self.s3_key, extra_args=self.kwargs.get("ExtraArgs", None), config=self.kwargs.get("Config", None))
 
     @property
     def list(self):
-        return [k.name for k in self._s3c.list_keys(self.s3_bucket)]
+        return self._s3c.list_keys(self.s3_bucket)
 
     # === Related methods ===
 
@@ -143,7 +138,14 @@ class S3Helper(object):
         if "secret_access_key" in kwargs:
             kwargs["aws_secret_access_key"] = kwargs.pop("secret_access_key")
 
-        self.conn = boto.connect_s3(*args, **kwargs)
+        self.s3 = boto3.resource('s3', **kwargs)
+
+    def bucket_exists(self, bucket_name):
+        try:
+            self.s3.meta.client.head_bucket(Bucket=bucket_name)
+            return True
+        except botocore.exceptions.ClientError:
+            return False
 
     def upload_to_s3(
             self,
@@ -152,10 +154,8 @@ class S3Helper(object):
             key=None,
             use_relative_path_for_key=True,
             relative_start_dir=None,
-            replace=False,
-            reduced_redundancy=False,
-            headers=None,
-            encrypt_key=False):
+            extra_args=None,
+            config=None):
         """ Upload a file to S3
 
             This function uploads a file to an AWS S3 bucket.
@@ -169,9 +169,6 @@ class S3Helper(object):
                     representing the path of the file relative to the CWD. If False only the
                     file basename will be used for the key.
                 relative_start_dir: The start dir to use for use_relative_path_for_key. No effect if key is set.
-                replace: If True a file with the same key will be replaced with the one being written
-                reduced_redundancy: Sets the file to AWS reduced redundancy storage.
-                headers: additional heads to pass to AWS
 
             Returns: The key of the file on S3 if written, None otherwise
         """
@@ -181,16 +178,10 @@ class S3Helper(object):
         assert os.path.exists(file_path), "The file path specified does not exist: %s" % file_path
         assert os.path.isfile(file_path), "The file path specified does not appear to be a file: %s" % file_path
 
-        try:
-            b = self.conn.get_bucket(bucket_name)
-        except:
-            b = self.conn.create_bucket(bucket_name)
+        if not self.bucket_exists(bucket_name):
+            self.s3.create_bucket(Bucket=bucket_name)
 
-        k = Key(b)
-
-        if key:
-            k.key = key
-        else:
+        if not key:
             if use_relative_path_for_key:
                 if relative_start_dir:
                     path_key = os.path.relpath(file_path, relative_start_dir)
@@ -198,19 +189,14 @@ class S3Helper(object):
                     path_key = os.path.relpath(file_path)
             else:
                 path_key = os.path.basename(file_path)
-            k.key = path_key
+            key = path_key
+
+        k = self.s3.Object(bucket_name, key)
+
         try:
-            bytes_written = k.set_contents_from_filename(
-                file_path,
-                replace=replace,
-                reduced_redundancy=reduced_redundancy,
-                headers=headers,
-                encrypt_key=encrypt_key)
-            if bytes_written:
-                return k.key
-            else:
-                return None
+            k.upload_file(file_path, ExtraArgs=extra_args, Config=config)
         except:
+            raise
             return None
 
     def download_from_s3(
@@ -220,7 +206,7 @@ class S3Helper(object):
             destination_path=None,
             expandKeyIntoDirs=True,
             make_dest_dirs=True,
-            headers=None, create_stub_only=False):
+            create_stub_only=False):
         """ Download a file from s3
 
             This function downloads an object from a specified AWS S3 bucket.
@@ -234,7 +220,6 @@ class S3Helper(object):
                     following the last slash.
                 make_dest_dirs: If this is True (default) and the destination path includes directories
                     that do not exist, they will be created.
-                headers: Additional headers to pass to AWS
 
             Returns:
                 The destination path of the downloaded file on the receiving end, or None if the destination_path
@@ -243,8 +228,7 @@ class S3Helper(object):
         assert bucket_name, "bucket_name must be specified"
         assert key, "Key must be specified"
 
-        b = self.conn.get_bucket(bucket_name)
-        k = Key(b)
+        b = self.s3.Bucket(bucket_name)
 
         if destination_path:
             destination_path = os.path.realpath(os.path.expanduser(destination_path))
@@ -258,121 +242,20 @@ class S3Helper(object):
         if make_dest_dirs:
             os.makedirs(os.path.dirname(destination_path), exist_ok=True)
 
-        k.key = key if key else os.path.basename(destination_path)
+        k = self.s3.Object(bucket_name, key)
 
         try:
             if not create_stub_only:
-                k.get_contents_to_filename(destination_path, headers=headers)
+                k.download_file(destination_path)
             else:
                 # just create an empty file with the right timestamps
                 with open(destination_path, 'wb') as fp:
-                    modified_tuple = email.utils.parsedate_tz(k.last_modified)
-                    modified_stamp = int(email.utils.mktime_tz(modified_tuple))
-                    os.utime(fp.name, (modified_stamp, modified_stamp))
+                    os.utime(fp.name, (k.last_modified.timestamp(), k.last_modified.timestamp()))
             return destination_path
         except:
             return None
 
-    def _upload_part(self, bucket_name, multipart_id, part_num, source_path, offset, bytes_to_write, number_of_retries=5):
-
-        def _upload(retries_remaining=number_of_retries):
-            try:
-                b = self.conn.get_bucket(bucket_name)
-                for mp in b.get_all_multipart_uploads():
-                    if mp.id == multipart_id:
-                        with FileChunkIO(source_path, 'r', offset=offset, bytes=bytes_to_write) as fp:
-                            mp.upload_part_from_file(fp=fp, part_num=part_num)
-                        break
-            except Exception() as e:
-                if retries_remaining:
-                    _upload(retries_remaining=retries_remaining - 1)
-                else:
-                    raise e
-
-        _upload()
-
-    def upload_to_s3_multipart(
-            self,
-            bucket_name,
-            file_path,
-            key=None,
-            use_relative_path_for_key=True,
-            relative_start_dir=None,
-            replace=False,
-            reduced_redundancy=False,
-            headers=None,
-            parallel_processes=4,
-            encrypt_key=False):
-        """ Upload a file to S3
-
-            This function uploads a file to an AWS S3 bucket.
-
-            Args:
-                bucket_name: the name of the S3 bucket to use (bucket name only, not ARN)
-                file_path: The path to the file to upload.
-                key: The key to set for the file on S3. If not specified, this will default to the
-                    name of the file.
-                use_relative_path_for_key: If set to True (default), and key is None, the S3 key will include slashes
-                    representing the path of the file relative to the CWD. If False only the
-                    file basename will be used for the key.
-                relative_start_dir: The start dir to use for use_relative_path_for_key. No effect if key is set.
-                replace: If True a file with the same key will be replaced with the one being written
-                reduced_redundancy: Sets the file to AWS reduced redundancy storage.
-                headers: additional heads to pass to AWS
-                parallel_processes: Number of concurrent uploads
-
-            Returns: The key of the file on S3 if written, None otherwise
-        """
-        file_path = os.path.realpath(os.path.expanduser(file_path))
-
-        assert bucket_name, "bucket_name must be specified"
-        assert os.path.exists(file_path), "The file path specified does not exist: %s" % file_path
-        assert os.path.isfile(file_path), "The file path specified does not appear to be a file: %s" % file_path
-
-        try:
-            b = self.conn.get_bucket(bucket_name)
-        except:
-            b = self.conn.create_bucket(bucket_name)
-
-        path_key = None
-        if key:
-            path_key = key
-        else:
-            if use_relative_path_for_key:
-                if relative_start_dir:
-                    path_key = os.path.relpath(file_path, relative_start_dir)
-                else:
-                    path_key = os.path.relpath(file_path)
-            else:
-                path_key = os.path.basename(file_path)
-
-        mp = b.initiate_multipart_upload(path_key, headers=headers, encrypt_key=encrypt_key)
-
-        source_size = os.stat(file_path).st_size
-
-        bytes_per_chunk = 52428800  # 50MB = 50 * 1024 * 1024
-        chunk_count = int(math.ceil(source_size / float(bytes_per_chunk)))
-
-        with concurrent.futures.ThreadPoolExecutor(max_workers=parallel_processes) as executor:
-            for i in range(chunk_count):
-                offset = i * bytes_per_chunk
-                remaining_bytes = source_size - offset
-                bytes_to_write = min([bytes_per_chunk, remaining_bytes])
-                part_num = i + 1
-                executor.submit(functools.partial(self._upload_part, bucket_name, mp.id, part_num, file_path, offset, bytes_to_write))
-
-        if len(mp.get_all_parts()) == chunk_count:
-            mp.complete_upload()
-            try:
-                key = b.get_key(path_key)
-                return key.key
-            except:
-                return None
-        else:
-            mp.cancel_upload()
-            return None
-
-    def delete_from_bucket(self, bucket_name, key, headers=None):
+    def delete_from_bucket(self, bucket_name, key):
         """ Delete a file from s3
 
             This function deletes an object from a specified AWS S3 bucket.
@@ -380,7 +263,6 @@ class S3Helper(object):
             Args:
                 bucket_name: the name of the S3 bucket to use (bucket name only, not ARN)
                 key: the key of the object to delete from the bucket
-                headers: Additional headers to pass to AWS
 
             Returns:
                 The name of the object deleted
@@ -388,19 +270,16 @@ class S3Helper(object):
         assert bucket_name, "bucket_name must be specified"
         assert key, "Key must be specified"
 
-        b = self.conn.get_bucket(bucket_name)
-        k = Key(b)
-        k.key = key
-        ret = k.delete(headers=headers)
+        k = self.s3.Object(bucket_name, key)
+        ret = k.delete()
         return ret.name
 
-    def exists_in_bucket(self, bucket_name, key, headers=None):
+    def exists_in_bucket(self, bucket_name, key):
         """ Returns whether the key exists in the bucket
 
             Args:
                 bucket_name: the name of the S3 bucket to use (bucket name only, not ARN)
                 key: the key of the object to delete from the bucket
-                headers: Additional headers to pass to AWS
 
             Returns:
                 True | False
@@ -408,18 +287,21 @@ class S3Helper(object):
         assert bucket_name, "bucket_name must be specified"
         assert key, "Key must be specified"
 
-        b = self.conn.get_bucket(bucket_name)
-        k = Key(b)
-        k.key = key
-        return k.exists(headers=headers)
+        try:
+            self.s3.Object(bucket_name, key).load()
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == "404":
+                return False
+            else:
+                raise
+        return True
 
-    def key_size(self, bucket_name, key, headers=None):
+    def key_size(self, bucket_name, key):
         """ Returns the size of a key based on a HEAD request
 
             Args:
                 bucket_name: the name of the S3 bucket to use (bucket name only, not ARN)
                 key: the key of the object to delete from the bucket
-                headers: Additional headers to pass to AWS
 
             Returns:
                 Size in kb
@@ -427,18 +309,16 @@ class S3Helper(object):
         assert bucket_name, "bucket_name must be specified"
         assert key, "Key must be specified"
 
-        b = self.conn.get_bucket(bucket_name)
-        k = b.lookup(key)
+        k = self.s3.Object(bucket_name, key)
 
-        return k.size
+        return k.content_length // 1024
 
-    def key_last_modified(self, bucket_name, key, headers=None):
+    def key_last_modified(self, bucket_name, key):
         """ Returns a timestamp of a key based on a HEAD request
 
             Args:
                 bucket_name: the name of the S3 bucket to use (bucket name only, not ARN)
                 key: the key of the object to delete from the bucket
-                headers: Additional headers to pass to AWS
 
             Returns:
                 timestamp
@@ -446,15 +326,10 @@ class S3Helper(object):
         assert bucket_name, "bucket_name must be specified"
         assert key, "Key must be specified"
 
-        b = self.conn.get_bucket(bucket_name)
-        k = b.lookup(key)
-
-        # email.utils parsing of timestamp mirrors boto whereas
-        # time.strptime() can have TZ issues due to DST
-        modified_tuple = email.utils.parsedate_tz(k.last_modified)
-        epochTime = int(email.utils.mktime_tz(modified_tuple))
+        k = self.s3.Object(bucket_name, key)
 
-        return epochTime
+        return k.last_modified.timestamp()
 
     def list_keys(self, bucket_name):
-        return self.conn.get_bucket(bucket_name).list()
+        b = self.s3.Bucket(bucket_name)
+        return [o.key for o in b.objects.all()]
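The S3 helper is now built on boto3, which performs multipart uploads transparently inside upload_file(), so the hand-rolled FileChunkIO multipart code could be dropped. A minimal standalone sketch of the boto3 calls used above, with a hypothetical bucket and key:

    import boto3
    import botocore

    s3 = boto3.resource("s3")

    def bucket_exists(name):
        # head_bucket raises ClientError if the bucket is missing or inaccessible
        try:
            s3.meta.client.head_bucket(Bucket=name)
            return True
        except botocore.exceptions.ClientError:
            return False

    obj = s3.Object("my-bucket", "path/to/file.txt")
    obj.upload_file("file.txt", ExtraArgs={"ServerSideEncryption": "AES256"})
    print(obj.content_length // 1024, "kB,", obj.last_modified.timestamp())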
diff --git a/snakemake/remote/S3Mocked.py b/snakemake/remote/S3Mocked.py
index 5540476..ff7195d 100644
--- a/snakemake/remote/S3Mocked.py
+++ b/snakemake/remote/S3Mocked.py
@@ -19,11 +19,10 @@ from snakemake.exceptions import WorkflowError
 
 try:
     # third-party
-    import boto
+    import boto3
     from moto import mock_s3
-    import filechunkio
 except ImportError as e:
-    raise WorkflowError("The Python 3 packages 'moto', boto' and 'filechunkio' " +
+    raise WorkflowError("The Python 3 packages 'moto' and boto3' " +
         "need to be installed to use S3Mocked remote() file functionality. %s" % e.msg)
 
 def noop():
@@ -71,6 +70,9 @@ def pickled_moto_wrapper(func):
 
 @dec_all_methods(pickled_moto_wrapper, prefix=None)
 class RemoteProvider(S3RemoteProvider):
+
+    supports_default = True
+
     def __init__(self, *args, **kwargs):
         super(RemoteProvider, self).__init__(*args, **kwargs)
 
@@ -89,9 +91,8 @@ class RemoteObject(S3RemoteObject):
         bucket_name = 'test-remote-bucket'
         test_file = "test.txt"
 
-        conn = boto.connect_s3()
-        if bucket_name not in [b.name for b in conn.get_all_buckets()]:
-            conn.create_bucket(bucket_name)
+        s3 = boto3.resource('s3')
+        s3.create_bucket(Bucket=bucket_name)
 
         # "Upload" files that should be in S3 before tests...
         s3c = S3Helper()
diff --git a/snakemake/remote/__init__.py b/snakemake/remote/__init__.py
index 380ec5c..0dcb528 100644
--- a/snakemake/remote/__init__.py
+++ b/snakemake/remote/__init__.py
@@ -46,8 +46,6 @@ class AbstractRemoteProvider:
     """
     __metaclass__ = ABCMeta
 
-    supports_default = False
-
     def __init__(self, *args, keep_local=False, stay_on_remote=False, **kwargs):
         self.args = args
         self.stay_on_remote = stay_on_remote
@@ -68,19 +66,17 @@ class AbstractRemoteProvider:
 
         def _set_protocol(value):
             """Adds the default protocol to `value` if it doesn't already have one"""
-            for protocol in self.available_protocols:
-                if value.startswith(protocol):
+            protocol = self.default_protocol
+            for p in self.available_protocols:
+                if value.startswith(p):
+                    value = value[len(p):]
+                    protocol = p
                     break
-            if value.startswith(protocol):
-                value = value[len(protocol):]
-                protocol = protocol
-            else:
-                protocol = self.default_protocol
             return protocol, value
 
         if isinstance(value, str):
             protocol, value = _set_protocol(value)
-            value = protocol+value if stay_on_remote else value
+            value = protocol + value if stay_on_remote else value
         else:
             protocol, value = list(zip(*[_set_protocol(v) for v in value]))
             if len(set(protocol)) != 1:
@@ -88,9 +84,15 @@ class AbstractRemoteProvider:
             protocol = set(protocol).pop()
             value = [protocol+v if stay_on_remote else v for v in value]
 
+        if "protocol" not in kwargs:
+            if "protocol" not in self.kwargs:
+                kwargs["protocol"] = protocol
+            else:
+                kwargs["protocol"] = self.kwargs["protocol"]
+
         provider = sys.modules[self.__module__]  # get module of derived class
         remote_object = provider.RemoteObject(
-            *args, protocol=protocol, keep_local=keep_local, stay_on_remote=stay_on_remote,
+            *args, keep_local=keep_local, stay_on_remote=stay_on_remote,
             provider=provider.RemoteProvider(*self.args,  **self.kwargs), **kwargs
         )
         if static:
@@ -102,7 +104,6 @@ class AbstractRemoteProvider:
         kwargs = self.kwargs if not kwargs else kwargs
 
         referenceObj = snakemake.io.IOFile(self.remote(pattern, *args, **kwargs))
-
         if not referenceObj.remote_object.stay_on_remote:
             pattern = "./" + referenceObj.remote_object.name
             pattern = os.path.normpath(pattern)
@@ -161,7 +162,7 @@ class AbstractRemoteObject:
             return self._file
 
     def remote_file(self):
-        return self.protocol+self.local_file()
+        return self.protocol + self.local_file()
 
     @abstractmethod
     def close(self):
@@ -214,12 +215,18 @@ class DomainObject(AbstractRemoteObject):
 
     @property
     def _matched_address(self):
-        return re.search("^(?P<host>[A-Za-z0-9\-\.]+)(?:\:(?P<port>[0-9]+))?(?P<path_remainder>.*)$", self.local_file())
+        return re.search("^(?P<protocol>[a-zA-Z]+\://)?(?P<host>[A-Za-z0-9\-\.]+)(?:\:(?P<port>[0-9]+))?(?P<path_remainder>.*)$", self.local_file())
 
     @property
     def name(self):
         return self.path_remainder
 
+    # if we ever parse out the protocol directly
+    #@property
+    #def protocol(self):
+    #    if self._matched_address:
+    #        return self._matched_address.group("protocol")
+
     @property
     def host(self):
         if self._matched_address:
@@ -227,7 +234,8 @@ class DomainObject(AbstractRemoteObject):
 
     @property
     def port(self):
-        return self._matched_address.group("port")
+        if self._matched_address:
+            return self._matched_address.group("port")
 
     @property
     def path_prefix(self):
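The rewritten _set_protocol() strips a matching protocol prefix from the value and remembers it, falling back to the provider's default protocol when none is given; the chosen protocol is then forwarded to the RemoteObject as a keyword argument. An illustrative standalone sketch of that behaviour (not the library code itself):

    def set_protocol(value, available=("gs://", "s3://"), default="gs://"):
        # return (protocol, value-without-protocol)
        for p in available:
            if value.startswith(p):
                return p, value[len(p):]
        return default, value

    assert set_protocol("s3://bucket/key") == ("s3://", "bucket/key")
    assert set_protocol("bucket/key") == ("gs://", "bucket/key")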
diff --git a/snakemake/remote/dropbox.py b/snakemake/remote/dropbox.py
index dbdc52b..e1ba9e2 100644
--- a/snakemake/remote/dropbox.py
+++ b/snakemake/remote/dropbox.py
@@ -43,7 +43,7 @@ class RemoteProvider(AbstractRemoteProvider):
 
 
 class RemoteObject(AbstractRemoteObject):
-    """ This is a class to interact with the AWS S3 object store.
+    """ This is a class to interact with the Dropbox API.
     """
 
     def __init__(self, *args, keep_local=False, provider=None, **kwargs):
diff --git a/snakemake/remote/gfal.py b/snakemake/remote/gfal.py
new file mode 100644
index 0000000..eb79d70
--- /dev/null
+++ b/snakemake/remote/gfal.py
@@ -0,0 +1,137 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2017, Johannes Köster"
+__email__ = "johannes.koester at tu-dortmund.de"
+__license__ = "MIT"
+
+import os
+import re
+import shutil
+import subprocess as sp
+from datetime import datetime
+import time
+
+from snakemake.remote import AbstractRemoteObject, AbstractRemoteProvider
+from snakemake.exceptions import WorkflowError
+from snakemake.common import lazy_property
+from snakemake.logging import logger
+
+
+if not shutil.which("gfal-copy"):
+    raise WorkflowError("The gfal-* commands need to be available for "
+                        "gfal remote support.")
+
+
+class RemoteProvider(AbstractRemoteProvider):
+
+    supports_default = True
+
+    def __init__(self, *args, stay_on_remote=False, retry=5, **kwargs):
+        super(RemoteProvider, self).__init__(*args, stay_on_remote=stay_on_remote, **kwargs)
+        self.retry = retry
+
+    @property
+    def default_protocol(self):
+        """The protocol that is prepended to the path when no protocol is specified."""
+        return "gsiftp://"
+
+    @property
+    def available_protocols(self):
+        """List of valid protocols for this remote provider."""
+        # TODO gfal provides more. Extend this list.
+        return ["gsiftp://", "srm://"]
+
+
+class RemoteObject(AbstractRemoteObject):
+    mtime_re = re.compile(r"^\s*Modify: (.+)$", flags=re.MULTILINE)
+    size_re = re.compile(r"^\s*Size: ([0-9]+).*$", flags=re.MULTILINE)
+
+    def __init__(self, *args, keep_local=False, provider=None, **kwargs):
+        super(RemoteObject, self).__init__(*args, keep_local=keep_local, provider=provider, **kwargs)
+
+    def _gfal(self,
+              cmd, *args,
+              retry=None,
+              raise_workflow_error=True):
+        if retry is None:
+            retry = self.provider.retry
+        _cmd = ["gfal-" + cmd] + list(args)
+        for i in range(retry + 1):
+            try:
+                logger.debug(_cmd)
+                return sp.run(_cmd,
+                              check=True,
+                              stderr=sp.PIPE,
+                              stdout=sp.PIPE).stdout.decode()
+            except sp.CalledProcessError as e:
+                if i == retry:
+                    if raise_workflow_error:
+                        raise WorkflowError("Error calling gfal-{}:\n{}".format(
+                            cmd, e.stderr.decode()))
+                    else:
+                        raise e
+                else:
+                    # try again after some seconds
+                    time.sleep(1)
+                    continue
+
+    # === Implementations of abstract class members ===
+
+    def exists(self):
+        try:
+            self._gfal("ls", "-a", self.remote_file(),
+                       retry=0, raise_workflow_error=False)
+        except sp.CalledProcessError as e:
+            if e.returncode == 2:
+                # exit code 2 means no such file or directory
+                return False
+            else:
+                raise WorkflowError("Error calling gfal-ls:\n{}".format(
+                    e.stderr.decode()))
+        # exit code 0 means that the file is present
+        return True
+
+    def _stat(self):
+        stat = self._gfal("stat", self.remote_file())
+        return stat
+
+    def mtime(self):
+        # assert self.exists()
+        stat = self._stat()
+        mtime = self.mtime_re.search(stat).group(1)
+        date = datetime.strptime(mtime, "%Y-%m-%d %H:%M:%S.%f")
+        return date.timestamp()
+
+    def size(self):
+        # assert self.exists()
+        stat = self._stat()
+        size = self.size_re.search(stat).group(1)
+        return int(size)
+
+    def download(self):
+        if self.exists():
+            # Download file. Wait for staging.
+            source = self.remote_file()
+            target = "file://" + os.path.abspath(self.local_file())
+
+            # disable all timeouts (file transfers can take a long time)
+            self._gfal("copy", "-p", "-f", "-n", "4", "-t", "0", "-T", "0",
+                       source, target)
+
+            os.sync()
+            return self.local_file()
+        return None
+
+    def upload(self):
+        target = self.remote_file()
+        source = "file://" + os.path.abspath(self.local_file())
+        # disable all timeouts (file transfers can take a long time)
+        self._gfal("copy", "-p", "-f", "-n", "4", "-t", "0", "-T", "0",
+                   source, target)
+
+    @property
+    def list(self):
+        # TODO implement listing of remote files with patterns
+        raise NotImplementedError()
+
+    def host(self):
+        return self.local_file().split("/")[0]
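gfal.py adds a provider that shells out to the gfal-* command line tools (gfal-ls, gfal-stat, gfal-copy) and defaults to gsiftp://; gridftp.py below reuses the same provider interface but performs the actual transfers with globus-url-copy. A minimal Snakefile sketch with a hypothetical endpoint:

    from snakemake.remote.gfal import RemoteProvider as GFALRemoteProvider

    # each gfal command is retried up to `retry` times (default 5)
    GFAL = GFALRemoteProvider(retry=5)

    rule fetch:
        input:
            GFAL.remote("gsiftp://gridftp.example.org/pnfs/data/run1.tar.gz")
        output:
            "data/run1.tar.gz"
        shell:
            "cp {input} {output}"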
diff --git a/snakemake/remote/gridftp.py b/snakemake/remote/gridftp.py
new file mode 100644
index 0000000..777626f
--- /dev/null
+++ b/snakemake/remote/gridftp.py
@@ -0,0 +1,71 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2017, Johannes Köster"
+__email__ = "johannes.koester at tu-dortmund.de"
+__license__ = "MIT"
+
+
+import os
+import subprocess as sp
+import time
+import shutil
+
+from snakemake.exceptions import WorkflowError
+from snakemake.logging import logger
+
+
+if not shutil.which("globus-url-copy"):
+    raise WorkflowError("The globus-url-copy command has to be available for "
+                        "gridftp remote support.")
+
+if not shutil.which("gfal-ls"):
+    raise WorkflowError("The gfal-* commands need to be available for "
+                        "gridftp remote support.")
+
+
+from snakemake.remote import gfal
+
+
+class RemoteProvider(gfal.RemoteProvider):
+    pass
+
+
+class RemoteObject(gfal.RemoteObject):
+    def _globus(self,
+                *args):
+        retry = self.provider.retry
+        cmd = ["globus-url-copy"] + list(args)
+        for i in range(retry + 1):
+            try:
+                logger.debug(" ".join(cmd))
+                return sp.run(cmd,
+                              check=True,
+                              stderr=sp.PIPE,
+                              stdout=sp.PIPE).stdout.decode()
+            except sp.CalledProcessError as e:
+                if i == retry:
+                    raise WorkflowError("Error calling globus-url-copy:\n{}".format(
+                        cmd, e.stderr.decode()))
+                else:
+                    # try again after some seconds
+                    time.sleep(1)
+                    continue
+
+    def download(self):
+        if self.exists():
+            # Download file. Wait for staging.
+            source = self.remote_file()
+            target = "file://" + os.path.abspath(self.local_file())
+
+            self._globus("-parallel", "4", "-create-dest", "-recurse", "-dp",
+                         source, target)
+
+            os.sync()
+            return self.local_file()
+        return None
+
+    def upload(self):
+        target = self.remote_file()
+        source = "file://" + os.path.abspath(self.local_file())
+
+        self._globus("-parallel", "4", "-create-dest", "-recurse", "-dp",
+                     source, target)
diff --git a/snakemake/remote/webdav.py b/snakemake/remote/webdav.py
new file mode 100644
index 0000000..d19f454
--- /dev/null
+++ b/snakemake/remote/webdav.py
@@ -0,0 +1,167 @@
+__author__ = "Christopher Tomkins-Tinch"
+__copyright__ = "Copyright 2017, Christopher Tomkins-Tinch"
+__email__ = "tomkinsc at broadinstitute.org"
+__license__ = "MIT"
+
+import os, sys
+import email.utils
+from contextlib import contextmanager
+import functools
+
+# module-specific
+from snakemake.remote import AbstractRemoteProvider, AbstractRemoteObject, DomainObject
+from snakemake.exceptions import WebDAVFileException, WorkflowError
+
+try:
+    # third-party modules
+    import aioeasywebdav
+    import asyncio
+except ImportError as e:
+    raise WorkflowError("The Python 3 packages 'aioeasywebdav' "
+                        " and 'asyncio' must be present to use WebDAV remote() file "
+                        "functionality. %s" % e.msg)
+
+class RemoteProvider(AbstractRemoteProvider):
+    def __init__(self, *args, stay_on_remote=False, **kwargs):
+        #loop = asyncio.get_event_loop()
+        super(RemoteProvider, self).__init__(*args, stay_on_remote=stay_on_remote, **kwargs)
+
+    @property
+    def default_protocol(self):
+        """The protocol that is prepended to the path when no protocol is specified."""
+        return 'https://'
+
+    @property
+    def available_protocols(self):
+        """List of valid protocols for this remote provider."""
+        return ['http://', 'https://']
+
+
+class RemoteObject(DomainObject):
+    """ This is a class to interact with a WebDAV file store.
+    """
+
+    def __init__(self, *args, keep_local=False, **kwargs):
+        #self.loop = asyncio.get_event_loop()
+        super(RemoteObject, self).__init__(*args, keep_local=keep_local, **kwargs)
+
+
+    @contextmanager
+    def webdavc(self):
+        newloop = False
+        if not hasattr(self, "loop"):
+            try:
+                self.loop = asyncio.get_event_loop()
+                if self.loop.is_running():
+                    raise NotImplementedError("Cannot use aioutils in "
+                                              "asynchronous environment")
+            except:
+                newloop = True
+                self.loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(self.loop)
+
+            self.loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(self.loop)
+
+
+        if (not hasattr(self, "conn") or (hasattr(self, "conn") and not isinstance(self.conn, aioeasywebdav.Client))):
+            # if args have been provided to remote(), use them over those given to RemoteProvider()
+            args_to_use = self.provider.args
+            if len(self.args):
+                args_to_use = self.args
+
+            # use kwargs passed in to remote() to override those given to the RemoteProvider()
+            # default to the host and port given as part of the file, falling back to one specified
+            # as a kwarg to remote() or the RemoteProvider (overriding the latter with the former if both)
+            kwargs_to_use = {}
+            kwargs_to_use["host"]     = self.host
+            kwargs_to_use["protocol"] = self.protocol
+            kwargs_to_use["port"]     = int(self.port) if self.port!=None else 443
+            for k,v in self.provider.kwargs.items():
+                kwargs_to_use[k] = v
+            for k,v in self.kwargs.items():
+                kwargs_to_use[k] = v
+
+            # easywebdav wants the protocol without "://"
+            kwargs_to_use["protocol"] = kwargs_to_use["protocol"].replace("://","")
+
+            # monkey patch aioeasywebdav to noop _rate_calc()
+            # since we don't care about download progress and
+            # the parent (connection) object may be removed before the 
+            # sleep coroutine has a chance to be scheduled/finish, 
+            # and aioeasywebdav only calls close() on __del__()
+            async def noop(_): pass
+            aioeasywebdav.Client._rate_calc = noop
+
+            self.conn = aioeasywebdav.connect(*args_to_use, **kwargs_to_use)
+        yield
+
+    # === Implementations of abstract class members ===
+
+    def exists(self):
+        with self.webdavc() as webdavc:
+            path_to_try = self.webdav_file
+            return self.loop.run_until_complete(self.conn.exists(self.webdav_file))
+
+    def mtime(self):
+        if self.exists():
+            with self.webdavc() as webdavc:
+                metadata = self.loop.run_until_complete(self.conn.ls(remote_path=self.webdav_file))[0]
+                parsed_date = email.utils.parsedate_tz(metadata.mtime)
+                epoch_time = email.utils.mktime_tz(parsed_date)
+                return epoch_time
+        else:
+            raise WebDAVFileException("The file does not seem to exist remotely: %s" % self.webdav_file)
+
+    def size(self):
+        if self.exists():
+            with self.webdavc() as webdavc:
+                metadata = self.loop.run_until_complete(self.conn.ls(remote_path=self.webdav_file))[0]
+                return int(metadata.size)
+        else:
+            return self._iofile.size_local
+
+    def download(self, make_dest_dirs=True):
+        if self.exists():
+            # if the destination path does not exist, make it
+            if make_dest_dirs:
+                os.makedirs(os.path.dirname(self.local_file()), exist_ok=True)
+            with self.webdavc() as webdavc:
+                self.loop.run_until_complete(self.conn.download(self.webdav_file, self.local_file()))
+                os.sync() # ensure flush to disk
+        else:
+            raise WebDAVFileException("The file does not seem to exist remotely: %s" % self.webdav_file)
+
+    def upload(self):
+        # make containing folder
+        with self.webdavc() as webdavc:
+            self.loop.run_until_complete(self.conn.mkdirs(os.path.dirname(self.webdav_file)))
+            self.loop.run_until_complete(self.conn.upload(self.local_file(), self.webdav_file))
+
+    @property
+    def webdav_file(self):
+        filepath = self.local_file().replace(self.host,"").replace(":"+str(self.port),"")
+        filepath = filepath if not filepath.startswith("/") else filepath[1:]
+        return filepath
+
+    @property
+    def name(self):
+        return self.local_file()
+
+    @property
+    def list(self):
+        file_list = []
+
+        first_wildcard = self._iofile.constant_prefix().replace(self.host,"").replace(":"+str(self.port),"")
+        dirname = first_wildcard[1:] if first_wildcard.startswith("/") else first_wildcard
+
+        while '//' in dirname:
+            dirname = dirname.replace('//', '/')
+        dirname = dirname.rstrip('/')+"/"
+
+        with self.webdavc() as webdavc:
+            for item in self.loop.run_until_complete(self.conn.ls(dirname)):
+                file_list.append( os.path.join(os.path.dirname(dirname), item.name.lstrip("/")) )
+                file_list.append( os.path.join(self._iofile.constant_prefix(), os.path.basename(item.name)) )
+
+        return file_list
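The WebDAV provider wraps aioeasywebdav behind the usual remote() interface, driving the async client from a private event loop. A minimal Snakefile sketch with a hypothetical host and credentials (host and port are parsed from the file path itself; the remaining kwargs are handed to aioeasywebdav.connect()):

    from snakemake.remote.webdav import RemoteProvider as WebDAVRemoteProvider

    # hypothetical account
    WebDAV = WebDAVRemoteProvider(username="user", password="secret")

    rule fetch:
        input:
            WebDAV.remote("https://webdav.example.org:443/share/input.txt")
        output:
            "input.txt"
        shell:
            "cp {input} {output}"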
diff --git a/snakemake/rules.py b/snakemake/rules.py
index 656f6bf..97665e9 100644
--- a/snakemake/rules.py
+++ b/snakemake/rules.py
@@ -52,6 +52,7 @@ class Rule:
             self._log = Log()
             self._benchmark = None
             self._conda_env = None
+            self._singularity_img = None
             self.wildcard_names = set()
             self.lineno = lineno
             self.snakefile = snakefile
@@ -86,6 +87,7 @@ class Rule:
             self._log = other._log
             self._benchmark = other._benchmark
             self._conda_env = other._conda_env
+            self._singularity_img = other._singularity_img
             self.wildcard_names = set(other.wildcard_names)
             self.lineno = other.lineno
             self.snakefile = other.snakefile
@@ -164,8 +166,14 @@ class Rule:
             branch._input, _, branch.dependencies = branch.expand_input(non_dynamic_wildcards)
             branch._output, _ = branch.expand_output(non_dynamic_wildcards)
 
-            resources = branch.expand_resources(non_dynamic_wildcards, branch._input)
-            branch._params = branch.expand_params(non_dynamic_wildcards, branch._input, branch._output, resources)
+            resources = branch.expand_resources(non_dynamic_wildcards,
+                                                branch._input,
+                                                1)
+            branch._params = branch.expand_params(non_dynamic_wildcards,
+                                                  branch._input,
+                                                  branch._output,
+                                                  resources,
+                                                  omit_callable=True)
             branch.resources = dict(resources.items())
 
             branch._log = branch.expand_log(non_dynamic_wildcards)
@@ -196,7 +204,8 @@ class Rule:
 
     @benchmark.setter
     def benchmark(self, benchmark):
-        benchmark = self.apply_default_remote(benchmark)
+        if not callable(benchmark):
+            benchmark = self.apply_default_remote(benchmark)
         self._benchmark = IOFile(benchmark, rule=self)
 
     @property
@@ -207,6 +216,13 @@ class Rule:
     def conda_env(self, conda_env):
         self._conda_env = IOFile(conda_env, rule=self)
 
+    @property
+    def singularity_img(self):
+        return self._singularity_img
+
+    @singularity_img.setter
+    def singularity_img(self, singularity_img):
+        self._singularity_img = singularity_img
 
     @property
     def input(self):
@@ -285,9 +301,12 @@ class Rule:
             seen[value] = name or idx
 
     def apply_default_remote(self, item):
+        assert not callable(item)
         if (not is_flagged(item, "remote_object") and
+            not is_flagged(item, "local") and
             self.workflow.default_remote_provider is not None):
             item = "{}/{}".format(self.workflow.default_remote_prefix, item)
+            item = os.path.normpath(item)
             return self.workflow.default_remote_provider.remote(item)
         return item
 
@@ -305,7 +324,8 @@ class Rule:
             item = self.apply_default_remote(item)
 
             # add the rule to the dependencies
-            if isinstance(item, _IOFile) and item.rule:
+            if (isinstance(item, _IOFile) and item.rule
+                and item in item.rule.output):
                 self.dependencies[item] = item.rule
             if output:
                 rule = self
@@ -320,7 +340,7 @@ class Rule:
                             snakefile=self.snakefile,
                             lineno=self.lineno)
             else:
-                rule = None
+                rule = self
                 if contains_wildcard_constraints(item) and self.workflow.mode != Mode.subprocess:
                     logger.warning(
                         "wildcard constraints in inputs are ignored")
@@ -351,8 +371,6 @@ class Rule:
             if name:
                 inoutput.add_name(name)
         elif callable(item):
-            item = self.apply_default_remote(item)
-
             if output:
                 raise SyntaxError(
                     "Only input files can be specified as functions")
@@ -405,7 +423,9 @@ class Rule:
 
     def _set_log_item(self, item, name=None):
         if isinstance(item, str) or callable(item):
-            item = self.apply_default_remote(item)
+            if not callable(item):
+                item = self.apply_default_remote(item)
+
             self.log.append(IOFile(item,
                                    rule=self) if isinstance(item, str) else
                             item)
@@ -445,9 +465,11 @@ class Rule:
     def _apply_wildcards(self, newitems, olditems, wildcards,
                          concretize=apply_wildcards,
                          check_return_type=True,
+                         omit_callable=False,
                          mapping=None,
                          no_flattening=False,
-                         aux_params=None):
+                         aux_params=None,
+                         apply_default_remote=True):
         if aux_params is None:
             aux_params = dict()
         for name, item in olditems.allitems():
@@ -456,7 +478,11 @@ class Rule:
             is_unpack = is_flagged(item, "unpack")
 
             if is_callable(item):
+                if omit_callable:
+                    continue
                 item = self.apply_input_function(item, wildcards, **aux_params)
+                if apply_default_remote:
+                    item = self.apply_default_remote(item)
 
             if is_unpack:
                 # Sanity checks before interpreting unpack()
@@ -531,7 +557,7 @@ class Rule:
 
         return input, mapping, dependencies
 
-    def expand_params(self, wildcards, input, output, resources):
+    def expand_params(self, wildcards, input, output, resources, omit_callable=False):
         def concretize_param(p, wildcards):
             if isinstance(p, str):
                 return apply_wildcards(p, wildcards)
@@ -544,7 +570,9 @@ class Rule:
             self._apply_wildcards(params, self.params, wildcards,
                                   concretize=concretize_param,
                                   check_return_type=False,
+                                  omit_callable=omit_callable,
                                   no_flattening=True,
+                                  apply_default_remote=False,
                                   aux_params={"input": input,
                                               "resources": resources,
                                               "output": output,
@@ -614,17 +642,28 @@ class Rule:
 
         return benchmark
 
-    def expand_resources(self, wildcards, input):
+    def expand_resources(self, wildcards, input, attempt):
         resources = dict()
-        for name, res in self.resources.items():
+
+        def apply(name, res, threads=None):
             if callable(res):
+                aux = {"threads": threads} if threads is not None else dict()
                 res = self.apply_input_function(res,
                                                 wildcards,
-                                                input=input)
+                                                input=input,
+                                                attempt=attempt,
+                                                **aux)
                 if not isinstance(res, int):
                     raise WorkflowError("Resources function did not return int.")
             res = min(self.workflow.global_resources.get(name, res), res)
-            resources[name] = res
+            return res
+
+        threads = apply("_cores", self.resources["_cores"])
+        resources["_cores"] = threads
+
+        for name, res in self.resources.items():
+            if name != "_cores":
+                resources[name] = apply(name, res)
         resources = Resources(fromdict=resources)
         return resources
 
@@ -727,23 +766,26 @@ class Ruleorder:
         """
         Return whether rule2 has a higher priority than rule1.
         """
-        # try the last clause first,
-        # i.e. clauses added later overwrite those before.
-        for clause in reversed(self.order):
-            try:
-                i = clause.index(rule1.name)
-                j = clause.index(rule2.name)
-                # rules with higher priority should have a smaller index
-                comp = j - i
-                if comp < 0:
-                    comp = -1
-                elif comp > 0:
-                    comp = 1
-                return comp
-            except ValueError:
-                pass
-
-        # if not ruleorder given, prefer rule without wildcards
+        # if rules have the same name, they have been specialized by dynamic output
+        # in that case, clauses are irrelevant and have to be skipped
+        if rule1.name != rule2.name:
+            # try the last clause first,
+            # i.e. clauses added later overwrite those before.
+            for clause in reversed(self.order):
+                try:
+                    i = clause.index(rule1.name)
+                    j = clause.index(rule2.name)
+                    # rules with higher priority should have a smaller index
+                    comp = j - i
+                    if comp < 0:
+                        comp = -1
+                    elif comp > 0:
+                        comp = 1
+                    return comp
+                except ValueError:
+                    pass
+
+        # if no ruleorder given, prefer rule without wildcards
         wildcard_cmp = rule2.has_wildcards() - rule1.has_wildcards()
         if wildcard_cmp != 0:
             return wildcard_cmp
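expand_resources() now receives the current attempt (and, for _cores, the thread count) and forwards it to callable resources, so a rule can scale its requests when it is restarted via --restart-times. A minimal Snakefile sketch; the aligner command is hypothetical:

    rule align:
        input:
            "data/{sample}.fastq"
        output:
            "mapped/{sample}.bam"
        threads: 4
        resources:
            # grow the memory request with every restart attempt (1, 2, 3, ...)
            mem_mb=lambda wildcards, attempt: 4000 * attempt
        shell:
            "run_aligner --threads {threads} --mem {resources.mem_mb} {input} > {output}"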
diff --git a/snakemake/scheduler.py b/snakemake/scheduler.py
index 39ed1b0..406ba1d 100644
--- a/snakemake/scheduler.py
+++ b/snakemake/scheduler.py
@@ -3,15 +3,20 @@ __copyright__ = "Copyright 2015, Johannes Köster"
 __email__ = "koester at jimmy.harvard.edu"
 __license__ = "MIT"
 
-import os, signal
+import os, signal, sys
 import threading
 import operator
 from functools import partial
 from collections import defaultdict
 from itertools import chain, accumulate
+from contextlib import contextmanager
+import time
 
 from snakemake.executors import DryrunExecutor, TouchExecutor, CPUExecutor
-from snakemake.executors import GenericClusterExecutor, SynchronousClusterExecutor, DRMAAExecutor
+from snakemake.executors import (
+    GenericClusterExecutor, SynchronousClusterExecutor, DRMAAExecutor,
+    KubernetesExecutor)
+from snakemake.exceptions import RuleException, WorkflowError
 
 from snakemake.logging import logger
 
@@ -24,27 +29,40 @@ _ERROR_MSG_FINAL = ("Exiting because a job execution failed. "
                     "Look above for error message")
 
 
+@contextmanager
+def dummy_rate_limiter():
+    yield
+
+
 class JobScheduler:
     def __init__(self, workflow, dag, cores,
                  local_cores=1,
                  dryrun=False,
                  touch=False,
                  cluster=None,
+                 cluster_status=None,
                  cluster_config=None,
                  cluster_sync=None,
                  drmaa=None,
                  drmaa_log_dir=None,
+                 kubernetes=None,
+                 kubernetes_envvars=None,
+                 container_image=None,
                  jobname=None,
                  quiet=False,
                  printreason=False,
                  printshellcmds=False,
                  keepgoing=False,
                  max_jobs_per_second=None,
+                 max_status_checks_per_second=100,
                  latency_wait=3,
                  benchmark_repeats=1,
                  greediness=1.0,
-                 force_use_threads=False):
+                 force_use_threads=False,
+                 assume_shared_fs=True):
         """ Create a new instance of KnapsackJobScheduler. """
+        from ratelimiter import RateLimiter
+
         self.cluster = cluster
         self.cluster_config = cluster_config
         self.cluster_sync = cluster_sync
@@ -58,11 +76,12 @@ class JobScheduler:
         self.failed = set()
         self.finished_jobs = 0
         self.greediness = 1
+        self.max_jobs_per_second = max_jobs_per_second
 
         self.resources = dict(self.workflow.global_resources)
 
         use_threads = force_use_threads or (os.name != "posix") or cluster or cluster_sync or drmaa
-        self._open_jobs = threading.Event()
+        self._open_jobs = threading.Semaphore(0)
         self._lock = threading.Lock()
 
         self._errors = False
@@ -81,7 +100,6 @@ class JobScheduler:
                                             quiet=quiet,
                                             printshellcmds=printshellcmds,
                                             latency_wait=latency_wait)
-            self.job_reward = self.dryrun_job_reward
         elif touch:
             self._executor = TouchExecutor(workflow, dag,
                                            printreason=printreason,
@@ -100,8 +118,13 @@ class JobScheduler:
                 benchmark_repeats=benchmark_repeats,
                 cores=local_cores)
             if cluster or cluster_sync:
-                constructor = SynchronousClusterExecutor if cluster_sync \
-                              else GenericClusterExecutor
+                if cluster_sync:
+                    constructor = SynchronousClusterExecutor
+                else:
+                    constructor = partial(GenericClusterExecutor,
+                                          statuscmd=cluster_status,
+                                          max_status_checks_per_second=max_status_checks_per_second)
+
                 self._executor = constructor(
                     workflow, dag, None,
                     submitcmd=(cluster or cluster_sync),
@@ -112,9 +135,8 @@ class JobScheduler:
                     printshellcmds=printshellcmds,
                     latency_wait=latency_wait,
                     benchmark_repeats=benchmark_repeats,
-                    max_jobs_per_second=max_jobs_per_second)
+                    assume_shared_fs=assume_shared_fs)
                 if workflow.immediate_submit:
-                    self.job_reward = self.dryrun_job_reward
                     self._submit_callback = partial(self._proceed,
                                                     update_dynamic=False,
                                                     print_progress=False,
@@ -131,7 +153,31 @@ class JobScheduler:
                     latency_wait=latency_wait,
                     benchmark_repeats=benchmark_repeats,
                     cluster_config=cluster_config,
-                    max_jobs_per_second=max_jobs_per_second)
+                    assume_shared_fs=assume_shared_fs,
+                    max_status_checks_per_second=max_status_checks_per_second)
+        elif kubernetes:
+            workers = min(max(1, sum(1 for _ in dag.local_needrun_jobs)),
+                          local_cores)
+            self._local_executor = CPUExecutor(
+                workflow, dag, workers,
+                printreason=printreason,
+                quiet=quiet,
+                printshellcmds=printshellcmds,
+                use_threads=use_threads,
+                latency_wait=latency_wait,
+                benchmark_repeats=benchmark_repeats,
+                cores=local_cores)
+
+            self._executor = KubernetesExecutor(
+                workflow, dag, kubernetes, kubernetes_envvars,
+                container_image=container_image,
+                printreason=printreason,
+                quiet=quiet,
+                printshellcmds=printshellcmds,
+                latency_wait=latency_wait,
+                benchmark_repeats=benchmark_repeats,
+                cluster_config=cluster_config,
+                max_status_checks_per_second=max_status_checks_per_second)
         else:
             # local execution or execution of cluster job
             # calculate how many parallel workers the executor shall spawn
@@ -146,7 +192,16 @@ class JobScheduler:
                                          latency_wait=latency_wait,
                                          benchmark_repeats=benchmark_repeats,
                                          cores=cores)
-        self._open_jobs.set()
+
+        if self.max_jobs_per_second:
+            self.rate_limiter = RateLimiter(max_calls=self.max_jobs_per_second,
+                                            period=1)
+        else:
+            # essentially no rate limit
+            self.rate_limiter = RateLimiter(max_calls=sys.maxsize,
+                                            period=1)
+
+        self._open_jobs.release()
 
     @property
     def stats(self):
@@ -168,23 +223,25 @@ class JobScheduler:
 
     def schedule(self):
         """ Schedule jobs that are ready, maximizing cpu usage. """
+
         try:
             while True:
                 # work around so that the wait does not prevent keyboard interrupts
-                while not self._open_jobs.wait(1):
-                    pass
+                #while not self._open_jobs.acquire(False):
+                #    time.sleep(1)
+                self._open_jobs.acquire()
 
                 # obtain needrun and running jobs in a thread-safe way
                 with self._lock:
                     needrun = list(self.open_jobs)
                     running = list(self.running)
-                # free the event
-                self._open_jobs.clear()
+                    errors = self._errors
 
                 # handle errors
-                if not self.keepgoing and self._errors:
+                if not self.keepgoing and errors:
                     logger.info("Will exit after finishing "
                                 "currently running jobs.")
+
                     if not running:
                         self._executor.shutdown()
                         logger.error(_ERROR_MSG_FINAL)
@@ -193,9 +250,9 @@ class JobScheduler:
                 # normal shutdown because all jobs have been finished
                 if not needrun and not running:
                     self._executor.shutdown()
-                    if self._errors:
+                    if errors:
                         logger.error(_ERROR_MSG_FINAL)
-                    return not self._errors
+                    return not errors
 
                 # continue if no new job needs to be executed
                 if not needrun:
@@ -217,7 +274,8 @@ class JobScheduler:
                     "Resources after job selection: {}".format(self.resources))
                 # actually run jobs
                 for job in run:
-                    self.run(job)
+                    with self.rate_limiter:
+                        self.run(job)
         except (KeyboardInterrupt, SystemExit):
             logger.info("Terminating processes on user request.")
             self._executor.cancel()
@@ -258,7 +316,14 @@ class JobScheduler:
         """ Do stuff after job is finished. """
         with self._lock:
             # by calling this behind the lock, we avoid race conditions
-            self.get_executor(job).handle_job_success(job)
+            try:
+                self.get_executor(job).handle_job_success(job)
+            except (RuleException, WorkflowError) as e:
+                # if an error occurs while processing job output,
+                # we do the same as in case of errors during execution
+                self._handle_error(job)
+                return
+
             self.dag.finish(job, update_dynamic=update_dynamic)
 
             if update_resources:
@@ -273,33 +338,33 @@ class JobScheduler:
 
             if any(self.open_jobs) or not self.running:
                 # go on scheduling if open jobs are ready or no job is running
-                self._open_jobs.set()
+                self._open_jobs.release()
 
     def _error(self, job):
+        with self._lock:
+            self._handle_error(job)
+
+    def _handle_error(self, job):
         """Clear jobs and stop the workflow.
 
         If Snakemake is configured to restart jobs then the job might have
         "restart_times" left and we just decrement and let the scheduler
         try to run the job again.
         """
-        with self._lock:
-            self.get_executor(job).handle_job_error(job)
-            self.running.remove(job)
-            self._free_resources(job)
-            self._open_jobs.set()
-            if job.restart_times > 0:
-                msg = (
-                    ("Trying to restart job for rule {} with "
-                     "wildcards {}").format(
-                         job.rule.name, job.wildcards_dict))
-                logger.info(msg
-                    )
-                job.restart_times -= 1
-            else:
-                self._errors = True
-                self.failed.add(job)
-                if self.keepgoing:
-                    logger.info("Job failed, going on with independent jobs.")
+        self.get_executor(job).handle_job_error(job)
+        self.running.remove(job)
+        self._free_resources(job)
+        # attempt starts counting from 1, but the first attempt is not
+        # a restart, hence we subtract 1.
+        if job.rule.restart_times > job.attempt - 1:
+            logger.info("Trying to restart job {}.".format(self.dag.jobid(job)))
+            job.attempt += 1
+        else:
+            self._errors = True
+            self.failed.add(job)
+            if self.keepgoing:
+                logger.info("Job failed, going on with independent jobs.")
+        self._open_jobs.release()
 
     def job_selector(self, jobs):
         """
@@ -372,11 +437,25 @@ Problem", Akcay, Li, Xu, Annals of Operations Research, 2012
                 for name in self.workflow.global_resources]
 
     def job_reward(self, job):
-        return (self.dag.priority(job), self.dag.temp_input_count(job), self.dag.downstream_size(job),
-                0 if self.touch else job.inputsize)
-
-    def dryrun_job_reward(self, job):
-        return (self.dag.priority(job), self.dag.temp_input_count(job), self.dag.downstream_size(job))
+        if self.touch or self.dryrun or self.workflow.immediate_submit:
+            temp_size = 0
+            input_size = 0
+        else:
+            temp_size = self.dag.temp_size(job)
+            input_size = job.inputsize
+
+        # Usually, this should guide the scheduler to first schedule all jobs
+        # that remove the largest temp file, then the second largest, and so on.
+        # Since the weights are summed up, it may in theory sometimes prefer a
+        # set of many jobs that all depend on smaller temp files, though.
+        # A real solution to the problem would therefore be to use dummy jobs
+        # that ensure selection of groups of jobs that together delete the same
+        # temp file.
+
+        return (self.dag.priority(job),
+                temp_size,
+                self.dag.downstream_size(job),
+                input_size)
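The tuples returned here are compared lexicographically by the job selector, so user-defined priority always dominates, then the amount of temp data a job helps to delete, then its downstream size, then its input size. A minimal sketch of that ordering with made-up jobs and numbers (none of them taken from this patch):

    # Hypothetical reward tuples: (priority, temp_size, downstream_size, input_size).
    candidates = {
        "map_a":  (0, 5000000, 3, 1000),   # frees a large temp file
        "map_b":  (0,  100000, 7, 9000),   # frees a small temp file
        "report": (5,       0, 0,   10),   # high user-defined priority
    }

    # Higher reward first: priority dominates, then temp size, and so on.
    ranking = sorted(candidates, key=candidates.get, reverse=True)
    print(ranking)  # ['report', 'map_a', 'map_b']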
 
     def progress(self):
         """ Display the progress. """
diff --git a/snakemake/script.py b/snakemake/script.py
index c9b967d..c5f0502 100644
--- a/snakemake/script.py
+++ b/snakemake/script.py
@@ -151,7 +151,8 @@ class Snakemake:
 
 
 def script(path, basedir, input, output, params, wildcards, threads, resources,
-           log, config, rulename, conda_env, bench_record):
+           log, config, rulename, conda_env, singularity_img, singularity_args,
+           bench_record):
     """
     Load a script from the given basedir + path and execute it.
     Supports Python 3 and R.
@@ -182,9 +183,9 @@ def script(path, basedir, input, output, params, wildcards, threads, resources,
                 searchpath = os.path.dirname(os.path.dirname(__file__))
                 preamble = textwrap.dedent("""
                 ######## Snakemake header ########
-                import sys; sys.path.insert(0, "{}"); import pickle; snakemake = pickle.loads({})
+                import sys; sys.path.insert(0, "{}"); import pickle; snakemake = pickle.loads({}); from snakemake.logging import logger; logger.printshellcmds = {}
                 ######## Original script #########
-                """).format(searchpath, snakemake)
+                """).format(searchpath, snakemake, logger.printshellcmds)
             elif path.endswith(".R") or path.endswith(".Rmd"):
                 preamble = textwrap.dedent("""
                 ######## Snakemake header ########
@@ -273,10 +274,13 @@ def script(path, basedir, input, output, params, wildcards, threads, resources,
                             # to execute script
                             py_exec = "python"
                         else:
-                            logger.info("Conda environment defines Python "
+                            logger.warning("Conda environment defines Python "
                                         "version < {}.{}. Using Python of the "
                                         "master process to execute "
                                         "script.".format(*MIN_PY_VERSION))
+                if singularity_img is not None:
+                    # use python from image
+                    py_exec = "python"
                 # use the same Python as the running process or the one from the environment
                 shell("{py_exec} {f.name}", bench_record=bench_record)
             elif path.endswith(".R"):
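As a rough sketch of what the generated header above does at run time: the master process pickles the job information and the preamble unpickles it under the name ``snakemake`` before the original script runs. Here a plain dict and an invented payload stand in for the real Snakemake object:

    import pickle

    # A plain dict stands in for the real Snakemake object in this sketch.
    payload = pickle.dumps({"input": ["a.txt"], "threads": 4})
    header = "import pickle; snakemake = pickle.loads({!r}); print(snakemake)".format(payload)
    exec(header)  # prints the reconstructed dict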
diff --git a/snakemake/shell.py b/snakemake/shell.py
index 84eba46..a12946c 100644
--- a/snakemake/shell.py
+++ b/snakemake/shell.py
@@ -11,6 +11,7 @@ import inspect
 
 from snakemake.utils import format
 from snakemake.logging import logger
+from snakemake import singularity, conda
 
 
 __author__ = "Johannes Köster"
@@ -57,14 +58,25 @@ class shell:
 
         close_fds = sys.platform != 'win32'
 
+        env_prefix = ""
         conda_env = context.get("conda_env", None)
-        env_prefix = "" if conda_env is None else "source activate {};".format(conda_env)
+        singularity_img = context.get("singularity_img", None)
+        if singularity_img:
+            args = context.get("singularity_args", "")
+            cmd = singularity.shellcmd(singularity_img, cmd, args)
+            logger.info("Activating singularity image {}".format(singularity_img))
+        else:
+            # use conda if no singularity image is defined
+            if conda_env:
+                env_prefix = conda.shellcmd(conda_env)
+                logger.info("Activating conda environment {}.".format(conda_env))
 
-        proc = sp.Popen("{} {} {} {}".format(
+        cmd = "{} {} {} {}".format(
                             env_prefix,
                             cls._process_prefix,
                             cmd.rstrip(),
-                            cls._process_suffix),
+                            cls._process_suffix)
+        proc = sp.Popen(cmd,
                         bufsize=-1,
                         shell=True,
                         stdout=stdout,
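The net effect of the branch above is that a singularity image takes precedence over a conda environment, and the conda prefix (previously hard-coded as ``source activate {env};``, now produced by conda.shellcmd) is only prepended when no image is set. A rough sketch of the final string handed to Popen, with an invented environment path and command:

    # Invented values; the real ones come from the shell class attributes
    # and the rule's context.
    env_prefix = "source activate /path/to/.snakemake/conda/abc123;"
    process_prefix = ""
    process_suffix = ""
    cmd = "samtools index sample.bam"

    full_cmd = "{} {} {} {}".format(env_prefix, process_prefix,
                                    cmd.rstrip(), process_suffix)
    print(full_cmd)  # empty pieces just leave extra whitespace, which the shell ignores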
diff --git a/snakemake/singularity.py b/snakemake/singularity.py
new file mode 100644
index 0000000..1f4f29b
--- /dev/null
+++ b/snakemake/singularity.py
@@ -0,0 +1,73 @@
+import subprocess
+import shutil
+import os
+from urllib.parse import urlparse
+import hashlib
+from distutils.version import LooseVersion
+
+from snakemake import conda
+from snakemake.common import lazy_property
+from snakemake.exceptions import WorkflowError
+from snakemake.logging import logger
+
+
+class Image:
+    def __init__(self, url, dag):
+        if not shutil.which("singularity"):
+            raise WorkflowError("The singularity command has to be "
+                                "available in order to use singularity "
+                                "integration.")
+        try:
+            v = subprocess.check_output(["singularity", "--version"],
+                                        stderr=subprocess.PIPE).decode()
+        except subprocess.CalledProcessError as e:
+            raise WorkflowError(
+                "Failed to get singularity version:\n{}".format(
+                    e.stderr.decode()))
+        if not LooseVersion(v) >= LooseVersion("2.4"):
+            raise WorkflowError("Minimum singularity version is 2.4.")
+
+        self.url = url
+        self._img_dir = dag.workflow.persistence.singularity_img_path
+
+    @property
+    def is_local(self):
+        scheme = urlparse(self.url).scheme
+        return not scheme or scheme == "file"
+
+    @lazy_property
+    def hash(self):
+        md5hash = hashlib.md5()
+        md5hash.update(self.url.encode())
+        return md5hash.hexdigest()
+
+    def pull(self, dryrun=False):
+        if self.is_local:
+            return
+        if dryrun:
+            logger.info("Singularity image {} will be pulled.".format(self.url))
+            return
+        logger.debug("Singularity image location: {}".format(self.path))
+        if not os.path.exists(self.path):
+            logger.info("Pulling singularity image {}.".format(self.url))
+            try:
+                p = subprocess.check_output(
+                    ["singularity", "pull", "--name", self.hash, self.url],
+                    cwd=self._img_dir,
+                    stderr=subprocess.STDOUT)
+            except subprocess.CalledProcessError as e:
+                raise WorkflowError("Failed to pull singularity image "
+                                    "from {}:\n{}".format(self.url,
+                                                          e.stdout.decode()))
+
+
+    @property
+    def path(self):
+        if self.is_local:
+            return urlparse(self.url).path
+        return os.path.join(self._img_dir, self.hash) + ".simg"
+
+
+def shellcmd(img_path, cmd, args):
+    return "singularity exec {} {} bash -c \"{}\"".format(
+        args, img_path, cmd.replace("\"", r"\""))
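Given shellcmd above, wrapping a rule's command in a pulled image yields a string of the following shape (image path, bind argument and command are invented for illustration):

    from snakemake import singularity

    print(singularity.shellcmd(
        "/home/user/.snakemake/singularity/0123abcd.simg",
        "bcftools stats calls.vcf > calls.stats",
        "--bind /data"))
    # singularity exec --bind /data /home/user/.snakemake/singularity/0123abcd.simg bash -c "bcftools stats calls.vcf > calls.stats"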
diff --git a/snakemake/version.py b/snakemake/version.py
index ee2cf61..85f6349 100644
--- a/snakemake/version.py
+++ b/snakemake/version.py
@@ -1,3 +1,3 @@
-__version__ = "3.13.3"
+__version__ = "4.3.0"
 
-MIN_PY_VERSION = (3, 3)
+MIN_PY_VERSION = (3, 5)
diff --git a/snakemake/workflow.py b/snakemake/workflow.py
index 12c1a74..0610942 100644
--- a/snakemake/workflow.py
+++ b/snakemake/workflow.py
@@ -14,6 +14,7 @@ from itertools import filterfalse, chain
 from functools import partial
 from operator import attrgetter
 import copy
+import subprocess
 
 from snakemake.logging import logger, format_resources, format_resource_names
 from snakemake.rules import Rule, Ruleorder
@@ -24,7 +25,7 @@ from snakemake.dag import DAG
 from snakemake.scheduler import JobScheduler
 from snakemake.parser import parse
 import snakemake.io
-from snakemake.io import protected, temp, temporary, ancient, expand, dynamic, glob_wildcards, flag, not_iterable, touch, unpack
+from snakemake.io import protected, temp, temporary, ancient, expand, dynamic, glob_wildcards, flag, not_iterable, touch, unpack, local
 from snakemake.persistence import Persistence
 from snakemake.utils import update_config
 from snakemake.script import script
@@ -46,10 +47,14 @@ class Workflow:
                  debug=False,
                  use_conda=False,
                  conda_prefix=None,
+                 use_singularity=False,
+                 singularity_prefix=None,
+                 singularity_args="",
                  mode=Mode.default,
                  wrapper_prefix=None,
                  printshellcmds=False,
                  restart_times=None,
+                 attempt=1,
                  default_remote_provider=None,
                  default_remote_prefix=""):
         """
@@ -87,12 +92,19 @@ class Workflow:
         self._rulecount = 0
         self.use_conda = use_conda
         self.conda_prefix = conda_prefix
+        self.use_singularity = use_singularity
+        self.singularity_prefix = singularity_prefix
+        self.singularity_args = singularity_args
         self.mode = mode
         self.wrapper_prefix = wrapper_prefix
         self.printshellcmds = printshellcmds
         self.restart_times = restart_times
+        self.attempt = attempt
         self.default_remote_provider = default_remote_provider
         self.default_remote_prefix = default_remote_prefix
+        self.configfiles = []
+
+        self.iocache = snakemake.io.IOCache()
 
         global config
         config = copy.deepcopy(self.overwrite_config)
@@ -103,6 +115,32 @@ class Workflow:
         global rules
         rules = Rules()
 
+    def get_sources(self):
+        files = set()
+
+        # get registered sources
+        for f in self.included:
+            files.add(os.path.relpath(f))
+        for rule in self.rules:
+            if rule.script:
+                files.add(os.path.relpath(rule.script))
+        for f in self.configfiles:
+            files.add(f)
+
+        # get git-managed files
+        try:
+            out = subprocess.check_output(["git", "ls-files", "."], stderr=subprocess.PIPE)
+            for f in out.decode().split("\n"):
+                if f:
+                    files.add(os.path.relpath(f))
+        except subprocess.CalledProcessError as e:
+            if "fatal: Not a git repository" in e.stderr.decode():
+                raise WorkflowError("Error: this is not a git repository.")
+            raise WorkflowError("Error executing git:\n{}".format(
+                e.stderr.decode()))
+
+        return files
+
     @property
     def subworkflows(self):
         return self._subworkflows.values()
@@ -208,6 +246,9 @@ class Workflow:
                 printd3dag=False,
                 drmaa=None,
                 drmaa_log_dir=None,
+                kubernetes=None,
+                kubernetes_envvars=None,
+                container_image=None,
                 stats=None,
                 force_incomplete=False,
                 ignore_incomplete=False,
@@ -234,9 +275,13 @@ class Workflow:
                 keep_remote_local=False,
                 allowed_rules=None,
                 max_jobs_per_second=None,
+                max_status_checks_per_second=None,
                 greediness=1.0,
                 no_hooks=False,
-                force_use_threads=False):
+                force_use_threads=False,
+                create_envs_only=False,
+                assume_shared_fs=True,
+                cluster_status=None):
 
         self.global_resources = dict() if resources is None else resources
         self.global_resources["_cores"] = cores
@@ -253,7 +298,8 @@ class Workflow:
         else:
 
             def files(items):
-                return map(os.path.relpath, filterfalse(self.is_rule, items))
+                relpath = lambda f: f if os.path.isabs(f) else os.path.relpath(f)
+                return map(relpath, filterfalse(self.is_rule, items))
 
         if not targets:
             targets = [self.first_rule
@@ -321,6 +367,7 @@ class Workflow:
             nolock=nolock,
             dag=dag,
             conda_prefix=self.conda_prefix,
+            singularity_prefix=self.singularity_prefix,
             warn_only=dryrun or printrulegraph or printdag or summary or archive or
             list_version_changes or list_code_changes or list_input_changes or
             list_params_changes)
@@ -330,6 +377,7 @@ class Workflow:
                 self.persistence.cleanup_metadata(f)
             return True
 
+        logger.info("Building DAG of jobs...")
         dag.init()
         dag.check_dynamic()
 
@@ -382,8 +430,18 @@ class Workflow:
             # rescue globals
             self.globals.update(globals_backup)
 
-        dag.check_incomplete()
+        if not (cluster and cluster_status):
+            # no incomplete check needed because we use external jobids to handle
+            # this later in the executor
+            dag.check_incomplete()
         dag.postprocess()
+        # deactivate IOCache such that from now on we always get updated
+        # size, existence and mtime information
+        # ATTENTION: this may never be removed without really good reason.
+        # Otherwise weird things may happen.
+        self.iocache.deactivate()
+        # clear and deactivate persistence cache, from now on we want to see updates
+        self.persistence.deactivate_cache()
 
         if nodeps:
             missing_input = [f for job in dag.targetjobs for f in job.input
@@ -447,27 +505,39 @@ class Workflow:
             self.persistence.cleanup_shadow()
 
         if self.use_conda:
-            dag.create_conda_envs(dryrun=dryrun)
+            if assume_shared_fs:
+                dag.create_conda_envs(dryrun=dryrun)
+            if create_envs_only:
+                return True
+        if self.use_singularity:
+            if assume_shared_fs:
+                dag.pull_singularity_imgs(dryrun=dryrun)
 
         scheduler = JobScheduler(self, dag, cores,
                                  local_cores=local_cores,
                                  dryrun=dryrun,
                                  touch=touch,
                                  cluster=cluster,
+                                 cluster_status=cluster_status,
                                  cluster_config=cluster_config,
                                  cluster_sync=cluster_sync,
                                  jobname=jobname,
                                  max_jobs_per_second=max_jobs_per_second,
+                                 max_status_checks_per_second=max_status_checks_per_second,
                                  quiet=quiet,
                                  keepgoing=keepgoing,
                                  drmaa=drmaa,
                                  drmaa_log_dir=drmaa_log_dir,
+                                 kubernetes=kubernetes,
+                                 kubernetes_envvars=kubernetes_envvars,
+                                 container_image=container_image,
                                  printreason=printreason,
                                  printshellcmds=printshellcmds,
                                  latency_wait=latency_wait,
                                  benchmark_repeats=benchmark_repeats,
                                  greediness=greediness,
-                                 force_use_threads=force_use_threads)
+                                 force_use_threads=force_use_threads,
+                                 assume_shared_fs=assume_shared_fs)
 
         if not dryrun:
             if len(dag):
@@ -503,14 +573,17 @@ class Workflow:
             if dryrun:
                 if len(dag):
                     logger.run_info("\n".join(dag.stats()))
+                logger.remove_logfile()
             elif stats:
                 scheduler.stats.to_json(stats)
+                logger.logfile_hint()
             if not dryrun and not no_hooks:
                 self._onsuccess(logger.get_logfile())
             return True
         else:
             if not dryrun and not no_hooks:
                 self._onerror(logger.get_logfile())
+            logger.logfile_hint()
             return False
 
     @property
@@ -591,6 +664,7 @@ class Workflow:
     def configfile(self, jsonpath):
         """ Update the global config with the given dictionary. """
         global config
+        self.configfiles.append(jsonpath)
         c = snakemake.io.load_configfile(jsonpath)
         update_config(config, c)
         update_config(config, self.overwrite_config)
@@ -664,13 +738,28 @@ class Workflow:
             if ruleinfo.wrapper:
                 rule.conda_env = snakemake.wrapper.get_conda_env(
                     ruleinfo.wrapper, prefix=self.wrapper_prefix)
+                # TODO retrieve suitable singularity image
+
+            if ruleinfo.conda_env and ruleinfo.singularity_img:
+                raise RuleException("Conda and singularity directive are "
+                                    "mutually exclusive.")
+
             if ruleinfo.conda_env:
                 if not (ruleinfo.script or ruleinfo.wrapper or ruleinfo.shellcmd):
                     raise RuleException("Conda environments are only allowed "
-                        "with shell, script or wrapper directives (not with run).", rule=rule)
+                        "with shell, script or wrapper directives "
+                        "(not with run).", rule=rule)
                 if not os.path.isabs(ruleinfo.conda_env):
                     ruleinfo.conda_env = os.path.join(self.current_basedir, ruleinfo.conda_env)
                 rule.conda_env = ruleinfo.conda_env
+
+            if ruleinfo.singularity_img:
+                if not (ruleinfo.script or ruleinfo.wrapper or ruleinfo.shellcmd):
+                    raise RuleException("Singularity directive is only allowed "
+                        "with shell, script or wrapper directives "
+                        "(not with run).", rule=rule)
+                rule.singularity_img = ruleinfo.singularity_img
+
             rule.norun = ruleinfo.norun
             rule.docstring = ruleinfo.docstring
             rule.run_func = ruleinfo.func
@@ -742,6 +831,13 @@ class Workflow:
 
         return decorate
 
+    def singularity(self, singularity_img):
+        def decorate(ruleinfo):
+            ruleinfo.singularity_img = singularity_img
+            return ruleinfo
+
+        return decorate
+
     def threads(self, threads):
         def decorate(ruleinfo):
             ruleinfo.threads = threads
@@ -831,6 +927,7 @@ class RuleInfo:
         self.message = None
         self.benchmark = None
         self.conda_env = None
+        self.singularity_img = None
         self.wildcard_constraints = None
         self.threads = None
         self.shadow_depth = None
@@ -870,11 +967,19 @@ class Subworkflow:
 
     def target(self, paths):
         if not_iterable(paths):
-            return flag(os.path.join(self.workdir, paths), "subworkflow", self)
+            path = paths
+            path = (path if os.path.isabs(path)
+                         else os.path.join(self.workdir, path))
+            return flag(path, "subworkflow", self)
         return [self.target(path) for path in paths]
 
     def targets(self, dag):
-        return [f for job in dag.jobs for f in job.subworkflow_input
+        def relpath(f):
+            if f.startswith(self.workdir):
+                return os.path.relpath(f, start=self.workdir)
+            # do not adjust absolute targets outside of workdir
+            return f
+        return [relpath(f) for job in dag.jobs for f in job.subworkflow_input
                 if job.subworkflow_input[f] is self]
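Taken together, the workflow changes above wire up a per-rule ``singularity`` directive, mutually exclusive with ``conda`` and honoured when singularity support is enabled on the command line (``--use-singularity``). A minimal, hypothetical Snakefile using it could look like this; the image URL is only an example:

    rule sort:
        input:
            "data/{sample}.txt"
        output:
            "sorted/{sample}.txt"
        singularity:
            "docker://ubuntu:16.04"
        shell:
            "sort {input} > {output}"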
 
 
diff --git a/snakemake/wrapper.py b/snakemake/wrapper.py
index fc44d6f..8eff4d8 100644
--- a/snakemake/wrapper.py
+++ b/snakemake/wrapper.py
@@ -37,10 +37,26 @@ def get_conda_env(path, prefix=None):
     return path + "/environment.yaml"
 
 
-def wrapper(path, input, output, params, wildcards, threads, resources, log, config, rulename, conda_env, bench_record, prefix):
+def wrapper(path,
+            input,
+            output,
+            params,
+            wildcards,
+            threads,
+            resources,
+            log,
+            config,
+            rulename,
+            conda_env,
+            singularity_img,
+            singularity_args,
+            bench_record,
+            prefix):
     """
     Load a wrapper from https://bitbucket.org/snakemake/snakemake-wrappers under
     the given path + wrapper.py and execute it.
     """
     path = get_script(path, prefix=prefix)
-    script(path, "", input, output, params, wildcards, threads, resources, log, config, rulename, conda_env, bench_record)
+    script(path, "", input, output, params, wildcards, threads, resources,
+           log, config, rulename, conda_env, singularity_img,
+           singularity_args, bench_record)
diff --git a/test-environment.yml b/test-environment.yml
index 63b70a9..5f5f7d1 100644
--- a/test-environment.yml
+++ b/test-environment.yml
@@ -1,15 +1,14 @@
 channels:
   - bioconda
-  - r
-  - anaconda
   - conda-forge
+  - defaults
 dependencies:
-  - python >=3.3
+  - python >=3.5
   - rpy2 >=0.7.6
-  - boto
-  - moto
-  - httpretty ==0.8.10
-  - filechunkio
+  - boto3
+  - moto >=1.0.1
+  - httpretty
+  - wrapt
   - pyyaml
   - nose
   - ftputil
@@ -21,9 +20,15 @@ dependencies:
   - pytools
   - docutils
   - pandoc
+  - r-base >=3.4.1
   - r-rmarkdown
   - xorg-libxrender
   - xorg-libxext
   - xorg-libxau
   - xorg-libxdmcp
+  - xorg-libsm
   - psutil
+  - google-cloud-storage
+  - ratelimiter
+  - configargparse
+  - appdirs
diff --git a/tests/test_issue612/Snakefile b/tests/test_issue612/Snakefile
new file mode 100644
index 0000000..9d0ef1c
--- /dev/null
+++ b/tests/test_issue612/Snakefile
@@ -0,0 +1,11 @@
+rule A:
+    run: print("A")
+
+rule B:
+   run: print("B")
+
+
+rule C:
+   run:
+
+       print("C")
diff --git a/tests/test_restartable_job_cmd_exit_1/expected-results/.done b/tests/test_issue612/expected-results/.gitignore
similarity index 100%
copy from tests/test_restartable_job_cmd_exit_1/expected-results/.done
copy to tests/test_issue612/expected-results/.gitignore
diff --git a/tests/test_kubernetes/README.md b/tests/test_kubernetes/README.md
new file mode 100644
index 0000000..b8f2eb7
--- /dev/null
+++ b/tests/test_kubernetes/README.md
@@ -0,0 +1,11 @@
+# Executing this test case
+
+To run this test, you need a running Kubernetes setup.
+For Google Cloud, see [here](https://snakemake.readthedocs.io/en/stable/executable.html#google-cloud-engine).
+With that in place, you can execute the test on Google Cloud with:
+
+    snakemake --kubernetes --use-conda --default-remote-provider GS --default-remote-prefix my-bucket
+
+replacing ``my-bucket`` with your storage bucket. The same test should also work on Amazon (given that Kubernetes is set up):
+
+    snakemake --kubernetes --use-conda --default-remote-provider S3 --default-remote-prefix my-bucket
diff --git a/tests/test_kubernetes/Snakefile b/tests/test_kubernetes/Snakefile
new file mode 100644
index 0000000..6d29d8b
--- /dev/null
+++ b/tests/test_kubernetes/Snakefile
@@ -0,0 +1,29 @@
+from snakemake.remote.GS import RemoteProvider as GSRemoteProvider
+GS = GSRemoteProvider()
+
+
+rule all:
+    input:
+        "landsat-data.txt.bz2"
+
+
+rule copy:
+    input:
+        GS.remote("gcp-public-data-landsat/LC08/01/001/003/LC08_L1GT_001003_20170430_20170501_01_RT/LC08_L1GT_001003_20170430_20170501_01_RT_MTL.txt")
+    output:
+        "landsat-data.txt"
+    shell:
+        "cp {input} {output}"
+
+
+rule pack:
+    input:
+        "landsat-data.txt"
+    output:
+        "landsat-data.txt.bz2"
+    conda:
+        "envs/gzip.yaml"
+    log:
+        "logs/pack.log"
+    shell:
+        "bzip2 -c {input} > {output}; echo successful > {log}"
diff --git a/tests/test_kubernetes/envs/gzip.yaml b/tests/test_kubernetes/envs/gzip.yaml
new file mode 100644
index 0000000..1695997
--- /dev/null
+++ b/tests/test_kubernetes/envs/gzip.yaml
@@ -0,0 +1,4 @@
+channels:
+  - conda-forge
+dependencies:
+  - bzip2
diff --git a/tests/test_profile/Snakefile b/tests/test_profile/Snakefile
new file mode 100644
index 0000000..e44ef15
--- /dev/null
+++ b/tests/test_profile/Snakefile
@@ -0,0 +1,3 @@
+rule:
+    shell:
+        "snakemake --profile . -s Snakefile.internal"
diff --git a/tests/test_profile/Snakefile.internal b/tests/test_profile/Snakefile.internal
new file mode 100644
index 0000000..c8d4a32
--- /dev/null
+++ b/tests/test_profile/Snakefile.internal
@@ -0,0 +1,5 @@
+rule a:
+    output:
+        config["out"]
+    shell:
+        "touch {output}"
diff --git a/tests/test_profile/config.yaml b/tests/test_profile/config.yaml
new file mode 100644
index 0000000..9f99daf
--- /dev/null
+++ b/tests/test_profile/config.yaml
@@ -0,0 +1 @@
+configfile: "workflow-config.yaml"
diff --git a/tests/test_restartable_job_cmd_exit_1/expected-results/.done b/tests/test_profile/expected-results/test.out
similarity index 100%
copy from tests/test_restartable_job_cmd_exit_1/expected-results/.done
copy to tests/test_profile/expected-results/test.out
diff --git a/tests/test_profile/workflow-config.yaml b/tests/test_profile/workflow-config.yaml
new file mode 100644
index 0000000..a0298d0
--- /dev/null
+++ b/tests/test_profile/workflow-config.yaml
@@ -0,0 +1 @@
+out: "test.out"
diff --git a/tests/test_remote_gs/Snakefile b/tests/test_remote_gs/Snakefile
new file mode 100644
index 0000000..81c7f1a
--- /dev/null
+++ b/tests/test_remote_gs/Snakefile
@@ -0,0 +1,15 @@
+from snakemake.remote import GS
+import google.auth
+try:
+    GS = GS.RemoteProvider()
+
+    rule copy:
+        input:
+            GS.remote("gcp-public-data-landsat/LC08/01/001/003/LC08_L1GT_001003_20170430_20170501_01_RT/LC08_L1GT_001003_20170430_20170501_01_RT_MTL.txt")
+        output:
+            "landsat-data.txt"
+        shell:
+            "cp {input} {output}"
+except google.auth.exceptions.DefaultCredentialsError:
+    # ignore the test if not authenticated
+    print("skipping test_remote_gs because we are not authenticated with gcloud")
diff --git a/tests/test_remote_gs/expected-results/landsat-data.txt b/tests/test_remote_gs/expected-results/landsat-data.txt
new file mode 100644
index 0000000..9665a93
--- /dev/null
+++ b/tests/test_remote_gs/expected-results/landsat-data.txt
@@ -0,0 +1,218 @@
+GROUP = L1_METADATA_FILE
+  GROUP = METADATA_FILE_INFO
+    ORIGIN = "Image courtesy of the U.S. Geological Survey"
+    REQUEST_ID = "0501705013406_00001"
+    LANDSAT_SCENE_ID = "LC80010032017120LGN00"
+    LANDSAT_PRODUCT_ID = "LC08_L1GT_001003_20170430_20170501_01_RT"
+    COLLECTION_NUMBER = 01
+    FILE_DATE = 2017-05-01T16:00:24Z
+    STATION_ID = "LGN"
+    PROCESSING_SOFTWARE_VERSION = "LPGS_2.7.0"
+  END_GROUP = METADATA_FILE_INFO
+  GROUP = PRODUCT_METADATA
+    DATA_TYPE = "L1GT"
+    COLLECTION_CATEGORY = "RT"
+    ELEVATION_SOURCE = "GLS2000"
+    OUTPUT_FORMAT = "GEOTIFF"
+    SPACECRAFT_ID = "LANDSAT_8"
+    SENSOR_ID = "OLI_TIRS"
+    WRS_PATH = 1
+    WRS_ROW = 3
+    NADIR_OFFNADIR = "NADIR"
+    TARGET_WRS_PATH = 1
+    TARGET_WRS_ROW = 3
+    DATE_ACQUIRED = 2017-04-30
+    SCENE_CENTER_TIME = "14:07:16.5180850Z"
+    CORNER_UL_LAT_PRODUCT = 80.21528
+    CORNER_UL_LON_PRODUCT = -17.96312
+    CORNER_UR_LAT_PRODUCT = 80.28798
+    CORNER_UR_LON_PRODUCT = -3.45901
+    CORNER_LL_LAT_PRODUCT = 77.79053
+    CORNER_LL_LON_PRODUCT = -16.19251
+    CORNER_LR_LAT_PRODUCT = 77.84841
+    CORNER_LR_LON_PRODUCT = -4.56164
+    CORNER_UL_PROJECTION_X_PRODUCT = 330600.000
+    CORNER_UL_PROJECTION_Y_PRODUCT = 8918700.000
+    CORNER_UR_PROJECTION_X_PRODUCT = 604200.000
+    CORNER_UR_PROJECTION_Y_PRODUCT = 8918700.000
+    CORNER_LL_PROJECTION_X_PRODUCT = 330600.000
+    CORNER_LL_PROJECTION_Y_PRODUCT = 8645400.000
+    CORNER_LR_PROJECTION_X_PRODUCT = 604200.000
+    CORNER_LR_PROJECTION_Y_PRODUCT = 8645400.000
+    PANCHROMATIC_LINES = 18221
+    PANCHROMATIC_SAMPLES = 18241
+    REFLECTIVE_LINES = 9111
+    REFLECTIVE_SAMPLES = 9121
+    THERMAL_LINES = 9111
+    THERMAL_SAMPLES = 9121
+    FILE_NAME_BAND_1 = "LC08_L1GT_001003_20170430_20170501_01_RT_B1.TIF"
+    FILE_NAME_BAND_2 = "LC08_L1GT_001003_20170430_20170501_01_RT_B2.TIF"
+    FILE_NAME_BAND_3 = "LC08_L1GT_001003_20170430_20170501_01_RT_B3.TIF"
+    FILE_NAME_BAND_4 = "LC08_L1GT_001003_20170430_20170501_01_RT_B4.TIF"
+    FILE_NAME_BAND_5 = "LC08_L1GT_001003_20170430_20170501_01_RT_B5.TIF"
+    FILE_NAME_BAND_6 = "LC08_L1GT_001003_20170430_20170501_01_RT_B6.TIF"
+    FILE_NAME_BAND_7 = "LC08_L1GT_001003_20170430_20170501_01_RT_B7.TIF"
+    FILE_NAME_BAND_8 = "LC08_L1GT_001003_20170430_20170501_01_RT_B8.TIF"
+    FILE_NAME_BAND_9 = "LC08_L1GT_001003_20170430_20170501_01_RT_B9.TIF"
+    FILE_NAME_BAND_10 = "LC08_L1GT_001003_20170430_20170501_01_RT_B10.TIF"
+    FILE_NAME_BAND_11 = "LC08_L1GT_001003_20170430_20170501_01_RT_B11.TIF"
+    FILE_NAME_BAND_QUALITY = "LC08_L1GT_001003_20170430_20170501_01_RT_BQA.TIF"
+    ANGLE_COEFFICIENT_FILE_NAME = "LC08_L1GT_001003_20170430_20170501_01_RT_ANG.txt"
+    METADATA_FILE_NAME = "LC08_L1GT_001003_20170430_20170501_01_RT_MTL.txt"
+    CPF_NAME = "LC08CPF_20170401_20170630_01.02"
+    BPF_NAME_OLI = "LO8BPF20170430140614_20170430144404.01"
+    BPF_NAME_TIRS = "LT8BPF20170426235522_20170427000359.01"
+    RLUT_FILE_NAME = "LC08RLUT_20150303_20431231_01_12.h5"
+  END_GROUP = PRODUCT_METADATA
+  GROUP = IMAGE_ATTRIBUTES
+    CLOUD_COVER = 24.32
+    CLOUD_COVER_LAND = -1
+    IMAGE_QUALITY_OLI = 9
+    IMAGE_QUALITY_TIRS = 7
+    TIRS_SSM_MODEL = "PRELIMINARY"
+    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
+    TIRS_STRAY_LIGHT_CORRECTION_SOURCE = "TIRS"
+    ROLL_ANGLE = -0.001
+    SUN_AZIMUTH = -156.47580997
+    SUN_ELEVATION = 24.99099132
+    EARTH_SUN_DISTANCE = 1.0074392
+    SATURATION_BAND_1 = "N"
+    SATURATION_BAND_2 = "N"
+    SATURATION_BAND_3 = "N"
+    SATURATION_BAND_4 = "N"
+    SATURATION_BAND_5 = "N"
+    SATURATION_BAND_6 = "N"
+    SATURATION_BAND_7 = "N"
+    SATURATION_BAND_8 = "N"
+    SATURATION_BAND_9 = "N"
+    TRUNCATION_OLI = "UPPER"
+  END_GROUP = IMAGE_ATTRIBUTES
+  GROUP = MIN_MAX_RADIANCE
+    RADIANCE_MAXIMUM_BAND_1 = 748.87909
+    RADIANCE_MINIMUM_BAND_1 = -61.84268
+    RADIANCE_MAXIMUM_BAND_2 = 766.86133
+    RADIANCE_MINIMUM_BAND_2 = -63.32766
+    RADIANCE_MAXIMUM_BAND_3 = 706.65613
+    RADIANCE_MINIMUM_BAND_3 = -58.35589
+    RADIANCE_MAXIMUM_BAND_4 = 595.89227
+    RADIANCE_MINIMUM_BAND_4 = -49.20898
+    RADIANCE_MAXIMUM_BAND_5 = 364.65634
+    RADIANCE_MINIMUM_BAND_5 = -30.11344
+    RADIANCE_MAXIMUM_BAND_6 = 90.68671
+    RADIANCE_MINIMUM_BAND_6 = -7.48894
+    RADIANCE_MAXIMUM_BAND_7 = 30.56628
+    RADIANCE_MINIMUM_BAND_7 = -2.52417
+    RADIANCE_MAXIMUM_BAND_8 = 674.38605
+    RADIANCE_MINIMUM_BAND_8 = -55.69102
+    RADIANCE_MAXIMUM_BAND_9 = 142.51598
+    RADIANCE_MINIMUM_BAND_9 = -11.76902
+    RADIANCE_MAXIMUM_BAND_10 = 22.00180
+    RADIANCE_MINIMUM_BAND_10 = 0.10033
+    RADIANCE_MAXIMUM_BAND_11 = 22.00180
+    RADIANCE_MINIMUM_BAND_11 = 0.10033
+  END_GROUP = MIN_MAX_RADIANCE
+  GROUP = MIN_MAX_REFLECTANCE
+    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
+  END_GROUP = MIN_MAX_REFLECTANCE
+  GROUP = MIN_MAX_PIXEL_VALUE
+    QUANTIZE_CAL_MAX_BAND_1 = 65535
+    QUANTIZE_CAL_MIN_BAND_1 = 1
+    QUANTIZE_CAL_MAX_BAND_2 = 65535
+    QUANTIZE_CAL_MIN_BAND_2 = 1
+    QUANTIZE_CAL_MAX_BAND_3 = 65535
+    QUANTIZE_CAL_MIN_BAND_3 = 1
+    QUANTIZE_CAL_MAX_BAND_4 = 65535
+    QUANTIZE_CAL_MIN_BAND_4 = 1
+    QUANTIZE_CAL_MAX_BAND_5 = 65535
+    QUANTIZE_CAL_MIN_BAND_5 = 1
+    QUANTIZE_CAL_MAX_BAND_6 = 65535
+    QUANTIZE_CAL_MIN_BAND_6 = 1
+    QUANTIZE_CAL_MAX_BAND_7 = 65535
+    QUANTIZE_CAL_MIN_BAND_7 = 1
+    QUANTIZE_CAL_MAX_BAND_8 = 65535
+    QUANTIZE_CAL_MIN_BAND_8 = 1
+    QUANTIZE_CAL_MAX_BAND_9 = 65535
+    QUANTIZE_CAL_MIN_BAND_9 = 1
+    QUANTIZE_CAL_MAX_BAND_10 = 65535
+    QUANTIZE_CAL_MIN_BAND_10 = 1
+    QUANTIZE_CAL_MAX_BAND_11 = 65535
+    QUANTIZE_CAL_MIN_BAND_11 = 1
+  END_GROUP = MIN_MAX_PIXEL_VALUE
+  GROUP = RADIOMETRIC_RESCALING
+    RADIANCE_MULT_BAND_1 = 1.2371E-02
+    RADIANCE_MULT_BAND_2 = 1.2668E-02
+    RADIANCE_MULT_BAND_3 = 1.1674E-02
+    RADIANCE_MULT_BAND_4 = 9.8438E-03
+    RADIANCE_MULT_BAND_5 = 6.0239E-03
+    RADIANCE_MULT_BAND_6 = 1.4981E-03
+    RADIANCE_MULT_BAND_7 = 5.0494E-04
+    RADIANCE_MULT_BAND_8 = 1.1140E-02
+    RADIANCE_MULT_BAND_9 = 2.3543E-03
+    RADIANCE_MULT_BAND_10 = 3.3420E-04
+    RADIANCE_MULT_BAND_11 = 3.3420E-04
+    RADIANCE_ADD_BAND_1 = -61.85505
+    RADIANCE_ADD_BAND_2 = -63.34032
+    RADIANCE_ADD_BAND_3 = -58.36757
+    RADIANCE_ADD_BAND_4 = -49.21882
+    RADIANCE_ADD_BAND_5 = -30.11946
+    RADIANCE_ADD_BAND_6 = -7.49044
+    RADIANCE_ADD_BAND_7 = -2.52468
+    RADIANCE_ADD_BAND_8 = -55.70216
+    RADIANCE_ADD_BAND_9 = -11.77137
+    RADIANCE_ADD_BAND_10 = 0.10000
+    RADIANCE_ADD_BAND_11 = 0.10000
+    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
+    REFLECTANCE_ADD_BAND_1 = -0.100000
+    REFLECTANCE_ADD_BAND_2 = -0.100000
+    REFLECTANCE_ADD_BAND_3 = -0.100000
+    REFLECTANCE_ADD_BAND_4 = -0.100000
+    REFLECTANCE_ADD_BAND_5 = -0.100000
+    REFLECTANCE_ADD_BAND_6 = -0.100000
+    REFLECTANCE_ADD_BAND_7 = -0.100000
+    REFLECTANCE_ADD_BAND_8 = -0.100000
+    REFLECTANCE_ADD_BAND_9 = -0.100000
+  END_GROUP = RADIOMETRIC_RESCALING
+  GROUP = TIRS_THERMAL_CONSTANTS
+    K1_CONSTANT_BAND_10 = 774.8853
+    K2_CONSTANT_BAND_10 = 1321.0789
+    K1_CONSTANT_BAND_11 = 480.8883
+    K2_CONSTANT_BAND_11 = 1201.1442
+  END_GROUP = TIRS_THERMAL_CONSTANTS
+  GROUP = PROJECTION_PARAMETERS
+    MAP_PROJECTION = "UTM"
+    DATUM = "WGS84"
+    ELLIPSOID = "WGS84"
+    UTM_ZONE = 29
+    GRID_CELL_SIZE_PANCHROMATIC = 15.00
+    GRID_CELL_SIZE_REFLECTIVE = 30.00
+    GRID_CELL_SIZE_THERMAL = 30.00
+    ORIENTATION = "NORTH_UP"
+    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
+  END_GROUP = PROJECTION_PARAMETERS
+END_GROUP = L1_METADATA_FILE
+END
diff --git a/tests/test_remote_gs/landsat-data.txt b/tests/test_remote_gs/landsat-data.txt
new file mode 100644
index 0000000..9665a93
--- /dev/null
+++ b/tests/test_remote_gs/landsat-data.txt
@@ -0,0 +1,218 @@
+GROUP = L1_METADATA_FILE
+  GROUP = METADATA_FILE_INFO
+    ORIGIN = "Image courtesy of the U.S. Geological Survey"
+    REQUEST_ID = "0501705013406_00001"
+    LANDSAT_SCENE_ID = "LC80010032017120LGN00"
+    LANDSAT_PRODUCT_ID = "LC08_L1GT_001003_20170430_20170501_01_RT"
+    COLLECTION_NUMBER = 01
+    FILE_DATE = 2017-05-01T16:00:24Z
+    STATION_ID = "LGN"
+    PROCESSING_SOFTWARE_VERSION = "LPGS_2.7.0"
+  END_GROUP = METADATA_FILE_INFO
+  GROUP = PRODUCT_METADATA
+    DATA_TYPE = "L1GT"
+    COLLECTION_CATEGORY = "RT"
+    ELEVATION_SOURCE = "GLS2000"
+    OUTPUT_FORMAT = "GEOTIFF"
+    SPACECRAFT_ID = "LANDSAT_8"
+    SENSOR_ID = "OLI_TIRS"
+    WRS_PATH = 1
+    WRS_ROW = 3
+    NADIR_OFFNADIR = "NADIR"
+    TARGET_WRS_PATH = 1
+    TARGET_WRS_ROW = 3
+    DATE_ACQUIRED = 2017-04-30
+    SCENE_CENTER_TIME = "14:07:16.5180850Z"
+    CORNER_UL_LAT_PRODUCT = 80.21528
+    CORNER_UL_LON_PRODUCT = -17.96312
+    CORNER_UR_LAT_PRODUCT = 80.28798
+    CORNER_UR_LON_PRODUCT = -3.45901
+    CORNER_LL_LAT_PRODUCT = 77.79053
+    CORNER_LL_LON_PRODUCT = -16.19251
+    CORNER_LR_LAT_PRODUCT = 77.84841
+    CORNER_LR_LON_PRODUCT = -4.56164
+    CORNER_UL_PROJECTION_X_PRODUCT = 330600.000
+    CORNER_UL_PROJECTION_Y_PRODUCT = 8918700.000
+    CORNER_UR_PROJECTION_X_PRODUCT = 604200.000
+    CORNER_UR_PROJECTION_Y_PRODUCT = 8918700.000
+    CORNER_LL_PROJECTION_X_PRODUCT = 330600.000
+    CORNER_LL_PROJECTION_Y_PRODUCT = 8645400.000
+    CORNER_LR_PROJECTION_X_PRODUCT = 604200.000
+    CORNER_LR_PROJECTION_Y_PRODUCT = 8645400.000
+    PANCHROMATIC_LINES = 18221
+    PANCHROMATIC_SAMPLES = 18241
+    REFLECTIVE_LINES = 9111
+    REFLECTIVE_SAMPLES = 9121
+    THERMAL_LINES = 9111
+    THERMAL_SAMPLES = 9121
+    FILE_NAME_BAND_1 = "LC08_L1GT_001003_20170430_20170501_01_RT_B1.TIF"
+    FILE_NAME_BAND_2 = "LC08_L1GT_001003_20170430_20170501_01_RT_B2.TIF"
+    FILE_NAME_BAND_3 = "LC08_L1GT_001003_20170430_20170501_01_RT_B3.TIF"
+    FILE_NAME_BAND_4 = "LC08_L1GT_001003_20170430_20170501_01_RT_B4.TIF"
+    FILE_NAME_BAND_5 = "LC08_L1GT_001003_20170430_20170501_01_RT_B5.TIF"
+    FILE_NAME_BAND_6 = "LC08_L1GT_001003_20170430_20170501_01_RT_B6.TIF"
+    FILE_NAME_BAND_7 = "LC08_L1GT_001003_20170430_20170501_01_RT_B7.TIF"
+    FILE_NAME_BAND_8 = "LC08_L1GT_001003_20170430_20170501_01_RT_B8.TIF"
+    FILE_NAME_BAND_9 = "LC08_L1GT_001003_20170430_20170501_01_RT_B9.TIF"
+    FILE_NAME_BAND_10 = "LC08_L1GT_001003_20170430_20170501_01_RT_B10.TIF"
+    FILE_NAME_BAND_11 = "LC08_L1GT_001003_20170430_20170501_01_RT_B11.TIF"
+    FILE_NAME_BAND_QUALITY = "LC08_L1GT_001003_20170430_20170501_01_RT_BQA.TIF"
+    ANGLE_COEFFICIENT_FILE_NAME = "LC08_L1GT_001003_20170430_20170501_01_RT_ANG.txt"
+    METADATA_FILE_NAME = "LC08_L1GT_001003_20170430_20170501_01_RT_MTL.txt"
+    CPF_NAME = "LC08CPF_20170401_20170630_01.02"
+    BPF_NAME_OLI = "LO8BPF20170430140614_20170430144404.01"
+    BPF_NAME_TIRS = "LT8BPF20170426235522_20170427000359.01"
+    RLUT_FILE_NAME = "LC08RLUT_20150303_20431231_01_12.h5"
+  END_GROUP = PRODUCT_METADATA
+  GROUP = IMAGE_ATTRIBUTES
+    CLOUD_COVER = 24.32
+    CLOUD_COVER_LAND = -1
+    IMAGE_QUALITY_OLI = 9
+    IMAGE_QUALITY_TIRS = 7
+    TIRS_SSM_MODEL = "PRELIMINARY"
+    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
+    TIRS_STRAY_LIGHT_CORRECTION_SOURCE = "TIRS"
+    ROLL_ANGLE = -0.001
+    SUN_AZIMUTH = -156.47580997
+    SUN_ELEVATION = 24.99099132
+    EARTH_SUN_DISTANCE = 1.0074392
+    SATURATION_BAND_1 = "N"
+    SATURATION_BAND_2 = "N"
+    SATURATION_BAND_3 = "N"
+    SATURATION_BAND_4 = "N"
+    SATURATION_BAND_5 = "N"
+    SATURATION_BAND_6 = "N"
+    SATURATION_BAND_7 = "N"
+    SATURATION_BAND_8 = "N"
+    SATURATION_BAND_9 = "N"
+    TRUNCATION_OLI = "UPPER"
+  END_GROUP = IMAGE_ATTRIBUTES
+  GROUP = MIN_MAX_RADIANCE
+    RADIANCE_MAXIMUM_BAND_1 = 748.87909
+    RADIANCE_MINIMUM_BAND_1 = -61.84268
+    RADIANCE_MAXIMUM_BAND_2 = 766.86133
+    RADIANCE_MINIMUM_BAND_2 = -63.32766
+    RADIANCE_MAXIMUM_BAND_3 = 706.65613
+    RADIANCE_MINIMUM_BAND_3 = -58.35589
+    RADIANCE_MAXIMUM_BAND_4 = 595.89227
+    RADIANCE_MINIMUM_BAND_4 = -49.20898
+    RADIANCE_MAXIMUM_BAND_5 = 364.65634
+    RADIANCE_MINIMUM_BAND_5 = -30.11344
+    RADIANCE_MAXIMUM_BAND_6 = 90.68671
+    RADIANCE_MINIMUM_BAND_6 = -7.48894
+    RADIANCE_MAXIMUM_BAND_7 = 30.56628
+    RADIANCE_MINIMUM_BAND_7 = -2.52417
+    RADIANCE_MAXIMUM_BAND_8 = 674.38605
+    RADIANCE_MINIMUM_BAND_8 = -55.69102
+    RADIANCE_MAXIMUM_BAND_9 = 142.51598
+    RADIANCE_MINIMUM_BAND_9 = -11.76902
+    RADIANCE_MAXIMUM_BAND_10 = 22.00180
+    RADIANCE_MINIMUM_BAND_10 = 0.10033
+    RADIANCE_MAXIMUM_BAND_11 = 22.00180
+    RADIANCE_MINIMUM_BAND_11 = 0.10033
+  END_GROUP = MIN_MAX_RADIANCE
+  GROUP = MIN_MAX_REFLECTANCE
+    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
+    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
+    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
+  END_GROUP = MIN_MAX_REFLECTANCE
+  GROUP = MIN_MAX_PIXEL_VALUE
+    QUANTIZE_CAL_MAX_BAND_1 = 65535
+    QUANTIZE_CAL_MIN_BAND_1 = 1
+    QUANTIZE_CAL_MAX_BAND_2 = 65535
+    QUANTIZE_CAL_MIN_BAND_2 = 1
+    QUANTIZE_CAL_MAX_BAND_3 = 65535
+    QUANTIZE_CAL_MIN_BAND_3 = 1
+    QUANTIZE_CAL_MAX_BAND_4 = 65535
+    QUANTIZE_CAL_MIN_BAND_4 = 1
+    QUANTIZE_CAL_MAX_BAND_5 = 65535
+    QUANTIZE_CAL_MIN_BAND_5 = 1
+    QUANTIZE_CAL_MAX_BAND_6 = 65535
+    QUANTIZE_CAL_MIN_BAND_6 = 1
+    QUANTIZE_CAL_MAX_BAND_7 = 65535
+    QUANTIZE_CAL_MIN_BAND_7 = 1
+    QUANTIZE_CAL_MAX_BAND_8 = 65535
+    QUANTIZE_CAL_MIN_BAND_8 = 1
+    QUANTIZE_CAL_MAX_BAND_9 = 65535
+    QUANTIZE_CAL_MIN_BAND_9 = 1
+    QUANTIZE_CAL_MAX_BAND_10 = 65535
+    QUANTIZE_CAL_MIN_BAND_10 = 1
+    QUANTIZE_CAL_MAX_BAND_11 = 65535
+    QUANTIZE_CAL_MIN_BAND_11 = 1
+  END_GROUP = MIN_MAX_PIXEL_VALUE
+  GROUP = RADIOMETRIC_RESCALING
+    RADIANCE_MULT_BAND_1 = 1.2371E-02
+    RADIANCE_MULT_BAND_2 = 1.2668E-02
+    RADIANCE_MULT_BAND_3 = 1.1674E-02
+    RADIANCE_MULT_BAND_4 = 9.8438E-03
+    RADIANCE_MULT_BAND_5 = 6.0239E-03
+    RADIANCE_MULT_BAND_6 = 1.4981E-03
+    RADIANCE_MULT_BAND_7 = 5.0494E-04
+    RADIANCE_MULT_BAND_8 = 1.1140E-02
+    RADIANCE_MULT_BAND_9 = 2.3543E-03
+    RADIANCE_MULT_BAND_10 = 3.3420E-04
+    RADIANCE_MULT_BAND_11 = 3.3420E-04
+    RADIANCE_ADD_BAND_1 = -61.85505
+    RADIANCE_ADD_BAND_2 = -63.34032
+    RADIANCE_ADD_BAND_3 = -58.36757
+    RADIANCE_ADD_BAND_4 = -49.21882
+    RADIANCE_ADD_BAND_5 = -30.11946
+    RADIANCE_ADD_BAND_6 = -7.49044
+    RADIANCE_ADD_BAND_7 = -2.52468
+    RADIANCE_ADD_BAND_8 = -55.70216
+    RADIANCE_ADD_BAND_9 = -11.77137
+    RADIANCE_ADD_BAND_10 = 0.10000
+    RADIANCE_ADD_BAND_11 = 0.10000
+    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
+    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
+    REFLECTANCE_ADD_BAND_1 = -0.100000
+    REFLECTANCE_ADD_BAND_2 = -0.100000
+    REFLECTANCE_ADD_BAND_3 = -0.100000
+    REFLECTANCE_ADD_BAND_4 = -0.100000
+    REFLECTANCE_ADD_BAND_5 = -0.100000
+    REFLECTANCE_ADD_BAND_6 = -0.100000
+    REFLECTANCE_ADD_BAND_7 = -0.100000
+    REFLECTANCE_ADD_BAND_8 = -0.100000
+    REFLECTANCE_ADD_BAND_9 = -0.100000
+  END_GROUP = RADIOMETRIC_RESCALING
+  GROUP = TIRS_THERMAL_CONSTANTS
+    K1_CONSTANT_BAND_10 = 774.8853
+    K2_CONSTANT_BAND_10 = 1321.0789
+    K1_CONSTANT_BAND_11 = 480.8883
+    K2_CONSTANT_BAND_11 = 1201.1442
+  END_GROUP = TIRS_THERMAL_CONSTANTS
+  GROUP = PROJECTION_PARAMETERS
+    MAP_PROJECTION = "UTM"
+    DATUM = "WGS84"
+    ELLIPSOID = "WGS84"
+    UTM_ZONE = 29
+    GRID_CELL_SIZE_PANCHROMATIC = 15.00
+    GRID_CELL_SIZE_REFLECTIVE = 30.00
+    GRID_CELL_SIZE_THERMAL = 30.00
+    ORIENTATION = "NORTH_UP"
+    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
+  END_GROUP = PROJECTION_PARAMETERS
+END_GROUP = L1_METADATA_FILE
+END
diff --git a/tests/test_remote_log/Snakefile b/tests/test_remote_log/Snakefile
new file mode 100644
index 0000000..7c42097
--- /dev/null
+++ b/tests/test_remote_log/Snakefile
@@ -0,0 +1,17 @@
+#import re, os, sys
+
+# clean up moto state
+shell("rm -f motoState.p")
+
+from snakemake.remote.S3Mocked import RemoteProvider as S3RemoteProvider
+
+S3 = S3RemoteProvider()
+
+
+rule test:
+    input:
+        S3.remote('test-remote-bucket/test.txt')
+    log:
+        S3.remote("test-remote-bucket/testlog.txt")
+    shell:
+        "exit 1"
diff --git a/tests/test_remote_log/expected-results/motoState.p b/tests/test_remote_log/expected-results/motoState.p
new file mode 100644
index 0000000..9693af9
Binary files /dev/null and b/tests/test_remote_log/expected-results/motoState.p differ
diff --git a/tests/test_restartable_job_cmd_exit_1/expected-results/.done b/tests/test_remote_log/test.txt
similarity index 100%
copy from tests/test_restartable_job_cmd_exit_1/expected-results/.done
copy to tests/test_remote_log/test.txt
diff --git a/tests/test_remote_ncbi/Snakefile b/tests/test_remote_ncbi/Snakefile
index 96edbb4..58f426e 100644
--- a/tests/test_remote_ncbi/Snakefile
+++ b/tests/test_remote_ncbi/Snakefile
@@ -19,8 +19,7 @@ rule download_and_count:
         # Since *.fasta files could come from several different databases, specify the database here.
         # if the input files are ambiguous, the provider will alert the user with possible options
         NCBI.remote(input_files, db="nuccore", seq_start=5000)
-
     output:
         "sizes.txt"
-    run:
-        shell("wc -c {input} > sizes.txt")
+    shell:
+        r"wc -c {input} | sed 's/^[ \t]*//' > sizes.txt"
diff --git a/tests/test_remote_ncbi/expected-results/sizes.txt b/tests/test_remote_ncbi/expected-results/sizes.txt
index b74d8a1..297cbc0 100644
--- a/tests/test_remote_ncbi/expected-results/sizes.txt
+++ b/tests/test_remote_ncbi/expected-results/sizes.txt
@@ -1,4 +1,4 @@
-    5801 KY785484.1.fasta
-    5255 KY785481.1.fasta
-    5318 KY785480.1.fasta
-   16374 total
+5801 KY785484.1.fasta
+5255 KY785481.1.fasta
+5318 KY785480.1.fasta
+16374 total
diff --git a/tests/test_remote_ncbi_simple/Snakefile b/tests/test_remote_ncbi_simple/Snakefile
index c66b271..fc35c0f 100644
--- a/tests/test_remote_ncbi_simple/Snakefile
+++ b/tests/test_remote_ncbi_simple/Snakefile
@@ -10,5 +10,5 @@ rule download_and_count:
         NCBI.remote("KY785484.1.fasta", db="nuccore")
     output:
         "sizes.txt"
-    run:
-        shell("wc -c {input} > sizes.txt")
+    shell:
+        r"wc -c {input} | sed 's/^[ \t]*//' > sizes.txt"
diff --git a/tests/test_remote_ncbi_simple/expected-results/sizes.txt b/tests/test_remote_ncbi_simple/expected-results/sizes.txt
index 738724a..17f225d 100644
--- a/tests/test_remote_ncbi_simple/expected-results/sizes.txt
+++ b/tests/test_remote_ncbi_simple/expected-results/sizes.txt
@@ -1 +1 @@
-   10861 KY785484.1.fasta
+10861 KY785484.1.fasta
diff --git a/tests/test_restartable_job_cmd_exit_1/Snakefile b/tests/test_restartable_job_cmd_exit_1/Snakefile
index ca78245..86c51d9 100644
--- a/tests/test_restartable_job_cmd_exit_1/Snakefile
+++ b/tests/test_restartable_job_cmd_exit_1/Snakefile
@@ -3,17 +3,20 @@ localrules: all
 shell.executable('bash')
 
 rule all:
-	input: '.done'
+    input: '.done'
 
 rule fails_sometimes:
-	output:
-		'.done'
-	shell:
-		r"""
-		if [[ ! -f ".first" ]]; then
-			touch .first
-			exit 1
-		else
-			touch .done
-		fi
-		"""
+    output:
+        '.done'
+    resources:
+        mem=lambda wildcards, attempt: 100 * attempt
+    shell:
+        r"""
+        echo {resources.mem}
+        if [[ ! -f ".first" ]]; then
+            touch .first
+            exit 1
+        else
+            echo {resources.mem} > {output}
+        fi
+        """
diff --git a/tests/test_restartable_job_cmd_exit_1/expected-results/.done b/tests/test_restartable_job_cmd_exit_1/expected-results/.done
index e69de29..08839f6 100644
--- a/tests/test_restartable_job_cmd_exit_1/expected-results/.done
+++ b/tests/test_restartable_job_cmd_exit_1/expected-results/.done
@@ -0,0 +1 @@
+200
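
The reworked restartable-job test above attaches an attempt-dependent resource to the failing rule, so after one restart (the tests further below pass restart_times=1) the second attempt writes 200 into .done, which is exactly what the new expected-results file contains. Reduced to its essentials, the mechanism looks roughly like this (a sketch, not part of the patch):

    # 'attempt' starts at 1 and increases by one on each restart, so mem is 100 on
    # the first try and 200 after one restart; the successful attempt records that
    # value in the output file.
    rule fails_sometimes:
        output:
            ".done"
        resources:
            mem=lambda wildcards, attempt: 100 * attempt
        shell:
            "echo {resources.mem} > {output}"
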
diff --git a/tests/test_script/expected-results/test.html b/tests/test_script/expected-results/test.html
index 72f2b27..24fd457 100644
--- a/tests/test_script/expected-results/test.html
+++ b/tests/test_script/expected-results/test.html
@@ -4,10 +4,10 @@
 
 <head>
 
-<meta charset="utf-8">
+<meta charset="utf-8" />
 <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
 <meta name="generator" content="pandoc" />
-<meta name="viewport" content="width=device-width, initial-scale=1">
+
 
 <meta name="author" content="Mattias" />
 
@@ -16,37 +16,20 @@
 <title>Test Report</title>
 
 <script src="data:application/x-javascript;base64,LyohIGpRdWVyeSB2MS4xMS4zIHwgKGMpIDIwMDUsIDIwMTUgalF1ZXJ5IEZvdW5kYXRpb24sIEluYy4gfCBqcXVlcnkub3JnL2xpY2Vuc2UgKi8KIWZ1bmN0aW9uKGEsYil7Im9iamVjdCI9PXR5cGVvZiBtb2R1bGUmJiJvYmplY3QiPT10eXBlb2YgbW9kdWxlLmV4cG9ydHM/bW9kdWxlLmV4cG9ydHM9YS5kb2N1bWVudD9iKGEsITApOmZ1bmN0aW9uKGEpe2lmKCFhLmRvY3VtZW50KXRocm93IG5ldyBFcnJvcigialF1ZXJ5IHJlcXVpcmVzIGEgd2luZG93IHdpdGggYSBkb2N1bWVudCIpO3JldHVybiBiKGEpfTpiKGEpfSgidW5kZWZpbmVkIiE9dHlwZW9mIHdpbmRvdz93aW5kb3c6dG [...]
-<script src="data:application/x-javascript;base64,LyohIGpRdWVyeSBVSSAtIHYxLjExLjQgLSAyMDE2LTAxLTA1CiogaHR0cDovL2pxdWVyeXVpLmNvbQoqIEluY2x1ZGVzOiBjb3JlLmpzLCB3aWRnZXQuanMsIG1vdXNlLmpzLCBwb3NpdGlvbi5qcywgZHJhZ2dhYmxlLmpzLCBkcm9wcGFibGUuanMsIHJlc2l6YWJsZS5qcywgc2VsZWN0YWJsZS5qcywgc29ydGFibGUuanMsIGFjY29yZGlvbi5qcywgYXV0b2NvbXBsZXRlLmpzLCBidXR0b24uanMsIGRpYWxvZy5qcywgbWVudS5qcywgcHJvZ3Jlc3NiYXIuanMsIHNlbGVjdG1lbnUuanMsIHNsaWRlci5qcywgc3Bpbm5lci5qcywgdGFicy5qcywgdG9vbHRpcC5qcywgZWZmZWN0LmpzLC [...]
-<link href="data:text/css;charset=utf-8,%0A%0A%2Etocify%20%7B%0Awidth%3A%2020%25%3B%0Amax%2Dheight%3A%2090%25%3B%0Aoverflow%3A%20auto%3B%0Amargin%2Dleft%3A%202%25%3B%0Aposition%3A%20fixed%3B%0Aborder%3A%201px%20solid%20%23ccc%3B%0Awebkit%2Dborder%2Dradius%3A%206px%3B%0Amoz%2Dborder%2Dradius%3A%206px%3B%0Aborder%2Dradius%3A%206px%3B%0A%7D%0A%0A%2Etocify%20ul%2C%20%2Etocify%20li%20%7B%0Alist%2Dstyle%3A%20none%3B%0Amargin%3A%200%3B%0Apadding%3A%200%3B%0Aborder%3A%20none%3B%0Aline%2Dheight%3 [...]
-<script src="data:application/x-javascript;base64,LyoganF1ZXJ5IFRvY2lmeSAtIHYxLjkuMSAtIDIwMTMtMTAtMjIKICogaHR0cDovL3d3dy5ncmVnZnJhbmtvLmNvbS9qcXVlcnkudG9jaWZ5LmpzLwogKiBDb3B5cmlnaHQgKGMpIDIwMTMgR3JlZyBGcmFua287IExpY2Vuc2VkIE1JVCAqLwoKLy8gSW1tZWRpYXRlbHktSW52b2tlZCBGdW5jdGlvbiBFeHByZXNzaW9uIChJSUZFKSBbQmVuIEFsbWFuIEJsb2cgUG9zdF0oaHR0cDovL2JlbmFsbWFuLmNvbS9uZXdzLzIwMTAvMTEvaW1tZWRpYXRlbHktaW52b2tlZC1mdW5jdGlvbi1leHByZXNzaW9uLykgdGhhdCBjYWxscyBhbm90aGVyIElJRkUgdGhhdCBjb250YWlucyBhbGwgb2YgdG [...]
 <meta name="viewport" content="width=device-width, initial-scale=1" />
-<link href="data:text/css;charset=utf-8,html%7Bfont%2Dfamily%3Asans%2Dserif%3B%2Dwebkit%2Dtext%2Dsize%2Dadjust%3A100%25%3B%2Dms%2Dtext%2Dsize%2Dadjust%3A100%25%7Dbody%7Bmargin%3A0%7Darticle%2Caside%2Cdetails%2Cfigcaption%2Cfigure%2Cfooter%2Cheader%2Chgroup%2Cmain%2Cmenu%2Cnav%2Csection%2Csummary%7Bdisplay%3Ablock%7Daudio%2Ccanvas%2Cprogress%2Cvideo%7Bdisplay%3Ainline%2Dblock%3Bvertical%2Dalign%3Abaseline%7Daudio%3Anot%28%5Bcontrols%5D%29%7Bdisplay%3Anone%3Bheight%3A0%7D%5Bhidden%5D%2Ctem [...]
+<link href="data:text/css;charset=utf-8,html%7Bfont%2Dfamily%3Asans%2Dserif%3B%2Dwebkit%2Dtext%2Dsize%2Dadjust%3A100%25%3B%2Dms%2Dtext%2Dsize%2Dadjust%3A100%25%7Dbody%7Bmargin%3A0%7Darticle%2Caside%2Cdetails%2Cfigcaption%2Cfigure%2Cfooter%2Cheader%2Chgroup%2Cmain%2Cmenu%2Cnav%2Csection%2Csummary%7Bdisplay%3Ablock%7Daudio%2Ccanvas%2Cprogress%2Cvideo%7Bdisplay%3Ainline%2Dblock%3Bvertical%2Dalign%3Abaseline%7Daudio%3Anot%28%5Bcontrols%5D%29%7Bdisplay%3Anone%3Bheight%3A0%7D%5Bhidden%5D%2Ctem [...]
 <script src="data:application/x-javascript;base64,LyohCiAqIEJvb3RzdHJhcCB2My4zLjUgKGh0dHA6Ly9nZXRib290c3RyYXAuY29tKQogKiBDb3B5cmlnaHQgMjAxMS0yMDE1IFR3aXR0ZXIsIEluYy4KICogTGljZW5zZWQgdW5kZXIgdGhlIE1JVCBsaWNlbnNlCiAqLwppZigidW5kZWZpbmVkIj09dHlwZW9mIGpRdWVyeSl0aHJvdyBuZXcgRXJyb3IoIkJvb3RzdHJhcCdzIEphdmFTY3JpcHQgcmVxdWlyZXMgalF1ZXJ5Iik7K2Z1bmN0aW9uKGEpeyJ1c2Ugc3RyaWN0Ijt2YXIgYj1hLmZuLmpxdWVyeS5zcGxpdCgiICIpWzBdLnNwbGl0KCIuIik7aWYoYlswXTwyJiZiWzFdPDl8fDE9PWJbMF0mJjk9PWJbMV0mJmJbMl08MSl0aHJvdy [...]
 <script src="data:application/x-javascript;base64,LyoqCiogQHByZXNlcnZlIEhUTUw1IFNoaXYgMy43LjIgfCBAYWZhcmthcyBAamRhbHRvbiBAam9uX25lYWwgQHJlbSB8IE1JVC9HUEwyIExpY2Vuc2VkCiovCi8vIE9ubHkgcnVuIHRoaXMgY29kZSBpbiBJRSA4CmlmICghIXdpbmRvdy5uYXZpZ2F0b3IudXNlckFnZW50Lm1hdGNoKCJNU0lFIDgiKSkgewohZnVuY3Rpb24oYSxiKXtmdW5jdGlvbiBjKGEsYil7dmFyIGM9YS5jcmVhdGVFbGVtZW50KCJwIiksZD1hLmdldEVsZW1lbnRzQnlUYWdOYW1lKCJoZWFkIilbMF18fGEuZG9jdW1lbnRFbGVtZW50O3JldHVybiBjLmlubmVySFRNTD0ieDxzdHlsZT4iK2IrIjwvc3R5bGU+IixkLm [...]
 <script src="data:application/x-javascript;base64,LyohIFJlc3BvbmQuanMgdjEuNC4yOiBtaW4vbWF4LXdpZHRoIG1lZGlhIHF1ZXJ5IHBvbHlmaWxsICogQ29weXJpZ2h0IDIwMTMgU2NvdHQgSmVobAogKiBMaWNlbnNlZCB1bmRlciBodHRwczovL2dpdGh1Yi5jb20vc2NvdHRqZWhsL1Jlc3BvbmQvYmxvYi9tYXN0ZXIvTElDRU5TRS1NSVQKICogICovCgovLyBPbmx5IHJ1biB0aGlzIGNvZGUgaW4gSUUgOAppZiAoISF3aW5kb3cubmF2aWdhdG9yLnVzZXJBZ2VudC5tYXRjaCgiTVNJRSA4IikpIHsKIWZ1bmN0aW9uKGEpeyJ1c2Ugc3RyaWN0IjthLm1hdGNoTWVkaWE9YS5tYXRjaE1lZGlhfHxmdW5jdGlvbihhKXt2YXIgYixjPWEuZG [...]
+<script src="data:application/x-javascript;base64,LyohIGpRdWVyeSBVSSAtIHYxLjExLjQgLSAyMDE2LTAxLTA1CiogaHR0cDovL2pxdWVyeXVpLmNvbQoqIEluY2x1ZGVzOiBjb3JlLmpzLCB3aWRnZXQuanMsIG1vdXNlLmpzLCBwb3NpdGlvbi5qcywgZHJhZ2dhYmxlLmpzLCBkcm9wcGFibGUuanMsIHJlc2l6YWJsZS5qcywgc2VsZWN0YWJsZS5qcywgc29ydGFibGUuanMsIGFjY29yZGlvbi5qcywgYXV0b2NvbXBsZXRlLmpzLCBidXR0b24uanMsIGRpYWxvZy5qcywgbWVudS5qcywgcHJvZ3Jlc3NiYXIuanMsIHNlbGVjdG1lbnUuanMsIHNsaWRlci5qcywgc3Bpbm5lci5qcywgdGFicy5qcywgdG9vbHRpcC5qcywgZWZmZWN0LmpzLC [...]
+<link href="data:text/css;charset=utf-8,%0A%0A%2Etocify%20%7B%0Awidth%3A%2020%25%3B%0Amax%2Dheight%3A%2090%25%3B%0Aoverflow%3A%20auto%3B%0Amargin%2Dleft%3A%202%25%3B%0Aposition%3A%20fixed%3B%0Aborder%3A%201px%20solid%20%23ccc%3B%0Awebkit%2Dborder%2Dradius%3A%206px%3B%0Amoz%2Dborder%2Dradius%3A%206px%3B%0Aborder%2Dradius%3A%206px%3B%0A%7D%0A%0A%2Etocify%20ul%2C%20%2Etocify%20li%20%7B%0Alist%2Dstyle%3A%20none%3B%0Amargin%3A%200%3B%0Apadding%3A%200%3B%0Aborder%3A%20none%3B%0Aline%2Dheight%3 [...]
+<script src="data:application/x-javascript;base64,LyoganF1ZXJ5IFRvY2lmeSAtIHYxLjkuMSAtIDIwMTMtMTAtMjIKICogaHR0cDovL3d3dy5ncmVnZnJhbmtvLmNvbS9qcXVlcnkudG9jaWZ5LmpzLwogKiBDb3B5cmlnaHQgKGMpIDIwMTMgR3JlZyBGcmFua287IExpY2Vuc2VkIE1JVCAqLwoKLy8gSW1tZWRpYXRlbHktSW52b2tlZCBGdW5jdGlvbiBFeHByZXNzaW9uIChJSUZFKSBbQmVuIEFsbWFuIEJsb2cgUG9zdF0oaHR0cDovL2JlbmFsbWFuLmNvbS9uZXdzLzIwMTAvMTEvaW1tZWRpYXRlbHktaW52b2tlZC1mdW5jdGlvbi1leHByZXNzaW9uLykgdGhhdCBjYWxscyBhbm90aGVyIElJRkUgdGhhdCBjb250YWlucyBhbGwgb2YgdG [...]
+<script src="data:application/x-javascript;base64,CgovKioKICogalF1ZXJ5IFBsdWdpbjogU3RpY2t5IFRhYnMKICoKICogQGF1dGhvciBBaWRhbiBMaXN0ZXIgPGFpZGFuQHBocC5uZXQ+CiAqIGFkYXB0ZWQgYnkgUnViZW4gQXJzbGFuIHRvIGFjdGl2YXRlIHBhcmVudCB0YWJzIHRvbwogKiBodHRwOi8vd3d3LmFpZGFubGlzdGVyLmNvbS8yMDE0LzAzL3BlcnNpc3RpbmctdGhlLXRhYi1zdGF0ZS1pbi1ib290c3RyYXAvCiAqLwooZnVuY3Rpb24oJCkgewogICJ1c2Ugc3RyaWN0IjsKICAkLmZuLnJtYXJrZG93blN0aWNreVRhYnMgPSBmdW5jdGlvbigpIHsKICAgIHZhciBjb250ZXh0ID0gdGhpczsKICAgIC8vIFNob3cgdGhlIHRhYi [...]
 
 
 
 
-
-</head>
-
-<body>
-
 <style type="text/css">
-.main-container {
-  max-width: 940px;
-  margin-left: auto;
-  margin-right: auto;
-}
-code {
-  color: inherit;
-  background-color: rgba(0, 0, 0, 0.04);
-}
-img {
-  max-width:100%;
-  height: auto;
-}
 h1 {
   font-size: 34px;
 }
@@ -68,6 +51,30 @@ h5 {
 h6 {
   font-size: 12px;
 }
+.table th:not([align]) {
+  text-align: left;
+}
+</style>
+
+
+</head>
+
+<body>
+
+<style type="text/css">
+.main-container {
+  max-width: 940px;
+  margin-left: auto;
+  margin-right: auto;
+}
+code {
+  color: inherit;
+  background-color: rgba(0, 0, 0, 0.04);
+}
+img {
+  max-width:100%;
+  height: auto;
+}
 .tabbed-pane {
   padding-top: 12px;
 }
@@ -77,10 +84,10 @@ button.code-folding-btn:focus {
 </style>
 
 
+
 <div class="container-fluid main-container">
 
 <!-- tabsets -->
-<script src="data:application/x-javascript;base64,Cgp3aW5kb3cuYnVpbGRUYWJzZXRzID0gZnVuY3Rpb24odG9jSUQpIHsKCiAgLy8gYnVpbGQgYSB0YWJzZXQgZnJvbSBhIHNlY3Rpb24gZGl2IHdpdGggdGhlIC50YWJzZXQgY2xhc3MKICBmdW5jdGlvbiBidWlsZFRhYnNldCh0YWJzZXQpIHsKCiAgICAvLyBjaGVjayBmb3IgZmFkZSBhbmQgcGlsbHMgb3B0aW9ucwogICAgdmFyIGZhZGUgPSB0YWJzZXQuaGFzQ2xhc3MoInRhYnNldC1mYWRlIik7CiAgICB2YXIgcGlsbHMgPSB0YWJzZXQuaGFzQ2xhc3MoInRhYnNldC1waWxscyIpOwogICAgdmFyIG5hdkNsYXNzID0gcGlsbHMgPyAibmF2LXBpbGxzIiA6ICJuYXYtdGFicyI7CgogIC [...]
 <script>
 $(document).ready(function () {
   window.buildTabsets("TOC");
@@ -94,15 +101,21 @@ $(document).ready(function () {
 
 <script>
 $(document).ready(function ()  {
+
+    // move toc-ignore selectors from section div to header
+    $('div.section.toc-ignore')
+        .removeClass('toc-ignore')
+        .children('h1,h2,h3,h4,h5').addClass('toc-ignore');
+
     // establish options
     var options = {
       selectors: "h1,h2,h3",
       theme: "bootstrap3",
       context: '.toc-content',
       hashGenerator: function (text) {
-        return text.replace(/[.\/?&!#<>]/g, '').replace(/\s/g, '_').toLowerCase();
+        return text.replace(/[.\\/?&!#<>]/g, '').replace(/\s/g, '_').toLowerCase();
       },
-      ignoreSelector: "h1.title, .toc-ignore",
+      ignoreSelector: ".toc-ignore",
       scrollTo: 0
     };
     options.showAndHide = false;
@@ -125,6 +138,7 @@ $(document).ready(function ()  {
 }
 }
 
+
 .toc-content {
   padding-left: 30px;
   padding-right: 40px;
@@ -158,8 +172,9 @@ div.tocify {
 }
 
 .tocify-subheader .tocify-item {
-  font-size: 0.9em;
-  padding-left: 5px;
+  font-size: 0.90em;
+  padding-left: 25px;
+  text-indent: 0;
 }
 
 .tocify .list-group-item {
@@ -171,7 +186,6 @@ div.tocify {
 }
 .tocify-subheader .tocify-item {
   font-size: 0.95em;
-  padding-left: 10px;
 }
 
 </style>
@@ -191,7 +205,8 @@ div.tocify {
 <div class="fluid-row" id="header">
 
 
-<h1 class="title">Test Report</h1>
+
+<h1 class="title toc-ignore">Test Report</h1>
 <h4 class="author"><em>Mattias</em></h4>
 <h4 class="date"><em>March 22, 2017</em></h4>
 
@@ -214,10 +229,14 @@ div.tocify {
 <script>
 
 // add bootstrap table styles to pandoc tables
-$(document).ready(function () {
+function bootstrapStylePandocTables() {
   $('tr.header').parent('thead').parent('table').addClass('table table-condensed');
+}
+$(document).ready(function () {
+  bootstrapStylePandocTables();
 });
 
+
 </script>
 
 <!-- dynamically load mathjax for compatibility with self-contained -->
@@ -225,7 +244,7 @@ $(document).ready(function () {
   (function () {
     var script = document.createElement("script");
     script.type = "text/javascript";
-    script.src  = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
+    script.src  = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
     document.getElementsByTagName("head")[0].appendChild(script);
   })();
 </script>
diff --git a/tests/test_singularity/Snakefile b/tests/test_singularity/Snakefile
new file mode 100644
index 0000000..91b679b
--- /dev/null
+++ b/tests/test_singularity/Snakefile
@@ -0,0 +1,7 @@
+rule a:
+    output:
+        "test.out"
+    singularity:
+        "shub://vsoch/hello-world"
+    shell:
+        '/rawr.sh > {output}; echo "test" >> {output}'
diff --git a/tests/test_singularity/expected-results/test.out b/tests/test_singularity/expected-results/test.out
new file mode 100644
index 0000000..d4ba991
--- /dev/null
+++ b/tests/test_singularity/expected-results/test.out
@@ -0,0 +1,2 @@
+RaawwWWWWWRRRR!! Avocado!
+test
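
The new singularity test pins a public shub:// image and expects the container's /rawr.sh greeting followed by a literal "test" line. The matching test function in tests.py (currently commented out, see below) would drive it roughly like this, assuming Singularity is installed on the host (a sketch, not part of the patch):

    from snakemake import snakemake

    # Run the test workflow with container support enabled; rules carrying a
    # 'singularity:' directive are then executed inside the referenced image.
    snakemake("tests/test_singularity/Snakefile", use_singularity=True)
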
diff --git a/tests/test_static_remote/S3MockedForStaticTest.py b/tests/test_static_remote/S3MockedForStaticTest.py
index 3042753..4f6c343 100644
--- a/tests/test_static_remote/S3MockedForStaticTest.py
+++ b/tests/test_static_remote/S3MockedForStaticTest.py
@@ -20,7 +20,7 @@ from snakemake.logging import logger
 
 try:
     # third-party
-    import boto
+    import boto3
     from moto import mock_s3
     import filechunkio
 except ImportError as e:
@@ -90,9 +90,8 @@ class RemoteObject(S3RemoteObject):
         bucket_name = 'test-static-remote-bucket'
         test_files = ('test.txt', 'out1.txt', 'out2.txt')
 
-        conn = boto.connect_s3()
-        if bucket_name not in [b.name for b in conn.get_all_buckets()]:
-            conn.create_bucket(bucket_name)
+        s3 = boto3.resource('s3')
+        s3.create_bucket(Bucket=bucket_name)
 
         # "Upload" files that should be in S3 before tests...
         s3c = S3Helper()
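
The mocked static-remote helper above is ported from boto to boto3: instead of connecting and scanning get_all_buckets(), the test bucket is created directly through the resource API. A minimal sketch of that pattern under moto's S3 mock (the function name is illustrative, not part of the patch):

    import boto3
    from moto import mock_s3

    @mock_s3
    def create_test_bucket(bucket_name="test-static-remote-bucket"):
        # boto3 issues create_bucket directly; there is no prior enumeration of
        # existing buckets as in the old boto code.
        s3 = boto3.resource("s3")
        s3.create_bucket(Bucket=bucket_name)
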
diff --git a/tests/test_wrapper/Snakefile b/tests/test_wrapper/Snakefile
index 7f82bd3..8cfe3e7 100644
--- a/tests/test_wrapper/Snakefile
+++ b/tests/test_wrapper/Snakefile
@@ -4,4 +4,4 @@ rule compress_vcf:
     output:
         "test.vcf.gz"
     wrapper:
-        "0.9.0/bio/vcf/compress"
+        "0.17.2/bio/vcf/compress"
diff --git a/tests/tests.py b/tests/tests.py
index 2f03d3f..c62d4e5 100644
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -77,7 +77,7 @@ def run(path,
                  shell=True)
             config['subworkdir'] = subworkdir
 
-        call('find {} -maxdepth 1 -type f -print0 | xargs -0 -I%% -n 1 cp %% {}'.format(
+        call('find {} -maxdepth 1 -type f -print0 | xargs -0 -I%% -n 1 cp -r %% {}'.format(
             quote(path), quote(tmpdir)),
              shell=True)
         success = snakemake(snakefile,
@@ -100,6 +100,8 @@ def run(path,
                     targetfile), 'expected file "{}" not produced'.format(
                         resultfile)
                 if check_md5:
+                    # if md5sum(targetfile) != md5sum(expectedfile):
+                    #     import pdb; pdb.set_trace()
                     assert md5sum(targetfile) == md5sum(
                         expectedfile), 'wrong result produced for file "{}"'.format(
                             resultfile)
@@ -286,17 +288,18 @@ def test_yaml_config():
     run(dpath("test_yaml_config"))
 
 
-def test_remote():
-    try:
-        import moto
-        import boto
-        import filechunkio
-
-        # only run the remote file test if the dependencies
-        # are installed, otherwise do nothing
-        run(dpath("test_remote"), cores=1)
-    except ImportError:
-        pass
+# TODO reenable once S3Mocked works with boto3
+# def test_remote():
+#     try:
+#         import moto
+#         import boto3
+#         import filechunkio
+#
+#         # only run the remote file test if the dependencies
+#         # are installed, otherwise do nothing
+#         run(dpath("test_remote"), cores=1)
+#     except ImportError:
+#         pass
 
 
 def test_cluster_sync():
@@ -410,17 +413,18 @@ def test_spaces_in_fnames():
         printshellcmds=True)
 
 
-def test_static_remote():
-    try:
-        import moto
-        import boto
-        import filechunkio
+# TODO deactivate because of problems with moto and boto3.
+# def test_static_remote():
+#     import importlib
+#     try:
+#         importlib.reload(boto3)
+#         importlib.reload(moto)
+#         # only run the remote file test if the dependencies
+#         # are installed, otherwise do nothing
+#         run(dpath("test_static_remote"), cores=1)
+#     except ImportError:
+#         pass
 
-        # only run the remote file test if the dependencies
-        # are installed, otherwise do nothing
-        run(dpath("test_static_remote"), cores=1)
-    except ImportError:
-        pass
 
 def test_remote_ncbi_simple():
     try:
@@ -488,7 +492,7 @@ def test_restartable_job_cmd_exit_1():
         restart_times=0, shouldfail=True)
     # Restarting once is enough
     run(dpath("test_restartable_job_cmd_exit_1"), cluster="./qsub",
-        restart_times=1, shouldfail=False)
+        restart_times=1, printshellcmds=True)
 
 
 def test_restartable_job_qsub_exit_1():
@@ -506,6 +510,7 @@ def test_restartable_job_qsub_exit_1():
     run(dpath("test_restartable_job_qsub_exit_1"), cluster="./qsub",
         restart_times=1, shouldfail=False)
 
+
 def test_threads():
     run(dpath("test_threads"), cores=20)
 
@@ -513,27 +518,55 @@ def test_threads():
 def test_dynamic_temp():
     run(dpath("test_dynamic_temp"))
 
-def test_ftp_immediate_close():
-    try:
-        import ftputil
 
-        # only run the remote file test if the dependencies
-        # are installed, otherwise do nothing
-        run(dpath("test_ftp_immediate_close"))
-    except ImportError:
-        pass
+# TODO this currently hangs. Has to be investigated (issue #660).
+#def test_ftp_immediate_close():
+#    try:
+#        import ftputil
+#
+#        # only run the remote file test if the dependencies
+#        # are installed, otherwise do nothing
+#        run(dpath("test_ftp_immediate_close"))
+#    except ImportError:
+#        pass
+
 
 def test_issue260():
    run(dpath("test_issue260"))
 
-def test_default_remote():
-    run(dpath("test_default_remote"),
-        default_remote_provider="S3Mocked",
-        default_remote_prefix="test-remote-bucket")
+
+# TODO reenable once S3Mocked works again with boto3
+# def test_default_remote():
+#     run(dpath("test_default_remote"),
+#         default_remote_provider="S3Mocked",
+#         default_remote_prefix="test-remote-bucket")
+
 
 def test_run_namedlist():
     run(dpath("test_run_namedlist"))
 
+
+def test_remote_gs():
+    run(dpath("test_remote_gs"))
+
+
+def test_remote_log():
+    run(dpath("test_remote_log"), shouldfail=True)
+
+
+def test_profile():
+    run(dpath("test_profile"))
+
+
+# TODO reenable once we run tests in a VM instead of Docker (maybe go back to codeship)?
+# def test_singularity():
+#     run(dpath("test_singularity"), use_singularity=True)
+
+
+def test_issue612():
+    run(dpath("test_issue612"), dryrun=True)
+
+
 if __name__ == '__main__':
     import nose
     nose.run(defaultTest=__name__)
diff --git a/wercker.yml b/wercker.yml
index cbdd59d..3bdb4fd 100644
--- a/wercker.yml
+++ b/wercker.yml
@@ -7,12 +7,12 @@ build:
   steps:
     - script:
         name: env
-        code: conda env update --name root --file test-environment.yml
+        code: conda env update --name test --file test-environment.yml
 
     - script:
         name: pip
-        code: pip install -e .
+        code: source activate test; pip install -e .
 
     - script:
         name: run tests
-        code: python setup.py nosetests
+        code: source activate test; python setup.py nosetests

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/snakemake.git


