[med-svn] [snakemake] 01/04: Imported Upstream version 3.4

Kevin Murray daube-guest at moszumanska.debian.org
Sun Sep 13 14:00:42 UTC 2015


This is an automated email from the git hooks/post-receive script.

daube-guest pushed a commit to branch master
in repository snakemake.

commit 47de40639e62505cd4806a76f5e23be5d27846ca
Author: Kevin Murray <spam at kdmurray.id.au>
Date:   Sun Sep 13 22:51:41 2015 +1000

    Imported Upstream version 3.4
---
 .gitignore                                         |   8 +
 LICENSE                                            |   7 +
 README                                             |   8 +
 bin/snakemake                                      |  20 +
 conda/build.sh                                     |  12 +
 conda/snakemake/bld.bat                            |   8 +
 conda/snakemake/build.sh                           |   9 +
 conda/snakemake/meta.yaml                          |  22 +
 docker/Dockerfile                                  |   7 +
 docs/Makefile                                      | 177 ++++
 docs/api.rst                                       |   4 +
 docs/conf.py                                       | 266 ++++++
 docs/index.rst                                     |  21 +
 docs/requirements.txt                              |   2 +
 docs/utils.rst                                     |   5 +
 examples/c/README.txt                              |   1 +
 examples/c/include/hello.h                         |   1 +
 examples/c/src/Makefile                            |  30 +
 examples/c/src/Snakefile                           |  41 +
 examples/c/src/dag.png                             | Bin 0 -> 8155 bytes
 examples/c/src/hello.c                             |   8 +
 examples/c/src/hellofunc.c                         |   8 +
 examples/cufflinks/Snakefile                       |  76 ++
 examples/cufflinks/dag.png                         | Bin 0 -> 26777 bytes
 examples/cufflinks/dag.svg                         | 107 +++
 examples/cufflinks/hg19.fa                         |   0
 examples/cufflinks/hg19.gtf                        |   0
 examples/cufflinks/mapped/101.bam                  |   0
 examples/cufflinks/mapped/102.bam                  |   0
 examples/cufflinks/mapped/103.bam                  |   0
 examples/cufflinks/mapped/104.bam                  |   0
 examples/idea/idea.pdf                             | Bin 0 -> 20853 bytes
 examples/idea/idea.png                             | Bin 0 -> 43456 bytes
 examples/idea/idea.svg                             | 757 ++++++++++++++++
 examples/latex/Snakefile                           |  23 +
 examples/latex/dag.png                             | Bin 0 -> 9527 bytes
 examples/latex/document.tex                        |   0
 examples/latex/fig1.pdf                            |   0
 examples/latex/response-to-editor.tex              |   0
 examples/latex/tex.rules                           |  30 +
 examples/mirna/dag.dot                             | 446 ++++++++++
 examples/mirna/dag.png                             | Bin 0 -> 2375627 bytes
 examples/report/Snakefile                          |  30 +
 examples/report/report.html                        | 114 +++
 logo.png                                           | Bin 0 -> 3210 bytes
 logo.svg                                           | 113 +++
 misc/snakemake.vim                                 |  61 ++
 setup.py                                           |  48 +
 snakemake/__init__.py                              | 985 +++++++++++++++++++++
 snakemake/dag.py                                   | 926 +++++++++++++++++++
 snakemake/exceptions.py                            | 300 +++++++
 snakemake/executors.py                             | 710 +++++++++++++++
 snakemake/futures.py                               |  36 +
 snakemake/gui.html                                 | 358 ++++++++
 snakemake/gui.py                                   | 171 ++++
 snakemake/io.py                                    | 577 ++++++++++++
 snakemake/jobs.py                                  | 358 ++++++++
 snakemake/jobscript.sh                             |   3 +
 snakemake/logging.py                               | 259 ++++++
 snakemake/output_index.py                          |  52 ++
 snakemake/parser.py                                | 659 ++++++++++++++
 snakemake/persistence.py                           | 299 +++++++
 snakemake/report.css                               | 147 +++
 snakemake/report.py                                | 127 +++
 snakemake/rules.py                                 | 520 +++++++++++
 snakemake/scheduler.py                             | 411 +++++++++
 snakemake/shell.py                                 |  76 ++
 snakemake/stats.py                                 |  78 ++
 snakemake/utils.py                                 | 220 +++++
 snakemake/version.py                               |   1 +
 snakemake/workflow.py                              | 729 +++++++++++++++
 tests/__init__.py                                  |   0
 tests/knapsack/1.txt                               |   0
 tests/knapsack/2.txt                               |   0
 tests/knapsack/3.txt                               |   0
 tests/knapsack/Snakefile                           |  25 +
 tests/test01/Snakefile                             |  65 ++
 tests/test01/expected-results/dir/test.out         |   1 +
 tests/test01/expected-results/test.inter           |   1 +
 tests/test01/test.in                               |   1 +
 tests/test02/Snakefile                             |  18 +
 tests/test02/expected-results/test.out             |   1 +
 tests/test02/test.in                               |   1 +
 tests/test03/Snakefile                             |   6 +
 tests/test03/expected-results/test.out             |   1 +
 tests/test03/params                                |   1 +
 tests/test03/test.in                               |   1 +
 tests/test04/Snakefile                             |  17 +
 tests/test04/expected-results/test.out             |   1 +
 tests/test04/params                                |   1 +
 tests/test04/test.in                               |   1 +
 tests/test05/Snakefile                             |  35 +
 tests/test05/expected-results/test.1.inter         |   2 +
 tests/test05/expected-results/test.1.inter2        |   2 +
 tests/test05/expected-results/test.2.inter         |   2 +
 tests/test05/expected-results/test.2.inter2        |   2 +
 tests/test05/expected-results/test.3.inter         |   2 +
 tests/test05/expected-results/test.3.inter2        |   2 +
 tests/test05/expected-results/test.predictions     |   6 +
 tests/test05/test.in                               |   1 +
 tests/test06/Snakefile                             |   8 +
 tests/test06/expected-results/test.bla.out         |   1 +
 tests/test06/test.in                               |   1 +
 tests/test07/Snakefile                             |  12 +
 tests/test07/expected-results/test.out             |   1 +
 tests/test07/expected-results/test2.out            |   1 +
 tests/test07/test.in                               |   1 +
 tests/test08/Snakefile                             |  11 +
 tests/test08/expected-results/test.out             |   1 +
 tests/test08/expected-results/test2.out            |   1 +
 tests/test08/test.in                               |   1 +
 tests/test08/test2.in                              |   1 +
 tests/test09/Snakefile                             |  16 +
 tests/test09/expected-results/.gitignore           |   0
 tests/test09/test.in                               |   1 +
 tests/test10/Snakefile                             |   5 +
 tests/test10/expected-results/test.out             |   1 +
 tests/test10/test.in                               |   1 +
 tests/test11/Snakefile                             |   7 +
 tests/test11/expected-results/test.inter           |   1 +
 tests/test11/expected-results/test.out             |   1 +
 tests/test11/import.snakefile                      |   5 +
 tests/test11/test.in                               |   1 +
 tests/test12/Snakefile                             |  14 +
 tests/test12/expected-results/test.out             |   1 +
 tests/test12/test.in                               |   1 +
 tests/test13/Snakefile                             |  12 +
 .../expected-results/test.algo1-p7-improved.out    |   2 +
 tests/test13/expected-results/test.algo1-p7.out    |   1 +
 tests/test13/test.in                               |   1 +
 tests/test14/Snakefile.nonstandard                 |  27 +
 tests/test14/expected-results/test.1.inter         |   2 +
 tests/test14/expected-results/test.1.inter2        |   2 +
 tests/test14/expected-results/test.2.inter         |   2 +
 tests/test14/expected-results/test.2.inter2        |   2 +
 tests/test14/expected-results/test.3.inter         |   2 +
 tests/test14/expected-results/test.3.inter2        |   2 +
 tests/test14/expected-results/test.predictions     |  10 +
 tests/test14/qsub                                  |   6 +
 tests/test14/qsub.py                               |  14 +
 tests/test14/test.in                               |   1 +
 tests/test15/Snakefile                             |   7 +
 tests/test15/expected-results/test.out             |   1 +
 tests/test15/test.in                               |   1 +
 tests/testHighWorkload/Snakefile                   |  19 +
 tests/testHighWorkload/mfa/00.mfa                  |   0
 tests/testHighWorkload/mfa/01.mfa                  |   0
 tests/testHighWorkload/mfa/02.mfa                  |   0
 tests/testHighWorkload/mfa/03.mfa                  |   0
 tests/testHighWorkload/mfa/04.mfa                  |   0
 tests/testHighWorkload/mfa/05.mfa                  |   0
 tests/testHighWorkload/mfa/06.mfa                  |   0
 tests/testHighWorkload/mfa/07.mfa                  |   0
 tests/testHighWorkload/mfa/08.mfa                  |   0
 tests/testHighWorkload/mfa/09.mfa                  |   0
 tests/testHighWorkload/mfa/10.mfa                  |   0
 tests/test_benchmark/Snakefile                     |  11 +
 .../expected-results/test.benchmark.json           |   0
 tests/test_cluster_dynamic/Snakefile               |  26 +
 .../test_cluster_dynamic/expected-results/out.txt  |   7 +
 tests/test_cluster_dynamic/qsub                    |   6 +
 tests/test_cluster_dynamic/test.txt                |   7 +
 tests/test_conditional/Snakefile                   |  18 +
 tests/test_conditional/expected-results/test.0.out |   0
 tests/test_conditional/expected-results/test.1.out |   0
 tests/test_conditional/expected-results/test.2.out |   0
 tests/test_conditional/expected-results/test.out   |   0
 tests/test_config/Snakefile                        |  13 +
 tests/test_config/expected-results/test.out        |   0
 tests/test_config/test.json                        |   3 +
 tests/test_config/test.rules                       |   1 +
 tests/test_config/test2.json                       |   3 +
 tests/test_config/test3.json                       |   3 +
 tests/test_dynamic/Snakefile                       |  26 +
 tests/test_dynamic/expected-results/test.00.xy.csv |   0
 tests/test_dynamic/expected-results/test.00.xy.out |   0
 tests/test_dynamic/expected-results/test.01.xy.csv |   0
 tests/test_dynamic/expected-results/test.01.xy.out |   0
 tests/test_dynamic/expected-results/test.02.xy.csv |   0
 tests/test_dynamic/expected-results/test.02.xy.out |   0
 tests/test_dynamic/test.xy.in                      |   0
 tests/test_dynamic_complex/Snakefile               |  19 +
 .../test_dynamic_complex/expected-results/a_final  |   0
 .../test_dynamic_complex/expected-results/b_final  |   0
 .../test_dynamic_complex/expected-results/c_final  |   0
 tests/test_globwildcards/Snakefile                 |  11 +
 .../test_globwildcards/expected-results/test.0.out |   0
 .../test_globwildcards/expected-results/test.1.out |   0
 .../test_globwildcards/expected-results/test.2.out |   0
 tests/test_globwildcards/test.0.txt                |   0
 tests/test_globwildcards/test.1.txt                |   0
 tests/test_globwildcards/test.2.txt                |   0
 tests/test_keyword_list/Snakefile                  |   7 +
 tests/test_keyword_list/expected-results/test.out  |   0
 tests/test_keyword_list/test.in1                   |   0
 tests/test_keyword_list/test.in2                   |   0
 tests/test_local_import/Snakefile                  |   6 +
 tests/test_local_import/bar.py                     |   0
 tests/test_local_import/expected-results/test.out  |   0
 tests/test_local_import/foo/__init__.py            |   0
 tests/test_many_jobs/Snakefile                     |  23 +
 tests/test_multiple_includes/Snakefile             |   5 +
 .../expected-results/test1.txt                     |   0
 .../expected-results/test2.txt                     |   0
 tests/test_multiple_includes/test_rule.smk         |   3 +
 tests/test_multiple_includes/test_second_rule.smk  |   4 +
 tests/test_params/Snakefile                        |  12 +
 .../test_params/expected-results/somedir/test.out  |   0
 tests/test_parser/Snakefile                        |  13 +
 tests/test_parser/expected-results/test.out        |   0
 tests/test_parser/test.out                         |   0
 tests/test_persistent_dict/Snakefile               |  33 +
 .../expected-results/.gitignore                    |   0
 tests/test_report/Snakefile                        |  31 +
 tests/test_report/expected-results/report.html     | 171 ++++
 tests/test_report/fig.png                          | Bin 0 -> 20461 bytes
 tests/test_report/fig2.png                         | Bin 0 -> 9089 bytes
 tests/test_ruledag/1.a                             |   0
 tests/test_ruledag/2.a                             |   0
 tests/test_ruledag/3.a                             |   0
 tests/test_ruledag/4.a                             |   0
 tests/test_ruledag/5.a                             |   0
 tests/test_ruledag/Snakefile                       |  18 +
 tests/test_ruledeps/Snakefile                      |  27 +
 tests/test_ruledeps/expected-results/test.out      |   0
 tests/test_same_wildcard/Snakefile                 |   8 +
 .../expected-results/test_test.out                 |   1 +
 tests/test_same_wildcard/test_test.in              |   1 +
 tests/test_shell/Snakefile                         |  10 +
 tests/test_shell/expected-results/test.out         |   2 +
 tests/test_shell/test.in                           |   2 +
 tests/test_srcdir/Snakefile                        |   7 +
 tests/test_srcdir/expected-results/test.out        |   1 +
 tests/test_srcdir/script.sh                        |   2 +
 tests/test_subworkflows/Snakefile                  |  12 +
 tests/test_subworkflows/expected-results/test.out  |   1 +
 tests/test_temp/Snakefile                          |  21 +
 .../test_temp/expected-results/test.realigned.bam  |   0
 tests/test_temp/qsub                               |   6 +
 tests/test_temp/test.bam                           |   0
 tests/test_temp_expand/Snakefile                   |  16 +
 tests/test_temp_expand/expected-results/test.txt   |   0
 tests/test_touch/Snakefile                         |   5 +
 tests/test_touch/expected-results/test.out         |   0
 tests/test_url_include/Snakefile                   |   6 +
 .../test_url_include/expected-results/test.1.inter |   2 +
 .../expected-results/test.1.inter2                 |   2 +
 .../test_url_include/expected-results/test.2.inter |   2 +
 .../expected-results/test.2.inter2                 |   2 +
 .../test_url_include/expected-results/test.3.inter |   2 +
 .../expected-results/test.3.inter2                 |   2 +
 .../expected-results/test.predictions              |   6 +
 tests/test_url_include/test.in                     |   1 +
 tests/test_wildcard_count_ambiguity/Snakefile      |  14 +
 .../expected-results/test.out                      |   0
 tests/test_yaml_config/Snakefile                   |   9 +
 tests/test_yaml_config/expected-results/test.out   |   0
 tests/test_yaml_config/test.yaml                   |   2 +
 tests/tests.py                                     | 268 ++++++
 259 files changed, 11797 insertions(+)

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7e505fc
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+*~
+*.pyc
+
+build/
+dist/
+*.egg-info/
+*.egg
+.snakemake*
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..7472113
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2015 Johannes Köster <johannes.koester at tu-dortmund.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/README b/README
new file mode 100644
index 0000000..5166c44
--- /dev/null
+++ b/README
@@ -0,0 +1,8 @@
+Snakemake -- a pythonic workflow system
+
+Build systems like GNU Make are frequently used to create complicated workflows,
+e.g. in bioinformatics. This project aims to reduce the complexity of creating
+workflows by providing a clean and modern domain-specific language (DSL)
+in Python style, together with a fast and comfortable execution environment.
+
+Copyright (c) 2015 Johannes Köster <johannes.koester at tu-dortmund.de> (see LICENSE)
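
As a quick illustration of that DSL (a hypothetical sketch, not a file from
this import; see the examples/ directories below for real workflows), a rule
names its inputs and outputs and the command that connects them:

    # concatenate two inputs into one output
    rule concat:
        input:
            'a.txt', 'b.txt'
        output:
            'ab.txt'
        shell:
            'cat {input} > {output}'

Running `snakemake ab.txt` would execute the shell command once both inputs
are present, substituting {input} and {output} as in the Snakefiles below.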
diff --git a/bin/snakemake b/bin/snakemake
new file mode 100755
index 0000000..403a75b
--- /dev/null
+++ b/bin/snakemake
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+
+__author__ = "Johannes Köster"
+
+# If running from within source directory,
+# add '../snakemake' to sys.path.
+_libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
+if os.path.isfile(os.path.join(_libdir, 'snakemake', '__init__.py')):
+    sys.path.insert(0, _libdir)
+
+from snakemake import main
+
+if __name__ == "__main__":
+    #import cProfile
+    #cProfile.run('main()', "snakemake.profile")
+    main()
diff --git a/conda/build.sh b/conda/build.sh
new file mode 100644
index 0000000..2ff397f
--- /dev/null
+++ b/conda/build.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+conda build snakemake
+conda convert ~/miniconda3/conda-bld/linux-64/snakemake-*.tar.bz2 -p all
+echo "Due to a bug in conda convert, the subdir in info/index.json is not updated (to e.g. win-64)."
+echo "This has to be done manually in the tarball."
+exit 0
+# TODO reactivate this once conda convert has been fixed.
+for p in */snakemake-*.tar.bz2
+do
+    binstar upload $p
+done
diff --git a/conda/snakemake/bld.bat b/conda/snakemake/bld.bat
new file mode 100644
index 0000000..87b1481
--- /dev/null
+++ b/conda/snakemake/bld.bat
@@ -0,0 +1,8 @@
+"%PYTHON%" setup.py install
+if errorlevel 1 exit 1
+
+:: Add more build steps here, if they are necessary.
+
+:: See
+:: http://docs.continuum.io/conda/build.html
+:: for a list of environment variables that are set during the build process.
diff --git a/conda/snakemake/build.sh b/conda/snakemake/build.sh
new file mode 100644
index 0000000..4d7fc03
--- /dev/null
+++ b/conda/snakemake/build.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+$PYTHON setup.py install
+
+# Add more build steps here, if they are necessary.
+
+# See
+# http://docs.continuum.io/conda/build.html
+# for a list of environment variables that are set during the build process.
diff --git a/conda/snakemake/meta.yaml b/conda/snakemake/meta.yaml
new file mode 100644
index 0000000..9e75120
--- /dev/null
+++ b/conda/snakemake/meta.yaml
@@ -0,0 +1,22 @@
+package:
+  name: snakemake
+  version: "3.4"
+source:
+  fn: snakemake-3.3.tar.gz
+  url: https://pypi.python.org/packages/source/s/snakemake/snakemake-3.3.tar.gz
+  md5: 92b9166e43cb1ee26bedfec0013b57de
+build:
+  entry_points:
+    - snakemake = snakemake:main
+    - snakemake-bash-completion = snakemake:bash_completion
+requirements:
+  build:
+    - python >=3.2
+    - setuptools
+  run:
+    - python >=3.2
+    - docutils
+about:
+  home: https://bitbucket.org/johanneskoester/snakemake
+  license: MIT License
+  summary: 'Build systems like GNU Make are frequently used to create complicated workflows, e.g. in bioinformatics. This project aims to reduce the complexity of creating workflows by providing a clean and modern domain specific language (DSL) in python style, together with a fast and comfortable execution environment.'
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..9792c7a
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,7 @@
+# a docker image based on Ubuntu with snakemake installed
+FROM ubuntu:14.04
+MAINTAINER Johannes Köster <johannes.koester at tu-dortmund.de>
+RUN apt-get -qq update
+RUN apt-get install -qqy python3-setuptools python3-docutils python3-flask
+RUN easy_install3 snakemake
+ENTRYPOINT ["snakemake"]
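
Since the image's ENTRYPOINT is snakemake, any arguments to `docker run` are
passed straight to it; a plausible invocation (image name hypothetical) is
`docker run -v $(pwd):/work -w /work snakemake-img --snakefile Snakefile`.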
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..76a6746
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Snakemake.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Snakemake.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/Snakemake"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Snakemake"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/docs/api.rst b/docs/api.rst
new file mode 100644
index 0000000..28920a8
--- /dev/null
+++ b/docs/api.rst
@@ -0,0 +1,4 @@
+The Snakemake API
+=================
+
+.. autofunction:: snakemake.snakemake
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..38eb443
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Snakemake documentation build configuration file, created by
+# sphinx-quickstart on Sat Feb  1 16:01:02 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import sphinx_bootstrap_theme
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.mathjax',
+    'sphinx.ext.viewcode',
+    'sphinxcontrib.napoleon'
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'Snakemake'
+copyright = '2014, Johannes Koester'
+
+import snakemake
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = snakemake.__version__
+# The full version, including alpha/beta/rc tags.
+release = snakemake.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'bootstrap'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {'bootswatch_theme': "spacelab"}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Snakemakedoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'Snakemake.tex', 'Snakemake Documentation',
+   'Johannes Koester', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'snakemake', 'Snakemake Documentation',
+     ['Johannes Koester'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'Snakemake', 'Snakemake Documentation',
+   'Johannes Koester', 'Snakemake', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..de2b778
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,21 @@
+.. Snakemake documentation master file, created by
+   sphinx-quickstart on Sat Feb  1 16:01:02 2014.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to Snakemake's API documentation!
+=========================================
+
+Build systems like GNU Make are frequently used to create complicated workflows, e.g. in bioinformatics. This project aims to reduce the complexity of creating workflows by providing a fast and comfortable execution environment, together with a clean and modern domain-specific language (DSL) in Python style.
+
+Apart from being a command line tool, Snakemake can also be called from within other Python code and hence serve as a framework for organizing workflows within your software. These pages describe the public parts of the Snakemake API.
+
+For the user documentation and general information, see https://bitbucket.org/johanneskoester/snakemake.
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   api
+   utils
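
A minimal sketch of that programmatic use, via the snakemake.snakemake()
function documented in api.rst (the dryrun keyword mirrors the command line
flag; treat the exact keyword set as an assumption and check the built docs):

    import snakemake

    # execute the workflow; returns True on success, False otherwise
    ok = snakemake.snakemake('Snakefile', dryrun=True)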
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..f9a8b63
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+sphinxcontrib-napoleon
+sphinx_bootstrap_theme
diff --git a/docs/utils.rst b/docs/utils.rst
new file mode 100644
index 0000000..c01509d
--- /dev/null
+++ b/docs/utils.rst
@@ -0,0 +1,5 @@
+Additional utils
+================
+
+.. automodule:: snakemake.utils
+    :members:
diff --git a/examples/c/README.txt b/examples/c/README.txt
new file mode 100644
index 0000000..995d30f
--- /dev/null
+++ b/examples/c/README.txt
@@ -0,0 +1 @@
+http://www.cs.colby.edu/maxwell/courses/tutorials/maketutor/
diff --git a/examples/c/include/hello.h b/examples/c/include/hello.h
new file mode 100644
index 0000000..fccca1b
--- /dev/null
+++ b/examples/c/include/hello.h
@@ -0,0 +1 @@
+void myPrintHello(void);
diff --git a/examples/c/src/Makefile b/examples/c/src/Makefile
new file mode 100644
index 0000000..12d0f8f
--- /dev/null
+++ b/examples/c/src/Makefile
@@ -0,0 +1,30 @@
+
+
+IDIR=../include
+ODIR=obj
+LDIR=../lib
+
+LIBS=-lm
+
+CC=gcc
+CFLAGS=-I$(IDIR)
+
+_HEADERS = hello.h
+HEADERS = $(patsubst %,$(IDIR)/%,$(_HEADERS))
+
+_OBJS = hello.o hellofunc.o 
+OBJS = $(patsubst %,$(ODIR)/%,$(_OBJS))
+
+# build the executable from the object files
+hello: $(OBJS)
+	$(CC) -o $@ $^ $(CFLAGS)
+
+# compile a single .c file to an .o file
+$(ODIR)/%.o: %.c $(HEADERS)
+	$(CC) -c -o $@ $< $(CFLAGS)
+
+
+# clean up temporary files
+.PHONY: clean
+clean:
+	rm -f $(ODIR)/*.o *~ core $(IDIR)/*~ 
diff --git a/examples/c/src/Snakefile b/examples/c/src/Snakefile
new file mode 100644
index 0000000..29e5088
--- /dev/null
+++ b/examples/c/src/Snakefile
@@ -0,0 +1,41 @@
+from os.path import join
+
+IDIR = '../include'
+ODIR = 'obj'
+LDIR = '../lib'
+
+LIBS = '-lm'
+
+CC = 'gcc'
+CFLAGS = '-I' + IDIR
+
+
+_HEADERS = ['hello.h']
+HEADERS = [join(IDIR, hfile) for hfile in _HEADERS]
+
+_OBJS = ['hello.o', 'hellofunc.o']
+OBJS = [join(ODIR, ofile) for ofile in _OBJS]
+
+
+rule hello:
+    """build the executable from the object files"""
+    output:
+        'hello'
+    input:
+        OBJS
+    shell:
+        "{CC} -o {output} {input} {CFLAGS} {LIBS}"
+
+rule c_to_o:
+    """compile a single .c file to an .o file"""
+    output:
+        temp('{ODIR}/{name}.o')
+    input:
+        '{name}.c', HEADERS
+    shell:
+        "{CC} -c -o {output} {input} {CFLAGS}"
+
+rule clean:
+    """clean up temporary files"""
+    shell:
+        "rm -f   *~  core  {IDIR}/*~"
diff --git a/examples/c/src/dag.png b/examples/c/src/dag.png
new file mode 100644
index 0000000..4e0cd42
Binary files /dev/null and b/examples/c/src/dag.png differ
diff --git a/examples/c/src/hello.c b/examples/c/src/hello.c
new file mode 100644
index 0000000..f5619ab
--- /dev/null
+++ b/examples/c/src/hello.c
@@ -0,0 +1,8 @@
+#include <stdio.h>
+
+int main() {
+  // call a function in another file
+  myPrintHello();
+
+  return(0);
+}
diff --git a/examples/c/src/hellofunc.c b/examples/c/src/hellofunc.c
new file mode 100644
index 0000000..c75832a
--- /dev/null
+++ b/examples/c/src/hellofunc.c
@@ -0,0 +1,8 @@
+#include <stdio.h>
+
+void myPrintHello(void) {
+
+  printf("Hello makefiles!\n");
+
+  return;
+}
diff --git a/examples/cufflinks/Snakefile b/examples/cufflinks/Snakefile
new file mode 100644
index 0000000..858b28b
--- /dev/null
+++ b/examples/cufflinks/Snakefile
@@ -0,0 +1,76 @@
+# path to track and reference
+TRACK   = 'hg19.gtf'
+REF     = 'hg19.fa'
+
+
+# sample names and classes
+CLASS1  = '101 102'.split()
+CLASS2  = '103 104'.split()
+SAMPLES = CLASS1 + CLASS2
+
+
+# path to bam files
+CLASS1_BAM = expand('mapped/{sample}.bam', sample=CLASS1)
+CLASS2_BAM = expand('mapped/{sample}.bam', sample=CLASS2)
+
+
+rule all:
+    input:
+        'diffexp/isoform_exp.diff',
+        'assembly/comparison'
+
+
+rule assembly:
+    input:
+        'mapped/{sample}.bam'
+    output:
+        'assembly/{sample}/transcripts.gtf',
+        dir='assembly/{sample}'
+    threads: 4
+    shell:
+        'cufflinks --num-threads {threads} -o {output.dir} '
+        '--frag-bias-correct {REF} {input}'
+
+
+rule compose_merge:
+    input:
+        expand('assembly/{sample}/transcripts.gtf', sample=SAMPLES)
+    output:
+        txt='assembly/assemblies.txt'
+    run:
+        with open(output.txt, 'w') as out:
+            print(*input, sep="\n", file=out)
+
+
+rule merge_assemblies:
+    input:
+        'assembly/assemblies.txt'
+    output:
+        'assembly/merged/merged.gtf', dir='assembly/merged'
+    shell:
+        'cuffmerge -o {output.dir} -s {REF} {input}'
+
+
+rule compare_assemblies:
+    input:
+        'assembly/merged/merged.gtf'
+    output:
+        'assembly/comparison/all.stats',
+        dir='assembly/comparison'
+    shell:
+        'cuffcompare -o {output.dir}all -s {REF} -r {TRACK} {input}'
+
+
+rule diffexp:
+    input:
+        class1=CLASS1_BAM,
+        class2=CLASS2_BAM,
+        gtf='assembly/merged/merged.gtf'
+    output:
+        'diffexp/gene_exp.diff', 'diffexp/isoform_exp.diff'
+    params:
+        class1=",".join(CLASS1_BAM),
+        class2=",".join(CLASS2_BAM)
+    threads: 8
+    shell:
+        'cuffdiff --num-threads {threads} {input.gtf} {params.class1} {params.class2}'
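
For readers new to expand(), used above to build the per-sample BAM lists, it
substitutes every value of the named wildcard into the pattern; a quick sketch
(expand is provided by snakemake.io in this import):

    from snakemake.io import expand

    # one path per sample
    print(expand('mapped/{sample}.bam', sample=['101', '102']))
    # ['mapped/101.bam', 'mapped/102.bam']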
diff --git a/examples/cufflinks/dag.png b/examples/cufflinks/dag.png
new file mode 100644
index 0000000..2cda21e
Binary files /dev/null and b/examples/cufflinks/dag.png differ
diff --git a/examples/cufflinks/dag.svg b/examples/cufflinks/dag.svg
new file mode 100644
index 0000000..fc91632
--- /dev/null
+++ b/examples/cufflinks/dag.svg
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+ "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by graphviz version 2.34.0 (20140110.0949)
+ -->
+<!-- Title: snakemake_dag Pages: 1 -->
+<svg width="366pt" height="332pt"
+ viewBox="0.00 0.00 366.00 332.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 328)">
+<title>snakemake_dag</title>
+<polygon fill="white" stroke="white" points="-4,4 -4,-328 362,-328 362,4 -4,4"/>
+<!-- 0 -->
+<g id="node1" class="node"><title>0</title>
+<path fill="none" stroke="#59d856" stroke-width="2" d="M64,-324C64,-324 12,-324 12,-324 6,-324 0,-318 0,-312 0,-312 0,-300 0,-300 0,-294 6,-288 12,-288 12,-288 64,-288 64,-288 70,-288 76,-294 76,-300 76,-300 76,-312 76,-312 76,-318 70,-324 64,-324"/>
+<text text-anchor="middle" x="38" y="-309" font-family="sans" font-size="10.00">assembly</text>
+<text text-anchor="middle" x="38" y="-298" font-family="sans" font-size="10.00">sample: 103</text>
+</g>
+<!-- 2 -->
+<g id="node3" class="node"><title>2</title>
+<path fill="none" stroke="#56a9d8" stroke-width="2" d="M216,-252C216,-252 142,-252 142,-252 136,-252 130,-246 130,-240 130,-240 130,-228 130,-228 130,-222 136,-216 142,-216 142,-216 216,-216 216,-216 222,-216 228,-222 228,-228 228,-228 228,-240 228,-240 228,-246 222,-252 216,-252"/>
+<text text-anchor="middle" x="179" y="-231.5" font-family="sans" font-size="10.00">compose_merge</text>
+</g>
+<!-- 0->2 -->
+<g id="edge6" class="edge"><title>0->2</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M72.4925,-287.876C91.4403,-278.469 115.132,-266.708 135.356,-256.667"/>
+<polygon fill="grey" stroke="grey" points="136.974,-259.771 144.375,-252.19 133.862,-253.502 136.974,-259.771"/>
+</g>
+<!-- 1 -->
+<g id="node2" class="node"><title>1</title>
+<path fill="none" stroke="#d8ac56" stroke-width="2" d="M194,-36C194,-36 164,-36 164,-36 158,-36 152,-30 152,-24 152,-24 152,-12 152,-12 152,-6 158,-0 164,-0 164,-0 194,-0 194,-0 200,-0 206,-6 206,-12 206,-12 206,-24 206,-24 206,-30 200,-36 194,-36"/>
+<text text-anchor="middle" x="179" y="-15.5" font-family="sans" font-size="10.00">all</text>
+</g>
+<!-- 5 -->
+<g id="node6" class="node"><title>5</title>
+<path fill="none" stroke="#d85656" stroke-width="2" d="M220.25,-180C220.25,-180 137.75,-180 137.75,-180 131.75,-180 125.75,-174 125.75,-168 125.75,-168 125.75,-156 125.75,-156 125.75,-150 131.75,-144 137.75,-144 137.75,-144 220.25,-144 220.25,-144 226.25,-144 232.25,-150 232.25,-156 232.25,-156 232.25,-168 232.25,-168 232.25,-174 226.25,-180 220.25,-180"/>
+<text text-anchor="middle" x="179" y="-159.5" font-family="sans" font-size="10.00">merge_assemblies</text>
+</g>
+<!-- 2->5 -->
+<g id="edge7" class="edge"><title>2->5</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M179,-215.697C179,-207.983 179,-198.712 179,-190.112"/>
+<polygon fill="grey" stroke="grey" points="182.5,-190.104 179,-180.104 175.5,-190.104 182.5,-190.104"/>
+</g>
+<!-- 3 -->
+<g id="node4" class="node"><title>3</title>
+<path fill="none" stroke="#59d856" stroke-width="2" d="M158,-324C158,-324 106,-324 106,-324 100,-324 94,-318 94,-312 94,-312 94,-300 94,-300 94,-294 100,-288 106,-288 106,-288 158,-288 158,-288 164,-288 170,-294 170,-300 170,-300 170,-312 170,-312 170,-318 164,-324 158,-324"/>
+<text text-anchor="middle" x="132" y="-309" font-family="sans" font-size="10.00">assembly</text>
+<text text-anchor="middle" x="132" y="-298" font-family="sans" font-size="10.00">sample: 102</text>
+</g>
+<!-- 3->2 -->
+<g id="edge3" class="edge"><title>3->2</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M143.618,-287.697C149.139,-279.474 155.847,-269.483 161.932,-260.421"/>
+<polygon fill="grey" stroke="grey" points="164.847,-262.358 167.516,-252.104 159.035,-258.456 164.847,-262.358"/>
+</g>
+<!-- 4 -->
+<g id="node5" class="node"><title>4</title>
+<path fill="none" stroke="#59d856" stroke-width="2" d="M252,-324C252,-324 200,-324 200,-324 194,-324 188,-318 188,-312 188,-312 188,-300 188,-300 188,-294 194,-288 200,-288 200,-288 252,-288 252,-288 258,-288 264,-294 264,-300 264,-300 264,-312 264,-312 264,-318 258,-324 252,-324"/>
+<text text-anchor="middle" x="226" y="-309" font-family="sans" font-size="10.00">assembly</text>
+<text text-anchor="middle" x="226" y="-298" font-family="sans" font-size="10.00">sample: 101</text>
+</g>
+<!-- 4->2 -->
+<g id="edge5" class="edge"><title>4->2</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M214.382,-287.697C208.861,-279.474 202.153,-269.483 196.068,-260.421"/>
+<polygon fill="grey" stroke="grey" points="198.965,-258.456 190.484,-252.104 193.153,-262.358 198.965,-258.456"/>
+</g>
+<!-- 6 -->
+<g id="node7" class="node"><title>6</title>
+<path fill="none" stroke="#afd856" stroke-width="2" d="M174.25,-108C174.25,-108 79.75,-108 79.75,-108 73.75,-108 67.75,-102 67.75,-96 67.75,-96 67.75,-84 67.75,-84 67.75,-78 73.75,-72 79.75,-72 79.75,-72 174.25,-72 174.25,-72 180.25,-72 186.25,-78 186.25,-84 186.25,-84 186.25,-96 186.25,-96 186.25,-102 180.25,-108 174.25,-108"/>
+<text text-anchor="middle" x="127" y="-87.5" font-family="sans" font-size="10.00">compare_assemblies</text>
+</g>
+<!-- 5->6 -->
+<g id="edge8" class="edge"><title>5->6</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M166.146,-143.697C159.975,-135.389 152.463,-125.277 145.676,-116.141"/>
+<polygon fill="grey" stroke="grey" points="148.479,-114.045 139.706,-108.104 142.86,-118.219 148.479,-114.045"/>
+</g>
+<!-- 8 -->
+<g id="node9" class="node"><title>8</title>
+<path fill="none" stroke="#56d8a9" stroke-width="2" d="M247,-108C247,-108 217,-108 217,-108 211,-108 205,-102 205,-96 205,-96 205,-84 205,-84 205,-78 211,-72 217,-72 217,-72 247,-72 247,-72 253,-72 259,-78 259,-84 259,-84 259,-96 259,-96 259,-102 253,-108 247,-108"/>
+<text text-anchor="middle" x="232" y="-87.5" font-family="sans" font-size="10.00">diffexp</text>
+</g>
+<!-- 5->8 -->
+<g id="edge9" class="edge"><title>5->8</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M192.101,-143.697C198.391,-135.389 206.047,-125.277 212.965,-116.141"/>
+<polygon fill="grey" stroke="grey" points="215.804,-118.19 219.05,-108.104 210.223,-113.964 215.804,-118.19"/>
+</g>
+<!-- 6->1 -->
+<g id="edge1" class="edge"><title>6->1</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M139.854,-71.6966C146.025,-63.3893 153.537,-53.2771 160.324,-44.1407"/>
+<polygon fill="grey" stroke="grey" points="163.14,-46.2189 166.294,-36.1043 157.521,-42.0446 163.14,-46.2189"/>
+</g>
+<!-- 7 -->
+<g id="node8" class="node"><title>7</title>
+<path fill="none" stroke="#59d856" stroke-width="2" d="M346,-324C346,-324 294,-324 294,-324 288,-324 282,-318 282,-312 282,-312 282,-300 282,-300 282,-294 288,-288 294,-288 294,-288 346,-288 346,-288 352,-288 358,-294 358,-300 358,-300 358,-312 358,-312 358,-318 352,-324 346,-324"/>
+<text text-anchor="middle" x="320" y="-309" font-family="sans" font-size="10.00">assembly</text>
+<text text-anchor="middle" x="320" y="-298" font-family="sans" font-size="10.00">sample: 104</text>
+</g>
+<!-- 7->2 -->
+<g id="edge4" class="edge"><title>7->2</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M285.508,-287.876C266.56,-278.469 242.868,-266.708 222.644,-256.667"/>
+<polygon fill="grey" stroke="grey" points="224.138,-253.502 213.625,-252.19 221.026,-259.771 224.138,-253.502"/>
+</g>
+<!-- 8->1 -->
+<g id="edge2" class="edge"><title>8->1</title>
+<path fill="none" stroke="grey" stroke-width="2" d="M218.899,-71.6966C212.609,-63.3893 204.953,-53.2771 198.035,-44.1407"/>
+<polygon fill="grey" stroke="grey" points="200.777,-41.9642 191.95,-36.1043 195.196,-46.1897 200.777,-41.9642"/>
+</g>
+</g>
+</svg>
diff --git a/examples/cufflinks/hg19.fa b/examples/cufflinks/hg19.fa
new file mode 100644
index 0000000..e69de29
diff --git a/examples/cufflinks/hg19.gtf b/examples/cufflinks/hg19.gtf
new file mode 100644
index 0000000..e69de29
diff --git a/examples/cufflinks/mapped/101.bam b/examples/cufflinks/mapped/101.bam
new file mode 100644
index 0000000..e69de29
diff --git a/examples/cufflinks/mapped/102.bam b/examples/cufflinks/mapped/102.bam
new file mode 100644
index 0000000..e69de29
diff --git a/examples/cufflinks/mapped/103.bam b/examples/cufflinks/mapped/103.bam
new file mode 100644
index 0000000..e69de29
diff --git a/examples/cufflinks/mapped/104.bam b/examples/cufflinks/mapped/104.bam
new file mode 100644
index 0000000..e69de29
diff --git a/examples/idea/idea.pdf b/examples/idea/idea.pdf
new file mode 100644
index 0000000..6e501ba
Binary files /dev/null and b/examples/idea/idea.pdf differ
diff --git a/examples/idea/idea.png b/examples/idea/idea.png
new file mode 100644
index 0000000..ebe4f9e
Binary files /dev/null and b/examples/idea/idea.png differ
diff --git a/examples/idea/idea.svg b/examples/idea/idea.svg
new file mode 100644
index 0000000..f5d2f53
--- /dev/null
+++ b/examples/idea/idea.svg
@@ -0,0 +1,757 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Generated by graphviz version 2.26.3 (20100126.1600)
+ -->
+
+<!-- Title: snakemake_dag Pages: 1 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="1292.4581"
+   height="327.84036"
+   viewBox="0 0 1033.9665 262.27229"
+   id="svg4042"
+   version="1.1"
+   inkscape:version="0.48.3.1 r9886"
+   sodipodi:docname="idea.svg"
+   inkscape:export-filename="/home/johannes/scms/snakemake/examples/idea/idea.png"
+   inkscape:export-xdpi="55.732769"
+   inkscape:export-ydpi="55.732769">
+  <metadata
+     id="metadata4269">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs4267">
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect7400"
+       effect="spiro" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect7396"
+       is_visible="true" />
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect7392"
+       effect="spiro" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect7020"
+       is_visible="true" />
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect6920"
+       effect="spiro" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect6918"
+       is_visible="true" />
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect6916"
+       effect="spiro" />
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect6914"
+       effect="spiro" />
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect6912"
+       effect="spiro" />
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect6910"
+       effect="spiro" />
+    <marker
+       inkscape:stockid="TriangleOutL"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="TriangleOutL"
+       style="overflow:visible">
+      <path
+         id="path5345"
+         d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+         transform="scale(0.8,0.8)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="TriangleOutM"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="TriangleOutM"
+       style="overflow:visible">
+      <path
+         id="path5348"
+         d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+         transform="scale(0.4,0.4)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4689"
+       is_visible="true" />
+    <inkscape:path-effect
+       is_visible="true"
+       id="path-effect4685"
+       effect="spiro" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4677"
+       is_visible="true" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4673"
+       is_visible="true" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4669"
+       is_visible="true" />
+    <inkscape:path-effect
+       effect="spiro"
+       id="path-effect4665"
+       is_visible="true" />
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1535"
+     inkscape:window-height="876"
+     id="namedview4265"
+     showgrid="false"
+     showguides="true"
+     inkscape:guide-bbox="true"
+     inkscape:zoom="0.72634009"
+     inkscape:cx="622.76273"
+     inkscape:cy="108.8366"
+     inkscape:window-x="65"
+     inkscape:window-y="24"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="svg4042"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0">
+    <sodipodi:guide
+       orientation="1,0"
+       position="323.533,251.10885"
+       id="guide4471" />
+    <sodipodi:guide
+       orientation="0,1"
+       position="293.24416,328.20772"
+       id="guide4473" />
+    <sodipodi:guide
+       orientation="0,1"
+       position="355.19861,239.40634"
+       id="guide4475" />
+    <sodipodi:guide
+       orientation="0,1"
+       position="305.63505,107.23685"
+       id="guide4477" />
+    <sodipodi:guide
+       orientation="1,0"
+       position="302.19313,345.41729"
+       id="guide4479" />
+  </sodipodi:namedview>
+  <text
+     xml:space="preserve"
+     style="font-size:32px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:end;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:end;fill:#000000;fill-opacity:1;stroke:none;font-family:Purisa;-inkscape-font-specification:Purisa"
+     x="241.09412"
+     y="88.421936"
+     id="text4419"
+     sodipodi:linespacing="125%"><tspan
+       sodipodi:role="line"
+       id="tspan4421"
+       x="252.47224"
+       y="88.421936"
+       style="font-size:17.60000038px;text-align:end;text-anchor:end">Produce the files </tspan><tspan
+       sodipodi:role="line"
+       x="252.47224"
+       y="110.42194"
+       style="font-size:17.60000038px;text-align:end;text-anchor:end"
+       id="tspan4437">you want to have from </tspan><tspan
+       sodipodi:role="line"
+       x="241.09412"
+       y="132.42194"
+       style="font-size:17.60000038px;text-align:end;text-anchor:end"
+       id="tspan4439">some intermediate</tspan><tspan
+       sodipodi:role="line"
+       x="241.09412"
+       y="154.42194"
+       style="font-size:17.60000038px;text-align:end;text-anchor:end"
+       id="tspan4441">result</tspan></text>
+  <text
+     xml:space="preserve"
+     style="font-size:32px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:end;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:end;fill:#000000;fill-opacity:1;stroke:none;font-family:Purisa;-inkscape-font-specification:Purisa"
+     x="242.54863"
+     y="17.879404"
+     id="text4423"
+     sodipodi:linespacing="125%"><tspan
+       sodipodi:role="line"
+       id="tspan4425"
+       x="242.54861"
+       y="17.879404"
+       style="font-size:17.60000038px;text-align:end;text-anchor:end">Tell Snakemake what files</tspan><tspan
+       sodipodi:role="line"
+       x="242.54861"
+       y="39.879406"
+       style="font-size:17.60000038px;text-align:end;text-anchor:end"
+       id="tspan4427">you want to be created</tspan></text>
+  <text
+     xml:space="preserve"
+     style="font-size:11.19999981px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:DejaVu Sans Mono;-inkscape-font-specification:DejaVu Sans Mono"
+     x="258.82642"
+     y="8.5695314"
+     id="text4429"
+     sodipodi:linespacing="125%"><tspan
+       sodipodi:role="line"
+       id="tspan4431"
+       x="258.82642"
+       y="8.5695314">rule:</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="22.56953"
+       id="tspan4435">  input: "A.txt", "B.txt", "C.txt"</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="36.56953"
+       id="tspan4433" /></text>
+  <text
+     xml:space="preserve"
+     style="font-size:11.19999981px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:DejaVu Sans Mono;-inkscape-font-specification:DejaVu Sans Mono"
+     x="258.82642"
+     y="79.662758"
+     id="text4443"
+     sodipodi:linespacing="125%"><tspan
+       sodipodi:role="line"
+       id="tspan4445"
+       x="258.82642"
+       y="79.662758">rule:</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="93.662758"
+       id="tspan4447">  input: "{sample}.inter"</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="107.66276"
+       id="tspan4449">  output: "{sample}.txt"</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="121.66276"
+       id="tspan4451">  shell: "somecommand {input} {output}"</tspan></text>
+  <text
+     xml:space="preserve"
+     style="font-size:11.19999981px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:DejaVu Sans Mono;-inkscape-font-specification:DejaVu Sans Mono"
+     x="258.82642"
+     y="185.26453"
+     id="text4453"
+     sodipodi:linespacing="125%"><tspan
+       sodipodi:role="line"
+       id="tspan4455"
+       x="258.82642"
+       y="185.26453">rule:</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="199.26453"
+       id="tspan4457">  input:  "{sample}.in"</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="213.26453"
+       id="tspan4459">  output: "{sample}.inter"</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="227.26453"
+       id="tspan4461">  run:</tspan><tspan
+       sodipodi:role="line"
+       x="258.82642"
+       y="241.26453"
+       id="tspan4463">    somepythoncode()</tspan></text>
+  <text
+     xml:space="preserve"
+     style="font-size:11.19999981px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:DejaVu Sans Mono;-inkscape-font-specification:DejaVu Sans Mono"
+     x="241.07681"
+     y="194.57442"
+     id="text4465"
+     sodipodi:linespacing="125%"><tspan
+       sodipodi:role="line"
+       id="tspan4467"
+       x="252.45493"
+       y="194.57442"
+       style="font-size:17.60000038px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:end;text-anchor:end;font-family:Purisa;-inkscape-font-specification:Purisa">Create a needed </tspan><tspan
+       sodipodi:role="line"
+       x="241.07681"
+       y="216.57442"
+       style="font-size:17.60000038px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:end;text-anchor:end;font-family:Purisa;-inkscape-font-specification:Purisa"
+       id="tspan4469">intermediate result</tspan></text>
+  <g
+     id="g6922"
+     transform="translate(717.30917,14.769109)">
+    <g
+       transform="translate(-56.378671,-157.84605)"
+       id="g4545">
+      <polyline
+         points="261,-108 231,-108 "
+         id="polyline4052"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 235,156 c -6,0 -12,6 -12,12"
+         id="path4054"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="219,-96 219,-84 "
+         id="polyline4056"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 223,180 c 0,6 6,12 12,12"
+         id="path4058"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="231,-72 261,-72 "
+         id="polyline4060"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 265,192 c 6,0 12,-6 12,-12"
+         id="path4062"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="273,-84 273,-96 "
+         id="polyline4064"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 277,168 c 0,-6 -6,-12 -12,-12"
+         id="path4066"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+    </g>
+    <g
+       id="g4555"
+       transform="translate(-152.84265,-63.987999)">
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4557"
+         points="261,-108 231,-108 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4559"
+         d="m 235,156 c -6,0 -12,6 -12,12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4561"
+         points="219,-96 219,-84 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4563"
+         d="m 223,180 c 0,6 6,12 12,12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4565"
+         points="231,-72 261,-72 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4567"
+         d="m 265,192 c 6,0 12,-6 12,-12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4569"
+         points="273,-84 273,-96 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4571"
+         d="m 277,168 c 0,-6 -6,-12 -12,-12" />
+    </g>
+    <g
+       transform="translate(-56.84265,-63.987999)"
+       id="g4573">
+      <polyline
+         points="261,-108 231,-108 "
+         id="polyline4575"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 235,156 c -6,0 -12,6 -12,12"
+         id="path4577"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="219,-96 219,-84 "
+         id="polyline4579"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 223,180 c 0,6 6,12 12,12"
+         id="path4581"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="231,-72 261,-72 "
+         id="polyline4583"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 265,192 c 6,0 12,-6 12,-12"
+         id="path4585"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="273,-84 273,-96 "
+         id="polyline4587"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 277,168 c 0,-6 -6,-12 -12,-12"
+         id="path4589"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+    </g>
+    <g
+       id="g4591"
+       transform="translate(39.15735,-63.987999)">
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4593"
+         points="261,-108 231,-108 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4595"
+         d="m 235,156 c -6,0 -12,6 -12,12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4597"
+         points="219,-96 219,-84 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4599"
+         d="m 223,180 c 0,6 6,12 12,12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4601"
+         points="231,-72 261,-72 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4603"
+         d="m 265,192 c 6,0 12,-6 12,-12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4605"
+         points="273,-84 273,-96 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4607"
+         d="m 277,168 c 0,-6 -6,-12 -12,-12" />
+    </g>
+    <g
+       transform="translate(-152.84265,33.612001)"
+       id="g4609">
+      <polyline
+         points="261,-108 231,-108 "
+         id="polyline4611"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 235,156 c -6,0 -12,6 -12,12"
+         id="path4613"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="219,-96 219,-84 "
+         id="polyline4615"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 223,180 c 0,6 6,12 12,12"
+         id="path4617"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="231,-72 261,-72 "
+         id="polyline4619"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 265,192 c 6,0 12,-6 12,-12"
+         id="path4621"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="273,-84 273,-96 "
+         id="polyline4623"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 277,168 c 0,-6 -6,-12 -12,-12"
+         id="path4625"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+    </g>
+    <g
+       id="g4627"
+       transform="translate(-56.84265,33.612001)">
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4629"
+         points="261,-108 231,-108 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4631"
+         d="m 235,156 c -6,0 -12,6 -12,12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4633"
+         points="219,-96 219,-84 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4635"
+         d="m 223,180 c 0,6 6,12 12,12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4637"
+         points="231,-72 261,-72 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4639"
+         d="m 265,192 c 6,0 12,-6 12,-12" />
+      <polyline
+         transform="translate(4,264)"
+         style="fill:none;stroke:#000000"
+         id="polyline4641"
+         points="273,-84 273,-96 " />
+      <path
+         style="fill:none;stroke:#000000"
+         inkscape:connector-curvature="0"
+         id="path4643"
+         d="m 277,168 c 0,-6 -6,-12 -12,-12" />
+    </g>
+    <g
+       transform="translate(39.15735,33.612001)"
+       id="g4645">
+      <polyline
+         points="261,-108 231,-108 "
+         id="polyline4647"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 235,156 c -6,0 -12,6 -12,12"
+         id="path4649"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="219,-96 219,-84 "
+         id="polyline4651"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 223,180 c 0,6 6,12 12,12"
+         id="path4653"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="231,-72 261,-72 "
+         id="polyline4655"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 265,192 c 6,0 12,-6 12,-12"
+         id="path4657"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+      <polyline
+         points="273,-84 273,-96 "
+         id="polyline4659"
+         style="fill:none;stroke:#000000"
+         transform="translate(4,264)" />
+      <path
+         d="m 277,168 c 0,-6 -6,-12 -12,-12"
+         id="path4661"
+         inkscape:connector-curvature="0"
+         style="fill:none;stroke:#000000" />
+    </g>
+    <path
+       inkscape:connector-curvature="0"
+       inkscape:original-d="M 104.42754,85.71624 187.58418,41.109039"
+       inkscape:path-effect="#path-effect4665"
+       id="path4663"
+       d="M 104.42754,85.71624 187.58418,41.109039"
+       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#TriangleOutL)" />
+    <path
+       inkscape:connector-curvature="0"
+       inkscape:original-d="M 200.74902,85.71624 196.34336,41.109039"
+       inkscape:path-effect="#path-effect4669"
+       id="path4667"
+       d="M 200.74902,85.71624 196.34336,41.109039"
+       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#TriangleOutL)" />
+    <path
+       inkscape:connector-curvature="0"
+       inkscape:original-d="M 285.05918,81.861296 205.20678,41.109039"
+       inkscape:path-effect="#path-effect4673"
+       id="path4671"
+       d="M 285.05918,81.861296 205.20678,41.109039"
+       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#TriangleOutL)" />
+    <path
+       inkscape:connector-curvature="0"
+       inkscape:original-d="m 93.41342,184.29266 3.30424,-50.11427"
+       inkscape:path-effect="#path-effect4677"
+       id="path4675"
+       d="m 93.41342,184.29266 3.30424,-50.11427"
+       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#TriangleOutL)" />
+    <path
+       sodipodi:nodetypes="cc"
+       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#TriangleOutL)"
+       d="m 194.92048,184.84337 -2.20282,-50.66498"
+       id="path4683"
+       inkscape:path-effect="#path-effect4685"
+       inkscape:original-d="m 194.92048,184.84337 -2.20282,-50.66498"
+       inkscape:connector-curvature="0" />
+    <path
+       sodipodi:nodetypes="cc"
+       inkscape:connector-curvature="0"
+       inkscape:original-d="m 291.47119,183.19125 -2.75353,-49.01286"
+       inkscape:path-effect="#path-effect4689"
+       id="path4687"
+       d="m 291.47119,183.19125 -2.75353,-49.01286"
+       style="fill:none;stroke:#000000;stroke-width:0.80000001px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#TriangleOutL)" />
+  </g>
+  <text
+     sodipodi:linespacing="125%"
+     id="text6749"
+     y="188.65048"
+     x="608.41602"
+     style="font-size:32px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:end;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:end;fill:#000000;fill-opacity:1;stroke:none;font-family:Purisa;-inkscape-font-specification:Purisa"
+     xml:space="preserve"><tspan
+       id="tspan6757"
+       style="font-size:17.60000038px;text-align:center;text-anchor:middle"
+       y="188.65048"
+       x="608.41602"
+       sodipodi:role="line">Use wildcards to write</tspan><tspan
+       style="font-size:17.60000038px;text-align:center;text-anchor:middle"
+       y="210.65048"
+       x="614.1051"
+       sodipodi:role="line"
+       id="tspan6768">general rules </tspan><tspan
+       style="font-size:17.60000038px;text-align:center;text-anchor:middle"
+       y="232.65048"
+       x="608.41602"
+       sodipodi:role="line"
+       id="tspan6770">for all samples</tspan></text>
+  <text
+     xml:space="preserve"
+     style="font-size:11.19999981px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:DejaVu Sans Mono;-inkscape-font-specification:DejaVu Sans Mono"
+     x="587.0473"
+     y="92.224762"
+     id="text6997"
+     sodipodi:linespacing="125%"><tspan
+       sodipodi:role="line"
+       id="tspan6999"
+       x="587.0473"
+       y="92.224762" /></text>
+  <text
+     xml:space="preserve"
+     style="font-size:32px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:end;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:end;fill:#000000;fill-opacity:1;stroke:none;font-family:Purisa;-inkscape-font-specification:Purisa"
+     x="662.5033"
+     y="14.971384"
+     id="text7001"
+     sodipodi:linespacing="125%"><tspan
+       id="tspan7007"
+       sodipodi:role="line"
+       x="662.5033"
+       y="14.971384"
+       style="font-size:17.60000038px;text-align:center;text-anchor:middle" /><tspan
+       sodipodi:role="line"
+       x="662.5033"
+       y="36.971386"
+       style="font-size:17.60000038px;text-align:center;text-anchor:middle"
+       id="tspan7402">Snakemake determines</tspan><tspan
+       sodipodi:role="line"
+       x="662.5033"
+       y="58.971386"
+       style="font-size:17.60000038px;text-align:center;text-anchor:middle"
+       id="tspan7014">the dependencies</tspan><tspan
+       sodipodi:role="line"
+       x="662.5033"
+       y="80.97139"
+       style="font-size:17.60000038px;text-align:center;text-anchor:middle"
+       id="tspan7016">for you</tspan></text>
+  <path
+     style="fill:none;stroke:#000000;stroke-width:2.4000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutM)"
+     d="M 590.35153,164.91798 C 580.16928,149.47767 567.05439,135.97865 551.91439,125.35501 529.63946,109.72484 502.96935,100.43117 475.80465,98.83323"
+     id="path7018"
+     inkscape:path-effect="#path-effect7020"
+     inkscape:original-d="M 590.35153,164.91798 C 572.39995,162.54886 573.92429,138.27595 551.91439,125.35501 529.9045,112.43406 499.78015,95.445318 475.80465,98.83323"
+     inkscape:connector-curvature="0"
+     sodipodi:nodetypes="csc" />
+  <path
+     sodipodi:nodetypes="csc"
+     inkscape:connector-curvature="0"
+     inkscape:original-d="m 572.72894,241.29172 c -17.95159,2.36912 -49.46963,6.81661 -71.47952,19.73755 -22.0099,12.92095 -71.95967,-28.46517 -95.93517,-31.85308"
+     inkscape:path-effect="#path-effect7392"
+     id="path7390"
+     d="m 572.72894,241.29172 c -21.70087,12.36027 -46.511,19.21106 -71.47952,19.73755 -34.35194,0.72436 -68.83847,-10.72611 -95.93517,-31.85308"
+     style="fill:none;stroke:#000000;stroke-width:2.4000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutM)" />
+  <path
+     style="fill:none;stroke:#000000;stroke-width:2.4000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutM)"
+     d="m 727.399,84.074769 c 6.2608,19.485401 16.04952,37.831381 28.74902,53.881341 11.97423,15.13334 26.52763,28.22059 42.84279,38.5267"
+     id="path7394"
+     inkscape:path-effect="#path-effect7396"
+     inkscape:original-d="m 727.399,84.074769 c -17.95159,2.36912 50.75891,40.960401 28.74902,53.881341 -22.0099,12.92095 66.81829,41.91461 42.84279,38.5267"
+     inkscape:connector-curvature="0"
+     sodipodi:nodetypes="csc" />
+  <path
+     sodipodi:nodetypes="csc"
+     inkscape:connector-curvature="0"
+     inkscape:original-d="m 764.84702,53.235221 c -17.95159,2.36912 80.49704,-9.704577 58.48715,3.216363 -22.0099,12.92095 62.41265,17.68354 38.43715,14.29563"
+     inkscape:path-effect="#path-effect7400"
+     id="path7398"
+     d="m 764.84702,53.235221 c 19.48106,-2.396139 39.38631,-1.301497 58.48715,3.216363 13.33684,3.154518 26.28029,7.968476 38.43715,14.29563"
+     style="fill:none;stroke:#000000;stroke-width:2.4000001;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#TriangleOutM)" />
+</svg>
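The figure above is the overview graphic for the examples: its embedded rule snippets show how a target rule lists the files you want, while wildcard rules describe how to derive each file from an intermediate result. As a minimal sketch of the same idea (rule names are added here for readability; "somecommand" and "somepythoncode()" are placeholders taken from the figure, not real tools), the corresponding Snakefile would look like:

    rule all:
        input: "A.txt", "B.txt", "C.txt"

    # produce the files you want from some intermediate result
    rule result:
        input: "{sample}.inter"
        output: "{sample}.txt"
        shell: "somecommand {input} {output}"   # placeholder command from the figure

    # create a needed intermediate result
    rule intermediate:
        input: "{sample}.in"
        output: "{sample}.inter"
        run:
            somepythoncode()  # placeholder from the figure

Asking for A.txt makes Snakemake infer that A.inter is needed first, which in turn requires A.in; as the figure says, the dependencies are determined automatically.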
diff --git a/examples/latex/Snakefile b/examples/latex/Snakefile
new file mode 100644
index 0000000..2b08089
--- /dev/null
+++ b/examples/latex/Snakefile
@@ -0,0 +1,23 @@
+DOCUMENTS = ['document', 'response-to-editor']
+TEXS = [doc+".tex" for doc in DOCUMENTS]
+PDFS = [doc+".pdf" for doc in DOCUMENTS]
+FIGURES = ['fig1.pdf']
+
+include:
+    'tex.rules'
+
+rule all:
+    input:
+        PDFS
+
+rule zipit:
+    output:
+        'upload.zip'
+    input:
+        TEXS, FIGURES, PDFS
+    shell:
+        'zip -T {output} {input}'
+
+rule pdfclean:
+    shell:
+        "rm -f  {PDFS}"
diff --git a/examples/latex/dag.png b/examples/latex/dag.png
new file mode 100644
index 0000000..88e30fa
Binary files /dev/null and b/examples/latex/dag.png differ
diff --git a/examples/latex/document.tex b/examples/latex/document.tex
new file mode 100644
index 0000000..e69de29
diff --git a/examples/latex/fig1.pdf b/examples/latex/fig1.pdf
new file mode 100644
index 0000000..e69de29
diff --git a/examples/latex/response-to-editor.tex b/examples/latex/response-to-editor.tex
new file mode 100644
index 0000000..e69de29
diff --git a/examples/latex/tex.rules b/examples/latex/tex.rules
new file mode 100644
index 0000000..1988944
--- /dev/null
+++ b/examples/latex/tex.rules
@@ -0,0 +1,30 @@
+ruleorder:  tex2pdf_with_bib > tex2pdf_without_bib
+
+rule tex2pdf_with_bib:
+    input:
+        '{name}.tex',
+        '{name}.bib'
+    output:
+        '{name}.pdf'
+    shell:
+        """
+        pdflatex {wildcards.name}
+        bibtex {wildcards.name}
+        pdflatex {wildcards.name}
+        pdflatex {wildcards.name}
+        """
+
+rule tex2pdf_without_bib:
+    input:
+        '{name}.tex'
+    output:
+        '{name}.pdf'
+    shell:
+        """
+        pdflatex {wildcards.name}
+        pdflatex {wildcards.name}
+        """
+
+rule texclean:
+    shell:
+        "rm -f  *.log *.aux *.bbl *.blg *.synctex.gz"
diff --git a/examples/mirna/dag.dot b/examples/mirna/dag.dot
new file mode 100644
index 0000000..0a28919
--- /dev/null
+++ b/examples/mirna/dag.dot
@@ -0,0 +1,446 @@
+digraph snakemake_dag {
+	35[label = "do_readlengths_of_one_file"];
+	34 -> 35;
+	103[label = "count_mirbase_mappable"];
+	102 -> 103;
+	143[label = "count_mirna_expression_of_one_dataset"];
+	142 -> 143;
+	141 -> 143;
+	42[label = "do_readlengths_pdfs"];
+	41 -> 42;
+	164[label = "annotate"];
+	147 -> 164;
+	90 -> 164;
+	2[label = "compute_readcounts\nds: 554"];
+	10[label = "run_cutadapt\nds: 552"];
+	34[label = "run_cutadapt\nds: 560"];
+	153[label = "rna_type"];
+	152 -> 153;
+	11[label = "do_readlengths_of_one_file"];
+	10 -> 11;
+	138[label = "sort_a_bam_file"];
+	110 -> 138;
+	38[label = "do_readlengths_of_one_file"];
+	37 -> 38;
+	56[label = "do_readlengths_of_one_file"];
+	55 -> 56;
+	36[label = "do_readlengths_pdfs"];
+	35 -> 36;
+	137[label = "count_mirna_expression_of_one_dataset"];
+	135 -> 137;
+	136 -> 137;
+	13[label = "run_cutadapt\nds: 553"];
+	37[label = "run_cutadapt\nds: 561"];
+	41[label = "do_readlengths_of_one_file"];
+	40 -> 41;
+	109[label = "count_mirbase_mappable"];
+	108 -> 109;
+	132[label = "sort_a_bam_file"];
+	106 -> 132;
+	165[label = "rna_type"];
+	164 -> 165;
+	18[label = "do_readlengths_pdfs"];
+	17 -> 18;
+	40[label = "do_shortreads\nds: 552"];
+	16[label = "run_cutadapt\nds: 554"];
+	84[label = "map_ds_against_hg"];
+	16 -> 84;
+	81 -> 84;
+	151[label = "rna_type"];
+	150 -> 151;
+	155[label = "rna_type"];
+	154 -> 155;
+	51[label = "do_readlengths_pdfs"];
+	50 -> 51;
+	166[label = "annotate"];
+	91 -> 166;
+	147 -> 166;
+	20[label = "do_readlengths_of_one_file"];
+	19 -> 20;
+	43[label = "do_shortreads\nds: 553"];
+	64[label = "do_shortreads\nds: 560"];
+	0[label = "compute_readcounts\nds: 552"];
+	139[label = "index_a_bam_file"];
+	138 -> 139;
+	5[label = "compute_readcounts\nds: 557"];
+	19[label = "run_cutadapt\nds: 555"];
+	54[label = "do_readlengths_pdfs"];
+	53 -> 54;
+	21[label = "do_readlengths_pdfs"];
+	20 -> 21;
+	152[label = "annotate"];
+	84 -> 152;
+	147 -> 152;
+	47[label = "do_readlengths_of_one_file"];
+	46 -> 47;
+	46[label = "do_shortreads\nds: 554"];
+	23[label = "do_readlengths_of_one_file"];
+	22 -> 23;
+	150[label = "annotate"];
+	147 -> 150;
+	83 -> 150;
+	24[label = "do_readlengths_pdfs"];
+	23 -> 24;
+	57[label = "do_readlengths_pdfs"];
+	56 -> 57;
+	118[label = "index_a_bam_file"];
+	117 -> 118;
+	22[label = "run_cutadapt\nds: 556"];
+	49[label = "do_shortreads\nds: 555"];
+	140[label = "count_mirna_expression_of_one_dataset"];
+	138 -> 140;
+	139 -> 140;
+	170[label = "all"];
+	57 -> 170;
+	101 -> 170;
+	54 -> 170;
+	21 -> 170;
+	24 -> 170;
+	85 -> 170;
+	103 -> 170;
+	2 -> 170;
+	27 -> 170;
+	144 -> 170;
+	63 -> 170;
+	78 -> 170;
+	82 -> 170;
+	105 -> 170;
+	3 -> 170;
+	86 -> 170;
+	6 -> 170;
+	0 -> 170;
+	107 -> 170;
+	97 -> 170;
+	80 -> 170;
+	36 -> 170;
+	12 -> 170;
+	39 -> 170;
+	8 -> 170;
+	109 -> 170;
+	88 -> 170;
+	18 -> 170;
+	71 -> 170;
+	83 -> 170;
+	5 -> 170;
+	4 -> 170;
+	89 -> 170;
+	60 -> 170;
+	48 -> 170;
+	145 -> 170;
+	113 -> 170;
+	90 -> 170;
+	42 -> 170;
+	169 -> 170;
+	73 -> 170;
+	7 -> 170;
+	84 -> 170;
+	91 -> 170;
+	30 -> 170;
+	51 -> 170;
+	1 -> 170;
+	146 -> 170;
+	87 -> 170;
+	9 -> 170;
+	95 -> 170;
+	33 -> 170;
+	69 -> 170;
+	111 -> 170;
+	99 -> 170;
+	45 -> 170;
+	15 -> 170;
+	66 -> 170;
+	55[label = "do_shortreads\nds: 557"];
+	26[label = "do_readlengths_of_one_file"];
+	25 -> 26;
+	60[label = "do_readlengths_pdfs"];
+	59 -> 60;
+	93[label = "build_bwa_index"];
+	92 -> 93;
+	27[label = "do_readlengths_pdfs"];
+	26 -> 27;
+	52[label = "do_shortreads\nds: 556"];
+	135[label = "sort_a_bam_file"];
+	108 -> 135;
+	4[label = "compute_readcounts\nds: 556"];
+	59[label = "do_readlengths_of_one_file"];
+	58 -> 59;
+	63[label = "do_readlengths_pdfs"];
+	62 -> 63;
+	154[label = "annotate"];
+	147 -> 154;
+	85 -> 154;
+	29[label = "do_readlengths_of_one_file"];
+	28 -> 29;
+	6[label = "compute_readcounts\nds: 558"];
+	14[label = "do_readlengths_of_one_file"];
+	13 -> 14;
+	3[label = "compute_readcounts\nds: 555"];
+	114[label = "sort_a_bam_file"];
+	94 -> 114;
+	73[label = "do_readlengths_pdfs"];
+	72 -> 73;
+	7[label = "compute_readcounts\nds: 559"];
+	120[label = "sort_a_bam_file"];
+	98 -> 120;
+	142[label = "index_a_bam_file"];
+	141 -> 142;
+	58[label = "do_shortreads\nds: 558"];
+	158[label = "annotate"];
+	87 -> 158;
+	147 -> 158;
+	62[label = "do_readlengths_of_one_file"];
+	61 -> 62;
+	9[label = "compute_readcounts\nds: 561"];
+	28[label = "run_cutadapt\nds: 558"];
+	157[label = "rna_type"];
+	156 -> 157;
+	12[label = "do_readlengths_pdfs"];
+	11 -> 12;
+	31[label = "run_cutadapt\nds: 559"];
+	61[label = "do_shortreads\nds: 559"];
+	168[label = "rnatypes"];
+	165 -> 168;
+	151 -> 168;
+	149 -> 168;
+	153 -> 168;
+	161 -> 168;
+	163 -> 168;
+	159 -> 168;
+	157 -> 168;
+	155 -> 168;
+	167 -> 168;
+	65[label = "do_readlengths_of_one_file"];
+	64 -> 65;
+	136[label = "index_a_bam_file"];
+	135 -> 136;
+	126[label = "sort_a_bam_file"];
+	102 -> 126;
+	50[label = "do_readlengths_of_one_file"];
+	49 -> 50;
+	122[label = "count_mirna_expression_of_one_dataset"];
+	121 -> 122;
+	120 -> 122;
+	147[label = "hgtrack"];
+	39[label = "do_readlengths_pdfs"];
+	38 -> 39;
+	144[label = "normalize_expressions"];
+	128 -> 144;
+	116 -> 144;
+	122 -> 144;
+	119 -> 144;
+	137 -> 144;
+	131 -> 144;
+	125 -> 144;
+	143 -> 144;
+	140 -> 144;
+	134 -> 144;
+	113[label = "count_mirbase_mappable"];
+	112 -> 113;
+	68[label = "do_readlengths_of_one_file"];
+	67 -> 68;
+	45[label = "do_readlengths_pdfs"];
+	44 -> 45;
+	82[label = "map_ds_against_hg"];
+	10 -> 82;
+	81 -> 82;
+	86[label = "map_ds_against_hg"];
+	22 -> 86;
+	81 -> 86;
+	117[label = "sort_a_bam_file"];
+	96 -> 117;
+	159[label = "rna_type"];
+	158 -> 159;
+	67[label = "do_shortreads\nds: 561"];
+	8[label = "compute_readcounts\nds: 560"];
+	88[label = "map_ds_against_hg"];
+	28 -> 88;
+	81 -> 88;
+	99[label = "count_mirbase_mappable"];
+	98 -> 99;
+	71[label = "do_readlengths_pdfs"];
+	70 -> 71;
+	161[label = "rna_type"];
+	160 -> 161;
+	25[label = "run_cutadapt\nds: 557"];
+	89[label = "map_ds_against_hg"];
+	31 -> 89;
+	81 -> 89;
+	32[label = "do_readlengths_of_one_file"];
+	31 -> 32;
+	124[label = "index_a_bam_file"];
+	123 -> 124;
+	90[label = "map_ds_against_hg"];
+	34 -> 90;
+	81 -> 90;
+	141[label = "sort_a_bam_file"];
+	112 -> 141;
+	131[label = "count_mirna_expression_of_one_dataset"];
+	129 -> 131;
+	130 -> 131;
+	160[label = "annotate"];
+	147 -> 160;
+	88 -> 160;
+	91[label = "map_ds_against_hg"];
+	37 -> 91;
+	81 -> 91;
+	95[label = "count_mirbase_mappable"];
+	94 -> 95;
+	97[label = "count_mirbase_mappable"];
+	96 -> 97;
+	87[label = "map_ds_against_hg"];
+	25 -> 87;
+	81 -> 87;
+	70[label = "do_readlengths_summary"];
+	23 -> 70;
+	11 -> 70;
+	17 -> 70;
+	26 -> 70;
+	29 -> 70;
+	32 -> 70;
+	14 -> 70;
+	35 -> 70;
+	20 -> 70;
+	38 -> 70;
+	133[label = "index_a_bam_file"];
+	132 -> 133;
+	149[label = "rna_type"];
+	148 -> 149;
+	94[label = "map_ds_against_mirbase"];
+	93 -> 94;
+	40 -> 94;
+	92 -> 94;
+	92[label = "compute_mirnas_with_context"];
+	116[label = "count_mirna_expression_of_one_dataset"];
+	114 -> 116;
+	115 -> 116;
+	125[label = "count_mirna_expression_of_one_dataset"];
+	123 -> 125;
+	124 -> 125;
+	33[label = "do_readlengths_pdfs"];
+	32 -> 33;
+	96[label = "map_ds_against_mirbase"];
+	93 -> 96;
+	43 -> 96;
+	92 -> 96;
+	78[label = "do_readlengths_pdfs"];
+	77 -> 78;
+	101[label = "count_mirbase_mappable"];
+	100 -> 101;
+	17[label = "do_readlengths_of_one_file"];
+	16 -> 17;
+	98[label = "map_ds_against_mirbase"];
+	93 -> 98;
+	46 -> 98;
+	92 -> 98;
+	146[label = "correlate_seq_with_rtpcr"];
+	144 -> 146;
+	119[label = "count_mirna_expression_of_one_dataset"];
+	117 -> 119;
+	118 -> 119;
+	69[label = "do_readlengths_pdfs"];
+	68 -> 69;
+	72[label = "do_readlengths_summary"];
+	50 -> 72;
+	41 -> 72;
+	56 -> 72;
+	53 -> 72;
+	47 -> 72;
+	44 -> 72;
+	68 -> 72;
+	59 -> 72;
+	65 -> 72;
+	62 -> 72;
+	100[label = "map_ds_against_mirbase"];
+	93 -> 100;
+	49 -> 100;
+	92 -> 100;
+	162[label = "annotate"];
+	89 -> 162;
+	147 -> 162;
+	53[label = "do_readlengths_of_one_file"];
+	52 -> 53;
+	105[label = "count_mirbase_mappable"];
+	104 -> 105;
+	167[label = "rna_type"];
+	166 -> 167;
+	48[label = "do_readlengths_pdfs"];
+	47 -> 48;
+	102[label = "map_ds_against_mirbase"];
+	93 -> 102;
+	52 -> 102;
+	92 -> 102;
+	107[label = "count_mirbase_mappable"];
+	106 -> 107;
+	148[label = "annotate"];
+	82 -> 148;
+	147 -> 148;
+	111[label = "count_mirbase_mappable"];
+	110 -> 111;
+	169[label = "plot_rnatypes"];
+	168 -> 169;
+	104[label = "map_ds_against_mirbase"];
+	93 -> 104;
+	55 -> 104;
+	92 -> 104;
+	80[label = "plot_mirna_distancematrix_histogram"];
+	79 -> 80;
+	127[label = "index_a_bam_file"];
+	126 -> 127;
+	106[label = "map_ds_against_mirbase"];
+	93 -> 106;
+	58 -> 106;
+	92 -> 106;
+	134[label = "count_mirna_expression_of_one_dataset"];
+	133 -> 134;
+	132 -> 134;
+	108[label = "map_ds_against_mirbase"];
+	93 -> 108;
+	61 -> 108;
+	92 -> 108;
+	77[label = "compute_mirna_lengths"];
+	79[label = "compute_fasta_distancematrix\nprefix: mirbase/mirnas"];
+	85[label = "map_ds_against_hg"];
+	19 -> 85;
+	81 -> 85;
+	163[label = "rna_type"];
+	162 -> 163;
+	110[label = "map_ds_against_mirbase"];
+	64 -> 110;
+	92 -> 110;
+	93 -> 110;
+	66[label = "do_readlengths_pdfs"];
+	65 -> 66;
+	129[label = "sort_a_bam_file"];
+	104 -> 129;
+	121[label = "index_a_bam_file"];
+	120 -> 121;
+	30[label = "do_readlengths_pdfs"];
+	29 -> 30;
+	112[label = "map_ds_against_mirbase"];
+	67 -> 112;
+	92 -> 112;
+	93 -> 112;
+	1[label = "compute_readcounts\nds: 553"];
+	128[label = "count_mirna_expression_of_one_dataset"];
+	126 -> 128;
+	127 -> 128;
+	130[label = "index_a_bam_file"];
+	129 -> 130;
+	44[label = "do_readlengths_of_one_file"];
+	43 -> 44;
+	81[label = "build_bwa_index\nprefix: hgref/hg1kv37.fasta.gz"];
+	15[label = "do_readlengths_pdfs"];
+	14 -> 15;
+	145[label = "differential_expressions"];
+	144 -> 145;
+	156[label = "annotate"];
+	147 -> 156;
+	86 -> 156;
+	83[label = "map_ds_against_hg"];
+	13 -> 83;
+	81 -> 83;
+	123[label = "sort_a_bam_file"];
+	100 -> 123;
+	115[label = "index_a_bam_file"];
+	114 -> 115;
+}
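DAG files like this one are produced by Snakemake itself: snakemake --dag prints the directed acyclic graph of jobs in the Graphviz dot language, which can then be rendered to the dag.png committed alongside. For example:

    snakemake --dag | dot -Tpng > dag.png
    snakemake --dag | dot -Tsvg > dag.svg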
diff --git a/examples/mirna/dag.png b/examples/mirna/dag.png
new file mode 100644
index 0000000..dc0e826
Binary files /dev/null and b/examples/mirna/dag.png differ
diff --git a/examples/report/Snakefile b/examples/report/Snakefile
new file mode 100644
index 0000000..17eafd9
--- /dev/null
+++ b/examples/report/Snakefile
@@ -0,0 +1,30 @@
+
+from snakemake.utils import report
+
+rule report:
+	input: "Snakefile"
+	output: "report.html"
+	run:
+		report("""
+		======================
+		Report of some project
+		======================
+
+		Some text containing a formula
+		:math:`\sum_{{j \in E}} t_j \leq I`
+		and embedding a table F1_ and a figure F2_.
+
+		Additionally you can of course include inline tables:
+
+		========= ====== =====
+		Merged header
+		----------------------
+		Parameter Value  Other
+		========= ====== =====
+		kmer      3      foo
+		iteration 4      bar
+		========= ====== =====
+		
+
+		test
+		""", output[0], F1=input[0], F2=input[0])
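The report() helper from snakemake.utils renders the given reStructuredText into a single self-contained HTML page; each keyword argument attaches a file and becomes an RST link target (F1 and F2 above are referenced as F1_ and F2_, and the files are embedded as base64 data URIs, as visible in report.html below). The general call shape, with hypothetical attachment names, is:

    from snakemake.utils import report

    # report(rst_text, output_path, **attachments)
    # each keyword becomes an RST link target, e.g. T1 -> T1_
    report("Results: see T1_ and P1_.", "out.html",
           T1="table.csv", P1="plot.pdf")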
diff --git a/examples/report/report.html b/examples/report/report.html
new file mode 100644
index 0000000..1cff203
--- /dev/null
+++ b/examples/report/report.html
@@ -0,0 +1,114 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="Docutils 0.9.1: http://docutils.sourceforge.net/" />
+<title>Report of some project</title>
+<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
+<style type="text/css">
+
+
+body {
+	background: gray;
+	font-family: sans-serif;
+	padding-top: 10px;
+}
+
+div.document {
+	background: white;
+	max-width: 800px;
+	margin: auto;
+	box-shadow: 8px 8px 8px #666;
+	padding: 10px;
+}
+
+div.document:before {
+	content: "snakemake report";
+	text-align: right;
+	display: block;
+	font-weight: bold;
+}
+
+div.document p {
+	font-family: serif;
+}
+
+div#metadata {
+	text-align: right;
+}
+
+table.docutils {
+	border: none;
+	border-collapse: collapse;
+	border-top: 2px solid gray;
+	border-bottom: 2px solid gray;
+	text-align: center;
+}
+
+table.docutils th {
+	border: none;
+	border-top: 2px solid gray;
+	border-bottom: 2px solid gray;
+	padding: 5px;
+}
+
+table.docutils td {
+	border: none;
+	padding: 5px;
+}
+
+table.docutils th:last-child, td:last-child {
+	text-align: left;
+}
+
+table.docutils th:first-child, td:first-child {
+	text-align: right;
+}
+
+table.docutils th:only-child, td:only-child {
+	text-align: center;
+}
+
+</style>
+</head>
+<body>
+<div class="document" id="report-of-some-project">
+<h1 class="title">Report of some project</h1>
+
+<p>Some text containing a formula
+<span class="math">
+\(\sum_{j \in E} t_j \leq I\)</span>
+
+and embedding a table <a class="reference external" href="data:;charset=utf8;base64,CmZyb20gc25ha2VtYWtlLnV0aWxzIGltcG9ydCByZXBvcnQKCnJ1bGUgcmVwb3J0OgoJaW5wdXQ6ICJTbmFrZWZpbGUiCglvdXRwdXQ6ICJyZXBvcnQuaHRtbCIKCXJ1bjoKCQlyZXBvcnQoIiIiCgkJPT09PT09PT09PT09PT09PT09PT09PQoJCVJlcG9ydCBvZiBzb21lIHByb2plY3QKCQk9PT09PT09PT09PT09PT09PT09PT09CgoJCVNvbWUgdGV4dCBjb250YWluaW5nIGEgZm9ybXVsYQoJCTptYXRoOmBcc3VtX3t7aiBcaW4gRX19IHRfaiBcbGVxIElgCgkJYW5kIGVtYmVkZGluZyBhIHRhYmxlIEYxXyBhbmQgYSBmaWd1cmUgRjJfLgoK [...]
+<p>Additionally you can of course include inline tables:</p>
+<table border="1" class="docutils">
+<colgroup>
+<col width="45%" />
+<col width="30%" />
+<col width="25%" />
+</colgroup>
+<thead valign="bottom">
+<tr><th class="head" colspan="3">Merged header</th>
+</tr>
+<tr><th class="head">Parameter</th>
+<th class="head">Value</th>
+<th class="head">Other</th>
+</tr>
+</thead>
+<tbody valign="top">
+<tr><td>kmer</td>
+<td>3</td>
+<td>foo</td>
+</tr>
+<tr><td>iteration</td>
+<td>4</td>
+<td>bar</td>
+</tr>
+</tbody>
+</table>
+<p>test</p>
+<div id="metadata">2013-01-31</div>
+</div>
+</body>
+</html>
diff --git a/logo.png b/logo.png
new file mode 100644
index 0000000..90388b4
Binary files /dev/null and b/logo.png differ
diff --git a/logo.svg b/logo.svg
new file mode 100644
index 0000000..d259360
--- /dev/null
+++ b/logo.svg
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="200.35001"
+   height="200.35001"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.3.1 r9886"
+   sodipodi:docname="logo.svg">
+  <defs
+     id="defs4" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.98994949"
+     inkscape:cx="75.972507"
+     inkscape:cy="-29.716642"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     showguides="true"
+     inkscape:guide-bbox="true"
+     inkscape:window-width="1440"
+     inkscape:window-height="850"
+     inkscape:window-x="0"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     showborder="true">
+    <sodipodi:guide
+       orientation="0,1"
+       position="50.73655,128.87169"
+       id="guide2985" />
+    <sodipodi:guide
+       orientation="0,1"
+       position="126.49799,21.795564"
+       id="guide2987" />
+    <sodipodi:guide
+       orientation="1,0"
+       position="17.401516,72.303194"
+       id="guide2989" />
+  </sodipodi:namedview>
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Ebene 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-52.29901,27.248325)">
+    <g
+       id="g3817"
+       transform="translate(22,-34)">
+      <path
+         sodipodi:nodetypes="csssc"
+         inkscape:connector-curvature="0"
+         id="path2991"
+         d="m 57.700526,151.30611 c 0,0 -6.260158,-107.076127 30.690963,-107.076127 36.951121,0 1.73984,107.076127 38.690961,107.076127 36.95112,0 1.73984,-107.076125 38.69096,-107.076127 36.95112,-2e-6 30.69097,107.076127 30.69097,107.076127"
+         style="fill:none;stroke:#535353;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+      <path
+         transform="translate(-4.4847712,-29.010153)"
+         d="m 212.13203,168.4787 c 0,9.48416 -4.97486,17.17259 -11.11167,17.17259 -6.13682,0 -11.11168,-7.68843 -11.11168,-17.17259 0,-9.48416 4.97486,-17.17259 11.11168,-17.17259 6.13681,0 11.11167,7.68843 11.11167,17.17259 z"
+         sodipodi:ry="17.172592"
+         sodipodi:rx="11.111678"
+         sodipodi:cy="168.4787"
+         sodipodi:cx="201.02036"
+         id="path3761"
+         style="fill:#535353;fill-opacity:1;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none"
+         sodipodi:type="arc" />
+      <g
+         style="stroke:#535353;stroke-opacity:1"
+         transform="matrix(0.99202526,-0.12603922,0.12603922,0.99202526,-16.815648,26.089196)"
+         id="g3788">
+        <path
+           style="fill:none;stroke:#535353;stroke-width:3;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+           d="m 195.96959,151.30611 c 0,0 -1.47462,16.22927 -7.78807,18.5863"
+           id="path3765"
+           inkscape:connector-curvature="0"
+           sodipodi:nodetypes="cc" />
+        <path
+           sodipodi:nodetypes="cc"
+           inkscape:connector-curvature="0"
+           id="path3786"
+           d="m 195.80643,151.56319 c 0,0 -3.70358,15.86969 1.54841,20.09246"
+           style="fill:none;stroke:#535353;stroke-width:3;stroke-linecap:round;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+      </g>
+    </g>
+  </g>
+</svg>
diff --git a/misc/snakemake.vim b/misc/snakemake.vim
new file mode 100644
index 0000000..e0f59f8
--- /dev/null
+++ b/misc/snakemake.vim
@@ -0,0 +1,61 @@
+" Vim syntax file
+" Language:	Snakemake (extended from python.vim)
+" Maintainer:	Jay Hesselberth (jay.hesselberth at gmail.com)
+" Last Change:	2015 Jul 1 
+"
+" Usage
+"
+" copy to $HOME/.vim/syntax directory and add:
+"
+" au BufNewFile,BufRead Snakefile set syntax=snakemake
+" au BufNewFile,BufRead *.snake set syntax=snakemake
+"
+" to your $HOME/.vimrc file
+"
+" force coloring in a vim session with:
+"
+" :set syntax=snakemake
+"
+
+" load settings from system python.vim (7.4)
+source $VIMRUNTIME/syntax/python.vim
+
+"
+" Snakemake rules, as of version 3.3
+"
+" XXX N.B. several of the new defs are missing from this table i.e.
+" subworkflow, touch etc
+"
+" rule       = "rule" (identifier | "") ":" ruleparams
+" include    = "include:" stringliteral
+" workdir    = "workdir:" stringliteral
+" ni         = NEWLINE INDENT
+" ruleparams = [ni input] [ni output] [ni params] [ni message] [ni threads] [ni (run | shell)] NEWLINE snakemake
+" input      = "input" ":" parameter_list
+" output     = "output" ":" parameter_list
+" params     = "params" ":" parameter_list
+" message    = "message" ":" stringliteral
+" threads    = "threads" ":" integer
+" resources  = "resources" ":" parameter_list
+" version    = "version" ":" statement
+" run        = "run" ":" ni statement
+" shell      = "shell" ":" stringliteral
+
+syn keyword pythonStatement	include workdir onsuccess onerror
+syn keyword pythonStatement	ruleorder localrules configfile
+syn keyword pythonStatement	touch protected temp
+syn keyword pythonStatement	input output params message threads resources
+syn keyword pythonStatement	version run shell benchmark snakefile log
+syn keyword pythonStatement	rule subworkflow nextgroup=pythonFunction skipwhite
+
+" similar to special def and class treatment from python.vim, except
+" parenthetical part of def and class
+syn match   pythonFunction
+      \ "\%(\%(rule\s\|subworkflow\s\)\s*\)\@<=\h*" contained
+
+syn sync match pythonSync grouphere NONE "^\s*\%(rule\|subworkflow\)\s\+\h\w*\s*"
+
+let b:current_syntax = "snakemake"
+
+" vim:set sw=2 sts=2 ts=8 noet:
+
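To check the highlighting, here is a small rule exercising most of the keywords from the grammar table in the comments above (mapper is a placeholder command; all names are hypothetical):

    rule map_reads:
        input: "ref.fa", "reads.fq"
        output: "mapped.bam"
        params: opts="-k 10"
        message: "mapping {input} to {output}"
        threads: 8
        shell: "mapper {params.opts} -t {threads} {input} > {output}"

With the autocommands from the header in place, opening this as a Snakefile should highlight rule, input, output, params, message, threads and shell as statements, and map_reads as a function name.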
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..900c0fc
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,48 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import sys
+
+if sys.version_info < (3, 2):
+    print("At least Python 3.2 is required.\n", file=sys.stderr)
+    exit(1)
+
+try:
+    from setuptools import setup
+except ImportError:
+    print("Please install setuptools before installing snakemake.",
+          file=sys.stderr)
+    exit(1)
+
+# load version info
+exec(open("snakemake/version.py").read())
+
+setup(
+    name='snakemake',
+    version=__version__,
+    author='Johannes Köster',
+    author_email='johannes.koester at tu-dortmund.de',
+    description=
+    'Build systems like GNU Make are frequently used to create complicated '
+    'workflows, e.g. in bioinformatics. This project aims to reduce the '
+    'complexity of creating workflows by providing a clean and modern domain '
+    'specific language (DSL) in python style, together with a fast and '
+    'comfortable execution environment.',
+    zip_safe=False,
+    license='MIT',
+    url='https://bitbucket.org/johanneskoester/snakemake',
+    packages=['snakemake'],
+    entry_points={
+        "console_scripts":
+        ["snakemake = snakemake:main",
+         "snakemake-bash-completion = snakemake:bash_completion"]
+    },
+    package_data={'': ['*.css', '*.sh', '*.html']},
+    classifiers=
+    ["Development Status :: 5 - Production/Stable", "Environment :: Console",
+     "Intended Audience :: Science/Research",
+     "License :: OSI Approved :: MIT License", "Natural Language :: English",
+     "Programming Language :: Python :: 3",
+     "Topic :: Scientific/Engineering :: Bio-Informatics"])
diff --git a/snakemake/__init__.py b/snakemake/__init__.py
new file mode 100644
index 0000000..c068e14
--- /dev/null
+++ b/snakemake/__init__.py
@@ -0,0 +1,985 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import subprocess
+import glob
+import argparse
+from argparse import ArgumentError
+import logging as _logging
+import multiprocessing
+import re
+import sys
+import inspect
+import threading
+import webbrowser
+from functools import partial
+
+from snakemake.workflow import Workflow
+from snakemake.exceptions import print_exception
+from snakemake.logging import setup_logger, logger
+from snakemake.version import __version__
+from snakemake.io import load_configfile
+from snakemake.shell import shell
+
+
+def snakemake(snakefile,
+              listrules=False,
+              list_target_rules=False,
+              cores=1,
+              nodes=1,
+              resources=dict(),
+              config=dict(),
+              configfile=None,
+              config_args=None,
+              workdir=None,
+              targets=None,
+              dryrun=False,
+              touch=False,
+              forcetargets=False,
+              forceall=False,
+              forcerun=[],
+              prioritytargets=[],
+              stats=None,
+              printreason=False,
+              printshellcmds=False,
+              printdag=False,
+              printrulegraph=False,
+              printd3dag=False,
+              nocolor=False,
+              quiet=False,
+              keepgoing=False,
+              cluster=None,
+              cluster_config=None,
+              cluster_sync=None,
+              drmaa=None,
+              jobname="snakejob.{rulename}.{jobid}.sh",
+              immediate_submit=False,
+              standalone=False,
+              ignore_ambiguity=False,
+              snakemakepath=None,
+              lock=True,
+              unlock=False,
+              cleanup_metadata=None,
+              force_incomplete=False,
+              ignore_incomplete=False,
+              list_version_changes=False,
+              list_code_changes=False,
+              list_input_changes=False,
+              list_params_changes=False,
+              list_resources=False,
+              summary=False,
+              detailed_summary=False,
+              latency_wait=3,
+              benchmark_repeats=1,
+              wait_for_files=None,
+              print_compilation=False,
+              debug=False,
+              notemp=False,
+              nodeps=False,
+              keep_target_files=False,
+              allowed_rules=None,
+              jobscript=None,
+              timestamp=False,
+              greediness=None,
+              overwrite_shellcmd=None,
+              updated_files=None,
+              log_handler=None,
+              keep_logger=False,
+              verbose=False):
+    """Run snakemake on a given snakefile.
+
+    This function provides access to the whole snakemake functionality. It is not thread-safe.
+
+    Args:
+        snakefile (str):            the path to the snakefile
+        listrules (bool):           list rules (default False)
+        list_target_rules (bool):   list target rules (default False)
+        cores (int):                the number of provided cores (ignored when using cluster support) (default 1)
+        nodes (int):                the number of provided cluster nodes (ignored without cluster support) (default 1)
+        resources (dict):           provided resources, a dictionary assigning integers to resource names, e.g. {"gpu": 1, "io": 5} (default {})
+        config (dict):              override values for workflow config
+        workdir (str):              path to working directory (default None)
+        targets (list):             list of targets, e.g. rule or file names (default None)
+        dryrun (bool):              only dry-run the workflow (default False)
+        touch (bool):               only touch all output files if present (default False)
+        forcetargets (bool):        force given targets to be re-created (default False)
+        forceall (bool):            force all output files to be re-created (default False)
+        forcerun (list):            list of files and rules that shall be re-created/re-executed (default [])
+        prioritytargets (list):     list of targets that shall be run with maximum priority (default [])
+        stats (str):                path to file that shall contain stats about the workflow execution (default None)
+        printreason (bool):         print the reason for the execution of each job (default False)
+        printshellcmds (bool):      print the shell command of each job (default False)
+        printdag (bool):            print the dag in the graphviz dot language (default False)
+        printrulegraph (bool):      print the graph of rules in the graphviz dot language (default False)
+        printd3dag (bool):          print a D3.js compatible JSON representation of the DAG (default False)
+        nocolor (bool):             do not print colored output (default False)
+        quiet (bool):               do not print any default job information (default False)
+        keepgoing (bool):           keep going upon errors (default False)
+        cluster (str):              submission command of a cluster or batch system to use, e.g. qsub (default None)
+        cluster_config (str):       configuration file for cluster options (default None)
+        cluster_sync (str):         blocking cluster submission command (like SGE 'qsub -sync y') (default None)
+        drmaa (str):                if not None, use DRMAA for cluster support; the string specifies native args passed to the cluster when submitting a job
+        jobname (str):              naming scheme for cluster job scripts (default "snakejob.{rulename}.{jobid}.sh")
+        immediate_submit (bool):    immediately submit all cluster jobs, regardless of dependencies (default False)
+        standalone (bool):          kill all processes very rudely in case of failure (do not use this if you use this API) (default False)
+        ignore_ambiguity (bool):    ignore ambiguous rules and always take the first possible one (default False)
+        snakemakepath (str):        path to the snakemake executable (default None)
+        lock (bool):                lock the working directory when executing the workflow (default True)
+        unlock (bool):              just unlock the working directory (default False)
+        cleanup_metadata (bool):    just cleanup metadata of output files (default False)
+        force_incomplete (bool):    force the re-creation of incomplete files (default False)
+        ignore_incomplete (bool):   ignore incomplete files (default False)
+        list_version_changes (bool): list output files with changed rule version (default False)
+        list_code_changes (bool):   list output files with changed rule code (default False)
+        list_input_changes (bool):  list output files with changed input files (default False)
+        list_params_changes (bool): list output files with changed params (default False)
+        summary (bool):             list summary of all output files and their status (default False)
+        latency_wait (int):         how many seconds to wait for an output file to appear after the execution of a job, e.g. to handle filesystem latency (default 3)
+        benchmark_repeats (int):    number of repeated runs of a job if declared for benchmarking (default 1)
+        wait_for_files (list):      wait for given files to be present before executing the workflow
+        list_resources (bool):      list resources used in the workflow (default False)
+        detailed_summary (bool):    list summary of all input and output files and their status (default False)
+        print_compilation (bool):   print the compilation of the snakefile (default False)
+        debug (bool):               allow using the debugger within rules (default False)
+        notemp (bool):              ignore temp file flags, e.g. do not delete output files marked as temp after use (default False)
+        nodeps (bool):              ignore dependencies (default False)
+        keep_target_files (bool):   Do not adjust the paths of given target files relative to the working directory.
+        allowed_rules (set):        Restrict allowed rules to the given set. If None or empty, all rules are used.
+        jobscript (str):            path to a custom shell script template for cluster jobs (default None)
+        timestamp (bool):           print time stamps in front of any output (default False)
+        greediness (float):         set the greediness of scheduling. This value between 0 and 1 determines how carefully jobs are selected for execution. The default value (0.5 if prioritytargets are used, otherwise 1.0) provides the best speed and still acceptable scheduling quality.
+        overwrite_shellcmd (str):   a shell command that shall be executed instead of those given in the workflow. This is for debugging purposes only.
+        updated_files (list):       a list that will be filled with the files that are updated or created during the workflow execution
+        verbose (bool):             show additional debug output (default False)
+        log_handler (function):     redirect snakemake output to this custom log handler, a function that takes a log message dictionary (see below) as its only argument (default None). The log message dictionary for the log handler has the following entries:
+
+            :level:
+                the log level ("info", "error", "debug", "progress", "job_info")
+
+            :level="info", "error" or "debug":
+                :msg:
+                    the log message
+            :level="progress":
+                :done:
+                    number of already executed jobs
+
+                :total:
+                    number of total jobs
+
+            :level="job_info":
+                :input:
+                    list of input files of a job
+
+                :output:
+                    list of output files of a job
+
+                :log:
+                    path to log file of a job
+
+                :local:
+                    whether a job is executed locally (i.e. ignoring cluster)
+
+                :msg:
+                    the job message
+
+                :reason:
+                    the job reason
+
+                :priority:
+                    the job priority
+
+                :threads:
+                    the threads of the job
+
+
+    Returns:
+        bool:   True if workflow execution was successful.
+
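+    Example:
+        A minimal sketch of calling this API with a custom log handler;
+        the handler below is illustrative, not part of snakemake::
+
+            def progress_handler(msg):
+                if msg["level"] == "progress":
+                    print("{done} of {total} jobs done".format(**msg))
+
+            success = snakemake("Snakefile",
+                                cores=4,
+                                log_handler=progress_handler)
+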
+    """
+
+    if updated_files is None:
+        updated_files = list()
+
+    if cluster or cluster_sync or drmaa:
+        cores = sys.maxsize
+    else:
+        nodes = sys.maxsize
+
+    if cluster_config:
+        cluster_config = load_configfile(cluster_config)
+    else:
+        cluster_config = dict()
+
+    if not keep_logger:
+        setup_logger(handler=log_handler,
+                     quiet=quiet,
+                     printreason=printreason,
+                     printshellcmds=printshellcmds,
+                     nocolor=nocolor,
+                     stdout=dryrun,
+                     debug=verbose,
+                     timestamp=timestamp)
+
+    if greediness is None:
+        greediness = 0.5 if prioritytargets else 1.0
+    else:
+        if not (0 <= greediness <= 1.0):
+            logger.error("Error: greediness must be a float between 0 and 1.")
+            return False
+
+    if not os.path.exists(snakefile):
+        logger.error("Error: Snakefile \"{}\" not present.".format(snakefile))
+        return False
+    snakefile = os.path.abspath(snakefile)
+
+    cluster_mode = (cluster is not None) + (cluster_sync is not None) + (drmaa is not None)
+    if cluster_mode > 1:
+        logger.error("Error: cluster and drmaa args are mutually exclusive")
+        return False
+    if debug and (cores > 1 or cluster_mode):
+        logger.error("Error: debug mode cannot be used with more than one core or cluster execution.")
+        return False
+
+    overwrite_config = dict()
+    if configfile:
+        overwrite_config.update(load_configfile(configfile))
+    if config:
+        overwrite_config.update(config)
+
+    if workdir:
+        olddir = os.getcwd()
+        if not os.path.exists(workdir):
+            logger.info(
+                "Creating specified working directory {}.".format(workdir))
+            os.makedirs(workdir)
+        workdir = os.path.abspath(workdir)
+        os.chdir(workdir)
+    workflow = Workflow(snakefile=snakefile,
+                        snakemakepath=snakemakepath,
+                        jobscript=jobscript,
+                        overwrite_shellcmd=overwrite_shellcmd,
+                        overwrite_config=overwrite_config,
+                        overwrite_workdir=workdir,
+                        overwrite_configfile=configfile,
+                        config_args=config_args,
+                        debug=debug)
+
+    if standalone:
+        try:
+            # set the process group
+            os.setpgrp()
+        except:
+            # ignore: if it does not work we can still work without it
+            pass
+
+    success = True
+    try:
+        workflow.include(snakefile,
+                         overwrite_first_rule=True,
+                         print_compilation=print_compilation)
+        workflow.check()
+
+        if not print_compilation:
+            if listrules:
+                workflow.list_rules()
+            elif list_target_rules:
+                workflow.list_rules(only_targets=True)
+            elif list_resources:
+                workflow.list_resources()
+            else:
+                # handle subworkflows
+                subsnakemake = partial(snakemake,
+                                       cores=cores,
+                                       nodes=nodes,
+                                       resources=resources,
+                                       dryrun=dryrun,
+                                       touch=touch,
+                                       printreason=printreason,
+                                       printshellcmds=printshellcmds,
+                                       nocolor=nocolor,
+                                       quiet=quiet,
+                                       keepgoing=keepgoing,
+                                       cluster=cluster,
+                                       cluster_config=cluster_config,
+                                       cluster_sync=cluster_sync,
+                                       drmaa=drmaa,
+                                       jobname=jobname,
+                                       immediate_submit=immediate_submit,
+                                       standalone=standalone,
+                                       ignore_ambiguity=ignore_ambiguity,
+                                       snakemakepath=snakemakepath,
+                                       lock=lock,
+                                       unlock=unlock,
+                                       cleanup_metadata=cleanup_metadata,
+                                       force_incomplete=force_incomplete,
+                                       ignore_incomplete=ignore_incomplete,
+                                       latency_wait=latency_wait,
+                                       benchmark_repeats=benchmark_repeats,
+                                       verbose=verbose,
+                                       notemp=notemp,
+                                       nodeps=nodeps,
+                                       jobscript=jobscript,
+                                       timestamp=timestamp,
+                                       greediness=greediness,
+                                       overwrite_shellcmd=overwrite_shellcmd,
+                                       config=config,
+                                       config_args=config_args,
+                                       keep_logger=True)
+                success = workflow.execute(
+                    targets=targets,
+                    dryrun=dryrun,
+                    touch=touch,
+                    cores=cores,
+                    nodes=nodes,
+                    forcetargets=forcetargets,
+                    forceall=forceall,
+                    forcerun=forcerun,
+                    prioritytargets=prioritytargets,
+                    quiet=quiet,
+                    keepgoing=keepgoing,
+                    printshellcmds=printshellcmds,
+                    printreason=printreason,
+                    printrulegraph=printrulegraph,
+                    printdag=printdag,
+                    cluster=cluster,
+                    cluster_config=cluster_config,
+                    cluster_sync=cluster_sync,
+                    jobname=jobname,
+                    drmaa=drmaa,
+                    printd3dag=printd3dag,
+                    immediate_submit=immediate_submit,
+                    ignore_ambiguity=ignore_ambiguity,
+                    stats=stats,
+                    force_incomplete=force_incomplete,
+                    ignore_incomplete=ignore_incomplete,
+                    list_version_changes=list_version_changes,
+                    list_code_changes=list_code_changes,
+                    list_input_changes=list_input_changes,
+                    list_params_changes=list_params_changes,
+                    summary=summary,
+                    latency_wait=latency_wait,
+                    benchmark_repeats=benchmark_repeats,
+                    wait_for_files=wait_for_files,
+                    detailed_summary=detailed_summary,
+                    nolock=not lock,
+                    unlock=unlock,
+                    resources=resources,
+                    notemp=notemp,
+                    nodeps=nodeps,
+                    keep_target_files=keep_target_files,
+                    cleanup_metadata=cleanup_metadata,
+                    subsnakemake=subsnakemake,
+                    updated_files=updated_files,
+                    allowed_rules=allowed_rules,
+                    greediness=greediness)
+
+    # BrokenPipeError is not present in Python 3.2, so let's wait until everybody uses > 3.2
+    #except BrokenPipeError:
+    # ignore this exception and stop. It occurs if snakemake output is piped into less and less quits before reading the whole output.
+    # in such a case, snakemake shall stop scheduling and quit with error 1
+    #    success = False
+    except BaseException as ex:
+        print_exception(ex, workflow.linemaps)
+        success = False
+    if workdir:
+        os.chdir(olddir)
+    if workflow.persistence:
+        workflow.persistence.unlock()
+    if not keep_logger:
+        logger.cleanup()
+    return success
+
+
+def parse_resources(args):
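+    """Parse resources given on the command line as NAME=INT pairs.
+
+    For example, --resources gpu=1 io=5 yields {"gpu": 1, "io": 5}.
+    """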
+    resources = dict()
+    if args.resources is not None:
+        valid = re.compile(r"[a-zA-Z_]\w*$")
+        for res in args.resources:
+            try:
+                res, val = res.split("=")
+            except ValueError:
+                raise ValueError(
+                    "Resources have to be defined as name=value pairs.")
+            if not valid.match(res):
+                raise ValueError(
+                    "Resource definition must start with a valid identifier.")
+            try:
+                val = int(val)
+            except ValueError:
+                raise ValueError(
+                    "Resource definiton must contain an integer after the identifier.")
+            if res == "_cores":
+                raise ValueError(
+                    "Resource _cores is already defined internally. Use a different name.")
+            resources[res] = val
+    return resources
+
+
+def parse_config(args):
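+    """Parse config overrides given on the command line as KEY=VALUE pairs.
+
+    Each value is coerced with the first parser that succeeds (int, float,
+    eval, str), e.g. --config samples=10 species=human yields
+    {"samples": 10, "species": "human"}.
+    """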
+    parsers = [int, float, eval, str]
+    config = dict()
+    if args.config is not None:
+        valid = re.compile(r"[a-zA-Z_]\w*$")
+        for entry in args.config:
+            try:
+                key, val = entry.split("=", 1)
+            except ValueError:
+                raise ValueError(
+                    "Config entries have to be defined as name=value pairs.")
+            if not valid.match(key):
+                raise ValueError(
+                    "Config entry must start with a valid identifier.")
+            v = None
+            for parser in parsers:
+                try:
+                    v = parser(val)
+                    break
+                except:
+                    pass
+            assert v is not None
+            config[key] = v
+    return config
+
+
+def get_argument_parser():
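+    """Create and return the command line argument parser."""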
+    parser = argparse.ArgumentParser(
+        description="Snakemake is a Python based language and execution "
+        "environment for GNU Make-like workflows.")
+
+    parser.add_argument("target",
+                        nargs="*",
+                        default=None,
+                        help="Targets to build. May be rules or files.")
+    parser.add_argument("--snakefile", "-s",
+                        metavar="FILE",
+                        default="Snakefile",
+                        help="The workflow definition in a snakefile.")
+    parser.add_argument(
+        "--gui",
+        nargs="?",
+        const="8000",
+        metavar="PORT",
+        type=int,
+        help="Serve an HTML based user interface to the given port "
+        "(default: 8000). If possible, a browser window is opened.")
+    parser.add_argument(
+        "--cores", "--jobs", "-j",
+        action="store",
+        default=1,
+        const=multiprocessing.cpu_count(),
+        nargs="?",
+        metavar="N",
+        type=int,
+        help=("Use at most N cores in parallel (default: 1). "
+              "If N is omitted, the limit is set to the number of "
+              "available cores."))
+    parser.add_argument(
+        "--resources", "--res",
+        nargs="*",
+        metavar="NAME=INT",
+        help=("Define additional resources that shall constrain the scheduling "
+              "analogously to threads (see above). A resource is defined as "
+              "a name and an integer value. E.g. --resources gpu=1. Rules can "
+              "use resources by defining the resource keyword, e.g. "
+              "resources: gpu=1. If now two rules require 1 of the resource "
+              "'gpu' they won't be run in parallel by the scheduler."))
+    parser.add_argument(
+        "--config",
+        nargs="*",
+        metavar="KEY=VALUE",
+        help=
+        ("Set or overwrite values in the workflow config object. "
+         "The workflow config object is accessible as variable config inside "
+         "the workflow. Default values can be set by providing a JSON file "
+         "(see Documentation)."))
+    parser.add_argument(
+        "--configfile",
+        metavar="FILE",
+        help=
+        ("Specify or overwrite the config file of the workflow (see the docs). "
+         "Values specified in JSON or YAML format are available in the global config "
+         "dictionary inside the workflow."))
+    parser.add_argument("--list", "-l",
+                        action="store_true",
+                        help="Show availiable rules in given Snakefile.")
+    parser.add_argument("--list-target-rules", "--lt",
+                        action="store_true",
+                        help="Show available target rules in given Snakefile.")
+    parser.add_argument("--directory", "-d",
+                        metavar="DIR",
+                        action="store",
+                        help=("Specify working directory (relative paths in "
+                              "the snakefile will use this as their origin)."))
+    parser.add_argument("--dryrun", "-n",
+                        action="store_true",
+                        help="Do not execute anything.")
+    parser.add_argument(
+        "--printshellcmds", "-p",
+        action="store_true",
+        help="Print out the shell commands that will be executed.")
+    parser.add_argument(
+        "--dag",
+        action="store_true",
+        help="Do not execute anything and print the directed "
+        "acyclic graph of jobs in the dot language. Recommended "
+        "use on Unix systems: snakemake --dag | dot | display")
+    parser.add_argument(
+        "--rulegraph",
+        action="store_true",
+        help="Do not execute anything and print the dependency graph "
+        "of rules in the dot language. This will be less "
+        "crowded than above DAG of jobs, but also show less information. "
+        "Note that each rule is displayed once, hence the displayed graph will be "
+        "cyclic if a rule appears in several steps of the workflow. "
+        "Use this if above option leads to a DAG that is too large. "
+        "Recommended use on Unix systems: snakemake --rulegraph | dot | display")
+    parser.add_argument("--d3dag",
+                        action="store_true",
+                        help="Print the DAG in D3.js compatible JSON format.")
+    parser.add_argument(
+        "--summary", "-S",
+        action="store_true",
+        help="Print a summary of all files created by the workflow. The "
+        "has the following columns: filename, modification time, "
+        "rule version, status, plan.\n"
+        "Thereby rule version contains the version"
+        "the file was created with (see the version keyword of rules), and "
+        "status denotes whether the file is missing, its input files are "
+        "newer or if version or implementation of the rule changed since "
+        "file creation. Finally the last column denotes whether the file "
+        "will be updated or created during the next workflow execution.")
+    parser.add_argument(
+        "--detailed-summary", "-D",
+        action="store_true",
+        help="Print a summary of all files created by the workflow. The "
+        "has the following columns: filename, modification time, "
+        "rule version, input file(s), shell command, status, plan.\n"
+        "Thereby rule version contains the version"
+        "the file was created with (see the version keyword of rules), and "
+        "status denotes whether the file is missing, its input files are "
+        "newer or if version or implementation of the rule changed since "
+        "file creation. The input file and shell command columns are self"
+        "explanatory. Finally the last column denotes whether the file "
+        "will be updated or created during the next workflow execution.")
+    parser.add_argument(
+        "--touch", "-t",
+        action="store_true",
+        help=("Touch output files (mark them up to date without really "
+              "changing them) instead of running their commands. This is "
+              "used to pretend that the rules were executed, in order to "
+              "fool future invocations of snakemake. Fails if a file does "
+              "not yet exist."))
+    parser.add_argument("--keep-going", "-k",
+                        action="store_true",
+                        help="Go on with independent jobs if a job fails.")
+    parser.add_argument(
+        "--force", "-f",
+        action="store_true",
+        help=("Force the execution of the selected target or the first rule "
+              "regardless of already created output."))
+    parser.add_argument(
+        "--forceall", "-F",
+        action="store_true",
+        help=("Force the execution of the selected (or the first) rule and "
+              "all rules it is dependent on regardless of already created "
+              "output."))
+    parser.add_argument(
+        "--forcerun", "-R",
+        nargs="+",
+        metavar="TARGET",
+        help=("Force the re-execution or creation of the given rules or files."
+              " Use this option if you changed a rule and want to have all its "
+              "output in your workflow updated."))
+    parser.add_argument(
+        "--prioritize", "-P",
+        nargs="+",
+        metavar="TARGET",
+        help=("Tell the scheduler to assign creation of given targets "
+              "(and all their dependencies) highest priority. (EXPERIMENTAL)"))
+    parser.add_argument(
+        "--allow-ambiguity", "-a",
+        action="store_true",
+        help=("Don't check for ambiguous rules and simply use the first if "
+              "several can produce the same file. This allows the user to "
+              "prioritize rules by their order in the snakefile."))
+    # TODO extend below description to explain the wildcards that can be used
+
+    cluster_group = parser.add_mutually_exclusive_group()
+    cluster_group.add_argument(
+        "--cluster", "-c",
+        metavar="CMD",
+        help=
+        ("Execute snakemake rules with the given submit command, "
+         "e.g. qsub. Snakemake compiles jobs into scripts that are "
+         "submitted to the cluster with the given command, once all input "
+         "files for a particular job are present.\n"
+         "The submit command can be decorated to make it aware of certain "
+         "job properties (input, output, params, wildcards, log, threads "
+         "and dependencies (see the argument below)), e.g.:\n"
+         "$ snakemake --cluster 'qsub -pe threaded {threads}'.")),
+    cluster_group.add_argument(
+        "--cluster-sync",
+        metavar="CMD",
+        help=("cluster submission command will block, returning the remote exit"
+              "status upon remote termination (for example, this should be used"
+              "if the cluster command is 'qsub -sync y' (SGE)")),
+    cluster_group.add_argument(
+        "--drmaa",
+        nargs="?",
+        const="",
+        metavar="ARGS",
+        help="Execute snakemake on a cluster accessed via DRMAA, "
+        "Snakemake compiles jobs into scripts that are "
+        "submitted to the cluster with the given command, once all input "
+        "files for a particular job are present. ARGS can be used to "
+        "specify options of the underlying cluster system, "
+        "thereby using the job properties input, output, params, wildcards, log, "
+        "threads and dependencies, e.g.: "
+        "--drmaa ' -pe threaded {threads}'. Note that ARGS must be given in quotes and "
+        "with a leading whitespace.")
+
+    parser.add_argument(
+        "--cluster-config", "-u",
+        metavar="FILE",
+        help=
+        ("A JSON or YAML file that defines the wildcards used in 'cluster'"
+         "for specific rules, instead of having them specified in the Snakefile."
+         "For example, for rule 'job' you may define: "
+         "{ 'job' : { 'time' : '24:00:00' } } "
+         "to specify the time for rule 'job'.\n")),
+    parser.add_argument(
+        "--immediate-submit", "--is",
+        action="store_true",
+        help=
+        "Immediately submit all jobs to the cluster instead of waiting "
+         "for present input files. This will fail, unless you make "
+         "the cluster aware of job dependencies, e.g. via:\n"
+         "$ snakemake --cluster 'sbatch --dependency {dependencies}.\n"
+         "Assuming that your submit script (here sbatch) outputs the "
+         "generated job id to the first stdout line, {dependencies} will "
+         "be filled with space separated job ids this job depends on.")
+    parser.add_argument(
+        "--jobscript", "--js",
+        metavar="SCRIPT",
+        help="Provide a custom job script for submission to the cluster. "
+        "The default script resides as 'jobscript.sh' in the "
+        "installation directory.")
+    parser.add_argument(
+        "--jobname", "--jn",
+        default="snakejob.{rulename}.{jobid}.sh",
+        metavar="NAME",
+        help=
+        "Provide a custom name for the jobscript that is submitted to the "
+        "cluster (see --cluster). NAME is \"snakejob.{rulename}.{jobid}.sh\" "
+        "per default. The wildcard {jobid} has to be present in the name.")
+    parser.add_argument("--reason", "-r",
+                        action="store_true",
+                        help="Print the reason for each executed rule.")
+    parser.add_argument(
+        "--stats",
+        metavar="FILE",
+        help=
+        "Write stats about Snakefile execution in JSON format to the given file.")
+    parser.add_argument("--nocolor",
+                        action="store_true",
+                        help="Do not use a colored output.")
+    parser.add_argument("--quiet", "-q",
+                        action="store_true",
+                        help="Do not output any progress or rule information.")
+    parser.add_argument("--nolock",
+                        action="store_true",
+                        help="Do not lock the working directory")
+    parser.add_argument("--unlock",
+                        action="store_true",
+                        help="Remove a lock on the working directory.")
+    parser.add_argument(
+        "--cleanup-metadata", "--cm",
+        nargs="*",
+        metavar="FILE",
+        help="Cleanup the metadata "
+        "of given files. That means that snakemake removes any tracked "
+        "version info, and any marks that files are incomplete.")
+    parser.add_argument(
+        "--rerun-incomplete", "--ri",
+        action="store_true",
+        help="Re-run all "
+        "jobs the output of which is recognized as incomplete.")
+    parser.add_argument("--ignore-incomplete", "--ii",
+                        action="store_true",
+                        help="Ignore "
+                        "any incomplete jobs.")
+    parser.add_argument(
+        "--list-version-changes", "--lv",
+        action="store_true",
+        help="List all output files that have been created with "
+        "a different version (as determined by the version keyword).")
+    parser.add_argument(
+        "--list-code-changes", "--lc",
+        action="store_true",
+        help=
+        "List all output files for which the rule body (run or shell) have "
+        "changed in the Snakefile.")
+    parser.add_argument(
+        "--list-input-changes", "--li",
+        action="store_true",
+        help=
+        "List all output files for which the defined input files have changed "
+        "in the Snakefile (e.g. new input files were added in the rule "
+        "definition or files were renamed). For listing input file "
+        "modification in the filesystem, use --summary.")
+    parser.add_argument(
+        "--list-params-changes", "--lp",
+        action="store_true",
+        help="List all output files for which the defined params have changed "
+        "in the Snakefile.")
+    parser.add_argument(
+        "--latency-wait", "--output-wait", "-w",
+        type=int,
+        default=5,
+        metavar="SECONDS",
+        help=
+        "Wait given seconds if an output file of a job is not present after "
+        "the job finished. This helps if your filesystem "
+        "suffers from latency (default 5).")
+    parser.add_argument(
+        "--wait-for-files",
+        nargs="*",
+        metavar="FILE",
+        help="Wait --latency-wait seconds for these "
+        "files to be present before executing the workflow. "
+        "This option is used internally to handle filesystem latency in cluster "
+        "environments.")
+    parser.add_argument(
+        "--benchmark-repeats",
+        type=int,
+        default=1,
+        metavar="N",
+        help="Repeat a job N times if marked for benchmarking (default 1).")
+    parser.add_argument(
+        "--notemp", "--nt",
+        action="store_true",
+        help="Ignore temp() declarations. This is useful when running only "
+        "a part of the workflow, since temp() would lead to deletion of "
+        "probably needed files by other parts of the workflow.")
+    parser.add_argument(
+        "--keep-target-files",
+        action="store_true",
+        help=
+        "Do not adjust the paths of given target files relative to the working directory.")
+    parser.add_argument(
+        "--allowed-rules",
+        nargs="+",
+        help=
+        "Only use given rules. If omitted, all rules in Snakefile are used.")
+    parser.add_argument('--timestamp', '-T',
+                        action='store_true',
+                        help='Add a timestamp to all logging output.')
+    parser.add_argument(
+        "--greediness",
+        type=float,
+        default=None,
+        help=
+        "Set the greediness of scheduling. This value between 0 and 1 "
+        "determines how careful jobs are selected for execution. The default "
+        "value (1.0) provides the best speed and still acceptable scheduling "
+        "quality.")
+    parser.add_argument(
+        "--print-compilation",
+        action="store_true",
+        help="Print the python representation of the workflow.")
+    parser.add_argument(
+        "--overwrite-shellcmd",
+        help=
+        "Provide a shell command that shall be executed instead of those "
+        "given in the workflow. "
+        "This is for debugging purposes only.")
+    parser.add_argument("--verbose",
+                        action="store_true",
+                        help="Print debugging output.")
+    parser.add_argument("--debug",
+                        action="store_true",
+                        help=
+                        "Allow to debug rules with e.g. PDB. This flag "
+                        "allows to set breakpoints in run blocks.")
+    parser.add_argument(
+        "--profile",
+        metavar="FILE",
+        help=
+        "Profile Snakemake and write the output to FILE. This requires yappi "
+        "to be installed.")
+    parser.add_argument(
+        "--bash-completion",
+        action="store_true",
+        help=
+        "Output code to register bash completion for snakemake. Put the "
+        "following in your .bashrc (including the accents): "
+        "`snakemake --bash-completion` or issue it in an open terminal "
+        "session.")
+    parser.add_argument("--version", "-v",
+                        action="version",
+                        version=__version__)
+    return parser
+
+
+def main():
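+    """Entry point of the snakemake command line interface."""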
+    parser = get_argument_parser()
+    args = parser.parse_args()
+
+    if args.bash_completion:
+        print("complete -C snakemake-bash-completion snakemake")
+        sys.exit(0)
+
+    snakemakepath = sys.argv[0]
+
+    try:
+        resources = parse_resources(args)
+        config = parse_config(args)
+    except ValueError as e:
+        print(e, file=sys.stderr)
+        print("", file=sys.stderr)
+        parser.print_help()
+        sys.exit(1)
+
+    if args.profile:
+        import yappi
+        yappi.start()
+
+    _snakemake = partial(snakemake, args.snakefile,
+                         snakemakepath=snakemakepath)
+
+    if args.gui is not None:
+        try:
+            import snakemake.gui as gui
+        except ImportError:
+            print("Error: GUI needs Flask to be installed. Install "
+                  "with easy_install or contact your administrator.",
+                  file=sys.stderr)
+            sys.exit(1)
+
+        _logging.getLogger("werkzeug").setLevel(_logging.ERROR)
+        gui.register(_snakemake, args)
+        url = "http://127.0.0.1:{}".format(args.gui)
+        print("Listening on {}.".format(url), file=sys.stderr)
+
+        def open_browser():
+            try:
+                webbrowser.open(url)
+            except:
+                pass
+
+        print("Open this address in your browser to access the GUI.",
+              file=sys.stderr)
+        threading.Timer(0.5, open_browser).start()
+        success = True
+        try:
+            gui.app.run(debug=False, threaded=True, port=args.gui)
+        except (KeyboardInterrupt, SystemExit):
+            # silently close
+            pass
+    else:
+        success = _snakemake(listrules=args.list,
+                             list_target_rules=args.list_target_rules,
+                             cores=args.cores,
+                             nodes=args.cores,
+                             resources=resources,
+                             config=config,
+                             configfile=args.configfile,
+                             config_args=args.config,
+                             workdir=args.directory,
+                             targets=args.target,
+                             dryrun=args.dryrun,
+                             printshellcmds=args.printshellcmds,
+                             printreason=args.reason,
+                             printdag=args.dag,
+                             printrulegraph=args.rulegraph,
+                             printd3dag=args.d3dag,
+                             touch=args.touch,
+                             forcetargets=args.force,
+                             forceall=args.forceall,
+                             forcerun=args.forcerun,
+                             prioritytargets=args.prioritize,
+                             stats=args.stats,
+                             nocolor=args.nocolor,
+                             quiet=args.quiet,
+                             keepgoing=args.keep_going,
+                             cluster=args.cluster,
+                             cluster_config=args.cluster_config,
+                             cluster_sync=args.cluster_sync,
+                             drmaa=args.drmaa,
+                             jobname=args.jobname,
+                             immediate_submit=args.immediate_submit,
+                             standalone=True,
+                             ignore_ambiguity=args.allow_ambiguity,
+                             snakemakepath=snakemakepath,
+                             lock=not args.nolock,
+                             unlock=args.unlock,
+                             cleanup_metadata=args.cleanup_metadata,
+                             force_incomplete=args.rerun_incomplete,
+                             ignore_incomplete=args.ignore_incomplete,
+                             list_version_changes=args.list_version_changes,
+                             list_code_changes=args.list_code_changes,
+                             list_input_changes=args.list_input_changes,
+                             list_params_changes=args.list_params_changes,
+                             summary=args.summary,
+                             detailed_summary=args.detailed_summary,
+                             print_compilation=args.print_compilation,
+                             verbose=args.verbose,
+                             debug=args.debug,
+                             jobscript=args.jobscript,
+                             notemp=args.notemp,
+                             timestamp=args.timestamp,
+                             greediness=args.greediness,
+                             overwrite_shellcmd=args.overwrite_shellcmd,
+                             latency_wait=args.latency_wait,
+                             benchmark_repeats=args.benchmark_repeats,
+                             wait_for_files=args.wait_for_files,
+                             keep_target_files=args.keep_target_files,
+                             allowed_rules=args.allowed_rules)
+
+    if args.profile:
+        with open(args.profile, "w") as out:
+            profile = yappi.get_func_stats()
+            profile.sort("totaltime")
+            profile.print_all(out=out)
+
+    sys.exit(0 if success else 1)
+
+
+def bash_completion(snakefile="Snakefile"):
+    if len(sys.argv) < 3:  # bash completion passes the current word as sys.argv[2]
+        print(
+            "Calculate bash completion for snakemake. This tool shall not be invoked by hand.")
+        sys.exit(1)
+
+    prefix = sys.argv[2]
+
+    if prefix.startswith("-"):
+        opts = [action.option_strings[0]
+                for action in get_argument_parser()._actions
+                if action.option_strings and
+                action.option_strings[0].startswith(prefix)]
+        print(*opts, sep="\n")
+    else:
+        files = glob.glob("{}*".format(prefix))
+        if files:
+            print(*files, sep="\n")
+        elif os.path.exists(snakefile):
+            workflow = Workflow(snakefile=snakefile, snakemakepath="snakemake")
+            workflow.include(snakefile)
+
+            workflow_files = sorted(set(file
+                                        for file in workflow.concrete_files
+                                        if file.startswith(prefix)))
+            if workflow_files:
+                print(*workflow_files, sep="\n")
+
+            rules = [rule.name for rule in workflow.rules
+                     if rule.name.startswith(prefix)]
+            if rules:
+                print(*rules, sep="\n")
+    sys.exit(0)
diff --git a/snakemake/dag.py b/snakemake/dag.py
new file mode 100644
index 0000000..f1ead14
--- /dev/null
+++ b/snakemake/dag.py
@@ -0,0 +1,926 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import textwrap
+import time
+from collections import defaultdict, Counter
+from itertools import chain, combinations, filterfalse, product, groupby
+from functools import partial, lru_cache
+from operator import itemgetter, attrgetter
+
+from snakemake.io import IOFile, _IOFile, PeriodicityDetector, wait_for_files
+from snakemake.jobs import Job, Reason
+from snakemake.exceptions import RuleException, MissingInputException
+from snakemake.exceptions import MissingRuleException, AmbiguousRuleException
+from snakemake.exceptions import CyclicGraphException, MissingOutputException
+from snakemake.exceptions import IncompleteFilesException
+from snakemake.exceptions import PeriodicWildcardError
+from snakemake.exceptions import UnexpectedOutputException, InputFunctionException
+from snakemake.logging import logger
+from snakemake.output_index import OutputIndex
+
+
+class DAG:
+    def __init__(self, workflow,
+                 rules=None,
+                 dryrun=False,
+                 targetfiles=None,
+                 targetrules=None,
+                 forceall=False,
+                 forcerules=None,
+                 forcefiles=None,
+                 priorityfiles=None,
+                 priorityrules=None,
+                 ignore_ambiguity=False,
+                 force_incomplete=False,
+                 ignore_incomplete=False,
+                 notemp=False):
+
+        self.dryrun = dryrun
+        self.dependencies = defaultdict(partial(defaultdict, set))
+        self.depending = defaultdict(partial(defaultdict, set))
+        self._needrun = set()
+        self._priority = dict()
+        self._downstream_size = dict()
+        self._reason = defaultdict(Reason)
+        self._finished = set()
+        self._dynamic = set()
+        self._len = 0
+        self.workflow = workflow
+        self.rules = set(rules)
+        self.ignore_ambiguity = ignore_ambiguity
+        self.targetfiles = targetfiles
+        self.targetrules = targetrules
+        self.priorityfiles = priorityfiles
+        self.priorityrules = priorityrules
+        self.targetjobs = set()
+        self.prioritytargetjobs = set()
+        self._ready_jobs = set()
+        self.notemp = notemp
+        self._jobid = dict()
+
+        self.forcerules = set()
+        self.forcefiles = set()
+        self.updated_subworkflow_files = set()
+        if forceall:
+            self.forcerules.update(self.rules)
+        elif forcerules:
+            self.forcerules.update(forcerules)
+        if forcefiles:
+            self.forcefiles.update(forcefiles)
+        self.omitforce = set()
+
+        self.force_incomplete = force_incomplete
+        self.ignore_incomplete = ignore_incomplete
+
+        self.periodic_wildcard_detector = PeriodicityDetector()
+
+        self.update_output_index()
+
+    def init(self):
+        """ Initialise the DAG. """
+        for job in map(self.rule2job, self.targetrules):
+            job = self.update([job])
+            self.targetjobs.add(job)
+
+        for file in self.targetfiles:
+            job = self.update(self.file2jobs(file), file=file)
+            self.targetjobs.add(job)
+
+        self.update_needrun()
+
+    def update_output_index(self):
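+        """ Rebuild the index of rule output files. """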
+        self.output_index = OutputIndex(self.rules)
+
+    def check_incomplete(self):
+        if not self.ignore_incomplete:
+            incomplete = self.incomplete_files
+            if incomplete:
+                if self.force_incomplete:
+                    logger.debug("Forcing incomplete files:")
+                    logger.debug("\t" + "\n\t".join(incomplete))
+                    self.forcefiles.update(incomplete)
+                else:
+                    raise IncompleteFilesException(incomplete)
+
+    def check_dynamic(self):
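+        """ Update all jobs with dynamic output that need not run themselves. """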
+        for job in filter(lambda job: (
+            job.dynamic_output and not self.needrun(job)
+        ), self.jobs):
+            self.update_dynamic(job)
+
+    @property
+    def dynamic_output_jobs(self):
+        return (job for job in self.jobs if job.dynamic_output)
+
+    @property
+    def jobs(self):
+        """ All jobs in the DAG. """
+        for job in self.bfs(self.dependencies, *self.targetjobs):
+            yield job
+
+    @property
+    def needrun_jobs(self):
+        """ Jobs that need to be executed. """
+        for job in filter(self.needrun,
+                          self.bfs(self.dependencies, *self.targetjobs,
+                                   stop=self.noneedrun_finished)):
+            yield job
+
+    @property
+    def local_needrun_jobs(self):
+        return filter(lambda job: self.workflow.is_local(job.rule),
+                      self.needrun_jobs)
+
+    @property
+    def finished_jobs(self):
+        """ Jobs that have been executed. """
+        for job in filter(self.finished, self.bfs(self.dependencies,
+                                                  *self.targetjobs)):
+            yield job
+
+    @property
+    def ready_jobs(self):
+        """ Jobs that are ready to execute. """
+        return self._ready_jobs
+
+    def ready(self, job):
+        """ Return whether a given job is ready to execute. """
+        return job in self._ready_jobs
+
+    def needrun(self, job):
+        """ Return whether a given job needs to be executed. """
+        return job in self._needrun
+
+    def priority(self, job):
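+        """ Return the priority of the given job. """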
+        return self._priority[job]
+
+    def downstream_size(self, job):
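+        """ Return the number of downstream jobs of the given job. """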
+        return self._downstream_size[job]
+
+    def _job_values(self, jobs, values):
+        return [values[job] for job in jobs]
+
+    def priorities(self, jobs):
+        return self._job_values(jobs, self._priority)
+
+    def downstream_sizes(self, jobs):
+        return self._job_values(jobs, self._downstream_size)
+
+    def noneedrun_finished(self, job):
+        """
+        Return whether a given job is finished or was not
+        required to run at all.
+        """
+        return not self.needrun(job) or self.finished(job)
+
+    def reason(self, job):
+        """ Return the reason of the job execution. """
+        return self._reason[job]
+
+    def finished(self, job):
+        """ Return whether a job is finished. """
+        return job in self._finished
+
+    def dynamic(self, job):
+        """
+        Return whether a job is dynamic (i.e. it is only a placeholder
+        for those that are created after the job with dynamic output has
+        finished).
+        """
+        return job in self._dynamic
+
+    def requested_files(self, job):
+        """ Return the files a job requests. """
+        return set(chain(*self.depending[job].values()))
+
+    @property
+    def incomplete_files(self):
+        return list(chain(*(
+            job.output for job in filter(self.workflow.persistence.incomplete,
+                                         filterfalse(self.needrun, self.jobs))
+        )))
+
+    @property
+    def newversion_files(self):
+        return list(chain(*(
+            job.output
+            for job in filter(self.workflow.persistence.newversion, self.jobs)
+        )))
+
+    def missing_temp(self, job):
+        """
+        Return whether a temp file that is input of the given job is missing.
+        """
+        for job_, files in self.depending[job].items():
+            if self.needrun(job_) and any(not f.exists for f in files):
+                return True
+        return False
+
+    def check_output(self, job, wait=3):
+        """ Raise exception if output files of job are missing. """
+        try:
+            wait_for_files(job.expanded_output, latency_wait=wait)
+        except IOError as e:
+            raise MissingOutputException(str(e), rule=job.rule)
+
+        input_maxtime = job.input_maxtime
+        if input_maxtime is not None:
+            output_mintime = job.output_mintime
+            if output_mintime is not None and output_mintime < input_maxtime:
+                raise RuleException(
+                    "Output files {} are older than input "
+                    "files. Did you extract an archive? Make sure that output "
+                    "files have a more recent modification date than the "
+                    "archive, e.g. by using 'touch'.".format(
+                        ", ".join(job.expanded_output)),
+                    rule=job.rule)
+
+    def check_periodic_wildcards(self, job):
+        """ Raise an exception if a wildcard of the given job appears to be periodic,
+        indicating a cyclic dependency. """
+        for wildcard, value in job.wildcards_dict.items():
+            periodic_substring = self.periodic_wildcard_detector.is_periodic(
+                value)
+            if periodic_substring is not None:
+                raise PeriodicWildcardError(
+                    "The value {} in wildcard {} is periodically repeated ({}). "
+                    "This would lead to an infinite recursion. "
+                    "To avoid this, e.g. restrict the wildcards in this rule to certain values.".format(
+                        periodic_substring, wildcard, value),
+                    rule=job.rule)
+
+    def handle_protected(self, job):
+        """ Write-protect output files that are marked with protected(). """
+        for f in job.expanded_output:
+            if f in job.protected_output:
+                logger.info("Write-protecting output file {}.".format(f))
+                f.protect()
+
+    def handle_touch(self, job):
+        """ Touches those output files that are marked for touching. """
+        for f in job.expanded_output:
+            if f in job.touch_output:
+                logger.info("Touching output file {}.".format(f))
+                f.touch_or_create()
+
+    def handle_temp(self, job):
+        """ Remove temp files if they are no longer needed. """
+        if self.notemp:
+            return
+
+        needed = lambda job_, f: any(
+            f in files for j, files in self.depending[job_].items()
+            if not self.finished(j) and self.needrun(j) and j != job)
+
+        def unneeded_files():
+            for job_, files in self.dependencies[job].items():
+                for f in job_.temp_output & files:
+                    if not needed(job_, f):
+                        yield f
+            for f in filterfalse(partial(needed, job), job.temp_output):
+                if f not in self.targetfiles:
+                    yield f
+
+        for f in unneeded_files():
+            logger.info("Removing temporary output file {}.".format(f))
+            f.remove()
+
+    def jobid(self, job):
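+        """ Return the id of the given job, assigning a new one on first request. """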
+        if job not in self._jobid:
+            self._jobid[job] = len(self._jobid)
+        return self._jobid[job]
+
+    def update(self, jobs, file=None, visited=None, skip_until_dynamic=False):
+        """ Update the DAG by adding given jobs and their dependencies. """
+        if visited is None:
+            visited = set()
+        producer = None
+        exceptions = list()
+        jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
+        cycles = list()
+
+        for job in jobs:
+            if file in job.input:
+                cycles.append(job)
+                continue
+            if job in visited:
+                cycles.append(job)
+                continue
+            try:
+                self.check_periodic_wildcards(job)
+                self.update_(job,
+                             visited=set(visited),
+                             skip_until_dynamic=skip_until_dynamic)
+                # TODO this might fail if a rule discarded here is needed
+                # elsewhere
+                if producer is not None:
+                    if job < producer or self.ignore_ambiguity:
+                        break
+                    raise AmbiguousRuleException(file, job, producer)
+                producer = job
+            except (MissingInputException, CyclicGraphException,
+                    PeriodicWildcardError) as ex:
+                exceptions.append(ex)
+        if producer is None:
+            if cycles:
+                job = cycles[0]
+                raise CyclicGraphException(job.rule, file, rule=job.rule)
+            if exceptions:
+                raise exceptions[0]
+        return producer
+
+    def update_(self, job, visited=None, skip_until_dynamic=False):
+        """ Update the DAG by adding the given job and its dependencies. """
+        if job in self.dependencies:
+            return
+        if visited is None:
+            visited = set()
+        visited.add(job)
+        dependencies = self.dependencies[job]
+        potential_dependencies = self.collect_potential_dependencies(
+            job).items()
+
+        skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
+
+        missing_input = job.missing_input
+        producer = dict()
+        exceptions = dict()
+        for file, jobs in potential_dependencies:
+            try:
+                producer[file] = self.update(
+                    jobs,
+                    file=file,
+                    visited=visited,
+                    skip_until_dynamic=skip_until_dynamic or file in
+                    job.dynamic_input)
+            except (MissingInputException, CyclicGraphException,
+                    PeriodicWildcardError) as ex:
+                if file in missing_input:
+                    self.delete_job(job,
+                                    recursive=False)  # delete job from tree
+                    raise ex
+
+        for file, job_ in producer.items():
+            dependencies[job_].add(file)
+            self.depending[job_][job].add(file)
+
+        missing_input -= producer.keys()
+        if missing_input:
+            self.delete_job(job, recursive=False)  # delete job from tree
+            raise MissingInputException(job.rule, missing_input)
+
+        if skip_until_dynamic:
+            self._dynamic.add(job)
+
+    def update_needrun(self):
+        """ Update the information whether a job needs to be executed. """
+
+        def output_mintime(job):
+            for job_ in self.bfs(self.depending, job):
+                t = job_.output_mintime
+                if t:
+                    return t
+
+        def needrun(job):
+            reason = self.reason(job)
+            noinitreason = not reason
+            updated_subworkflow_input = self.updated_subworkflow_files.intersection(
+                job.input)
+            if (job not in self.omitforce and job.rule in self.forcerules or
+                not self.forcefiles.isdisjoint(job.output)):
+                reason.forced = True
+            elif updated_subworkflow_input:
+                reason.updated_input.update(updated_subworkflow_input)
+            elif job in self.targetjobs:
+                # TODO find a way to handle added/removed input files here?
+                if not job.output and not job.benchmark:
+                    if job.input:
+                        if job.rule.norun:
+                            reason.updated_input_run.update([f
+                                                             for f in job.input
+                                                             if not f.exists])
+                        else:
+                            reason.nooutput = True
+                    else:
+                        reason.noio = True
+                else:
+                    if job.rule in self.targetrules:
+                        missing_output = job.missing_output()
+                    else:
+                        missing_output = job.missing_output(
+                            requested=set(chain(*self.depending[job].values()))
+                            | self.targetfiles)
+                    reason.missing_output.update(missing_output)
+            if not reason:
+                output_mintime_ = output_mintime(job)
+                if output_mintime_:
+                    updated_input = [
+                        f for f in job.input
+                        if f.exists and f.is_newer(output_mintime_)
+                    ]
+                    reason.updated_input.update(updated_input)
+            if noinitreason and reason:
+                reason.derived = False
+            return job
+
+        reason = self.reason
+        _needrun = self._needrun
+        dependencies = self.dependencies
+        depending = self.depending
+
+        _needrun.clear()
+        candidates = set(self.jobs)
+
+        queue = list(filter(reason, map(needrun, candidates)))
+        visited = set(queue)
+        while queue:
+            job = queue.pop(0)
+            _needrun.add(job)
+
+            for job_, files in dependencies[job].items():
+                missing_output = job_.missing_output(requested=files)
+                reason(job_).missing_output.update(missing_output)
+                if missing_output and job_ not in visited:
+                    visited.add(job_)
+                    queue.append(job_)
+
+            for job_, files in depending[job].items():
+                if job_ in candidates:
+                    reason(job_).updated_input_run.update(files)
+                    if job_ not in visited:
+                        visited.add(job_)
+                        queue.append(job_)
+
+        self._len = len(_needrun)
+
+    def update_priority(self):
+        """ Update job priorities. """
+        prioritized = (lambda job: job.rule in self.priorityrules or
+                       not self.priorityfiles.isdisjoint(job.output))
+        for job in self.needrun_jobs:
+            self._priority[job] = job.rule.priority
+        for job in self.bfs(self.dependencies,
+                            *filter(prioritized, self.needrun_jobs),
+                            stop=self.noneedrun_finished):
+            self._priority[job] = Job.HIGHEST_PRIORITY
+
+    def update_ready(self):
+        """ Update information whether a job is ready to execute. """
+        for job in filter(self.needrun, self.jobs):
+            if not self.finished(job) and self._ready(job):
+                self._ready_jobs.add(job)
+
+    def update_downstream_size(self):
+        for job in self.needrun_jobs:
+            self._downstream_size[job] = sum(
+                1 for _ in self.bfs(self.depending, job,
+                                    stop=self.noneedrun_finished)) - 1
+
+    def postprocess(self):
+        self.update_needrun()
+        self.update_priority()
+        self.update_ready()
+        self.update_downstream_size()
+
+    def _ready(self, job):
+        return self._finished.issuperset(
+            filter(self.needrun, self.dependencies[job]))
+
+    def finish(self, job, update_dynamic=True):
+        self._finished.add(job)
+        try:
+            self._ready_jobs.remove(job)
+        except KeyError:
+            pass
+        # mark depending jobs as ready
+        for job_ in self.depending[job]:
+            if self.needrun(job_) and self._ready(job_):
+                self._ready_jobs.add(job_)
+
+        if update_dynamic and job.dynamic_output:
+            logger.info("Dynamically updating jobs")
+            newjob = self.update_dynamic(job)
+            if newjob:
+                # simulate that this job ran and was finished before
+                self.omitforce.add(newjob)
+                self._needrun.add(newjob)
+                self._finished.add(newjob)
+
+                self.postprocess()
+                self.handle_protected(newjob)
+                self.handle_touch(newjob)
+                # add finished jobs to the count, as they are no longer counted after the new postprocess
+                self._len += len(self._finished)
+
+    def update_dynamic(self, job):
+        dynamic_wildcards = job.dynamic_wildcards
+        if not dynamic_wildcards:
+            # this happens e.g. in dryrun if output is not yet present
+            return
+
+        depending = list(filter(lambda job_: not self.finished(job_),
+                                self.bfs(self.depending, job)))
+        newrule, non_dynamic_wildcards = job.rule.dynamic_branch(
+            dynamic_wildcards,
+            input=False)
+        self.specialize_rule(job.rule, newrule)
+
+        # no targetfile needed for job
+        newjob = Job(newrule, self, format_wildcards=non_dynamic_wildcards)
+        self.replace_job(job, newjob)
+        for job_ in depending:
+            if job_.dynamic_input:
+                newrule_ = job_.rule.dynamic_branch(dynamic_wildcards)
+                if newrule_ is not None:
+                    self.specialize_rule(job_.rule, newrule_)
+                    if not self.dynamic(job_):
+                        logger.debug("Updating job {}.".format(job_))
+                        newjob_ = Job(newrule_, self,
+                                      targetfile=job_.targetfile)
+
+                        unexpected_output = self.reason(
+                            job_).missing_output.intersection(
+                                newjob.existing_output)
+                        if unexpected_output:
+                            logger.warning(
+                                "Warning: the following output files of rule {} were not "
+                                "present when the DAG was created:\n{}".format(
+                                    newjob_.rule, unexpected_output))
+
+                        self.replace_job(job_, newjob_)
+        return newjob
+
+    def delete_job(self, job, recursive=True):
+        for job_ in self.depending[job]:
+            del self.dependencies[job_][job]
+        del self.depending[job]
+        for job_ in self.dependencies[job]:
+            depending = self.depending[job_]
+            del depending[job]
+            if not depending and recursive:
+                self.delete_job(job_)
+        del self.dependencies[job]
+        if job in self._needrun:
+            self._len -= 1
+            self._needrun.remove(job)
+            del self._reason[job]
+        if job in self._finished:
+            self._finished.remove(job)
+        if job in self._dynamic:
+            self._dynamic.remove(job)
+        if job in self._ready_jobs:
+            self._ready_jobs.remove(job)
+
+    def replace_job(self, job, newjob):
+        depending = list(self.depending[job].items())
+        if self.finished(job):
+            self._finished.add(newjob)
+
+        self.delete_job(job)
+        self.update([newjob])
+
+        for job_, files in depending:
+            if not job_.dynamic_input:
+                self.dependencies[job_][newjob].update(files)
+                self.depending[newjob][job_].update(files)
+        if job in self.targetjobs:
+            self.targetjobs.remove(job)
+            self.targetjobs.add(newjob)
+
+    def specialize_rule(self, rule, newrule):
+        assert newrule is not None
+        self.rules.add(newrule)
+        self.update_output_index()
+
+    def collect_potential_dependencies(self, job):
+        dependencies = defaultdict(list)
+        # use a set to avoid creating multiple jobs for the same file
+        # if the user specified it twice
+        file2jobs = self.file2jobs
+        for file in set(job.input):
+            # omit the file if it comes from a subworkflow
+            if file in job.subworkflow_input:
+                continue
+            try:
+                if file in job.dependencies:
+                    jobs = [Job(job.dependencies[file], self, targetfile=file)]
+                else:
+                    jobs = file2jobs(file)
+                dependencies[file].extend(jobs)
+            except MissingRuleException:
+                pass
+        return dependencies
+
+    def bfs(self, direction, *jobs, stop=lambda job: False):
+        queue = list(jobs)
+        visited = set(queue)
+        while queue:
+            job = queue.pop(0)
+            if stop(job):
+                # stop criterion reached for this node
+                continue
+            yield job
+            for job_, _ in direction[job].items():
+                if job_ not in visited:
+                    queue.append(job_)
+                    visited.add(job_)
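+    # Usage sketch (illustrative; `dag` stands for a DAG instance):
+    # dag.bfs(dag.dependencies, job) yields job and all of its transitive
+    # dependencies, while dag.bfs(dag.depending, job) walks downstream jobs.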
+
+    def level_bfs(self, direction, *jobs, stop=lambda job: False):
+        queue = [(job, 0) for job in jobs]
+        visited = set(jobs)
+        while queue:
+            job, level = queue.pop(0)
+            if stop(job):
+                # stop criterion reached for this node
+                continue
+            yield level, job
+            level += 1
+            for job_, _ in direction[job].items():
+                if job_ not in visited:
+                    queue.append((job_, level))
+                    visited.add(job_)
+
+    def dfs(self, direction, *jobs, stop=lambda job: False, post=True):
+        visited = set()
+        for job in jobs:
+            for job_ in self._dfs(direction, job, visited,
+                                  stop=stop,
+                                  post=post):
+                yield job_
+
+    def _dfs(self, direction, job, visited, stop, post):
+        if stop(job):
+            return
+        if not post:
+            yield job
+        for job_ in direction[job]:
+            if job_ not in visited:
+                visited.add(job_)
+                for j in self._dfs(direction, job_, visited, stop, post):
+                    yield j
+        if post:
+            yield job
+
+    def is_isomorph(self, job1, job2):
+        if job1.rule != job2.rule:
+            return False
+        rule = lambda job: job.rule.name
+        queue1, queue2 = [job1], [job2]
+        visited1, visited2 = set(queue1), set(queue2)
+        while queue1 and queue2:
+            job1, job2 = queue1.pop(0), queue2.pop(0)
+            deps1 = sorted(self.dependencies[job1], key=rule)
+            deps2 = sorted(self.dependencies[job2], key=rule)
+            for job1_, job2_ in zip(deps1, deps2):
+                if job1_.rule != job2_.rule:
+                    return False
+                if job1_ not in visited1 and job2_ not in visited2:
+                    queue1.append(job1_)
+                    visited1.add(job1_)
+                    queue2.append(job2_)
+                    visited2.add(job2_)
+                elif not (job1_ in visited1 and job2_ in visited2):
+                    return False
+        return True
+
+    def all_longest_paths(self, *jobs):
+        paths = defaultdict(list)
+
+        def all_longest_paths(_jobs):
+            for job in _jobs:
+                if job in paths:
+                    continue
+                deps = self.dependencies[job]
+                if not deps:
+                    paths[job].append([job])
+                    continue
+                all_longest_paths(deps)
+                for _job in deps:
+                    paths[job].extend(path + [job] for path in paths[_job])
+
+        all_longest_paths(jobs)
+        return chain(*(paths[job] for job in jobs))
+
+    def new_wildcards(self, job):
+        new_wildcards = set(job.wildcards.items())
+        for job_ in self.dependencies[job]:
+            if not new_wildcards:
+                return set()
+            for wildcard in job_.wildcards.items():
+                new_wildcards.discard(wildcard)
+        return new_wildcards
+
+    def rule2job(self, targetrule):
+        return Job(targetrule, self)
+
+    def file2jobs(self, targetfile):
+        rules = self.output_index.match(targetfile)
+        jobs = []
+        exceptions = list()
+        for rule in rules:
+            if rule.is_producer(targetfile):
+                try:
+                    jobs.append(Job(rule, self, targetfile=targetfile))
+                except InputFunctionException as e:
+                    exceptions.append(e)
+        if not jobs:
+            if exceptions:
+                raise exceptions[0]
+            raise MissingRuleException(targetfile)
+        return jobs
+
+    def rule_dot2(self):
+        dag = defaultdict(list)
+        visited = set()
+        preselect = set()
+
+        def preselect_parents(job):
+            for parent in self.depending[job]:
+                if parent in preselect:
+                    continue
+                preselect.add(parent)
+                preselect_parents(parent)
+
+        def build_ruledag(job, key=lambda job: job.rule.name):
+            if job in visited:
+                return
+            visited.add(job)
+            deps = sorted(self.dependencies[job], key=key)
+            deps = [(group[0] if preselect.isdisjoint(group) else
+                     preselect.intersection(group).pop())
+                    for group in (list(g) for _, g in groupby(deps, key))]
+            dag[job].extend(deps)
+            preselect_parents(job)
+            for dep in deps:
+                build_ruledag(dep)
+
+        for job in self.targetjobs:
+            build_ruledag(job)
+
+        return self._dot(dag.keys(),
+                         print_wildcards=False,
+                         print_types=False,
+                         dag=dag)
+
+    def rule_dot(self):
+        graph = defaultdict(set)
+        for job in self.jobs:
+            graph[job.rule].update(dep.rule for dep in self.dependencies[job])
+        return self._dot(graph)
+
+    def dot(self):
+        def node2style(job):
+            if not self.needrun(job):
+                return "rounded,dashed"
+            if self.dynamic(job) or job.dynamic_input:
+                return "rounded,dotted"
+            return "rounded"
+
+        def format_wildcard(wildcard):
+            name, value = wildcard
+            if _IOFile.dynamic_fill in value:
+                value = "..."
+            return "{}: {}".format(name, value)
+
+        node2rule = lambda job: job.rule
+        node2label = lambda job: "\\n".join(chain([
+            job.rule.name
+        ], sorted(map(format_wildcard, self.new_wildcards(job)))))
+
+        dag = {job: self.dependencies[job] for job in self.jobs}
+
+        return self._dot(dag,
+                         node2rule=node2rule,
+                         node2style=node2style,
+                         node2label=node2label)
+
+    def _dot(self, graph,
+             node2rule=lambda node: node,
+             node2style=lambda node: "rounded",
+             node2label=lambda node: node):
+
+        # color rules
+        huefactor = 2 / (3 * len(self.rules))
+        rulecolor = {
+            rule: "{:.2f} 0.6 0.85".format(i * huefactor)
+            for i, rule in enumerate(self.rules)
+        }
+
+        # markup
+        node_markup = '\t{}[label = "{}", color = "{}", style="{}"];'.format
+        edge_markup = "\t{} -> {}".format
+
+        # node ids
+        ids = {node: i for i, node in enumerate(graph)}
+
+        # calculate nodes
+        nodes = [node_markup(ids[node], node2label(node),
+                             rulecolor[node2rule(node)], node2style(node))
+                 for node in graph]
+        # calculate edges
+        edges = [edge_markup(ids[dep], ids[node])
+                 for node, deps in graph.items() for dep in deps]
+
+        return textwrap.dedent("""\
+            digraph snakemake_dag {{
+                graph[bgcolor=white, margin=0];
+                node[shape=box, style=rounded, fontname=sans, \
+                fontsize=10, penwidth=2];
+                edge[penwidth=2, color=grey];
+            {items}
+            }}\
+            """).format(items="\n".join(nodes + edges))
+
+    def summary(self, detailed=False):
+        if detailed:
+            yield "output_file\tdate\trule\tversion\tinput_file(s)\tshellcmd\tstatus\tplan"
+        else:
+            yield "output_file\tdate\trule\tversion\tstatus\tplan"
+
+        for job in self.jobs:
+            output = (job.rule.output if self.dynamic(job)
+                      else job.expanded_output)
+            for f in output:
+                rule = self.workflow.persistence.rule(f)
+                rule = "-" if rule is None else rule
+
+                version = self.workflow.persistence.version(f)
+                version = "-" if version is None else str(version)
+
+                date = time.ctime(f.mtime) if f.exists else "-"
+
+                pending = "update pending" if self.reason(job) else "no update"
+
+                input = self.workflow.persistence.input(f)
+                input = "-" if input is None else ",".join(input)
+
+                shellcmd = self.workflow.persistence.shellcmd(f)
+                shellcmd = "-" if shellcmd is None else shellcmd
+                # remove new line characters, leading and trailing whitespace
+                shellcmd = shellcmd.strip().replace("\n", "; ")
+
+                status = "ok"
+                if not f.exists:
+                    status = "missing"
+                elif self.reason(job).updated_input:
+                    status = "updated input files"
+                elif self.workflow.persistence.version_changed(job, file=f):
+                    status = "version changed to {}".format(job.rule.version)
+                elif self.workflow.persistence.code_changed(job, file=f):
+                    status = "rule implementation changed"
+                elif self.workflow.persistence.input_changed(job, file=f):
+                    status = "set of input files changed"
+                elif self.workflow.persistence.params_changed(job, file=f):
+                    status = "params changed"
+                if detailed:
+                    yield "\t".join((f, date, rule, version, input, shellcmd,
+                                     status, pending))
+                else:
+                    yield "\t".join((f, date, rule, version, status, pending))
+
+    def d3dag(self, max_jobs=10000):
+        def node(job):
+            jobid = self.jobid(job)
+            return {
+                "id": jobid,
+                "value": {
+                    "jobid": jobid,
+                    "label": job.rule.name,
+                    "rule": job.rule.name
+                }
+            }
+
+        def edge(a, b):
+            return {"u": self.jobid(a), "v": self.jobid(b)}
+
+        jobs = list(self.jobs)
+
+        if len(jobs) > max_jobs:
+            logger.info(
+                "Job-DAG is too large for visualization (>{} jobs).".format(
+                    max_jobs))
+        else:
+            logger.d3dag(nodes=[node(job) for job in jobs],
+                         edges=[edge(dep, job) for job in jobs for dep in
+                                self.dependencies[job] if self.needrun(dep)])
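+    # Each node/edge passed to logger.d3dag is a plain dict, e.g.
+    # {"id": 0, "value": {"jobid": 0, "label": "all", "rule": "all"}} for a
+    # node and {"u": 1, "v": 0} for an edge (values illustrative).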
+
+    def stats(self):
+        rules = Counter()
+        rules.update(job.rule for job in self.needrun_jobs)
+        rules.update(job.rule for job in self.finished_jobs)
+        yield "Job counts:"
+        yield "\tcount\tjobs"
+        for rule, count in sorted(rules.most_common(),
+                                  key=lambda item: item[0].name):
+            yield "\t{}\t{}".format(count, rule)
+        yield "\t{}".format(len(self))
+
+    def __str__(self):
+        return self.dot()
+
+    def __len__(self):
+        return self._len
diff --git a/snakemake/exceptions.py b/snakemake/exceptions.py
new file mode 100644
index 0000000..d606c99
--- /dev/null
+++ b/snakemake/exceptions.py
@@ -0,0 +1,300 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import traceback
+from tokenize import TokenError
+
+from snakemake.logging import logger
+
+
+def format_error(ex, lineno,
+                 linemaps=None,
+                 snakefile=None,
+                 show_traceback=False):
+    if linemaps is None:
+        linemaps = dict()
+    msg = str(ex)
+    if linemaps and snakefile and snakefile in linemaps:
+        lineno = linemaps[snakefile][lineno]
+        if isinstance(ex, SyntaxError):
+            msg = ex.msg
+    location = (" in line {} of {}".format(lineno, snakefile) if
+                lineno and snakefile else "")
+    tb = ""
+    if show_traceback:
+        tb = "\n".join(format_traceback(cut_traceback(ex), linemaps=linemaps))
+    return '{}{}{}{}'.format(ex.__class__.__name__, location, ":\n" + msg
+                             if msg else ".", "\n{}".format(tb) if
+                             show_traceback and tb else "")
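+# Example result (illustrative):
+#
+#     MissingInputException in line 12 of /path/Snakefile:
+#     Missing input files for rule map:
+#     sample.fastq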
+
+
+def get_exception_origin(ex, linemaps):
+    for file, lineno, _, _ in reversed(traceback.extract_tb(ex.__traceback__)):
+        if file in linemaps:
+            return lineno, file
+
+
+def cut_traceback(ex):
+    snakemake_path = os.path.dirname(__file__)
+    for line in traceback.extract_tb(ex.__traceback__):
+        dir = os.path.dirname(line[0])
+        if not dir:
+            dir = "."
+        if not os.path.isdir(dir) or not os.path.samefile(snakemake_path, dir):
+            yield line
+
+
+def format_traceback(tb, linemaps):
+    for file, lineno, function, code in tb:
+        if file in linemaps:
+            lineno = linemaps[file][lineno]
+        if code is not None:
+            yield '  File "{}", line {}, in {}'.format(file, lineno, function)
+
+
+def print_exception(ex, linemaps, print_traceback=True):
+    """
+    Print an error message for a given exception.
+
+    Arguments
+    ex -- the exception
+    linemaps -- a dict (keyed by snakefile) of dicts that map compiled
+        line numbers to source line numbers in the snakefile.
+    """
+    #traceback.print_exception(type(ex), ex, ex.__traceback__)
+    if isinstance(ex, SyntaxError) or isinstance(ex, IndentationError):
+        logger.error(format_error(ex, ex.lineno,
+                                  linemaps=linemaps,
+                                  snakefile=ex.filename,
+                                  show_traceback=print_traceback))
+        return
+    origin = get_exception_origin(ex, linemaps)
+    if origin is not None:
+        lineno, file = origin
+        logger.error(format_error(ex, lineno,
+                                  linemaps=linemaps,
+                                  snakefile=file,
+                                  show_traceback=print_traceback))
+        return
+    elif isinstance(ex, TokenError):
+        logger.error(format_error(ex, None, show_traceback=False))
+    elif isinstance(ex, MissingRuleException):
+        logger.error(format_error(ex, None,
+                                  linemaps=linemaps,
+                                  snakefile=ex.filename,
+                                  show_traceback=False))
+    elif isinstance(ex, RuleException):
+        for e in ex._include + [ex]:
+            if not e.omit:
+                logger.error(format_error(e, e.lineno,
+                                          linemaps=linemaps,
+                                          snakefile=e.filename,
+                                          show_traceback=print_traceback))
+    elif isinstance(ex, WorkflowError):
+        logger.error(format_error(ex, ex.lineno,
+                                  linemaps=linemaps,
+                                  snakefile=ex.snakefile,
+                                  show_traceback=print_traceback))
+    elif isinstance(ex, KeyboardInterrupt):
+        logger.info("Cancelling snakemake on user request.")
+    else:
+        traceback.print_exception(type(ex), ex, ex.__traceback__)
+
+
+class WorkflowError(Exception):
+    @staticmethod
+    def format_args(args):
+        for arg in args:
+            if isinstance(arg, str):
+                yield arg
+            else:
+                yield "{}: {}".format(arg.__class__.__name__, str(arg))
+
+    def __init__(self, *args, lineno=None, snakefile=None, rule=None):
+        super().__init__("\n".join(self.format_args(args)))
+        if rule is not None:
+            self.lineno = rule.lineno
+            self.snakefile = rule.snakefile
+        else:
+            self.lineno = lineno
+            self.snakefile = snakefile
+        self.rule = rule
+
+
+class WildcardError(WorkflowError):
+    pass
+
+
+class RuleException(Exception):
+    """
+    Base class for exceptions occurring within the
+    execution or definition of rules.
+    """
+
+    def __init__(self,
+                 message=None,
+                 include=None,
+                 lineno=None,
+                 snakefile=None,
+                 rule=None):
+        """
+        Creates a new instance of RuleException.
+
+        Arguments
+        message -- the exception message
+        include -- iterable of other exceptions to be included
+        lineno -- the line the exception originates
+        snakefile -- the file the exception originates
+        """
+        super(RuleException, self).__init__(message)
+        self._include = set()
+        if include:
+            for ex in include:
+                self._include.add(ex)
+                self._include.update(ex._include)
+        if rule is not None:
+            if lineno is None:
+                lineno = rule.lineno
+            if snakefile is None:
+                snakefile = rule.snakefile
+
+        self._include = list(self._include)
+        self.lineno = lineno
+        self.filename = snakefile
+        self.omit = not message
+
+    @property
+    def messages(self):
+        return map(str, (ex for ex in self._include + [self] if not ex.omit))
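+    # Minimal usage sketch (with a hypothetical rule object that provides
+    # lineno and snakefile):
+    #
+    #     raise RuleException("output file missing", rule=rule)
+    #
+    # Exceptions passed via include= are flattened into _include, so nested
+    # failures are each reported once by print_exception.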
+
+
+class InputFunctionException(WorkflowError):
+    pass
+
+
+class MissingOutputException(RuleException):
+    pass
+
+
+class IOException(RuleException):
+    def __init__(self, prefix, rule, files,
+                 include=None,
+                 lineno=None,
+                 snakefile=None):
+        message = ("{} for rule {}:\n{}".format(prefix, rule, "\n".join(files))
+                   if files else "")
+        super().__init__(message=message,
+                         include=include,
+                         lineno=lineno,
+                         snakefile=snakefile,
+                         rule=rule)
+
+
+class MissingInputException(IOException):
+    def __init__(self, rule, files, include=None, lineno=None, snakefile=None):
+        super().__init__("Missing input files", rule, files, include,
+                         lineno=lineno,
+                         snakefile=snakefile)
+
+
+class PeriodicWildcardError(RuleException):
+    pass
+
+
+class ProtectedOutputException(IOException):
+    def __init__(self, rule, files, include=None, lineno=None, snakefile=None):
+        super().__init__("Write-protected output files", rule, files, include,
+                         lineno=lineno,
+                         snakefile=snakefile)
+
+
+class UnexpectedOutputException(IOException):
+    def __init__(self, rule, files, include=None, lineno=None, snakefile=None):
+        super().__init__("Unexpectedly present output files "
+                         "(accidentally created by other rule?)", rule, files,
+                         include,
+                         lineno=lineno,
+                         snakefile=snakefile)
+
+
+class AmbiguousRuleException(RuleException):
+    def __init__(self, filename, job_a, job_b, lineno=None, snakefile=None):
+        super().__init__(
+            "Rules {job_a} and {job_b} are ambiguous for the file {f}.\n"
+            "Expected input files:\n"
+            "\t{job_a}: {job_a.input}\n"
+            "\t{job_b}: {job_b.input}".format(job_a=job_a,
+                                              job_b=job_b,
+                                              f=filename),
+            lineno=lineno,
+            snakefile=snakefile)
+        self.rule1, self.rule2 = job_a.rule, job_b.rule
+
+
+class CyclicGraphException(RuleException):
+    def __init__(self, repeatedrule, file, rule=None):
+        super().__init__("Cyclic dependency on rule {}.".format(repeatedrule),
+                         rule=rule)
+        self.file = file
+
+
+class MissingRuleException(RuleException):
+    def __init__(self, file, lineno=None, snakefile=None):
+        super().__init__(
+            "No rule to produce {} (if you use input functions make sure that they don't raise unexpected exceptions).".format(
+                file),
+            lineno=lineno,
+            snakefile=snakefile)
+
+
+class UnknownRuleException(RuleException):
+    def __init__(self, name, prefix="", lineno=None, snakefile=None):
+        msg = "There is no rule named {}.".format(name)
+        if prefix:
+            msg = "{} {}".format(prefix, msg)
+        super().__init__(msg, lineno=lineno, snakefile=snakefile)
+
+
+class NoRulesException(RuleException):
+    def __init__(self, lineno=None, snakefile=None):
+        super().__init__("There has to be at least one rule.",
+                         lineno=lineno,
+                         snakefile=snakefile)
+
+
+class IncompleteFilesException(RuleException):
+    def __init__(self, files):
+        super().__init__(
+            "The files below seem to be incomplete. "
+            "If you are sure that certain files are not incomplete, "
+            "mark them as complete with\n\n"
+            "    snakemake --cleanup-metadata <filenames>\n\n"
+            "To re-generate the files rerun your command with the "
+            "--rerun-incomplete flag.\nIncomplete files:\n{}".format(
+                "\n".join(files)))
+
+
+class IOFileException(RuleException):
+    def __init__(self, msg, lineno=None, snakefile=None):
+        super().__init__(msg, lineno=lineno, snakefile=snakefile)
+
+
+class ClusterJobException(RuleException):
+    def __init__(self, job, jobid, jobscript):
+        super().__init__(
+            "Error executing rule {} on cluster (jobid: {}, jobscript: {}). "
+            "For detailed error see the cluster log.".format(job.rule.name,
+                                                             jobid, jobscript),
+            lineno=job.rule.lineno,
+            snakefile=job.rule.snakefile)
+
+
+class CreateRuleException(RuleException):
+    pass
+
+
+class TerminatedException(Exception):
+    pass
diff --git a/snakemake/executors.py b/snakemake/executors.py
new file mode 100644
index 0000000..d04fd31
--- /dev/null
+++ b/snakemake/executors.py
@@ -0,0 +1,710 @@
+__authors__ = ["Johannes Köster", "David Alexander"]
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import sys
+import time
+import datetime
+import json
+import textwrap
+import stat
+import shutil
+import random
+import string
+import threading
+import concurrent.futures
+import subprocess
+import signal
+from functools import partial
+from itertools import chain
+
+from snakemake.jobs import Job
+from snakemake.shell import shell
+from snakemake.logging import logger
+from snakemake.stats import Stats
+from snakemake.utils import format, Unformattable
+from snakemake.io import get_wildcard_names, Wildcards
+from snakemake.exceptions import print_exception, get_exception_origin
+from snakemake.exceptions import format_error, RuleException
+from snakemake.exceptions import ClusterJobException, ProtectedOutputException, WorkflowError
+from snakemake.futures import ProcessPoolExecutor
+
+
+class AbstractExecutor:
+    def __init__(self, workflow, dag,
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 printthreads=True,
+                 latency_wait=3,
+                 benchmark_repeats=1):
+        self.workflow = workflow
+        self.dag = dag
+        self.quiet = quiet
+        self.printreason = printreason
+        self.printshellcmds = printshellcmds
+        self.printthreads = printthreads
+        self.latency_wait = latency_wait
+        self.benchmark_repeats = benchmark_repeats
+
+    def run(self, job,
+            callback=None,
+            submit_callback=None,
+            error_callback=None):
+        job.check_protected_output()
+        self._run(job)
+        callback(job)
+
+    def shutdown(self):
+        pass
+
+    def _run(self, job):
+        self.printjob(job)
+
+    def rule_prefix(self, job):
+        return "local " if self.workflow.is_local(job.rule) else ""
+
+    def printjob(self, job):
+        # skip dynamic jobs that will be "executed" only in dryrun mode
+        if self.dag.dynamic(job):
+            return
+
+        def format_files(job, io, ruleio, dynamicio):
+            for f in io:
+                f_ = ruleio[f]
+                if f in dynamicio:
+                    yield "{} (dynamic)".format(f.format_dynamic())
+                else:
+                    yield f
+
+        priority = self.dag.priority(job)
+        logger.job_info(jobid=self.dag.jobid(job),
+                        msg=job.message,
+                        name=job.rule.name,
+                        local=self.workflow.is_local(job.rule),
+                        input=list(format_files(job, job.input, job.ruleio,
+                                                job.dynamic_input)),
+                        output=list(format_files(job, job.output, job.ruleio,
+                                                 job.dynamic_output)),
+                        log=list(job.log),
+                        benchmark=job.benchmark,
+                        reason=str(self.dag.reason(job)),
+                        resources=job.resources_dict,
+                        priority="highest"
+                        if priority == Job.HIGHEST_PRIORITY else priority,
+                        threads=job.threads)
+
+        if job.dynamic_output:
+            logger.info("Subsequent jobs will be added dynamically "
+                        "depending on the output of this rule")
+
+    def print_job_error(self, job):
+        logger.error("Error in job {} while creating output file{} {}.".format(
+            job, "s" if len(job.output) > 1 else "", ", ".join(job.output)))
+
+    def finish_job(self, job):
+        self.dag.handle_touch(job)
+        self.dag.check_output(job, wait=self.latency_wait)
+        self.dag.handle_protected(job)
+        self.dag.handle_temp(job)
+
+
+class DryrunExecutor(AbstractExecutor):
+    def _run(self, job):
+        super()._run(job)
+        logger.shellcmd(job.shellcmd)
+
+
+class RealExecutor(AbstractExecutor):
+    def __init__(self, workflow, dag,
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 latency_wait=3,
+                 benchmark_repeats=1):
+        super().__init__(workflow, dag,
+                         printreason=printreason,
+                         quiet=quiet,
+                         printshellcmds=printshellcmds,
+                         latency_wait=latency_wait,
+                         benchmark_repeats=benchmark_repeats)
+        self.stats = Stats()
+
+    def _run(self, job, callback=None, error_callback=None):
+        super()._run(job)
+        self.stats.report_job_start(job)
+        try:
+            self.workflow.persistence.started(job)
+        except IOError as e:
+            logger.info(
+                "Failed to set marker file for job started ({}). "
+                "Snakemake will work, but cannot ensure that output files "
+                "are complete in case of a kill signal or power loss. "
+                "Please ensure write permissions for the "
+                "directory {}".format(e, self.workflow.persistence.path))
+
+    def finish_job(self, job):
+        super().finish_job(job)
+        self.stats.report_job_end(job)
+        try:
+            self.workflow.persistence.finished(job)
+        except IOError as e:
+            logger.info("Failed to remove marker file for job started "
+                        "({}). Please ensure write permissions for the "
+                        "directory {}".format(e,
+                                              self.workflow.persistence.path))
+
+
+class TouchExecutor(RealExecutor):
+    def run(self, job,
+            callback=None,
+            submit_callback=None,
+            error_callback=None):
+        super()._run(job)
+        try:
+            for f in job.expanded_output:
+                f.touch()
+            if job.benchmark:
+                job.benchmark.touch()
+            time.sleep(0.1)
+            self.finish_job(job)
+            callback(job)
+        except OSError as ex:
+            print_exception(ex, self.workflow.linemaps)
+            error_callback(job)
+
+
+_ProcessPoolExceptions = (KeyboardInterrupt, )
+try:
+    from concurrent.futures.process import BrokenProcessPool
+    _ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
+except ImportError:
+    pass
+
+
+class CPUExecutor(RealExecutor):
+    def __init__(self, workflow, dag, workers,
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 threads=False,
+                 latency_wait=3,
+                 benchmark_repeats=1):
+        super().__init__(workflow, dag,
+                         printreason=printreason,
+                         quiet=quiet,
+                         printshellcmds=printshellcmds,
+                         latency_wait=latency_wait,
+                         benchmark_repeats=benchmark_repeats)
+
+        self.pool = (concurrent.futures.ThreadPoolExecutor(max_workers=workers)
+                     if threads else ProcessPoolExecutor(max_workers=workers))
+
+    def run(self, job,
+            callback=None,
+            submit_callback=None,
+            error_callback=None):
+        job.prepare()
+        super()._run(job)
+
+        benchmark = None
+        if job.benchmark is not None:
+            benchmark = str(job.benchmark)
+
+        future = self.pool.submit(
+            run_wrapper, job.rule.run_func, job.input.plainstrings(),
+            job.output.plainstrings(), job.params, job.wildcards, job.threads,
+            job.resources, job.log.plainstrings(), job.rule.version, benchmark,
+            self.benchmark_repeats, self.workflow.linemaps, self.workflow.debug)
+        future.add_done_callback(partial(self._callback, job, callback,
+                                         error_callback))
+
+    def shutdown(self):
+        self.pool.shutdown()
+
+    def cancel(self):
+        self.pool.shutdown()
+
+    def _callback(self, job, callback, error_callback, future):
+        try:
+            ex = future.exception()
+            if ex:
+                raise ex
+            self.finish_job(job)
+            callback(job)
+        except _ProcessPoolExceptions:
+            job.cleanup()
+            self.workflow.persistence.cleanup(job)
+            # no error callback, just silently ignore the interrupt as the main scheduler is also killed
+        except (Exception, BaseException) as ex:
+            self.print_job_error(job)
+            print_exception(ex, self.workflow.linemaps)
+            job.cleanup()
+            self.workflow.persistence.cleanup(job)
+            error_callback(job)
+
+
+class ClusterExecutor(RealExecutor):
+
+    default_jobscript = "jobscript.sh"
+
+    def __init__(self, workflow, dag, cores,
+                 jobname="snakejob.{rulename}.{jobid}.sh",
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 latency_wait=3,
+                 benchmark_repeats=1,
+                 cluster_config=None, ):
+        super().__init__(workflow, dag,
+                         printreason=printreason,
+                         quiet=quiet,
+                         printshellcmds=printshellcmds,
+                         latency_wait=latency_wait,
+                         benchmark_repeats=benchmark_repeats)
+        if workflow.snakemakepath is None:
+            raise ValueError("Cluster executor needs to know the path "
+                             "to the snakemake binary.")
+
+        jobscript = workflow.jobscript
+        if jobscript is None:
+            jobscript = os.path.join(os.path.dirname(__file__),
+                                     self.default_jobscript)
+        try:
+            with open(jobscript) as f:
+                self.jobscript = f.read()
+        except IOError as e:
+            raise WorkflowError(e)
+
+        if not "jobid" in get_wildcard_names(jobname):
+            raise WorkflowError(
+                "Defined jobname (\"{}\") has to contain the wildcard {jobid}.")
+
+        self.exec_job = (
+            'cd {workflow.workdir_init} && '
+            '{workflow.snakemakepath} --snakefile {workflow.snakefile} '
+            '--force -j{cores} --keep-target-files '
+            '--wait-for-files {job.input} --latency-wait {latency_wait} '
+            '--benchmark-repeats {benchmark_repeats} '
+            '{overwrite_workdir} {overwrite_config} --nocolor '
+            '--notemp --quiet --nolock {target}')
+
+        if printshellcmds:
+            self.exec_job += " --printshellcmds "
+
+        if not any(dag.dynamic_output_jobs):
+            # disable restriction to target rule in case of dynamic rules!
+            self.exec_job += " --allowed-rules {job.rule.name} "
+        self.jobname = jobname
+        self.threads = []
+        self._tmpdir = None
+        self.cores = cores if cores else ""
+        self.cluster_config = cluster_config if cluster_config else dict()
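+    # Once formatted by spawn_jobscript, exec_job expands to a full snakemake
+    # invocation for a single target, roughly (paths and values illustrative):
+    #
+    #     cd /data/work && /usr/bin/snakemake --snakefile Snakefile --force \
+    #         -j8 --keep-target-files --wait-for-files sample.bam \
+    #         --latency-wait 3 --benchmark-repeats 1 --nocolor --notemp \
+    #         --quiet --nolock sorted/sample.bam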
+
+    def shutdown(self):
+        for thread in self.threads:
+            thread.join()
+        shutil.rmtree(self.tmpdir)
+
+    def cancel(self):
+        self.shutdown()
+
+    def _run(self, job, callback=None, error_callback=None):
+        super()._run(job, callback=callback, error_callback=error_callback)
+        logger.shellcmd(job.shellcmd)
+
+    @property
+    def tmpdir(self):
+        if self._tmpdir is None:
+            while True:
+                self._tmpdir = ".snakemake/tmp." + "".join(
+                    random.sample(string.ascii_uppercase + string.digits, 6))
+                if not os.path.exists(self._tmpdir):
+                    os.mkdir(self._tmpdir)
+                    break
+        return os.path.abspath(self._tmpdir)
+
+    def get_jobscript(self, job):
+        return os.path.join(
+            self.tmpdir,
+            job.format_wildcards(self.jobname,
+                                 rulename=job.rule.name,
+                                 jobid=self.dag.jobid(job),
+                                 cluster=self.cluster_wildcards(job)))
+
+    def spawn_jobscript(self, job, jobscript, **kwargs):
+        overwrite_workdir = ""
+        if self.workflow.overwrite_workdir:
+            overwrite_workdir = "--directory {} ".format(
+                self.workflow.overwrite_workdir)
+        overwrite_config = ""
+        if self.workflow.overwrite_configfile:
+            overwrite_config = "--configfile {} ".format(
+                self.workflow.overwrite_configfile)
+        if self.workflow.config_args:
+            overwrite_config += "--config {} ".format(
+                " ".join(self.workflow.config_args))
+
+        target = job.output if job.output else job.rule.name
+        format = partial(str.format,
+                         job=job,
+                         overwrite_workdir=overwrite_workdir,
+                         overwrite_config=overwrite_config,
+                         workflow=self.workflow,
+                         cores=self.cores,
+                         properties=job.json(),
+                         latency_wait=self.latency_wait,
+                         benchmark_repeats=self.benchmark_repeats,
+                         target=target, **kwargs)
+        try:
+            exec_job = format(self.exec_job)
+            with open(jobscript, "w") as f:
+                print(format(self.jobscript, exec_job=exec_job), file=f)
+        except KeyError as e:
+            raise WorkflowError(
+                "Error formatting jobscript: {} not found\n"
+                "Make sure that your custom jobscript it up to date.".format(e))
+        os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR)
+
+    def cluster_wildcards(self, job):
+        cluster = self.cluster_config.get("__default__", dict()).copy()
+        cluster.update(self.cluster_config.get(job.rule.name, dict()))
+        return Wildcards(fromdict=cluster)
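+    # cluster_config is expected to be a dict of dicts, e.g. (hypothetical
+    # rule name and values):
+    #
+    #     {"__default__": {"queue": "short"}, "bwa_map": {"queue": "long"}}
+    #
+    # Rule-specific entries override those under "__default__".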
+
+
+class GenericClusterExecutor(ClusterExecutor):
+    def __init__(self, workflow, dag, cores,
+                 submitcmd="qsub",
+                 cluster_config=None,
+                 jobname="snakejob.{rulename}.{jobid}.sh",
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 latency_wait=3,
+                 benchmark_repeats=1):
+        super().__init__(workflow, dag, cores,
+                         jobname=jobname,
+                         printreason=printreason,
+                         quiet=quiet,
+                         printshellcmds=printshellcmds,
+                         latency_wait=latency_wait,
+                         benchmark_repeats=benchmark_repeats,
+                         cluster_config=cluster_config, )
+        self.submitcmd = submitcmd
+        self.external_jobid = dict()
+        self.exec_job += ' && touch "{jobfinished}" || touch "{jobfailed}"'
+
+    def cancel(self):
+        logger.info("Will exit after finishing currently running jobs.")
+        self.shutdown()
+
+    def run(self, job,
+            callback=None,
+            submit_callback=None,
+            error_callback=None):
+        super()._run(job)
+        workdir = os.getcwd()
+        jobid = self.dag.jobid(job)
+
+        jobscript = self.get_jobscript(job)
+        jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
+        jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
+        self.spawn_jobscript(job, jobscript,
+                             jobfinished=jobfinished,
+                             jobfailed=jobfailed)
+
+        deps = " ".join(self.external_jobid[f] for f in job.input
+                        if f in self.external_jobid)
+        try:
+            submitcmd = job.format_wildcards(
+                self.submitcmd,
+                dependencies=deps,
+                cluster=self.cluster_wildcards(job))
+        except AttributeError as e:
+            raise WorkflowError(str(e), rule=job.rule)
+        try:
+            ext_jobid = subprocess.check_output(
+                '{submitcmd} "{jobscript}"'.format(submitcmd=submitcmd,
+                                                   jobscript=jobscript),
+                shell=True).decode().split("\n")
+        except subprocess.CalledProcessError as ex:
+            raise WorkflowError(
+                "Error executing jobscript (exit code {}):\n{}".format(
+                    ex.returncode, ex.output.decode()),
+                rule=job.rule)
+        if ext_jobid and ext_jobid[0]:
+            ext_jobid = ext_jobid[0]
+            self.external_jobid.update((f, ext_jobid) for f in job.output)
+            logger.debug("Submitted job {} with external jobid {}.".format(
+                jobid, ext_jobid))
+
+        thread = threading.Thread(target=self._wait_for_job,
+                                  args=(job, callback, error_callback,
+                                        jobscript, jobfinished, jobfailed))
+        thread.daemon = True
+        thread.start()
+        self.threads.append(thread)
+
+        submit_callback(job)
+
+    def _wait_for_job(self, job, callback, error_callback, jobscript,
+                      jobfinished, jobfailed):
+        while True:
+            if os.path.exists(jobfinished):
+                os.remove(jobfinished)
+                os.remove(jobscript)
+                self.finish_job(job)
+                callback(job)
+                return
+            if os.path.exists(jobfailed):
+                os.remove(jobfailed)
+                os.remove(jobscript)
+                self.print_job_error(job)
+                print_exception(ClusterJobException(job, self.dag.jobid(job),
+                                                    self.get_jobscript(job)),
+                                self.workflow.linemaps)
+                error_callback(job)
+                return
+            time.sleep(1)
+
+
+class SynchronousClusterExecutor(ClusterExecutor):
+    """
+    invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
+    synchronous, blocking the foreground thread and returning the
+    remote exit code at remote exit.
+    """
+
+    def __init__(self, workflow, dag, cores,
+                 submitcmd="qsub",
+                 cluster_config=None,
+                 jobname="snakejob.{rulename}.{jobid}.sh",
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 latency_wait=3,
+                 benchmark_repeats=1):
+        super().__init__(workflow, dag, cores,
+                         jobname=jobname,
+                         printreason=printreason,
+                         quiet=quiet,
+                         printshellcmds=printshellcmds,
+                         latency_wait=latency_wait,
+                         benchmark_repeats=benchmark_repeats,
+                         cluster_config=cluster_config, )
+        self.submitcmd = submitcmd
+        self.external_jobid = dict()
+
+    def cancel(self):
+        logger.info("Will exit after finishing currently running jobs.")
+        self.shutdown()
+
+    def run(self, job,
+            callback=None,
+            submit_callback=None,
+            error_callback=None):
+        super()._run(job)
+        workdir = os.getcwd()
+        jobid = self.dag.jobid(job)
+
+        jobscript = self.get_jobscript(job)
+        self.spawn_jobscript(job, jobscript)
+
+        deps = " ".join(self.external_jobid[f] for f in job.input
+                        if f in self.external_jobid)
+        try:
+            submitcmd = job.format_wildcards(
+                self.submitcmd,
+                dependencies=deps,
+                cluster=self.cluster_wildcards(job))
+        except AttributeError as e:
+            raise WorkflowError(str(e), rule=job.rule)
+
+        thread = threading.Thread(
+            target=self._submit_job,
+            args=(job, callback, error_callback, submitcmd, jobscript))
+        thread.daemon = True
+        thread.start()
+        self.threads.append(thread)
+        submit_callback(job)
+
+    def _submit_job(self, job, callback, error_callback, submitcmd, jobscript):
+        try:
+            ext_jobid = subprocess.check_output(
+                '{submitcmd} "{jobscript}"'.format(submitcmd=submitcmd,
+                                                   jobscript=jobscript),
+                shell=True).decode().split("\n")
+            os.remove(jobscript)
+            self.finish_job(job)
+            callback(job)
+
+        except subprocess.CalledProcessError as ex:
+            os.remove(jobscript)
+            self.print_job_error(job)
+            print_exception(ClusterJobException(job, self.dag.jobid(job),
+                                                self.get_jobscript(job)),
+                            self.workflow.linemaps)
+            error_callback(job)
+
+
+class DRMAAExecutor(ClusterExecutor):
+    def __init__(self, workflow, dag, cores,
+                 jobname="snakejob.{rulename}.{jobid}.sh",
+                 printreason=False,
+                 quiet=False,
+                 printshellcmds=False,
+                 drmaa_args="",
+                 latency_wait=3,
+                 benchmark_repeats=1,
+                 cluster_config=None, ):
+        super().__init__(workflow, dag, cores,
+                         jobname=jobname,
+                         printreason=printreason,
+                         quiet=quiet,
+                         printshellcmds=printshellcmds,
+                         latency_wait=latency_wait,
+                         benchmark_repeats=benchmark_repeats,
+                         cluster_config=cluster_config, )
+        try:
+            import drmaa
+        except ImportError:
+            raise WorkflowError(
+                "Python support for DRMAA is not installed. "
+                "Please install it, e.g. with easy_install3 --user drmaa")
+        except RuntimeError as e:
+            raise WorkflowError("Error loading drmaa support:\n{}".format(e))
+        self.session = drmaa.Session()
+        self.drmaa_args = drmaa_args
+        self.session.initialize()
+        self.submitted = list()
+
+    def cancel(self):
+        from drmaa.const import JobControlAction
+        for jobid in self.submitted:
+            self.session.control(jobid, JobControlAction.TERMINATE)
+        self.shutdown()
+
+    def run(self, job,
+            callback=None,
+            submit_callback=None,
+            error_callback=None):
+        super()._run(job)
+        jobscript = self.get_jobscript(job)
+        self.spawn_jobscript(job, jobscript)
+
+        try:
+            drmaa_args = job.format_wildcards(
+                self.drmaa_args,
+                cluster=self.cluster_wildcards(job))
+        except AttributeError as e:
+            raise WorkflowError(str(e), rule=job.rule)
+
+        import drmaa
+        try:
+            jt = self.session.createJobTemplate()
+            jt.remoteCommand = jobscript
+            jt.nativeSpecification = drmaa_args
+
+            jobid = self.session.runJob(jt)
+        except (drmaa.errors.InternalException,
+                drmaa.errors.InvalidAttributeValueException) as e:
+            print_exception(WorkflowError("DRMAA Error: {}".format(e)),
+                            self.workflow.linemaps)
+            error_callback(job)
+            return
+        logger.info("Submitted DRMAA job (jobid {})".format(jobid))
+        self.submitted.append(jobid)
+        self.session.deleteJobTemplate(jt)
+
+        thread = threading.Thread(
+            target=self._wait_for_job,
+            args=(job, jobid, callback, error_callback, jobscript))
+        thread.daemon = True
+        thread.start()
+        self.threads.append(thread)
+
+        submit_callback(job)
+
+    def shutdown(self):
+        super().shutdown()
+        self.session.exit()
+
+    def _wait_for_job(self, job, jobid, callback, error_callback, jobscript):
+        import drmaa
+        try:
+            retval = self.session.wait(jobid,
+                                       drmaa.Session.TIMEOUT_WAIT_FOREVER)
+        except drmaa.errors.InternalException as e:
+            print_exception(WorkflowError("DRMAA Error: {}".format(e)),
+                            self.workflow.linemaps)
+            os.remove(jobscript)
+            error_callback(job)
+            return
+        os.remove(jobscript)
+        if retval.hasExited and retval.exitStatus == 0:
+            self.finish_job(job)
+            callback(job)
+        else:
+            self.print_job_error(job)
+            print_exception(
+                ClusterJobException(job, self.dag.jobid(job), jobscript),
+                self.workflow.linemaps)
+            error_callback(job)
+
+
+def run_wrapper(run, input, output, params, wildcards, threads, resources, log,
+                version, benchmark, benchmark_repeats, linemaps, debug=False):
+    """
+    Wrapper around the run method that handles directory creation and
+    output file deletion on error.
+
+    Arguments
+    run       -- the run method
+    input     -- list of input files
+    output    -- list of output files
+    params    -- the rule's parameters
+    wildcards -- so far processed wildcards
+    threads   -- usable threads
+    resources -- resources assigned to the job
+    log       -- list of log files
+    """
+    if os.name == "posix" and debug:
+        sys.stdin = open('/dev/stdin')
+
+    try:
+        runs = 1 if benchmark is None else benchmark_repeats
+        wallclock = []
+        for i in range(runs):
+            w = time.time()
+            # execute the actual run method.
+            run(input, output, params, wildcards, threads, resources, log,
+                version)
+            w = time.time() - w
+            wallclock.append(w)
+
+    except (KeyboardInterrupt, SystemExit):
+        # re-raise the interrupt so that the scheduler records an error;
+        # no further handling is needed here
+        raise
+    except (Exception, BaseException) as ex:
+        # this ensures that exception can be re-raised in the parent thread
+        lineno, file = get_exception_origin(ex, linemaps)
+        raise RuleException(format_error(ex, lineno,
+                                         linemaps=linemaps,
+                                         snakefile=file,
+                                         show_traceback=True))
+
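+    # The benchmark file written below is a small JSON document of the form
+    # (values illustrative):
+    #
+    #     {"wall_clock_times": {"s": [1.23],
+    #                           "h:m:s": ["0:00:01.230000"]}}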
+    if benchmark is not None:
+        try:
+            with open(benchmark, "w") as f:
+                json.dump({
+                    name: {
+                        "s": times,
+                        "h:m:s": [str(datetime.timedelta(seconds=t))
+                                  for t in times]
+                    }
+                    for name, times in zip("wall_clock_times".split(),
+                                           [wallclock])
+                }, f,
+                          indent=4)
+        except (Exception, BaseException) as ex:
+            raise WorkflowError(ex)
diff --git a/snakemake/futures.py b/snakemake/futures.py
new file mode 100644
index 0000000..aa05b48
--- /dev/null
+++ b/snakemake/futures.py
@@ -0,0 +1,36 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import sys
+import os
+import multiprocessing
+import concurrent.futures
+from concurrent.futures.process import _ResultItem, _process_worker
+
+
+def _graceful_process_worker(call_queue, result_queue):
+    """Override the default _process_worker from concurrent.futures.
+    We ensure here that KeyboardInterrupts lead to silent failures.
+    """
+    try:
+        _process_worker(call_queue, result_queue)
+    except KeyboardInterrupt:
+        # let the process silently fail in case of a keyboard interrupt
+        raise SystemExit()
+
+
+class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
+    """Override the default ProcessPoolExecutor to gracefully handle KeyboardInterrupts."""
+
+    def _adjust_process_count(self):
+        for _ in range(len(self._processes), self._max_workers):
+            p = multiprocessing.Process(
+                target=_graceful_process_worker,
+                args=(self._call_queue, self._result_queue))
+            p.start()
+            if sys.version_info < (3, 3):
+                self._processes.add(p)
+            else:
+                self._processes[p.pid] = p
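
The subclass above only swaps in the graceful worker; it is used exactly
like the stock concurrent.futures.ProcessPoolExecutor. A minimal sketch
(the worker function and its inputs are hypothetical):

    from snakemake.futures import ProcessPoolExecutor

    def square(x):
        # any picklable, module-level function works as a task
        return x * x

    if __name__ == "__main__":
        with ProcessPoolExecutor(max_workers=2) as pool:
            print(list(pool.map(square, range(4))))  # [0, 1, 4, 9]
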
diff --git a/snakemake/gui.html b/snakemake/gui.html
new file mode 100644
index 0000000..22ad7af
--- /dev/null
+++ b/snakemake/gui.html
@@ -0,0 +1,358 @@
+<html>
+    <head>
+        <script type="text/javascript" src="http://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
+        <script type="text/javascript" src="http://cdnjs.cloudflare.com/ajax/libs/d3/3.4.6/d3.min.js"></script>
+        <script type="text/javascript" src="http://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.1.1/js/bootstrap.min.js"></script>
+        <script type="text/javascript" src="http://cpettitt.github.io/project/dagre-d3/v0.1.5/dagre-d3.js"></script>
+        <script type="text/javascript" src="http://cdnjs.cloudflare.com/ajax/libs/bootstrap-select/1.5.4/bootstrap-select.min.js"></script>
+        <link rel="stylesheet" type="text/css" href="http://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.1.1/css/bootstrap.min.css"/>
+        <link rel="stylesheet" type="text/css" href="http://cdnjs.cloudflare.com/ajax/libs/bootstrap-select/1.5.4/bootstrap-select.min.css"/>
+        <style>
+        body {
+          font-family: sans-serif;
+          font-size: 10pt;
+          color: #333333;
+          background-color: #EEEEEE;
+        }
+        
+        #header {
+            position: fixed;
+            top: 0px;
+            left: 0px;
+            width: 100%;
+            background-color: #3498db;
+            color: #FFFFFF;
+            font-size: 150%;
+            height: 70px;
+        }
+        
+        #header #snakemake {
+            float: left;
+            padding: 5px;
+            padding-right: 15px;
+        }
+        
+        #header #workflow {
+            float: left;
+            padding: 5px;
+        }
+        
+        #top {
+            padding-top: 70px;
+        }
+        
+        #bottom {
+            width: 100%;
+            border-width: 0px;
+        }
+        
+        #bottom td {
+            vertical-align: top;
+        }
+        
+        #workflow-progress {
+            width: 100%;
+        }
+        
+        #workflow-progress #progress-text {
+            width: 0%;
+            transition: width 2s;
+            min-width: 20em;
+            text-align: right;
+            color: #CCCCCC;
+            height: 2em;
+        }
+        
+        #workflow-progress #progress-bar {
+            width: 0%;
+            transition: width 2s;
+            height: 0.5em;
+            background-color: #CCCCCC;
+        }
+        
+        #left-panel {
+            width: 50%;
+            background-color: #DDDDDD;
+        }
+        
+        #left-panel #control {
+            padding: 5px;
+        }
+        
+        #log {
+            width: 50%;
+            background-color: #FFFFFF;
+        }
+        
+        #log table {
+            font-size: 10pt;
+            margin: 0;
+        }
+        
+        #log div {
+             overflow: auto;
+             padding: 5px;
+        }
+        
+        #log .info {
+            background-color: #f1c40f;
+        }
+        
+        #log .info:nth-child(even) {
+            background-color: #FFDD53;
+        }
+        
+        #log .error {
+            background-color: #e74c3c;
+        }
+        
+        #log .error:nth-child(even) {
+            background-color: #c0392b;
+        }
+        
+        #log .job_info {
+            background-color: #2ecc71;
+        }
+        
+        #log .job_info:nth-child(even) {
+            background-color: #27ae60;
+        }
+        
+        th.ruleitem {
+            width: 10em;
+            vertical-align: top;
+        }
+
+        #dag-container {
+            width: 100%;
+            max-height: 400px;
+            overflow: auto;
+            text-align: center;
+            padding: 10px;
+        }
+        
+        #dag .node rect {
+            stroke-width: 3px;
+            fill: #EEEEEE;
+        }
+
+        #dag .edgeLabel rect {
+            fill: #fff;
+        }
+
+        #dag .edgePath {
+            stroke: #333333;
+            stroke-width: 3px;
+            fill: none;
+        }
+        </style>
+        <script>
+        var dag = null;
+
+        function render_dag() {
+            if(!dag) {
+                return;
+            }
+            
+            var svg = d3.select("#dag");
+            svg.select("g").remove();
+            var view = svg.append("g");
+            view.attr("transform", "translate(5,5)");
+            
+            var color = d3.scale.category20();
+            
+            var layout = dagreD3.layout().rankDir("LR");
+            var g = dagreD3.json.decode(dag.nodes, dag.edges);
+            var renderer = new dagreD3.Renderer().layout(layout);
+            var oldDrawNodes = renderer.drawNodes();
+            renderer.drawNodes(function(graph, svg) {
+                var svgNodes = oldDrawNodes(graph, svg);
+                svgNodes.select("rect")
+                    .attr("id", function(u) { return "job_" + g.node(u).jobid; })
+                    .attr("style", function(u) {
+                        var col = color(g.node(u).rule);
+                        return "stroke: " + col + ";";
+                    });
+                return svgNodes;
+            });
+            
+            var result = renderer.run(g, view);
+            svg.attr("width", result.graph().width + 40)
+               .attr("height", result.graph().height + 40);
+        }
+
+        function update_dag() {
+            d3.json("dag", function(dag_json) {
+                dag = dag_json;
+                render_dag();
+            });
+        }
+
+        var logid = 0;
+        function update() {
+            d3.json("status", function(status) {
+                if(status["running"]) {
+                    $("#control fieldset").attr("disabled", true);
+                }
+                else {
+                    $("#control fieldset").attr("disabled", false);
+                }
+            });
+        
+            d3.json("progress", function(progress) {
+                var done = progress.done;
+                if(done) {
+                    var total = progress.total;
+                    var percent = done / total * 100;
+                    $("#workflow-progress #progress-bar")
+                        .css("width", percent + "%");
+                    $("#workflow-progress #progress-text")
+                        .css("width", percent + "%")
+                        .text(done + " of " + total + " jobs finished (" + percent + "%)");
+                }
+            });
+            
+            $.getJSON("log/" + logid, function(entries) {
+                $.each(entries, function(i, entry) {
+                    if(entry.level == "job_info") {
+                        var html = '<div class="job_info"><h4>'
+                        + entry.name + '</h4><table>';
+                        $.each(["input", "output"], function(i, item) {
+                            if(entry[item].length) {
+                                html += '<tr><th class="ruleitem">' + item + '</th><td>' + entry[item].join(", ") + '</td></tr>';
+                            }
+                        });
+                        $.each(["threads", "priority"], function(item) {
+                            if(entry[item] > 1) {
+                                html += '<tr><th class="ruleitem">' + item + '</th><td>' + entry[item] + '</td></tr>';
+                            }
+                        });
+                        html += '</table></div>';
+                        $(html).appendTo("#log");
+                    }
+                    else if (entry.level == "job_finished") {
+                        // do nothing for now
+                    }
+                    else {
+                        $("#log").append(
+                            '<div class="' + entry.level + '">'
+                            + entry.msg +
+                            '</div>'
+                        );
+                    }
+                    logid++;
+                });
+            });
+        }
+        
+        function set_args() {
+            $.post("set_args", {"targets": $("#targets").val()});
+        }
+        
+        $( document ).ready(function() {
+            update_dag();
+            setInterval(update, 2000);
+
+            $('.selectpicker').selectpicker();
+
+            $("#run-btn").click(function() {
+                $("#log").empty();
+                $.ajax("run");
+            });
+
+            $("#dryrun-btn").click(function() {
+                $("#log").empty();
+                $.ajax("dryrun");
+            });
+
+            $("#targets").change(function() {
+                set_args();
+                update_dag();
+            });
+        });
+        </script>
+    </head>
+    <body>
+        <div id="header">
+          <div id="snakemake">SNAKEMAKE {{version}}</div>
+          <div id="workflow">
+              WORKFLOW<br/>
+             <span style="font-size: 50%; font-weight: normal;">{{snakefilepath}}</span>
+          </div>
+        </div>
+        <div id="top">
+            <div id="dag-container">
+                <svg id="dag">
+                </svg>
+            </div>
+            <div id="workflow-progress">
+                <div id="progress-bar">
+                </div>
+                <div id="progress-text">
+                </div>
+            </div>
+        </div>
+        <table id="bottom">
+            <tr>
+                <td id="left-panel">
+                    <form class="form-horizontal" id="control">
+                        <fieldset>
+                            <div class="form-group">
+                                <div class="col-sm-offset-2 col-sm-10">
+                                    <div class="btn-group">
+                                        <button type="button" id="dryrun-btn" class="btn btn-default">Dry-Run</button>
+                                        <button type="button" id="run-btn" class="btn btn-default">Run</button>
+                                    </div>
+                                </div>
+                            </div>
+                            <div class="form-group">
+                                <label for="targets" class="col-sm-2 control-label">Targets</label>
+                                <div class="col-sm-10">
+                                    <select id="targets" class="form-control selectpicker" autocomplete="off" multiple>
+                                        <option  selected="selected" value="{{targets[0]}}">{{targets[0]}}</option>
+                                        {% for target in targets[1:] %}
+                                        <option value="{{target}}">{{target}}</option>
+                                        {% endfor %}
+                                    </select>
+                                </div>
+                            </div>
+                            <div class="form-group">
+                                <div class="col-sm-offset-2 col-sm-10">
+                                    <h4>Resources</h4>
+                                </div>
+                            </div>
+                            <div class="form-group">
+                                <label for="cores" class="col-sm-2 control-label">{{cores_label}}</label>
+                                <div class="col-sm-10">
+                                    <input type="text" class="col-sm-10 form-control" id="cores" value="1">
+                                </div>
+                            </div>
+                            {% for resource in resources %}
+                                <div class="form-group">
+                                    <label for="{{resource}}" class="col-sm-2 control-label">{{resource}}</label>
+                                    <div class="col-sm-10">
+                                        <div class="input-group">
+                                            <span class="input-group-addon"><input type="checkbox" id="{{resource}}"></span>
+                                            <input type="text" class="form-control" id="{{resource}}_value">
+                                        </div>
+                                    </div>
+                                </div>
+                            {% endfor %}
+                        </fieldset>
+                    </form>
+                </td>
+                <td id="log">
+                </td>
+            </tr>
+        </table>
+    </body>
+</html>
diff --git a/snakemake/gui.py b/snakemake/gui.py
new file mode 100644
index 0000000..e3cd4d0
--- /dev/null
+++ b/snakemake/gui.py
@@ -0,0 +1,171 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import json
+import os
+import threading
+
+from flask import Flask, render_template, request
+
+from snakemake.version import __version__
+
+LOCK = threading.Lock()
+
+app = Flask("snakemake", template_folder=os.path.dirname(__file__))
+#app.debug=True
+app.extensions = {
+    "dag": None,
+    "run_snakemake": None,
+    "progress": "",
+    "log": [],
+    "status": {"running": False},
+    "args": None,
+    "targets": [],
+    "rule_info": [],
+    "resources": []
+}
+
+
+def register(run_snakemake, args):
+    app.extensions["run_snakemake"] = run_snakemake
+    app.extensions["args"] = dict(targets=args.target,
+                                  cluster=args.cluster,
+                                  workdir=args.directory,
+                                  touch=args.touch,
+                                  forcetargets=args.force,
+                                  forceall=args.forceall,
+                                  forcerun=args.forcerun,
+                                  prioritytargets=args.prioritize,
+                                  stats=args.stats,
+                                  keepgoing=args.keep_going,
+                                  jobname=args.jobname,
+                                  immediate_submit=args.immediate_submit,
+                                  ignore_ambiguity=args.allow_ambiguity,
+                                  lock=not args.nolock,
+                                  force_incomplete=args.rerun_incomplete,
+                                  ignore_incomplete=args.ignore_incomplete,
+                                  jobscript=args.jobscript,
+                                  notemp=args.notemp,
+                                  latency_wait=args.latency_wait)
+
+    target_rules = []
+
+    def log_handler(msg):
+        if msg["level"] == "rule_info":
+            target_rules.append(msg["name"])
+
+    run_snakemake(list_target_rules=True, log_handler=log_handler)
+    for target in args.target:
+        target_rules.remove(target)
+    app.extensions["targets"] = args.target + target_rules
+
+    resources = []
+
+    def log_handler(msg):
+        if msg["level"] == "info":
+            resources.append(msg["msg"])
+
+    run_snakemake(list_resources=True, log_handler=log_handler)
+    app.extensions["resources"] = resources
+    app.extensions["snakefilepath"] = os.path.abspath(args.snakefile)
+
+
+def run_snakemake(**kwargs):
+    args = dict(app.extensions["args"])
+    args.update(kwargs)
+    app.extensions["run_snakemake"](**args)
+
+
+@app.route("/")
+def index():
+    args = app.extensions["args"]
+    return render_template("gui.html",
+                           targets=app.extensions["targets"],
+                           cores_label="Nodes" if args["cluster"] else "Cores",
+                           resources=app.extensions["resources"],
+                           snakefilepath=app.extensions["snakefilepath"],
+                           version=__version__,
+                           node_width=15,
+                           node_padding=10)
+
+
+@app.route("/dag")
+def dag():
+    if app.extensions["dag"] is None:
+
+        def record(msg):
+            if msg["level"] == "d3dag":
+                app.extensions["dag"] = msg
+            elif msg["level"] in ("error", "info"):
+                app.extensions["log"].append(msg)
+
+        run_snakemake(printd3dag=True, log_handler=record)
+    return json.dumps(app.extensions["dag"])
+
+
+@app.route("/log/<int:id>")
+def log(id):
+    log = app.extensions["log"][id:]
+    return json.dumps(log)
+
+
+@app.route("/progress")
+def progress():
+    return json.dumps(app.extensions["progress"])
+
+
+def _run(dryrun=False):
+    def log_handler(msg):
+        level = msg["level"]
+        if level == "progress":
+            app.extensions["progress"] = msg
+        elif level in ("info", "error", "job_info", "job_finished"):
+            app.extensions["log"].append(msg)
+
+    with LOCK:
+        app.extensions["status"]["running"] = True
+    run_snakemake(log_handler=log_handler, dryrun=dryrun)
+    with LOCK:
+        app.extensions["status"]["running"] = False
+    return ""
+
+
+@app.route("/run")
+def run():
+    return _run()
+
+
+@app.route("/dryrun")
+def dryrun():
+    return _run(dryrun=True)
+
+
+@app.route("/status")
+def status():
+    with LOCK:
+        return json.dumps(app.extensions["status"])
+
+
+@app.route("/targets")
+def targets():
+    return json.dumps(app.extensions["targets"])
+
+
+@app.route("/get_args")
+def get_args():
+    return json.dumps(app.extensions["args"])
+
+
+@app.route("/set_args", methods=["POST"])
+def set_args():
+    app.extensions["args"].update({
+        name: value
+        for name, value in request.form.items() if not name.endswith("[]")
+    })
+    targets = request.form.getlist("targets[]")
+    if targets != app.extensions["args"]["targets"]:
+        app.extensions["dag"] = None
+    app.extensions["args"]["targets"] = targets
+    return ""
diff --git a/snakemake/io.py b/snakemake/io.py
new file mode 100644
index 0000000..f6bd974
--- /dev/null
+++ b/snakemake/io.py
@@ -0,0 +1,577 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import re
+import stat
+import time
+import json
+from itertools import product, chain
+from collections import Iterable, namedtuple
+from snakemake.exceptions import MissingOutputException, WorkflowError, WildcardError
+from snakemake.logging import logger
+
+
+def IOFile(file, rule=None):
+    f = _IOFile(file)
+    f.rule = rule
+    return f
+
+
+class _IOFile(str):
+    """
+    A file that is either input or output of a rule.
+    """
+
+    dynamic_fill = "__snakemake_dynamic__"
+
+    def __new__(cls, file):
+        obj = str.__new__(cls, file)
+        obj._is_function = type(file).__name__ == "function"
+        obj._file = file
+        obj.rule = None
+        obj._regex = None
+        return obj
+
+    @property
+    def file(self):
+        if not self._is_function:
+            return self._file
+        else:
+            raise ValueError("This IOFile is specified as a function and "
+                             "may not be used directly.")
+
+    @property
+    def exists(self):
+        return os.path.exists(self.file)
+
+    @property
+    def protected(self):
+        return self.exists and not os.access(self.file, os.W_OK)
+
+    @property
+    def mtime(self):
+        return os.stat(self.file).st_mtime
+
+    def is_newer(self, time):
+        return self.mtime > time
+
+    def prepare(self):
+        path_until_wildcard = re.split(self.dynamic_fill, self.file)[0]
+        dir = os.path.dirname(path_until_wildcard)
+        if len(dir) > 0 and not os.path.exists(dir):
+            try:
+                os.makedirs(dir)
+            except OSError as e:
+                # ignore Errno 17 "File exists" (reason: multiprocessing)
+                if e.errno != 17:
+                    raise e
+
+    def protect(self):
+        mode = (os.stat(self.file).st_mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~
+                stat.S_IWOTH)
+        if os.path.isdir(self.file):
+            for root, dirs, files in os.walk(self.file):
+                for d in dirs:
+                    os.chmod(os.path.join(root, d), mode)
+                for f in files:
+                    os.chmod(os.path.join(root, f), mode)
+        else:
+            os.chmod(self.file, mode)
+
+    def remove(self):
+        remove(self.file)
+
+    def touch(self):
+        try:
+            os.utime(self.file, None)
+        except OSError as e:
+            if e.errno == 2:
+                raise MissingOutputException(
+                    "Output file {} of rule {} shall be touched but "
+                    "does not exist.".format(self.file, self.rule.name),
+                    lineno=self.rule.lineno,
+                    snakefile=self.rule.snakefile)
+            else:
+                raise e
+
+    def touch_or_create(self):
+        try:
+            self.touch()
+        except MissingOutputException:
+            # create empty file
+            with open(self.file, "w") as f:
+                pass
+
+    def apply_wildcards(self, wildcards,
+                        fill_missing=False,
+                        fail_dynamic=False):
+        f = self._file
+        if self._is_function:
+            f = self._file(Namedlist(fromdict=wildcards))
+
+        return IOFile(apply_wildcards(f, wildcards,
+                                      fill_missing=fill_missing,
+                                      fail_dynamic=fail_dynamic,
+                                      dynamic_fill=self.dynamic_fill),
+                      rule=self.rule)
+
+    def get_wildcard_names(self):
+        return get_wildcard_names(self.file)
+
+    def contains_wildcard(self):
+        return contains_wildcard(self.file)
+
+    def regex(self):
+        if self._regex is None:
+            # compile a regular expression
+            self._regex = re.compile(regex(self.file))
+        return self._regex
+
+    def constant_prefix(self):
+        first_wildcard = _wildcard_regex.search(self.file)
+        if first_wildcard:
+            return self.file[:first_wildcard.start()]
+        return self.file
+
+    def match(self, target):
+        return self.regex().match(target)
+
+    def format_dynamic(self):
+        return self.replace(self.dynamic_fill, "{*}")
+
+    def __eq__(self, other):
+        f = other._file if isinstance(other, _IOFile) else other
+        return self._file == f
+
+    def __hash__(self):
+        return self._file.__hash__()
+
+
+_wildcard_regex = re.compile(
+    r"\{\s*(?P<name>\w+?)(\s*,\s*(?P<constraint>([^\{\}]+|\{\d+(,\d+)?\})*))?\s*\}")
+
+
+def wait_for_files(files, latency_wait=3):
+    """Wait for given files to be present in filesystem."""
+    files = list(files)
+    get_missing = lambda: [f for f in files if not os.path.exists(f)]
+    missing = get_missing()
+    if missing:
+        logger.info("Waiting at most {} seconds for missing files.".format(
+            latency_wait))
+        for _ in range(latency_wait):
+            if not get_missing():
+                return
+            time.sleep(1)
+        raise IOError("Missing files after {} seconds:\n{}".format(
+            latency_wait, "\n".join(get_missing())))
+
+
+def get_wildcard_names(pattern):
+    return set(match.group('name')
+               for match in _wildcard_regex.finditer(pattern))
+
+
+def contains_wildcard(path):
+    return _wildcard_regex.search(path) is not None
+
+
+def remove(file):
+    if os.path.exists(file):
+        if os.path.isdir(file):
+            try:
+                os.removedirs(file)
+            except OSError:
+                # ignore non empty directories
+                pass
+        else:
+            os.remove(file)
+
+
+def regex(filepattern):
+    f = []
+    last = 0
+    wildcards = set()
+    for match in _wildcard_regex.finditer(filepattern):
+        f.append(re.escape(filepattern[last:match.start()]))
+        wildcard = match.group("name")
+        if wildcard in wildcards:
+            if match.group("constraint"):
+                raise ValueError(
+                    "If multiple wildcards of the same name "
+                    "appear in a string, eventual constraints have to be defined "
+                    "at the first occurence and will be inherited by the others.")
+            f.append("(?P={})".format(wildcard))
+        else:
+            wildcards.add(wildcard)
+            f.append("(?P<{}>{})".format(wildcard, match.group("constraint") if
+                                         match.group("constraint") else ".+"))
+        last = match.end()
+    f.append(re.escape(filepattern[last:]))
+    f.append("$")  # ensure that the match spans the whole file
+    return "".join(f)
+
+
+def apply_wildcards(pattern, wildcards,
+                    fill_missing=False,
+                    fail_dynamic=False,
+                    dynamic_fill=None,
+                    keep_dynamic=False):
+    def format_match(match):
+        name = match.group("name")
+        try:
+            value = wildcards[name]
+            if fail_dynamic and value == dynamic_fill:
+                raise WildcardError(name)
+            return str(value)  # convert anything into a str
+        except KeyError as ex:
+            if keep_dynamic:
+                return "{{{}}}".format(name)
+            elif fill_missing:
+                return dynamic_fill
+            else:
+                raise WildcardError(str(ex))
+
+    return re.sub(_wildcard_regex, format_match, pattern)
+
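
For orientation, regex() and apply_wildcards() are inverses of a sort:
one turns a pattern into a matching regular expression, the other fills
wildcard values back in. A minimal sketch with a hypothetical pattern:

    import re
    from snakemake.io import regex, apply_wildcards

    pattern = r"mapped/{sample,\d+}.bam"
    m = re.match(regex(pattern), "mapped/101.bam")
    print(m.groupdict())                                # {'sample': '101'}
    print(apply_wildcards(pattern, {"sample": "102"}))  # mapped/102.bam
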
+
+def not_iterable(value):
+    return isinstance(value, str) or not isinstance(value, Iterable)
+
+
+class AnnotatedString(str):
+    def __init__(self, value):
+        self.flags = dict()
+
+
+def flag(value, flag_type, flag_value=True):
+    if isinstance(value, AnnotatedString):
+        value.flags[flag_type] = flag_value
+        return value
+    if not_iterable(value):
+        value = AnnotatedString(value)
+        value.flags[flag_type] = flag_value
+        return value
+    return [flag(v, flag_type, flag_value=flag_value) for v in value]
+
+
+def is_flagged(value, flag):
+    if isinstance(value, AnnotatedString):
+        return flag in value.flags
+    return False
+
+
+def temp(value):
+    """
+    A flag for an input or output file that shall be removed after usage.
+    """
+    if is_flagged(value, "protected"):
+        raise SyntaxError(
+            "Protected and temporary flags are mutually exclusive.")
+    return flag(value, "temp")
+
+
+def temporary(value):
+    """ An alias for temp. """
+    return temp(value)
+
+
+def protected(value):
+    """ A flag for a file that shall be write protected after creation. """
+    if is_flagged(value, "temp"):
+        raise SyntaxError(
+            "Protected and temporary flags are mutually exclusive.")
+    return flag(value, "protected")
+
+
+def dynamic(value):
+    """
+    A flag for a file that shall be dynamic, i.e. the multiplicity
+    (and wildcard values) will be expanded after a certain
+    rule has been run """
+    annotated = flag(value, "dynamic")
+    tocheck = [annotated] if not_iterable(annotated) else annotated
+    for file in tocheck:
+        matches = list(_wildcard_regex.finditer(file))
+        #if len(matches) != 1:
+        #    raise SyntaxError("Dynamic files need exactly one wildcard.")
+        for match in matches:
+            if match.group("constraint"):
+                raise SyntaxError(
+                    "The wildcards in dynamic files cannot be constrained.")
+    return annotated
+
+
+def touch(value):
+    return flag(value, "touch")
+
+
+def expand(*args, **wildcards):
+    """
+    Expand wildcards in given filepatterns.
+
+    Arguments
+    *args -- first arg: filepatterns as list or one single filepattern,
+        second arg (optional): a function to combine wildcard values
+        (itertools.product by default)
+    **wildcards -- the wildcards as keyword arguments
+        with their values as lists
+    """
+    filepatterns = args[0]
+    if len(args) == 1:
+        combinator = product
+    elif len(args) == 2:
+        combinator = args[1]
+    if isinstance(filepatterns, str):
+        filepatterns = [filepatterns]
+
+    def flatten(wildcards):
+        for wildcard, values in wildcards.items():
+            if isinstance(values, str) or not isinstance(values, Iterable):
+                values = [values]
+            yield [(wildcard, value) for value in values]
+
+    try:
+        return [filepattern.format(**comb)
+                for comb in map(dict, combinator(*flatten(wildcards))) for
+                filepattern in filepatterns]
+    except KeyError as e:
+        raise WildcardError("No values given for wildcard {}.".format(e))
+
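
A minimal usage sketch for expand(), with hypothetical wildcard values
(sorted() is used only to make the output order deterministic):

    from snakemake.io import expand

    print(expand("{sample}.bam", sample=["a", "b"]))
    # ['a.bam', 'b.bam']
    print(sorted(expand("{sample}.{rep}.bam", sample=["a", "b"], rep=[1, 2])))
    # ['a.1.bam', 'a.2.bam', 'b.1.bam', 'b.2.bam']
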
+
+def limit(pattern, **wildcards):
+    """
+    Limit wildcards to the given values.
+
+    Arguments:
+    **wildcards -- the wildcards as keyword arguments
+                   with their values as lists
+    """
+    return pattern.format(**{
+        wildcard: "{{{},{}}}".format(wildcard, "|".join(values))
+        for wildcard, values in wildcards.items()
+    })
+
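
limit() merely rewrites the pattern so that the wildcard carries a
regex constraint enumerating the allowed values. A minimal sketch with
hypothetical values:

    from snakemake.io import limit

    print(limit("{sample}.bam", sample=["a", "b"]))  # {sample,a|b}.bam
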
+
+def glob_wildcards(pattern):
+    """
+    Glob the values of the wildcards by matching the given pattern to the filesystem.
+    Returns a named tuple with a list of values for each wildcard.
+    """
+    pattern = os.path.normpath(pattern)
+    first_wildcard = re.search("{[^{]", pattern)
+    dirname = (os.path.dirname(pattern[:first_wildcard.start()])
+               if first_wildcard else os.path.dirname(pattern))
+    if not dirname:
+        dirname = "."
+
+    names = [match.group('name')
+             for match in _wildcard_regex.finditer(pattern)]
+    Wildcards = namedtuple("Wildcards", names)
+    wildcards = Wildcards(*[list() for name in names])
+
+    pattern = re.compile(regex(pattern))
+    for dirpath, dirnames, filenames in os.walk(dirname):
+        for f in chain(filenames, dirnames):
+            if dirpath != ".":
+                f = os.path.join(dirpath, f)
+            match = re.match(pattern, f)
+            if match:
+                for name, value in match.groupdict().items():
+                    getattr(wildcards, name).append(value)
+    return wildcards
+
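
A minimal usage sketch for glob_wildcards(), assuming hypothetical
files such as mapped/101.bam and mapped/102.bam exist on disk:

    from snakemake.io import glob_wildcards

    w = glob_wildcards("mapped/{sample}.bam")
    print(w.sample)  # e.g. ['101', '102']
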
+
+# TODO rewrite Namedlist!
+class Namedlist(list):
+    """
+    A list that additionally provides functions to name items. It is
+    hashable; note, however, that the hash does not consider the item names.
+    """
+
+    def __init__(self, toclone=None, fromdict=None, plainstr=False):
+        """
+        Create the object.
+
+        Arguments
+        toclone  -- another Namedlist that shall be cloned
+        fromdict -- a dict that shall be converted to a
+            Namedlist (keys become names)
+        plainstr -- if True, convert cloned items to plain strings
+        """
+        list.__init__(self)
+        self._names = dict()
+
+        if toclone:
+            self.extend(map(str, toclone) if plainstr else toclone)
+            if isinstance(toclone, Namedlist):
+                self.take_names(toclone.get_names())
+        if fromdict:
+            for key, item in fromdict.items():
+                self.append(item)
+                self.add_name(key)
+
+    def add_name(self, name):
+        """
+        Add a name to the last item.
+
+        Arguments
+        name -- a name
+        """
+        self.set_name(name, len(self) - 1)
+
+    def set_name(self, name, index, end=None):
+        """
+        Set the name of an item.
+
+        Arguments
+        name  -- a name
+        index -- the item index
+        end   -- if given, name the slice from index to end instead
+        """
+        if end is None:
+            self._names[name] = (index, index + 1)
+            setattr(self, name, self[index])
+        else:
+            self._names[name] = (index, end)
+            setattr(self, name, Namedlist(toclone=self[index:end]))
+
+    def get_names(self):
+        """
+        Get the defined names as (name, index) pairs.
+        """
+        for name, index in self._names.items():
+            yield name, index
+
+    def take_names(self, names):
+        """
+        Take over the given names.
+
+        Arguments
+        names -- the given names as (name, index) pairs
+        """
+        for name, (i, j) in names:
+            self.set_name(name, i, end=j)
+
+    def items(self):
+        for name in self._names:
+            yield name, getattr(self, name)
+
+    def allitems(self):
+        next = 0
+        for name, index in sorted(self._names.items(),
+                                  key=lambda item: item[1]):
+            start, end = index
+            if start > next:
+                for item in self[next:start]:
+                    yield None, item
+            yield name, getattr(self, name)
+            next = end
+        for item in self[next:]:
+            yield None, item
+
+    def insert_items(self, index, items):
+        self[index:index + 1] = items
+        add = len(items) - 1
+        for name, (i, j) in self._names.items():
+            if i > index:
+                self._names[name] = (i + add, j + add)
+            elif i == index:
+                self.set_name(name, i, end=i + len(items))
+
+    def keys(self):
+        return self._names
+
+    def plainstrings(self):
+        return self.__class__(toclone=self, plainstr=True)
+
+    def __getitem__(self, key):
+        try:
+            return super().__getitem__(key)
+        except TypeError:
+            pass
+        return getattr(self, key)
+
+    def __hash__(self):
+        return hash(tuple(self))
+
+    def __str__(self):
+        return " ".join(map(str, self))
+
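
A minimal sketch of how Namedlist combines list and attribute access
(the file names are hypothetical):

    from snakemake.io import Namedlist

    nl = Namedlist(["a.txt", "b.txt"])
    nl.set_name("first", 0)
    print(nl[0])        # a.txt
    print(nl.first)     # a.txt -- same item, via its name
    print(nl["first"])  # a.txt -- names also work as keys
    print(str(nl))      # a.txt b.txt
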
+
+class InputFiles(Namedlist):
+    pass
+
+
+class OutputFiles(Namedlist):
+    pass
+
+
+class Wildcards(Namedlist):
+    pass
+
+
+class Params(Namedlist):
+    pass
+
+
+class Resources(Namedlist):
+    pass
+
+
+class Log(Namedlist):
+    pass
+
+
+def _load_configfile(configpath):
+    "Tries to load a configfile first as JSON, then as YAML, into a dict."
+    try:
+        with open(configpath) as f:
+            try:
+                return json.load(f)
+            except ValueError:
+                f.seek(0)  # try again
+            try:
+                import yaml
+            except ImportError:
+                raise WorkflowError("Config file is not valid JSON and PyYAML "
+                                    "has not been installed. Please install "
+                                    "PyYAML to use YAML config files.")
+            try:
+                return yaml.load(f)
+            except yaml.YAMLError:
+                raise WorkflowError("Config file is not valid JSON or YAML.")
+    except FileNotFoundError:
+        raise WorkflowError("Config file {} not found.".format(configpath))
+
+
+def load_configfile(configpath):
+    "Loads a JSON or YAML configfile as a dict, then checks that it's a dict."
+    config = _load_configfile(configpath)
+    if not isinstance(config, dict):
+        raise WorkflowError("Config file must be given as JSON or YAML "
+                            "with keys at top level.")
+    return config
+
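
A minimal usage sketch, assuming a hypothetical config.yaml containing
e.g. "samples: [101, 102]" and an installed PyYAML:

    from snakemake.io import load_configfile

    config = load_configfile("config.yaml")
    print(config["samples"])  # [101, 102]
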
+##### Wildcard pumping detection #####
+
+
+class PeriodicityDetector:
+    def __init__(self, min_repeat=50, max_repeat=100):
+        """
+        Args:
+            max_len (int): The maximum length of the periodic substring.
+        """
+        self.regex = re.compile(
+            "((?P<value>.+)(?P=value){{{min_repeat},{max_repeat}}})$".format(
+                min_repeat=min_repeat - 1,
+                max_repeat=max_repeat - 1))
+
+    def is_periodic(self, value):
+        """Returns the periodic substring or None if not periodic."""
+        m = self.regex.search(value)  # search for a periodic suffix.
+        if m is not None:
+            return m.group("value")
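
The detector anchors at the end of the string, so it flags paths whose
suffix repeats often enough -- the symptom of a wildcard being "pumped"
by a cyclic rule. A minimal sketch with hypothetical strings:

    from snakemake.io import PeriodicityDetector

    detector = PeriodicityDetector(min_repeat=10, max_repeat=100)
    print(detector.is_periodic("sample" + ".ab" * 20))
    # '.ab.ab' -- a repeating unit (greedy, so it may span several periods)
    print(detector.is_periodic("sample.ab"))
    # None
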
diff --git a/snakemake/jobs.py b/snakemake/jobs.py
new file mode 100644
index 0000000..287c4de
--- /dev/null
+++ b/snakemake/jobs.py
@@ -0,0 +1,358 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import sys
+import base64
+import json
+
+from collections import defaultdict
+from itertools import chain
+from functools import partial
+from operator import attrgetter
+
+from snakemake.io import IOFile, Wildcards, Resources, _IOFile
+from snakemake.utils import format, listfiles
+from snakemake.exceptions import RuleException, ProtectedOutputException
+from snakemake.exceptions import UnexpectedOutputException
+from snakemake.logging import logger
+
+
+def jobfiles(jobs, type):
+    return chain(*map(attrgetter(type), jobs))
+
+
+class Job:
+    HIGHEST_PRIORITY = sys.maxsize
+
+    def __init__(self, rule, dag, targetfile=None, format_wildcards=None):
+        self.rule = rule
+        self.dag = dag
+        self.targetfile = targetfile
+
+        self.wildcards_dict = self.rule.get_wildcards(targetfile)
+        self.wildcards = Wildcards(fromdict=self.wildcards_dict)
+        self._format_wildcards = (self.wildcards if format_wildcards is None
+                                  else Wildcards(fromdict=format_wildcards))
+
+        (self.input, self.output, self.params, self.log, self.benchmark,
+         self.ruleio,
+         self.dependencies) = rule.expand_wildcards(self.wildcards_dict)
+
+        self.resources_dict = {
+            name: min(self.rule.workflow.global_resources.get(name, res), res)
+            for name, res in rule.resources.items()
+        }
+        self.threads = self.resources_dict["_cores"]
+        self.resources = Resources(fromdict=self.resources_dict)
+        self._inputsize = None
+
+        self.dynamic_output, self.dynamic_input = set(), set()
+        self.temp_output, self.protected_output = set(), set()
+        self.touch_output = set()
+        self.subworkflow_input = dict()
+        for f in self.output:
+            f_ = self.ruleio[f]
+            if f_ in self.rule.dynamic_output:
+                self.dynamic_output.add(f)
+            if f_ in self.rule.temp_output:
+                self.temp_output.add(f)
+            if f_ in self.rule.protected_output:
+                self.protected_output.add(f)
+            if f_ in self.rule.touch_output:
+                self.touch_output.add(f)
+        for f in self.input:
+            f_ = self.ruleio[f]
+            if f_ in self.rule.dynamic_input:
+                self.dynamic_input.add(f)
+            if f_ in self.rule.subworkflow_input:
+                self.subworkflow_input[f] = self.rule.subworkflow_input[f_]
+        self._hash = self.rule.__hash__()
+        # output files always contribute to the hash (a special case for
+        # dynamic output is deliberately disabled here)
+        for o in self.output:
+            self._hash ^= o.__hash__()
+
+    @property
+    def priority(self):
+        return self.dag.priority(self)
+
+    @property
+    def b64id(self):
+        return base64.b64encode((self.rule.name + "".join(self.output)
+                                 ).encode("utf-8")).decode("utf-8")
+
+    @property
+    def inputsize(self):
+        """
+        Return the size of the input files.
+        Input files need to be present.
+        """
+        if self._inputsize is None:
+            self._inputsize = sum(map(os.path.getsize, self.input))
+        return self._inputsize
+
+    @property
+    def message(self):
+        """ Return the message for this job. """
+        try:
+            return (self.format_wildcards(self.rule.message) if
+                    self.rule.message else None)
+        except AttributeError as ex:
+            raise RuleException(str(ex), rule=self.rule)
+        except KeyError as ex:
+            raise RuleException("Unknown variable in message "
+                                "of shell command: {}".format(str(ex)),
+                                rule=self.rule)
+
+    @property
+    def shellcmd(self):
+        """ Return the shell command. """
+        try:
+            return (self.format_wildcards(self.rule.shellcmd) if
+                    self.rule.shellcmd else None)
+        except AttributeError as ex:
+            raise RuleException(str(ex), rule=self.rule)
+        except KeyError as ex:
+            raise RuleException("Unknown variable when printing "
+                                "shell command: {}".format(str(ex)),
+                                rule=self.rule)
+
+    @property
+    def expanded_output(self):
+        """ Iterate over output files while dynamic output is expanded. """
+        for f, f_ in zip(self.output, self.rule.output):
+            if f in self.dynamic_output:
+                expansion = self.expand_dynamic(
+                    f_,
+                    restriction=self.wildcards,
+                    omit_value=_IOFile.dynamic_fill)
+                if not expansion:
+                    yield f_
+                for f, _ in expansion:
+                    yield IOFile(f, self.rule)
+            else:
+                yield f
+
+    @property
+    def dynamic_wildcards(self):
+        """ Return all wildcard values determined from dynamic output. """
+        combinations = set()
+        for f, f_ in zip(self.output, self.rule.output):
+            if f in self.dynamic_output:
+                for f, w in self.expand_dynamic(
+                    f_,
+                    restriction=self.wildcards,
+                    omit_value=_IOFile.dynamic_fill):
+                    combinations.add(tuple(w.items()))
+        wildcards = defaultdict(list)
+        for combination in combinations:
+            for name, value in combination:
+                wildcards[name].append(value)
+        return wildcards
+
+    @property
+    def missing_input(self):
+        """ Return missing input files. """
+        # omit file if it comes from a subworkflow
+        return set(f for f in self.input
+                   if not f.exists and f not in self.subworkflow_input)
+
+    @property
+    def output_mintime(self):
+        """ Return oldest output file. """
+        existing = [f.mtime for f in self.expanded_output if f.exists]
+        if self.benchmark and self.benchmark.exists:
+            existing.append(self.benchmark.mtime)
+        if existing:
+            return min(existing)
+        return None
+
+    @property
+    def input_maxtime(self):
+        """ Return newest input file. """
+        existing = [f.mtime for f in self.input if f.exists]
+        if existing:
+            return max(existing)
+        return None
+
+    def missing_output(self, requested=None):
+        """ Return missing output files. """
+        files = set()
+        if self.benchmark and (requested is None or
+                               self.benchmark in requested):
+            if not self.benchmark.exists:
+                files.add(self.benchmark)
+
+        for f, f_ in zip(self.output, self.rule.output):
+            if requested is None or f in requested:
+                if f in self.dynamic_output:
+                    if not self.expand_dynamic(
+                        f_,
+                        restriction=self.wildcards,
+                        omit_value=_IOFile.dynamic_fill):
+                        files.add("{} (dynamic)".format(f_))
+                elif not f.exists:
+                    files.add(f)
+        return files
+
+    @property
+    def existing_output(self):
+        return filter(lambda f: f.exists, self.expanded_output)
+
+    def check_protected_output(self):
+        protected = list(filter(lambda f: f.protected, self.expanded_output))
+        if protected:
+            raise ProtectedOutputException(self.rule, protected)
+
+    def prepare(self):
+        """
+        Prepare execution of job.
+        This includes creation of directories and deletion of previously
+        created dynamic files.
+        """
+
+        self.check_protected_output()
+
+        unexpected_output = self.dag.reason(self).missing_output.intersection(
+            self.existing_output)
+        if unexpected_output:
+            logger.warning(
+                "Warning: the following output files of rule {} were not "
+                "present when the DAG was created:\n{}".format(
+                    self.rule, unexpected_output))
+
+        if self.dynamic_output:
+            for f, _ in chain(*map(partial(self.expand_dynamic,
+                                           restriction=self.wildcards,
+                                           omit_value=_IOFile.dynamic_fill),
+                                   self.rule.dynamic_output)):
+                os.remove(f)
+        for f, f_ in zip(self.output, self.rule.output):
+            f.prepare()
+        for f in self.log:
+            f.prepare()
+        if self.benchmark:
+            self.benchmark.prepare()
+
+    def cleanup(self):
+        """ Cleanup output files. """
+        to_remove = [f for f in self.expanded_output if f.exists]
+        if to_remove:
+            logger.info("Removing output files of failed job {}"
+                        " since they might be corrupted:\n{}".format(
+                            self, ", ".join(to_remove)))
+            for f in to_remove:
+                f.remove()
+
+    def format_wildcards(self, string, **variables):
+        """ Format a string with variables from the job. """
+        _variables = dict()
+        _variables.update(self.rule.workflow.globals)
+        _variables.update(dict(input=self.input,
+                               output=self.output,
+                               params=self.params,
+                               wildcards=self._format_wildcards,
+                               threads=self.threads,
+                               resources=self.resources,
+                               log=self.log,
+                               version=self.rule.version,
+                               rule=self.rule.name, ))
+        _variables.update(variables)
+        try:
+            return format(string, **_variables)
+        except NameError as ex:
+            raise RuleException("NameError: " + str(ex), rule=self.rule)
+        except IndexError as ex:
+            raise RuleException("IndexError: " + str(ex), rule=self.rule)
+
+    def properties(self, omit_resources="_cores _nodes".split()):
+        resources = {
+            name: res
+            for name, res in self.resources.items()
+            if name not in omit_resources
+        }
+        params = {name: value for name, value in self.params.items()}
+        properties = {
+            "rule": self.rule.name,
+            "local": self.dag.workflow.is_local(self.rule),
+            "input": self.input,
+            "output": self.output,
+            "params": params,
+            "threads": self.threads,
+            "resources": resources
+        }
+        return properties
+
+    def json(self):
+        return json.dumps(self.properties())
+
+    def __repr__(self):
+        return self.rule.name
+
+    def __eq__(self, other):
+        if other is None:
+            return False
+        return self.rule == other.rule and (
+            self.dynamic_output or self.wildcards_dict == other.wildcards_dict)
+
+    def __lt__(self, other):
+        return self.rule.__lt__(other.rule)
+
+    def __gt__(self, other):
+        return self.rule.__gt__(other.rule)
+
+    def __hash__(self):
+        return self._hash
+
+    @staticmethod
+    def expand_dynamic(pattern, restriction=None, omit_value=None):
+        """ Expand dynamic files. """
+        return list(listfiles(pattern,
+                              restriction=restriction,
+                              omit_value=omit_value))
+
+
+class Reason:
+    def __init__(self):
+        self.updated_input = set()
+        self.updated_input_run = set()
+        self.missing_output = set()
+        self.incomplete_output = set()
+        self.forced = False
+        self.noio = False
+        self.nooutput = False
+        self.derived = True
+
+    def __str__(self):
+        s = list()
+        if self.forced:
+            s.append("Forced execution")
+        else:
+            if self.noio:
+                s.append("Rules with neither input nor "
+                         "output files are always executed.")
+            elif self.nooutput:
+                s.append("Rules with a run or shell declaration but no output "
+                         "are always executed.")
+            else:
+                if self.missing_output:
+                    s.append("Missing output files: {}".format(
+                        ", ".join(self.missing_output)))
+                if self.incomplete_output:
+                    s.append("Incomplete output files: {}".format(
+                        ", ".join(self.incomplete_output)))
+                updated_input = self.updated_input - self.updated_input_run
+                if updated_input:
+                    s.append("Updated input files: {}".format(
+                        ", ".join(updated_input)))
+                if self.updated_input_run:
+                    s.append("Input files updated by another job: {}".format(
+                        ", ".join(self.updated_input_run)))
+        s = "; ".join(s)
+        return s
+
+    def __bool__(self):
+        return bool(self.updated_input or self.missing_output or self.forced or
+                    self.updated_input_run or self.noio or self.nooutput)
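
Reason is a plain record that explains (and decides) whether a job has
to run. A minimal sketch with hypothetical file names:

    from snakemake.jobs import Reason

    reason = Reason()
    reason.missing_output.add("results/plot.pdf")
    reason.updated_input.add("data/table.csv")
    print(bool(reason))  # True -- the job needs to run
    print(reason)
    # Missing output files: results/plot.pdf; Updated input files: data/table.csv
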
diff --git a/snakemake/jobscript.sh b/snakemake/jobscript.sh
new file mode 100644
index 0000000..9ba2347
--- /dev/null
+++ b/snakemake/jobscript.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+# properties = {properties}
+{exec_job}
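
The two placeholders are presumably filled with str.format when a
cluster job is submitted. A minimal sketch with hypothetical values
(the inline template mirrors jobscript.sh above):

    # mirrors snakemake/jobscript.sh
    template = "#!/bin/sh\n# properties = {properties}\n{exec_job}\n"
    print(template.format(
        properties='{"rule": "all", "threads": 1}',
        exec_job="snakemake --snakefile Snakefile all"))
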
diff --git a/snakemake/logging.py b/snakemake/logging.py
new file mode 100644
index 0000000..e069bfd
--- /dev/null
+++ b/snakemake/logging.py
@@ -0,0 +1,259 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import logging as _logging
+import platform
+import time
+import sys
+import os
+import json
+from multiprocessing import Lock
+import tempfile
+
+
+class ColorizingStreamHandler(_logging.StreamHandler):
+    _output_lock = Lock()
+
+    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
+    RESET_SEQ = "\033[0m"
+    COLOR_SEQ = "\033[%dm"
+    BOLD_SEQ = "\033[1m"
+
+    colors = {
+        'WARNING': YELLOW,
+        'INFO': GREEN,
+        'DEBUG': BLUE,
+        'CRITICAL': RED,
+        'ERROR': RED
+    }
+
+    def __init__(self, nocolor=False, stream=sys.stderr, timestamp=False):
+        super().__init__(stream=stream)
+        self.nocolor = nocolor or not self.can_color_tty
+        self.timestamp = timestamp
+
+    @property
+    def can_color_tty(self):
+        if 'TERM' in os.environ and os.environ['TERM'] == 'dumb':
+            return False
+        return self.is_tty and not platform.system() == 'Windows'
+
+    @property
+    def is_tty(self):
+        isatty = getattr(self.stream, 'isatty', None)
+        return isatty and isatty()
+
+    def emit(self, record):
+        with self._output_lock:
+            try:
+                self.format(record)  # add the message to the record
+                self.stream.write(self.decorate(record))
+                self.stream.write(getattr(self, 'terminator', '\n'))
+                self.flush()
+            except BrokenPipeError as e:
+                raise e
+            except (KeyboardInterrupt, SystemExit):
+                # ignore any exceptions in these cases as any relevant messages have been printed before
+                pass
+            except Exception as e:
+                self.handleError(record)
+
+    def decorate(self, record):
+        message = [record.message]
+        if self.timestamp:
+            message.insert(0, "[{}] ".format(time.asctime()))
+        if not self.nocolor and record.levelname in self.colors:
+            message.insert(0, self.COLOR_SEQ %
+                           (30 + self.colors[record.levelname]))
+            message.append(self.RESET_SEQ)
+        return "".join(message)
+
+
+class Logger:
+    def __init__(self):
+        self.logger = _logging.getLogger(__name__)
+        self.log_handler = [self.text_handler]
+        self.stream_handler = None
+        self.printshellcmds = False
+        self.printreason = False
+        self.quiet = False  # referenced by text_handler before setup_logger runs
+
+    def setup(self):
+        # logfile output is done always
+        self.logfile_fd, self.logfile = tempfile.mkstemp(
+            prefix="",
+            suffix=".snakemake.log")
+        self.logfile_handler = _logging.FileHandler(self.logfile)
+        self.logger.addHandler(self.logfile_handler)
+
+    def cleanup(self):
+        self.logger.removeHandler(self.logfile_handler)
+        self.logfile_handler.close()
+        os.close(self.logfile_fd)
+        os.remove(self.logfile)
+
+    def get_logfile(self):
+        self.logfile_handler.flush()
+        return self.logfile
+
+    def handler(self, msg):
+        for handler in self.log_handler:
+            handler(msg)
+
+    def set_stream_handler(self, stream_handler):
+        if self.stream_handler is not None:
+            self.logger.removeHandler(self.stream_handler)
+        self.stream_handler = stream_handler
+        self.logger.addHandler(stream_handler)
+
+    def set_level(self, level):
+        self.logger.setLevel(level)
+
+    def info(self, msg):
+        self.handler(dict(level="info", msg=msg))
+
+    def debug(self, msg):
+        self.handler(dict(level="debug", msg=msg))
+
+    def error(self, msg):
+        self.handler(dict(level="error", msg=msg))
+
+    def progress(self, done=None, total=None):
+        self.handler(dict(level="progress", done=done, total=total))
+
+    def resources_info(self, msg):
+        self.handler(dict(level="resources_info", msg=msg))
+
+    def run_info(self, msg):
+        self.handler(dict(level="run_info", msg=msg))
+
+    def job_info(self, **msg):
+        msg["level"] = "job_info"
+        self.handler(msg)
+
+    def shellcmd(self, msg):
+        if msg is not None:
+            self.handler(dict(level="shellcmd", msg=msg))
+
+    def job_finished(self, **msg):
+        msg["level"] = "job_finished"
+        self.handler(msg)
+
+    def rule_info(self, **msg):
+        msg["level"] = "rule_info"
+        self.handler(msg)
+
+    def d3dag(self, **msg):
+        msg["level"] = "d3dag"
+        self.handler(msg)
+
+    def text_handler(self, msg):
+        """The default snakemake log handler.
+
+        Prints the output to the console.
+
+        Args:
+            msg (dict):     the log message dictionary
+        """
+
+        def job_info(msg):
+            def format_item(item, omit=None, valueformat=str):
+                value = msg[item]
+                if value != omit:
+                    return "\t{}: {}".format(item, valueformat(value))
+
+            yield "{}rule {}:".format("local" if msg["local"] else "",
+                                      msg["name"])
+            for item in "input output log".split():
+                fmt = format_item(item, omit=[], valueformat=", ".join)
+                if fmt is not None:
+                    yield fmt
+            singleitems = ["benchmark"]
+            if self.printreason:
+                singleitems.append("reason")
+            for item in singleitems:
+                fmt = format_item(item, omit=None)
+                if fmt is not None:
+                    yield fmt
+            for item, omit in zip("priority threads".split(), [0, 1]):
+                fmt = format_item(item, omit=omit)
+                if fmt is not None:
+                    yield fmt
+            resources = format_resources(msg["resources"])
+            if resources:
+                yield "\tresources: " + resources
+
+        level = msg["level"]
+        if level == "info":
+            self.logger.warning(msg["msg"])
+        elif level == "error":
+            self.logger.error(msg["msg"])
+        elif level == "debug":
+            self.logger.debug(msg["msg"])
+        elif level == "resources_info":
+            self.logger.warning(msg["msg"])
+        elif level == "run_info":
+            self.logger.warning(msg["msg"])
+        elif level == "progress" and not self.quiet:
+            done = msg["done"]
+            total = msg["total"]
+            self.logger.info("{} of {} steps ({:.0%}) done".format(
+                done, total, done / total))
+        elif level == "job_info":
+            if not self.quiet:
+                if msg["msg"] is not None:
+                    self.logger.info(msg["msg"])
+                else:
+                    self.logger.info("\n".join(job_info(msg)))
+        elif level == "shellcmd":
+            if self.printshellcmds:
+                self.logger.warning(msg["msg"])
+        elif level == "job_finished":
+            # do not display this on the console for now
+            pass
+        elif level == "rule_info":
+            self.logger.info(msg["name"])
+            if msg["docstring"]:
+                self.logger.info("\t" + msg["docstring"])
+        elif level == "d3dag":
+            print(json.dumps({"nodes": msg["nodes"], "links": msg["edges"]}))
+
+
+def format_resources(resources, omit_resources="_cores _nodes".split()):
+    return ", ".join("{}={}".format(name, value)
+                     for name, value in resources.items()
+                     if name not in omit_resources)
+
+
+def format_resource_names(resources, omit_resources="_cores _nodes".split()):
+    return ", ".join(name for name in resources if name not in omit_resources)
+
+
+logger = Logger()
+
+
+def setup_logger(handler=None,
+                 quiet=False,
+                 printshellcmds=False,
+                 printreason=False,
+                 nocolor=False,
+                 stdout=False,
+                 debug=False,
+                 timestamp=False):
+    logger.setup()
+    if handler is not None:
+        # custom log handler
+        logger.log_handler.append(handler)
+    else:
+        # console output only if no custom logger was specified
+        stream_handler = ColorizingStreamHandler(
+            nocolor=nocolor,
+            stream=sys.stdout if stdout else sys.stderr,
+            timestamp=timestamp)
+        logger.set_stream_handler(stream_handler)
+
+    logger.set_level(_logging.DEBUG if debug else _logging.INFO)
+    logger.quiet = quiet
+    logger.printshellcmds = printshellcmds
+    logger.printreason = printreason
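+
+# A minimal usage sketch of the module-level singleton (messages hypothetical):
+# console output goes through a ColorizingStreamHandler, while a temporary
+# logfile records everything until cleanup() removes it.
+#
+#     from snakemake.logging import logger, setup_logger
+#     setup_logger(printshellcmds=True, timestamp=True)
+#     logger.info("workflow started")
+#     logger.shellcmd("cat {input} > {output}")
+#     logger.cleanup()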
diff --git a/snakemake/output_index.py b/snakemake/output_index.py
new file mode 100644
index 0000000..dbcfd95
--- /dev/null
+++ b/snakemake/output_index.py
@@ -0,0 +1,52 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+from collections import defaultdict
+
+from snakemake.io import _IOFile
+
+
+class Node:
+    __slots__ = ["rules", "children"]
+
+    def __init__(self):
+        self.rules = set()
+        self.children = defaultdict(Node)
+
+    def __repr__(self):
+        return "({}) -> {}".format(self.rules, dict(self.children))
+
+
+class OutputIndex:
+    def __init__(self, rules):
+        self.root = Node()
+
+        for rule in rules:
+            output = list(rule.output)
+            if rule.benchmark:
+                output.append(rule.benchmark)
+            for constant_prefix in sorted(map(_IOFile.constant_prefix, output)):
+                self.add_output(rule, constant_prefix)
+
+    def add_output(self, rule, constant_prefix):
+        node = self.root
+        for c in constant_prefix:
+            node = node.children[c]
+            if rule in node.rules:
+                # a shorter prefix of this output is already recorded for
+                # this rule, hence we can stop here
+                return
+        node.rules.add(rule)
+
+    def match(self, f):
+        node = self.root
+        for c in f:
+            for rule in node.rules:
+                yield rule
+            node = node.children.get(c, None)
+            if node is None:
+                return
+        for rule in node.rules:
+            yield rule
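+
+# Illustrative sketch: rules are indexed under the constant (wildcard-free)
+# prefix of each output file, so matching a concrete path walks a single
+# branch of the character trie (rule collection hypothetical):
+#
+#     index = OutputIndex(workflow.rules)
+#     candidate_rules = set(index.match("mapped/sample1.bam"))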
diff --git a/snakemake/parser.py b/snakemake/parser.py
new file mode 100644
index 0000000..831a2f7
--- /dev/null
+++ b/snakemake/parser.py
@@ -0,0 +1,659 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import tokenize
+import textwrap
+import os
+from urllib.error import HTTPError, URLError, ContentTooShortError
+import urllib.request
+from io import TextIOWrapper
+
+from snakemake.exceptions import WorkflowError
+
+dd = textwrap.dedent
+
+INDENT = "\t"
+
+
+def is_newline(token, newline_tokens=set((tokenize.NEWLINE, tokenize.NL))):
+    return token.type in newline_tokens
+
+
+def is_indent(token):
+    return token.type == tokenize.INDENT
+
+
+def is_dedent(token):
+    return token.type == tokenize.DEDENT
+
+
+def is_op(token):
+    return token.type == tokenize.OP
+
+
+def is_greater(token):
+    return is_op(token) and token.string == ">"
+
+
+def is_comma(token):
+    return is_op(token) and token.string == ","
+
+
+def is_name(token):
+    return token.type == tokenize.NAME
+
+
+def is_colon(token):
+    return is_op(token) and token.string == ":"
+
+
+def is_comment(token):
+    return token.type == tokenize.COMMENT
+
+
+def is_string(token):
+    return token.type == tokenize.STRING
+
+
+def is_eof(token):
+    return token.type == tokenize.ENDMARKER
+
+
+def lineno(token):
+    return token.start[0]
+
+
+class StopAutomaton(Exception):
+    def __init__(self, token):
+        self.token = token
+
+
+class TokenAutomaton:
+
+    subautomata = dict()
+
+    def __init__(self, snakefile, base_indent=0, dedent=0, root=True):
+        self.root = root
+        self.snakefile = snakefile
+        self.state = None
+        self.base_indent = base_indent
+        self.line = 0
+        self.indent = 0
+        self.was_indented = False
+        self.lasttoken = None
+        self._dedent = dedent
+
+    @property
+    def dedent(self):
+        return self._dedent
+
+    @property
+    def effective_indent(self):
+        return self.base_indent + self.indent - self.dedent
+
+    def indentation(self, token):
+        if is_indent(token) or is_dedent(token):
+            self.indent = token.end[1] - self.base_indent
+            self.was_indented |= self.indent > 0
+
+    def consume(self):
+        for token in self.snakefile:
+            self.indentation(token)
+            try:
+                for t, orig in self.state(token):
+                    if self.lasttoken == "\n" and not t.isspace():
+                        yield INDENT * self.effective_indent, orig
+                    yield t, orig
+                    self.lasttoken = t
+            except tokenize.TokenError as e:
+                self.error(
+                    str(e).split(",")[0].strip("()''"), token
+                )  # TODO the inferred line number seems to be wrong sometimes
+
+    def error(self, msg, token):
+        raise SyntaxError(msg,
+                          (self.snakefile.path, lineno(token), None, None))
+
+    def subautomaton(self, automaton, *args, **kwargs):
+        return self.subautomata[automaton](
+            self.snakefile, *args,
+            base_indent=self.base_indent + self.indent,
+            dedent=self.dedent,
+            root=False, **kwargs)
+
+
+class KeywordState(TokenAutomaton):
+
+    prefix = ""
+
+    def __init__(self, snakefile, base_indent=0, dedent=0, root=True):
+        super().__init__(snakefile,
+                         base_indent=base_indent,
+                         dedent=dedent,
+                         root=root)
+        self.line = 0
+        self.state = self.colon
+
+    @property
+    def keyword(self):
+        return self.__class__.__name__.lower()[len(self.prefix):]
+
+    def end(self):
+        yield ")"
+
+    def decorate_end(self, token):
+        for t in self.end():
+            yield t, token
+
+    def colon(self, token):
+        if is_colon(token):
+            self.state = self.block
+            for t in self.start():
+                yield t, token
+        else:
+            self.error("Colon expected after keyword {}.".format(self.keyword),
+                       token)
+
+    def is_block_end(self, token):
+        return (self.line and self.indent <= 0) or is_eof(token)
+
+    def block(self, token):
+        if self.lasttoken == "\n" and is_comment(token):
+            # ignore lines containing only comments
+            self.line -= 1
+        if self.is_block_end(token):
+            for t, token_ in self.decorate_end(token):
+                yield t, token_
+            yield "\n", token
+            raise StopAutomaton(token)
+
+        if is_newline(token):
+            self.line += 1
+            yield token.string, token
+        elif not (is_indent(token) or is_dedent(token)):
+            if is_comment(token):
+                yield token.string, token
+            else:
+                for t in self.block_content(token):
+                    yield t
+
+    def yield_indent(self, token):
+        return token.string, token
+
+    def block_content(self, token):
+        yield token.string, token
+
+
+class GlobalKeywordState(KeywordState):
+    def start(self):
+        yield "workflow.{keyword}(".format(keyword=self.keyword)
+
+
+class DecoratorKeywordState(KeywordState):
+    decorator = None
+    args = list()
+
+    def start(self):
+        yield "@workflow.{}".format(self.decorator)
+        yield "\n"
+        yield "def __{}({}):".format(self.decorator, ", ".join(self.args))
+
+    def end(self):
+        yield ""
+
+
+class RuleKeywordState(KeywordState):
+    def __init__(self, snakefile,
+                 base_indent=0,
+                 dedent=0,
+                 root=True,
+                 rulename=None):
+        super().__init__(snakefile,
+                         base_indent=base_indent,
+                         dedent=dedent,
+                         root=root)
+        self.rulename = rulename
+
+    def start(self):
+        yield "\n"
+        yield "@workflow.{keyword}(".format(keyword=self.keyword)
+
+
+class SubworkflowKeywordState(KeywordState):
+    prefix = "Subworkflow"
+
+    def start(self):
+        yield ", {keyword}=".format(keyword=self.keyword)
+
+    def end(self):
+        # no end needed
+        return list()
+
+# Global keyword states
+
+
+class Include(GlobalKeywordState):
+    pass
+
+
+class Workdir(GlobalKeywordState):
+    pass
+
+
+class Configfile(GlobalKeywordState):
+    pass
+
+
+class Ruleorder(GlobalKeywordState):
+    def block_content(self, token):
+        if is_greater(token):
+            yield ",", token
+        elif is_name(token):
+            yield '"{}"'.format(token.string), token
+        else:
+            self.error('Expected a descending order of rule names, '
+                       'e.g. rule1 > rule2 > rule3 ...', token)
+
+# subworkflows
+
+
+class SubworkflowSnakefile(SubworkflowKeywordState):
+    pass
+
+
+class SubworkflowWorkdir(SubworkflowKeywordState):
+    pass
+
+
+class Subworkflow(GlobalKeywordState):
+
+    subautomata = dict(snakefile=SubworkflowSnakefile,
+                       workdir=SubworkflowWorkdir)
+
+    def __init__(self, snakefile, base_indent=0, dedent=0, root=True):
+        super().__init__(snakefile,
+                         base_indent=base_indent,
+                         dedent=dedent,
+                         root=root)
+        self.state = self.name
+        self.has_snakefile = False
+        self.has_workdir = False
+        self.has_name = False
+        self.primary_token = None
+
+    def end(self):
+        if not (self.has_snakefile or self.has_workdir):
+            self.error(
+                "A subworkflow needs either a path to a Snakefile or to a workdir.",
+                self.primary_token)
+        yield ")"
+
+    def name(self, token):
+        if is_name(token):
+            yield "workflow.subworkflow('{name}'".format(
+                name=token.string), token
+            self.has_name = True
+        elif is_colon(token) and self.has_name:
+            self.primary_token = token
+            self.state = self.block
+        else:
+            self.error("Expected name after subworkflow keyword.", token)
+
+    def block_content(self, token):
+        if is_name(token):
+            try:
+                if token.string == "snakefile":
+                    self.has_snakefile = True
+                if token.string == "workdir":
+                    self.has_workdir = True
+                for t in self.subautomaton(token.string).consume():
+                    yield t
+            except KeyError:
+                self.error("Unexpected keyword {} in "
+                           "subworkflow definition".format(token.string),
+                           token)
+            except StopAutomaton as e:
+                self.indentation(e.token)
+                for t in self.block(e.token):
+                    yield t
+        elif is_comment(token):
+            yield "\n", token
+            yield token.string, token
+        elif is_string(token):
+            # ignore docstring
+            pass
+        else:
+            self.error("Expecting subworkflow keyword, comment or docstrings "
+                       "inside a subworkflow definition.", token)
+
+
+class Localrules(GlobalKeywordState):
+    def block_content(self, token):
+        if is_comma(token):
+            yield ",", token
+        elif is_name(token):
+            yield '"{}"'.format(token.string), token
+        else:
+            self.error('Expected a comma-separated list of rules that shall '
+                       'not be executed by the cluster command.', token)
+
+# Rule keyword states
+
+
+class Input(RuleKeywordState):
+    pass
+
+
+class Output(RuleKeywordState):
+    pass
+
+
+class Params(RuleKeywordState):
+    pass
+
+
+class Threads(RuleKeywordState):
+    pass
+
+
+class Resources(RuleKeywordState):
+    pass
+
+
+class Priority(RuleKeywordState):
+    pass
+
+
+class Version(RuleKeywordState):
+    pass
+
+
+class Log(RuleKeywordState):
+    pass
+
+
+class Message(RuleKeywordState):
+    pass
+
+
+class Benchmark(RuleKeywordState):
+    pass
+
+
+class Run(RuleKeywordState):
+    def __init__(self, snakefile, rulename,
+                 base_indent=0,
+                 dedent=0,
+                 root=True):
+        super().__init__(snakefile,
+                         base_indent=base_indent,
+                         dedent=dedent,
+                         root=root)
+        self.rulename = rulename
+
+    def start(self):
+        yield "@workflow.run"
+        yield "\n"
+        yield ("def __{rulename}(input, output, params, wildcards, threads, "
+               "resources, log, version):".format(rulename=self.rulename))
+
+    def end(self):
+        yield ""
+
+    def is_block_end(self, token):
+        return (self.line and self.was_indented and self.indent <= 0) or is_eof(token)
+
+
+class Shell(Run):
+
+    overwrite_shellcmd = None
+
+    def __init__(self, snakefile, rulename,
+                 base_indent=0,
+                 dedent=0,
+                 root=True):
+        super().__init__(snakefile, rulename,
+                         base_indent=base_indent,
+                         dedent=dedent,
+                         root=root)
+        self.shellcmd = list()
+        self.token = None
+        if self.overwrite_shellcmd is not None:
+            self.block_content = self.overwrite_block_content
+
+    def is_block_end(self, token):
+        return (self.line and self.indent <= 0) or is_eof(token)
+
+    def start(self):
+        yield "@workflow.shellcmd("
+
+    def end(self):
+        # the end has been detected, so we can safely reset the indent to zero here
+        self.indent = 0
+        yield ")"
+        yield "\n"
+        for t in super().start():
+            yield t
+        yield "\n"
+        yield INDENT * (self.effective_indent + 1)
+        yield "shell("
+        yield "\n".join(self.shellcmd)
+        yield "\n"
+        yield ")"
+        for t in super().end():
+            yield t
+
+    def decorate_end(self, token):
+        if self.token is None:
+            # no block after shell keyword
+            self.error(
+                "Shell command must be given as string after the shell keyword.",
+                token)
+        for t in self.end():
+            yield t, self.token
+
+    def block_content(self, token):
+        self.token = token
+        self.shellcmd.append(token.string)
+        yield token.string, token
+
+    def overwrite_block_content(self, token):
+        if self.token is None:
+            self.token = token
+            shellcmd = '"{}"'.format(self.overwrite_shellcmd)
+            self.shellcmd.append(shellcmd)
+            yield shellcmd, token
+
+
+class Rule(GlobalKeywordState):
+    subautomata = dict(input=Input,
+                       output=Output,
+                       params=Params,
+                       threads=Threads,
+                       resources=Resources,
+                       priority=Priority,
+                       version=Version,
+                       log=Log,
+                       message=Message,
+                       benchmark=Benchmark,
+                       run=Run,
+                       shell=Shell)
+
+    def __init__(self, snakefile, base_indent=0, dedent=0, root=True):
+        super().__init__(snakefile,
+                         base_indent=base_indent,
+                         dedent=dedent,
+                         root=root)
+        self.state = self.name
+        self.rulename = None
+        self.lineno = None
+        self.run = False
+        self.snakefile.rulecount += 1
+
+    def start(self):
+        yield ("@workflow.rule(name={rulename}, lineno={lineno}, "
+               "snakefile='{snakefile}')".format(
+                   rulename=("'{}'".format(self.rulename) if self.rulename is
+                             not None else None),
+                   lineno=self.lineno,
+                   snakefile=self.snakefile.path.replace('\\', '\\\\')))
+
+    def end(self):
+        if not self.run:
+            yield "@workflow.norun()"
+            yield "\n"
+            for t in self.subautomaton("run", rulename=self.rulename).start():
+                yield t
+            # the end has been detected,
+            # so we can safely reset the indent to zero here
+            self.indent = 0
+            yield "\n"
+            yield INDENT * (self.effective_indent + 1)
+            yield "pass"
+
+    def name(self, token):
+        if is_name(token):
+            self.rulename = token.string
+        elif is_colon(token):
+            self.lineno = self.snakefile.lines + 1
+            self.state = self.block
+            for t in self.start():
+                yield t, token
+        else:
+            self.error("Expected name or colon after rule keyword.", token)
+
+    def block_content(self, token):
+        if is_name(token):
+            try:
+                if token.string == "run" or token.string == "shell":
+                    if self.run:
+                        raise self.error(
+                            "Multiple run or shell keywords in rule {}.".format(
+                                self.rulename), token)
+                    self.run = True
+                for t in self.subautomaton(token.string,
+                                           rulename=self.rulename).consume():
+                    yield t
+            except KeyError:
+                self.error("Unexpected keyword {} in "
+                           "rule definition".format(token.string), token)
+            except StopAutomaton as e:
+                self.indentation(e.token)
+                for t in self.block(e.token):
+                    yield t
+        elif is_comment(token):
+            yield "\n", token
+            yield token.string, token
+        elif is_string(token):
+            yield "\n", token
+            yield "@workflow.docstring({})".format(token.string), token
+        else:
+            self.error("Expecting rule keyword, comment or docstrings "
+                       "inside a rule definition.", token)
+
+    @property
+    def dedent(self):
+        return self.indent
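+
+# A hedged sketch of the translation (not verbatim compiler output): a
+# Snakefile rule such as
+#
+#     rule a:
+#         input: "in.txt"
+#         output: "out.txt"
+#         shell: "cp {input} {output}"
+#
+# is compiled into decorated Python of roughly this form:
+#
+#     @workflow.rule(name='a', lineno=1, snakefile='Snakefile')
+#     @workflow.input("in.txt")
+#     @workflow.output("out.txt")
+#     @workflow.shellcmd("cp {input} {output}")
+#     @workflow.run
+#     def __a(input, output, params, wildcards, threads, resources,
+#             log, version):
+#         shell("cp {input} {output}")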
+
+
+class OnSuccess(DecoratorKeywordState):
+    decorator = "onsuccess"
+    args = ["log"]
+
+
+class OnError(DecoratorKeywordState):
+    decorator = "onerror"
+    args = ["log"]
+
+
+class Python(TokenAutomaton):
+
+    subautomata = dict(include=Include,
+                       workdir=Workdir,
+                       configfile=Configfile,
+                       ruleorder=Ruleorder,
+                       rule=Rule,
+                       subworkflow=Subworkflow,
+                       localrules=Localrules,
+                       onsuccess=OnSuccess,
+                       onerror=OnError)
+
+    def __init__(self, snakefile, base_indent=0, dedent=0, root=True):
+        super().__init__(snakefile,
+                         base_indent=base_indent,
+                         dedent=dedent,
+                         root=root)
+        self.state = self.python
+
+    def python(self, token):
+        if not (is_indent(token) or is_dedent(token)):
+            if self.lasttoken is None or self.lasttoken.isspace():
+                try:
+                    for t in self.subautomaton(token.string).consume():
+                        yield t
+                except KeyError:
+                    yield token.string, token
+                except StopAutomaton as e:
+                    self.indentation(e.token)
+                    for t in self.python(e.token):
+                        yield t
+            else:
+                yield token.string, token
+
+
+class Snakefile:
+    def __init__(self, path):
+        self.path = path
+        try:
+            self.file = open(self.path, encoding="utf-8")
+        except FileNotFoundError as e:
+            try:
+                self.file = TextIOWrapper(urllib.request.urlopen(self.path),
+                                          encoding="utf-8")
+            except (HTTPError, URLError, ContentTooShortError, ValueError):
+                raise WorkflowError("Failed to open {}.".format(path))
+
+        self.tokens = tokenize.generate_tokens(self.file.readline)
+        self.rulecount = 0
+        self.lines = 0
+
+    def __next__(self):
+        return next(self.tokens)
+
+    def __iter__(self):
+        return self
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.file.close()
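+
+# The wrapper falls back to urllib, so remote snakefiles work too
+# (URL hypothetical):
+#
+#     with Snakefile("https://example.com/Snakefile") as snakefile:
+#         for token in snakefile:
+#             ...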
+
+
+def format_tokens(tokens):
+    t_ = None
+    for t in tokens:
+        if t_ and not t.isspace() and not t_.isspace():
+            yield " "
+        yield t
+        t_ = t
+
+
+def parse(path, overwrite_shellcmd=None):
+    Shell.overwrite_shellcmd = overwrite_shellcmd
+    with Snakefile(path) as snakefile:
+        automaton = Python(snakefile)
+        linemap = dict()
+        compilation = list()
+        for t, orig_token in automaton.consume():
+            l = lineno(orig_token)
+            linemap.update(dict((i, l) for i in range(
+                snakefile.lines + 1, snakefile.lines + t.count("\n") + 1)))
+            snakefile.lines += t.count("\n")
+            compilation.append(t)
+        compilation = "".join(format_tokens(compilation))
+        last = max(linemap)
+        linemap[last + 1] = linemap[last]
+        return compilation, linemap
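+
+# Sketch of the top-level entry point (assuming a local "Snakefile" exists):
+# parse() returns plain Python source that drives the workflow object, plus a
+# mapping from compiled line numbers back to the original ones, which the
+# caller can feed to compile() for error reporting:
+#
+#     compilation, linemap = parse("Snakefile")
+#     code = compile(compilation, "Snakefile", "exec")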
diff --git a/snakemake/persistence.py b/snakemake/persistence.py
new file mode 100644
index 0000000..7ccb9d8
--- /dev/null
+++ b/snakemake/persistence.py
@@ -0,0 +1,299 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import shutil
+import signal
+import marshal
+import pickle
+from base64 import urlsafe_b64encode
+from functools import lru_cache, partial
+from itertools import filterfalse, count
+
+from snakemake.logging import logger
+from snakemake.jobs import jobfiles
+from snakemake.utils import listfiles
+
+
+class Persistence:
+    def __init__(self, nolock=False, dag=None, warn_only=False):
+        self.path = os.path.abspath(".snakemake")
+        if not os.path.exists(self.path):
+            os.mkdir(self.path)
+        self._lockdir = os.path.join(self.path, "locks")
+        if not os.path.exists(self._lockdir):
+            os.mkdir(self._lockdir)
+
+        self.dag = dag
+        self._lockfile = dict()
+        # cache for the files property below
+        self._files = None
+
+        self._incomplete_path = os.path.join(self.path, "incomplete_files")
+        self._version_path = os.path.join(self.path, "version_tracking")
+        self._code_path = os.path.join(self.path, "code_tracking")
+        self._rule_path = os.path.join(self.path, "rule_tracking")
+        self._input_path = os.path.join(self.path, "input_tracking")
+        self._params_path = os.path.join(self.path, "params_tracking")
+        self._shellcmd_path = os.path.join(self.path, "shellcmd_tracking")
+
+        for d in (self._incomplete_path, self._version_path, self._code_path,
+                  self._rule_path, self._input_path, self._params_path,
+                  self._shellcmd_path):
+            if not os.path.exists(d):
+                os.mkdir(d)
+
+        if nolock:
+            self.lock = self.noop
+            self.unlock = self.noop
+        if warn_only:
+            self.lock = self.lock_warn_only
+            self.unlock = self.noop
+
+    @property
+    def files(self):
+        if self._files is None:
+            self._files = set(self.dag.output_files)
+        return self._files
+
+    @property
+    def locked(self):
+        inputfiles = set(self.all_inputfiles())
+        outputfiles = set(self.all_outputfiles())
+        if os.path.exists(self._lockdir):
+            for lockfile in self._locks("input"):
+                with open(lockfile) as lock:
+                    for f in lock:
+                        if f in outputfiles:
+                            return True
+            for lockfile in self._locks("output"):
+                with open(lockfile) as lock:
+                    for f in lock:
+                        if f in outputfiles or f in inputfiles:
+                            return True
+        return False
+
+    def lock_warn_only(self):
+        if self.locked:
+            logger.info(
+                "Error: Directory cannot be locked. This usually "
+                "means that another Snakemake instance is running on this directory."
+                "Another possiblity is that a previous run exited unexpectedly.")
+
+    def lock(self):
+        if self.locked:
+            raise IOError("Another snakemake process "
+                          "has locked this directory.")
+        self._lock(self.all_inputfiles(), "input")
+        self._lock(self.all_outputfiles(), "output")
+
+    def unlock(self, *args):
+        logger.debug("unlocking")
+        for lockfile in self._lockfile.values():
+            try:
+                logger.debug("removing lock")
+                os.remove(lockfile)
+            except OSError as e:
+                if e.errno != 2:  # re-raise unless the lock file was already gone
+                    raise e
+        logger.debug("removed all locks")
+
+    def cleanup_locks(self):
+        shutil.rmtree(self._lockdir)
+
+    def cleanup_metadata(self, path):
+        self._delete_record(self._incomplete_path, path)
+        self._delete_record(self._version_path, path)
+        self._delete_record(self._code_path, path)
+        self._delete_record(self._rule_path, path)
+        self._delete_record(self._input_path, path)
+        self._delete_record(self._params_path, path)
+        self._delete_record(self._shellcmd_path, path)
+
+    def started(self, job):
+        for f in job.output:
+            self._record(self._incomplete_path, "", f)
+
+    def finished(self, job):
+        version = str(job.rule.version) if job.rule.version is not None else None
+        code = self._code(job.rule)
+        input = self._input(job)
+        params = self._params(job)
+        shellcmd = self._shellcmd(job)
+        for f in job.expanded_output:
+            self._delete_record(self._incomplete_path, f)
+            self._record(self._version_path, version, f)
+            self._record(self._code_path, code, f, bin=True)
+            self._record(self._rule_path, job.rule.name, f)
+            self._record(self._input_path, input, f)
+            self._record(self._params_path, params, f)
+            self._record(self._shellcmd_path, shellcmd, f)
+
+    def cleanup(self, job):
+        for f in job.expanded_output:
+            self._delete_record(self._incomplete_path, f)
+            self._delete_record(self._version_path, f)
+            self._delete_record(self._code_path, f)
+            self._delete_record(self._rule_path, f)
+            self._delete_record(self._input_path, f)
+            self._delete_record(self._params_path, f)
+            self._delete_record(self._shellcmd_path, f)
+
+    def incomplete(self, job):
+        marked_incomplete = partial(self._exists_record, self._incomplete_path)
+        return any(
+            map(lambda f: f.exists and marked_incomplete(f), job.output))
+
+    def version(self, path):
+        return self._read_record(self._version_path, path)
+
+    def rule(self, path):
+        return self._read_record(self._rule_path, path)
+
+    def input(self, path):
+        files = self._read_record(self._input_path, path)
+        if files is not None:
+            return files.split("\n")
+        return None
+
+    def shellcmd(self, path):
+        return self._read_record(self._shellcmd_path, path)
+
+    def version_changed(self, job, file=None):
+        cr = partial(self._changed_records, self._version_path,
+                     job.rule.version)
+        if file is None:
+            return cr(*job.output)
+        else:
+            return bool(list(cr(file)))
+
+    def code_changed(self, job, file=None):
+        cr = partial(self._changed_records, self._code_path,
+                     self._code(job.rule),
+                     bin=True)
+        if file is None:
+            return cr(*job.output)
+        else:
+            return bool(list(cr(file)))
+
+    def input_changed(self, job, file=None):
+        cr = partial(self._changed_records, self._input_path, self._input(job))
+        if file is None:
+            return cr(*job.output)
+        else:
+            return bool(list(cr(file)))
+
+    def params_changed(self, job, file=None):
+        cr = partial(self._changed_records, self._params_path,
+                     self._params(job))
+        if file is None:
+            return cr(*job.output)
+        else:
+            return bool(list(cr(file)))
+
+    def noop(self, *args):
+        pass
+
+    def _b64id(self, s):
+        return urlsafe_b64encode(str(s).encode()).decode()
+
+    @lru_cache()
+    def _code(self, rule):
+        code = rule.run_func.__code__
+        return pickle_code(code)
+
+    @lru_cache()
+    def _input(self, job):
+        return "\n".join(sorted(job.input))
+
+    @lru_cache()
+    def _params(self, job):
+        return "\n".join(sorted(job.params))
+
+    @lru_cache()
+    def _output(self, job):
+        return sorted(job.output)
+
+    @lru_cache()
+    def _shellcmd(self, job):
+        return job.shellcmd
+
+    def _record(self, subject, value, id, bin=False):
+        recpath = self._record_path(subject, id)
+        if value is not None:
+            os.makedirs(os.path.dirname(recpath), exist_ok=True)
+            with open(recpath, "wb" if bin else "w") as f:
+                f.write(value)
+        else:
+            if os.path.exists(recpath):
+                os.remove(recpath)
+
+    def _delete_record(self, subject, id):
+        try:
+            recpath = self._record_path(subject, id)
+            os.remove(recpath)
+            recdirs = os.path.relpath(os.path.dirname(recpath), start=subject)
+            if recdirs != ".":
+                os.removedirs(recdirs)
+        except OSError as e:
+            if e.errno != 2:  # re-raise unless the record was already gone
+                raise e
+
+    def _read_record(self, subject, id, bin=False):
+        if not self._exists_record(subject, id):
+            return None
+        with open(self._record_path(subject, id), "rb" if bin else "r") as f:
+            return f.read()
+
+    def _changed_records(self, subject, value, *ids, bin=False):
+        equals = partial(self._equals_record, subject, value, bin=bin)
+        return filter(
+            lambda id: self._exists_record(subject, id) and not equals(id),
+            ids)
+
+    def _equals_record(self, subject, value, id, bin=False):
+        return self._read_record(subject, id, bin=bin) == value
+
+    def _exists_record(self, subject, id):
+        return os.path.exists(self._record_path(subject, id))
+
+    def _locks(self, type):
+        return (f for f, _ in listfiles(
+            os.path.join(self._lockdir, "{{n,[0-9]+}}.{}.lock".format(type)))
+                if not os.path.isdir(f))
+
+    def _lock(self, files, type):
+        for i in count(0):
+            lockfile = os.path.join(self._lockdir,
+                                    "{}.{}.lock".format(i, type))
+            if not os.path.exists(lockfile):
+                self._lockfile[type] = lockfile
+                with open(lockfile, "w") as lock:
+                    print(*files, sep="\n", file=lock)
+                return
+
+    def _record_path(self, subject, id):
+        max_len = os.pathconf(subject, "PC_NAME_MAX")
+        b64id = self._b64id(id)
+        # split into chunks of proper length
+        b64id = [b64id[i:i + max_len - 1]
+                 for i in range(0, len(b64id), max_len - 1)]
+        # prepend directory parts with @ (which cannot occur in base64) to
+        # avoid conflicts with base64-named files in the same directory
+        b64id = ["@" + s for s in b64id[:-1]] + [b64id[-1]]
+        path = os.path.join(subject, *b64id)
+        return path
+
+    def all_outputfiles(self):
+        # we only look at output files that will be updated
+        return jobfiles(self.dag.needrun_jobs, "output")
+
+    def all_inputfiles(self):
+        # we consider all input files, including those of jobs that will not run
+        return jobfiles(self.dag.jobs, "input")
+
+
+def pickle_code(code):
+    consts = [(pickle_code(const) if type(const) == type(code) else const)
+              for const in code.co_consts]
+    return pickle.dumps(
+        (code.co_code, code.co_varnames, consts, code.co_names))
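+
+# A hedged usage sketch (dag hypothetical): lock the working directory for
+# the duration of a run and release the locks afterwards.
+#
+#     persistence = Persistence(dag=dag)
+#     persistence.lock()
+#     try:
+#         ...  # execute jobs, recording metadata via started()/finished()
+#     finally:
+#         persistence.unlock()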
diff --git a/snakemake/report.css b/snakemake/report.css
new file mode 100644
index 0000000..105c791
--- /dev/null
+++ b/snakemake/report.css
@@ -0,0 +1,147 @@
+/**
+Credits for the colors and font selection go to the Twitter Bootstrap framework.
+*/
+
+
+body {
+    color: rgb(51, 51, 51);
+    font-size: 10pt;
+    padding-top: 10px;
+    font-family: "Helvetica Neue",Helvetica,Arial,sans-serif;
+}
+
+h1 {
+    font-size: 150%;
+}
+
+h2 {
+    font-size: 140%;
+}
+
+h3 {
+    font-size: 130%;
+}
+
+h4 {
+    font-size: 120%;
+}
+
+h5 {
+    font-size: 110%;
+}
+
+h6 {
+    font-size: 100%;
+}
+
+div#attachments {
+    color: gray;
+    padding: 0px;
+    border: 1px solid white;
+    border-radius: 4px 4px 4px 4px;
+    padding-top: 20px;
+}
+
+div#attachments :target a {
+    color: rgb(70, 136, 71);
+    border: 1px solid rgb(221, 221, 221);
+    border-radius: 4px 4px 4px 4px;
+}
+
+h1.title {
+    text-align: center;
+    font-size: 180%;
+}
+
+div.document {
+    position: relative;
+    background: white;
+    max-width: 800px;
+    margin: auto;
+    padding: 20px;
+    border: 1px solid rgb(221, 221, 221);
+    border-radius: 4px 4px 4px 4px;
+}
+
+div.document:after {
+    content: "snakemake report";
+    position: absolute;
+    top: -1px;
+    right: -1px;
+    padding: 3px 7px;
+    background-color: #f5f5f5;
+    border: 1px solid rgb(221, 221, 221);
+    color: #9da0a4;
+    font-weight: bold;
+    font-size: 12pt;
+    border-radius: 0 0 0 4px;
+}
+
+div.document p {
+    text-align: justify;
+}
+
+div#metadata {
+    text-align: right;
+}
+
+table.docutils {
+    border: none;
+    border-collapse: collapse;
+    border-top: 2px solid gray;
+    border-bottom: 2px solid gray;
+    text-align: center;
+}
+
+table.docutils th {
+    border: none;
+    border-top: 2px solid gray;
+    border-bottom: 2px solid gray;
+    padding: 5px;
+}
+
+table.docutils td {
+    border: none;
+    padding: 5px;
+}
+
+table.docutils th:last-child, td:last-child {
+    text-align: left;
+}
+
+table.docutils th:first-child, td:first-child {
+    text-align: right;
+}
+
+table.docutils th:only-child, td:only-child {
+    text-align: center;
+}
+
+table.docutils.footnote {
+    border: none;
+    text-align: left;
+}
+
+a {
+    color: rgb(0, 136, 204);
+    text-decoration: none;
+}
+
+a:hover {
+    color: rgb(0, 85, 128);
+    text-decoration: underline;
+}
+
+
+div.figure {
+    margin-left: 2em;
+    margin-right: 2em;
+}
+
+img {
+    max-width: 100%;
+}
+
+p.caption {
+    font-style: italic;
+}
diff --git a/snakemake/report.py b/snakemake/report.py
new file mode 100644
index 0000000..01e7561
--- /dev/null
+++ b/snakemake/report.py
@@ -0,0 +1,127 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import mimetypes
+import base64
+import textwrap
+import datetime
+import io
+
+from docutils.parsers.rst.directives.images import Image, Figure
+from docutils.parsers.rst import directives
+from docutils.core import publish_file
+
+from snakemake.utils import format
+from snakemake.logging import logger
+
+
+class EmbeddedMixin(object):
+    """
+    Replaces the URI of a directive with a base64-encoded version.
+
+    Useful for embedding images/figures in reports.
+    """
+
+    def run(self):
+        """
+        Image.run() handles most of the
+        """
+        result = Image.run(self)
+        reference = directives.uri(self.arguments[0])
+        self.options['uri'] = data_uri(reference)
+        return result
+
+# Create (and register) new image:: and figure:: directives that use a base64
+# data URI instead of pointing to a filename.
+
+
+class EmbeddedImage(Image, EmbeddedMixin):
+    pass
+
+
+directives.register_directive('embeddedimage', EmbeddedImage)
+
+
+class EmbeddedFigure(Figure, EmbeddedMixin):
+    pass
+
+
+directives.register_directive('embeddedfigure', EmbeddedFigure)
+
+
+def data_uri(file, defaultenc="utf8"):
+    """Craft a base64 data URI from file with proper encoding and mimetype."""
+    mime, encoding = mimetypes.guess_type(file)
+    if mime is None:
+        mime = "text/plain"
+        logger.info("Could not detect mimetype for {}, assuming "
+                    "text/plain.".format(file))
+    if encoding is None:
+        encoding = defaultenc
+    with open(file, "rb") as f:
+        data = base64.b64encode(f.read())
+    uri = ("data:{mime};charset={charset};filename={filename};base64,{data}"
+           "".format(filename=os.path.basename(file),
+                     mime=mime,
+                     charset=encoding,
+                     data=data.decode()))
+    return uri
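+
+# For example, for a PNG logo the returned URI looks schematically like
+#
+#     data:image/png;charset=utf8;filename=logo.png;base64,iVBORw0KGgo...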
+
+
+def report(text, path,
+           stylesheet=os.path.join(os.path.dirname(__file__), "report.css"),
+           defaultenc="utf8",
+           template=None,
+           metadata=None, **files):
+    outmime, _ = mimetypes.guess_type(path)
+    if outmime != "text/html":
+        raise ValueError("Path to report output has to be an HTML file.")
+    definitions = textwrap.dedent("""
+    .. role:: raw-html(raw)
+       :format: html
+
+    """)
+
+    metadata = textwrap.dedent("""
+
+    .. container::
+       :name: metadata
+
+       {metadata}{date}
+
+    """).format(metadata=metadata + " | " if metadata else "",
+                date=datetime.date.today().isoformat())
+
+    text = format(textwrap.dedent(text), stepout=3)
+
+    attachments = [textwrap.dedent("""
+        .. container::
+           :name: attachments
+
+        """)]
+    for name, file in sorted(files.items()):
+        data = data_uri(file)
+        attachments.append('''
+   .. container::
+      :name: {name}
+
+      [{name}] :raw-html:`<a href="{data}" download="{filename}" draggable="true">{filename}</a>`
+            '''.format(name=name,
+                       filename=os.path.basename(file),
+                       data=data))
+
+    text = definitions + text + "\n\n" + "\n\n".join(attachments) + metadata
+
+    overrides = dict()
+    if template is not None:
+        overrides["template"] = template
+    if stylesheet is not None:
+        overrides["stylesheet_path"] = stylesheet
+    with open(path, "w") as html:
+        publish_file(source=io.StringIO(text),
+                     destination=html,
+                     writer_name="html",
+                     settings_overrides=overrides)
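+
+# A minimal sketch of calling report() from a rule's run block, embedding one
+# attachment under the name T1 (file paths hypothetical):
+#
+#     report("""
+#     Test report
+#     ===========
+#
+#     See the attached table T1_.
+#     """, output[0], T1="results/table.tsv")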
diff --git a/snakemake/rules.py b/snakemake/rules.py
new file mode 100644
index 0000000..3608167
--- /dev/null
+++ b/snakemake/rules.py
@@ -0,0 +1,520 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import re
+import sys
+import inspect
+import sre_constants
+from collections import defaultdict
+
+from snakemake.io import IOFile, _IOFile, protected, temp, dynamic, Namedlist
+from snakemake.io import expand, InputFiles, OutputFiles, Wildcards, Params, Log
+from snakemake.io import apply_wildcards, is_flagged, not_iterable
+from snakemake.exceptions import RuleException, IOFileException, WildcardError, InputFunctionException
+
+
+class Rule:
+    def __init__(self, *args, lineno=None, snakefile=None):
+        """
+        Create a rule
+
+        Arguments
+        name -- the name of the rule
+        """
+        if len(args) == 2:
+            name, workflow = args
+            self.name = name
+            self.workflow = workflow
+            self.docstring = None
+            self.message = None
+            self._input = InputFiles()
+            self._output = OutputFiles()
+            self._params = Params()
+            self.dependencies = dict()
+            self.dynamic_output = set()
+            self.dynamic_input = set()
+            self.temp_output = set()
+            self.protected_output = set()
+            self.touch_output = set()
+            self.subworkflow_input = dict()
+            self.resources = dict(_cores=1, _nodes=1)
+            self.priority = 0
+            self.version = None
+            self._log = Log()
+            self._benchmark = None
+            self.wildcard_names = set()
+            self.lineno = lineno
+            self.snakefile = snakefile
+            self.run_func = None
+            self.shellcmd = None
+            self.norun = False
+        elif len(args) == 1:
+            other = args[0]
+            self.name = other.name
+            self.workflow = other.workflow
+            self.docstring = other.docstring
+            self.message = other.message
+            self._input = InputFiles(other._input)
+            self._output = OutputFiles(other._output)
+            self._params = Params(other._params)
+            self.dependencies = dict(other.dependencies)
+            self.dynamic_output = set(other.dynamic_output)
+            self.dynamic_input = set(other.dynamic_input)
+            self.temp_output = set(other.temp_output)
+            self.protected_output = set(other.protected_output)
+            self.touch_output = set(other.touch_output)
+            self.subworkflow_input = dict(other.subworkflow_input)
+            self.resources = other.resources
+            self.priority = other.priority
+            self.version = other.version
+            self._log = other._log
+            self._benchmark = other._benchmark
+            self.wildcard_names = set(other.wildcard_names)
+            self.lineno = other.lineno
+            self.snakefile = other.snakefile
+            self.run_func = other.run_func
+            self.shellcmd = other.shellcmd
+            self.norun = other.norun
+
+    def dynamic_branch(self, wildcards, input=True):
+        def get_io(rule):
+            return (rule.input, rule.dynamic_input) if input else (
+                rule.output, rule.dynamic_output
+            )
+
+        io, dynamic_io = get_io(self)
+
+        branch = Rule(self)
+        io_, dynamic_io_ = get_io(branch)
+
+        expansion = defaultdict(list)
+        for i, f in enumerate(io):
+            if f in dynamic_io:
+                try:
+                    for e in reversed(expand(f, zip, **wildcards)):
+                        expansion[i].append(IOFile(e, rule=branch))
+                except KeyError:
+                    return None
+
+        # replace the dynamic files with the expanded files
+        replacements = [(i, io[i], e)
+                        for i, e in reversed(list(expansion.items()))]
+        for i, old, exp in replacements:
+            dynamic_io_.remove(old)
+            io_.insert_items(i, exp)
+
+        if not input:
+            for i, old, exp in replacements:
+                if old in branch.temp_output:
+                    branch.temp_output.discard(old)
+                    branch.temp_output.update(exp)
+                if old in branch.protected_output:
+                    branch.protected_output.discard(old)
+                    branch.protected_output.update(exp)
+                if old in branch.touch_output:
+                    branch.touch_output.discard(old)
+                    branch.touch_output.update(exp)
+
+            branch.wildcard_names.clear()
+            non_dynamic_wildcards = dict((name, values[0])
+                                         for name, values in wildcards.items()
+                                         if len(set(values)) == 1)
+            # TODO have a look into how to concretize dependencies here
+            (branch._input, branch._output, branch._params, branch._log,
+             branch._benchmark, _, branch.dependencies
+             ) = branch.expand_wildcards(wildcards=non_dynamic_wildcards)
+            return branch, non_dynamic_wildcards
+        return branch
+
+    def has_wildcards(self):
+        """
+        Return True if rule contains wildcards.
+        """
+        return bool(self.wildcard_names)
+
+    @property
+    def benchmark(self):
+        return self._benchmark
+
+    @benchmark.setter
+    def benchmark(self, benchmark):
+        self._benchmark = IOFile(benchmark, rule=self)
+
+    @property
+    def input(self):
+        return self._input
+
+    def set_input(self, *input, **kwinput):
+        """
+        Add a list of input files. Recursive lists are flattened.
+
+        Arguments
+        input -- the list of input files
+        """
+        for item in input:
+            self._set_inoutput_item(item)
+        for name, item in kwinput.items():
+            self._set_inoutput_item(item, name=name)
+
+    @property
+    def output(self):
+        return self._output
+
+    @property
+    def products(self):
+        products = list(self.output)
+        if self.benchmark:
+            products.append(self.benchmark)
+        return products
+
+    def set_output(self, *output, **kwoutput):
+        """
+        Add a list of output files. Recursive lists are flattened.
+
+        Arguments
+        output -- the list of output files
+        """
+        for item in output:
+            self._set_inoutput_item(item, output=True)
+        for name, item in kwoutput.items():
+            self._set_inoutput_item(item, output=True, name=name)
+
+        for item in self.output:
+            if self.dynamic_output and item not in self.dynamic_output:
+                raise SyntaxError(
+                    "A rule with dynamic output may not define any "
+                    "non-dynamic output files.")
+            wildcards = item.get_wildcard_names()
+            if self.wildcard_names:
+                if self.wildcard_names != wildcards:
+                    raise SyntaxError(
+                        "Not all output files of rule {} "
+                        "contain the same wildcards.".format(self.name))
+            else:
+                self.wildcard_names = wildcards
+
+    def _set_inoutput_item(self, item, output=False, name=None):
+        """
+        Set an item to be input or output.
+
+        Arguments
+        item   -- the item
+        output -- whether the item is an output (True) or input (False) file
+        name   -- an optional name for the item
+        """
+        inoutput = self.output if output else self.input
+        if isinstance(item, str):
+            # add the rule to the dependencies
+            if isinstance(item, _IOFile):
+                self.dependencies[item] = item.rule
+            _item = IOFile(item, rule=self)
+            if is_flagged(item, "temp"):
+                if not output:
+                    raise SyntaxError("Only output files may be temporary")
+                self.temp_output.add(_item)
+            if is_flagged(item, "protected"):
+                if not output:
+                    raise SyntaxError("Only output files may be protected")
+                self.protected_output.add(_item)
+            if is_flagged(item, "touch"):
+                if not output:
+                    raise SyntaxError(
+                        "Only output files may be marked for touching.")
+                self.touch_output.add(_item)
+            if is_flagged(item, "dynamic"):
+                if output:
+                    self.dynamic_output.add(_item)
+                else:
+                    self.dynamic_input.add(_item)
+            if is_flagged(item, "subworkflow"):
+                if output:
+                    raise SyntaxError(
+                        "Only input files may refer to a subworkflow")
+                else:
+                    # record the workflow this item comes from
+                    self.subworkflow_input[_item] = item.flags["subworkflow"]
+            inoutput.append(_item)
+            if name:
+                inoutput.add_name(name)
+        elif callable(item):
+            if output:
+                raise SyntaxError(
+                    "Only input files can be specified as functions")
+            inoutput.append(item)
+            if name:
+                inoutput.add_name(name)
+        else:
+            try:
+                start = len(inoutput)
+                for i in item:
+                    self._set_inoutput_item(i, output=output)
+                if name:
+                    # if the list was named, make it accessible
+                    inoutput.set_name(name, start, end=len(inoutput))
+            except TypeError:
+                raise SyntaxError(
+                    "Input and output files have to be specified as strings or lists of strings.")
+
+    @property
+    def params(self):
+        return self._params
+
+    def set_params(self, *params, **kwparams):
+        for item in params:
+            self._set_params_item(item)
+        for name, item in kwparams.items():
+            self._set_params_item(item, name=name)
+
+    def _set_params_item(self, item, name=None):
+        if isinstance(item, str) or callable(item):
+            self.params.append(item)
+            if name:
+                self.params.add_name(name)
+        else:
+            try:
+                start = len(self.params)
+                for i in item:
+                    self._set_params_item(i)
+                if name:
+                    self.params.set_name(name, start, end=len(self.params))
+            except TypeError:
+                raise SyntaxError("Params have to be specified as strings.")
+
+    @property
+    def log(self):
+        return self._log
+
+    def set_log(self, *logs, **kwlogs):
+        for item in logs:
+            self._set_log_item(item)
+        for name, item in kwlogs.items():
+            self._set_log_item(item, name=name)
+
+    def _set_log_item(self, item, name=None):
+        if isinstance(item, str) or callable(item):
+            self.log.append(IOFile(item,
+                                   rule=self)
+                            if isinstance(item, str) else item)
+            if name:
+                self.log.add_name(name)
+        else:
+            try:
+                start = len(self.log)
+                for i in item:
+                    self._set_log_item(i)
+                if name:
+                    self.log.set_name(name, start, end=len(self.log))
+            except TypeError:
+                raise SyntaxError("Log files have to be specified as strings.")
+
+    def expand_wildcards(self, wildcards=None):
+        """
+        Expand wildcards depending on the requested output
+        or given wildcards dict.
+        """
+
+        def concretize_iofile(f, wildcards):
+            if not isinstance(f, _IOFile):
+                return IOFile(f, rule=self)
+            else:
+                return f.apply_wildcards(wildcards,
+                                         fill_missing=f in self.dynamic_input,
+                                         fail_dynamic=self.dynamic_output)
+
+        def _apply_wildcards(newitems, olditems, wildcards, wildcards_obj,
+                             concretize=apply_wildcards,
+                             ruleio=None):
+            for name, item in olditems.allitems():
+                start = len(newitems)
+                is_iterable = True
+                if callable(item):
+                    try:
+                        item = item(wildcards_obj)
+                    except BaseException as e:
+                        raise InputFunctionException(e, rule=self)
+                    if not_iterable(item):
+                        item = [item]
+                        is_iterable = False
+                    for item_ in item:
+                        if not isinstance(item_, str):
+                            raise RuleException(
+                                "Input function did not return str or list of str.",
+                                rule=self)
+                        concrete = concretize(item_, wildcards)
+                        newitems.append(concrete)
+                        if ruleio is not None:
+                            ruleio[concrete] = item_
+                else:
+                    if not_iterable(item):
+                        item = [item]
+                        is_iterable = False
+                    for item_ in item:
+                        concrete = concretize(item_, wildcards)
+                        newitems.append(concrete)
+                        if ruleio is not None:
+                            ruleio[concrete] = item_
+                if name:
+                    newitems.set_name(
+                        name, start,
+                        end=len(newitems) if is_iterable else None)
+
+        if wildcards is None:
+            wildcards = dict()
+        missing_wildcards = self.wildcard_names - set(wildcards.keys())
+
+        if missing_wildcards:
+            raise RuleException(
+                "Could not resolve wildcards in rule {}:\n{}".format(
+                    self.name, "\n".join(self.wildcard_names)),
+                lineno=self.lineno,
+                snakefile=self.snakefile)
+
+        ruleio = dict()
+
+        try:
+            input = InputFiles()
+            wildcards_obj = Wildcards(fromdict=wildcards)
+            _apply_wildcards(input, self.input, wildcards, wildcards_obj,
+                             concretize=concretize_iofile,
+                             ruleio=ruleio)
+
+            params = Params()
+            _apply_wildcards(params, self.params, wildcards, wildcards_obj)
+
+            output = OutputFiles(o.apply_wildcards(wildcards)
+                                 for o in self.output)
+            output.take_names(self.output.get_names())
+
+            dependencies = {
+                None if f is None else f.apply_wildcards(wildcards): rule
+                for f, rule in self.dependencies.items()
+            }
+
+            ruleio.update(dict((f, f_) for f, f_ in zip(output, self.output)))
+
+            log = Log()
+            _apply_wildcards(log, self.log, wildcards, wildcards_obj,
+                             concretize=concretize_iofile)
+
+            benchmark = self.benchmark.apply_wildcards(
+                wildcards) if self.benchmark else None
+            return input, output, params, log, benchmark, ruleio, dependencies
+        except WildcardError as ex:
+            # this can only happen if an input, params, log or benchmark
+            # file contains an unresolved wildcard.
+            raise RuleException(
+                "Wildcards in input, params, log or benchmark file of rule {} cannot be "
+                "determined from output files:\n{}".format(self, str(ex)),
+                lineno=self.lineno,
+                snakefile=self.snakefile)
+
+    def is_producer(self, requested_output):
+        """
+        Returns True if this rule is a producer of the requested output.
+        """
+        try:
+            for o in self.products:
+                if o.match(requested_output):
+                    return True
+            return False
+        except sre_constants.error as ex:
+            raise IOFileException("{} in wildcard statement".format(ex),
+                                  snakefile=self.snakefile,
+                                  lineno=self.lineno)
+        except ValueError as ex:
+            raise IOFileException("{}".format(ex),
+                                  snakefile=self.snakefile,
+                                  lineno=self.lineno)
+
+    def get_wildcards(self, requested_output):
+        """
+        Return the wildcard dictionary obtained by matching this rule's
+        output patterns against the requested concrete file.
+
+        Arguments
+        requested_output -- a concrete filepath
+        """
+        if requested_output is None:
+            return dict()
+        bestmatchlen = 0
+        bestmatch = None
+
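+        # among all matching output patterns, prefer the one whose
+        # combined wildcard values are shortest, i.e. the most specific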
+        for o in self.products:
+            match = o.match(requested_output)
+            if match:
+                l = self.get_wildcard_len(match.groupdict())
+                if not bestmatch or bestmatchlen > l:
+                    bestmatch = match.groupdict()
+                    bestmatchlen = l
+        return bestmatch
+
+    @staticmethod
+    def get_wildcard_len(wildcards):
+        """
+        Return the length of the given wildcard values.
+
+        Arguments
+        wildcards -- a dict of wildcards
+        """
+        return sum(map(len, wildcards.values()))
+
+    def __lt__(self, rule):
+        comp = self.workflow._ruleorder.compare(self, rule)
+        return comp < 0
+
+    def __gt__(self, rule):
+        comp = self.workflow._ruleorder.compare(self, rule)
+        return comp > 0
+
+    def __str__(self):
+        return self.name
+
+    def __hash__(self):
+        return self.name.__hash__()
+
+    def __eq__(self, other):
+        return self.name == other.name
+
+
+class Ruleorder:
+    def __init__(self):
+        self.order = list()
+
+    def add(self, *rulenames):
+        """
+        Records the order of given rules as rule1 > rule2 > rule3, ...
+        """
+        self.order.append(list(rulenames))
+
+    def compare(self, rule1, rule2):
+        """
+        Return 1 if rule1 has a higher priority than rule2, -1 if rule2
+        has a higher priority, and 0 otherwise.
+        """
+        # try the last clause first,
+        # i.e. clauses added later overwrite those before.
+        for clause in reversed(self.order):
+            try:
+                i = clause.index(rule1.name)
+                j = clause.index(rule2.name)
+                # rules with higher priority should have a smaller index
+                comp = j - i
+                if comp < 0:
+                    comp = -1
+                elif comp > 0:
+                    comp = 1
+                return comp
+            except ValueError:
+                pass
+
+        # if no ruleorder is given, prefer the rule without wildcards
+        wildcard_cmp = rule2.has_wildcards() - rule1.has_wildcards()
+        if wildcard_cmp != 0:
+            return wildcard_cmp
+
+        return 0
+
+    def __iter__(self):
+        return self.order.__iter__()
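+
+
+if __name__ == "__main__":
+    # Editor's sketch (illustrative only, not upstream code): how a
+    # "ruleorder: bwa > bowtie" clause is resolved. compare() only needs
+    # .name and .has_wildcards(), so hypothetical stand-ins suffice.
+    class _FakeRule:
+        def __init__(self, name, wildcards=False):
+            self.name = name
+            self._wildcards = wildcards
+
+        def has_wildcards(self):
+            return self._wildcards
+
+    order = Ruleorder()
+    order.add("bwa", "bowtie")  # records bwa > bowtie
+    print(order.compare(_FakeRule("bwa"), _FakeRule("bowtie")))  # 1
+    # without a matching clause, the rule without wildcards wins:
+    print(order.compare(_FakeRule("x"), _FakeRule("y", wildcards=True)))  # 1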
diff --git a/snakemake/scheduler.py b/snakemake/scheduler.py
new file mode 100644
index 0000000..abfdd52
--- /dev/null
+++ b/snakemake/scheduler.py
@@ -0,0 +1,411 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os, signal
+import threading
+import multiprocessing
+import operator
+from functools import partial
+from collections import defaultdict
+from itertools import chain, accumulate
+
+from snakemake.executors import DryrunExecutor, TouchExecutor, CPUExecutor
+from snakemake.executors import GenericClusterExecutor, SynchronousClusterExecutor, DRMAAExecutor
+
+from snakemake.logging import logger
+
+
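+# e.g. cumsum([1, 2, 3]) == [0, 1, 3, 6]; the leading zero lets the
+# scheduler index cumulative rewards by the number of jobs selected.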
+def cumsum(iterable, zero=[0]):
+    return list(chain(zero, accumulate(iterable)))
+
+
+_ERROR_MSG_FINAL = ("Exiting because a job execution failed. "
+                    "Look above for error message")
+
+
+class JobScheduler:
+    def __init__(self, workflow, dag, cores,
+                 dryrun=False,
+                 touch=False,
+                 cluster=None,
+                 cluster_config=None,
+                 cluster_sync=None,
+                 drmaa=None,
+                 jobname=None,
+                 immediate_submit=False,
+                 quiet=False,
+                 printreason=False,
+                 printshellcmds=False,
+                 keepgoing=False,
+                 latency_wait=3,
+                 benchmark_repeats=1,
+                 greediness=1.0):
+        """ Create a new instance of KnapsackJobScheduler. """
+        self.cluster = cluster
+        self.cluster_config = cluster_config
+        self.cluster_sync = cluster_sync
+        self.dag = dag
+        self.workflow = workflow
+        self.dryrun = dryrun
+        self.quiet = quiet
+        self.keepgoing = keepgoing
+        self.running = set()
+        self.failed = set()
+        self.finished_jobs = 0
+        self.greediness = greediness
+        self.select_by_rule = False
+        if not self.select_by_rule:
+            self.greediness = 1
+
+        self.resources = dict(self.workflow.global_resources)
+
+        use_threads = os.name != "posix"
+        if not use_threads:
+            self._open_jobs = multiprocessing.Event()
+            self._lock = multiprocessing.Lock()
+        else:
+            self._open_jobs = threading.Event()
+            self._lock = threading.Lock()
+        self._errors = False
+        self._finished = False
+        self._job_queue = None
+        self._submit_callback = self._noop
+        self._finish_callback = partial(
+            self._proceed,
+            update_dynamic=not self.dryrun,
+            print_progress=not self.quiet and not self.dryrun)
+
+        if dryrun:
+            self._executor = DryrunExecutor(workflow, dag,
+                                            printreason=printreason,
+                                            quiet=quiet,
+                                            printshellcmds=printshellcmds,
+                                            latency_wait=latency_wait)
+            self.rule_reward = self.dryrun_rule_reward
+            self.job_reward = self.dryrun_job_reward
+        elif touch:
+            self._executor = TouchExecutor(workflow, dag,
+                                           printreason=printreason,
+                                           quiet=quiet,
+                                           printshellcmds=printshellcmds,
+                                           latency_wait=latency_wait)
+        elif cluster or cluster_sync or (drmaa is not None):
+            # TODO properly set cores
+            workers = min(sum(1 for _ in dag.local_needrun_jobs),
+                          multiprocessing.cpu_count())
+            self._local_executor = CPUExecutor(
+                workflow, dag, workers,
+                printreason=printreason,
+                quiet=quiet,
+                printshellcmds=printshellcmds,
+                threads=use_threads,
+                latency_wait=latency_wait,
+                benchmark_repeats=benchmark_repeats)
+            self.run = self.run_cluster_or_local
+            if cluster or cluster_sync:
+                constructor = SynchronousClusterExecutor if cluster_sync \
+                              else GenericClusterExecutor
+                self._executor = constructor(
+                    workflow, dag, None,
+                    submitcmd=(cluster or cluster_sync),
+                    cluster_config=cluster_config,
+                    jobname=jobname,
+                    printreason=printreason,
+                    quiet=quiet,
+                    printshellcmds=printshellcmds,
+                    latency_wait=latency_wait,
+                    benchmark_repeats=benchmark_repeats, )
+                if immediate_submit:
+                    self.rule_reward = self.dryrun_rule_reward
+                    self.job_reward = self.dryrun_job_reward
+                    self._submit_callback = partial(self._proceed,
+                                                    update_dynamic=False,
+                                                    print_progress=False,
+                                                    update_resources=False, )
+            else:
+                self._executor = DRMAAExecutor(
+                    workflow, dag, None,
+                    drmaa_args=drmaa,
+                    jobname=jobname,
+                    printreason=printreason,
+                    quiet=quiet,
+                    printshellcmds=printshellcmds,
+                    latency_wait=latency_wait,
+                    benchmark_repeats=benchmark_repeats,
+                    cluster_config=cluster_config, )
+        else:
+            # local execution or execution of cluster job
+            # calculate how many parallel workers the executor shall spawn
+            # each job has at least one thread, hence we need to have
+            # the minimum of given cores and number of jobs
+            workers = min(cores, len(dag))
+            self._executor = CPUExecutor(workflow, dag, workers,
+                                         printreason=printreason,
+                                         quiet=quiet,
+                                         printshellcmds=printshellcmds,
+                                         threads=use_threads,
+                                         latency_wait=latency_wait,
+                                         benchmark_repeats=benchmark_repeats, )
+        self._open_jobs.set()
+
+    @property
+    def stats(self):
+        try:
+            return self._executor.stats
+        except AttributeError:
+            raise TypeError("Executor does not support stats")
+
+    def candidate(self, job):
+        """ Return whether a job is a candidate to be executed. """
+        return (job not in self.running and job not in self.failed and
+                (self.dryrun or
+                 (not job.dynamic_input and not self.dag.dynamic(job))))
+
+    @property
+    def open_jobs(self):
+        """ Return open jobs. """
+        return filter(self.candidate, list(self.dag.ready_jobs))
+
+    def schedule(self):
+        """ Schedule jobs that are ready, maximizing cpu usage. """
+        try:
+            while True:
+                # workaround so that waiting does not prevent keyboard interrupts
+                while not self._open_jobs.wait(1):
+                    pass
+
+                self._open_jobs.clear()
+                if not self.keepgoing and self._errors:
+                    logger.info("Will exit after finishing "
+                                "currently running jobs.")
+                    if not self.running:
+                        self._executor.shutdown()
+                        logger.error(_ERROR_MSG_FINAL)
+                        return False
+                    continue
+                if not any(self.open_jobs) and not self.running:
+                    self._executor.shutdown()
+                    if self._errors:
+                        logger.error(_ERROR_MSG_FINAL)
+                    return not self._errors
+
+                needrun = list(self.open_jobs)
+                if not needrun:
+                    continue
+
+                logger.debug("Resources before job selection: {}".format(
+                    self.resources))
+                logger.debug("Ready jobs ({}):\n\t".format(len(needrun)) +
+                             "\n\t".join(map(str, needrun)))
+
+                run = self.job_selector(needrun)
+                logger.debug("Selected jobs ({}):\n\t".format(len(run)) +
+                             "\n\t".join(map(str, run)))
+                self.running.update(run)
+                logger.debug(
+                    "Resources after job selection: {}".format(self.resources))
+                for job in run:
+                    self.run(job)
+        except (KeyboardInterrupt, SystemExit):
+            logger.info("Terminating processes on user request.")
+            self._executor.cancel()
+            for job in self.running:
+                job.cleanup()
+            return False
+
+    def run(self, job):
+        self._executor.run(job,
+                           callback=self._finish_callback,
+                           submit_callback=self._submit_callback,
+                           error_callback=self._error)
+
+    def run_cluster_or_local(self, job):
+        executor = self._local_executor if self.workflow.is_local(
+            job.rule) else self._executor
+        executor.run(job,
+                     callback=self._finish_callback,
+                     submit_callback=self._submit_callback,
+                     error_callback=self._error)
+
+    def _noop(self, job):
+        pass
+
+    def _free_resources(self, job):
+        for name, value in job.resources.items():
+            if name in self.resources:
+                value = self.calc_resource(name, value)
+                self.resources[name] += value
+                logger.debug("Releasing {} {} (now {}).".format(
+                    value, name, self.resources[name]))
+
+    def _proceed(self, job,
+                 update_dynamic=True,
+                 print_progress=False,
+                 update_resources=True):
+        """ Do stuff after job is finished. """
+        with self._lock:
+            if update_resources:
+                self.finished_jobs += 1
+                self.running.remove(job)
+                self._free_resources(job)
+
+            self.dag.finish(job, update_dynamic=update_dynamic)
+
+            logger.job_finished(jobid=self.dag.jobid(job))
+
+            if print_progress:
+                self.progress()
+
+            if any(self.open_jobs) or not self.running:
+                # go on scheduling if open jobs are ready or no job is running
+                self._open_jobs.set()
+
+    def _error(self, job):
+        """ Clear jobs and stop the workflow. """
+        with self._lock:
+            self._errors = True
+            self.running.remove(job)
+            self.failed.add(job)
+            self._free_resources(job)
+            if self.keepgoing:
+                logger.info("Job failed, going on with independent jobs.")
+            self._open_jobs.set()
+
+    def job_selector(self, jobs):
+        """
+        Using the greedy heuristic from
+        "A Greedy Algorithm for the General Multidimensional Knapsack
+Problem", Akcay, Li, Xu, Annals of Operations Research, 2012
+
+        Args:
+            jobs (list):    list of jobs
+        """
+        with self._lock:
+            if self.select_by_rule:
+                # solve over the rules instead of jobs (far fewer items, but might miss the best solution)
+                # each rule is an item with as many copies as jobs
+                _jobs = defaultdict(list)
+                for job in jobs:
+                    _jobs[job.rule].append(job)
+
+                jobs = _jobs
+
+                # sort the jobs by priority
+                for _jobs in jobs.values():
+                    _jobs.sort(key=self.dag.priority, reverse=True)
+                rules = list(jobs)
+
+                # Step 1: initialization
+                n = len(rules)
+                x = [0] * n  # selected jobs of each rule
+                E = set(range(n))  # rules free to select
+                u = [len(jobs[rule]) for rule in rules]  # number of jobs left
+                a = list(map(self.rule_weight,
+                             rules))  # resource usage of rules
+                c = list(map(partial(self.rule_reward,
+                                     jobs=jobs),
+                             rules))  # matrix of cumulative rewards over jobs
+
+                def calc_reward():
+                    return [([(crit[x_j + y_j] - crit[x_j]) for crit in c_j] if
+                             j in E else [0] * len(c_j))
+                            for j, (c_j, y_j, x_j) in enumerate(zip(c, y, x))]
+            else:
+                # each job is an item with one copy (0-1 MDKP)
+                n = len(jobs)
+                x = [0] * n  # selected jobs
+                E = set(range(n))  # jobs still free to select
+                u = [1] * n
+                a = list(map(self.job_weight, jobs))  # resource usage of jobs
+                c = list(map(self.job_reward, jobs))  # job rewards
+
+                def calc_reward():
+                    return [c_j * y_j for c_j, y_j in zip(c, y)]
+
+            b = [self.resources[name]
+                 for name in self.workflow.global_resources
+                 ]  # resource capacities
+
+            while True:
+                # Step 2: compute effective capacities
+                y = [(min((min(u[j], b_i // a_j_i) if a_j_i > 0 else u[j])
+                          for b_i, a_j_i in zip(b, a[j]) if a_j_i) if j in E
+                      else 0) for j in range(n)]
+                if not any(y):
+                    break
+                y = [(max(1, int(self.greediness * y_j)) if y_j > 0 else 0)
+                     for y_j in y]
+
+                # Step 3: compute rewards on cumulative sums
+                reward = calc_reward()
+                j_sel = max(E, key=reward.__getitem__)  # argmax
+
+                # Step 4: batch increment
+                y_sel = y[j_sel]
+
+                # Step 5: update information
+                x[j_sel] += y_sel
+                b = [b_i - (a_j_i * y_sel) for b_i, a_j_i in zip(b, a[j_sel])]
+                u[j_sel] -= y_sel
+                if not u[j_sel] or self.greediness == 1:
+                    E.remove(j_sel)
+                if not E:
+                    break
+
+            if self.select_by_rule:
+                # the solution is the list of jobs selected from the selected rules
+                solution = list(chain(*[jobs[rules[j]][:x_]
+                                        for j, x_ in enumerate(x)]))
+            else:
+                solution = [job for job, sel in zip(jobs, x) if sel]
+            # update resources
+            for name, b_i in zip(self.workflow.global_resources, b):
+                self.resources[name] = b_i
+            return solution
+
+    def calc_resource(self, name, value):
+        return min(value, self.workflow.global_resources[name])
+
+    def rule_weight(self, rule):
+        res = rule.resources
+        return [self.calc_resource(name, res.get(name, 0))
+                for name in self.workflow.global_resources]
+
+    def rule_reward(self, rule, jobs=None):
+        jobs = jobs[rule]
+        return (self.priority_reward(jobs), self.downstream_reward(jobs),
+                cumsum([job.inputsize for job in jobs]))
+
+    def dryrun_rule_reward(self, rule, jobs=None):
+        jobs = jobs[rule]
+        return (self.priority_reward(jobs), self.downstream_reward(jobs),
+                [0] * (len(jobs) + 1))
+
+    def priority_reward(self, jobs):
+        return cumsum(self.dag.priorities(jobs))
+
+    def downstream_reward(self, jobs):
+        return cumsum(self.dag.downstream_sizes(jobs))
+
+    def thread_reward(self, jobs):
+        """ Thread-based reward for jobs. Using this maximizes core
+        saturation, but does not lead to faster computation in general."""
+        return cumsum([job.threads for job in jobs])
+
+    def job_weight(self, job):
+        res = job.resources_dict
+        return [self.calc_resource(name, res.get(name, 0))
+                for name in self.workflow.global_resources]
+
+    def job_reward(self, job):
+        return (self.dag.priority(job), self.dag.downstream_size(job),
+                job.inputsize)
+
+    def dryrun_job_reward(self, job):
+        return (self.dag.priority(job), self.dag.downstream_size(job))
+
+    def progress(self):
+        """ Display the progress. """
+        logger.progress(done=self.finished_jobs, total=len(self.dag))
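+
+
+if __name__ == "__main__":
+    # Editor's sketch (illustrative only, not upstream code): a toy,
+    # single-resource distillation of the greedy knapsack selection in
+    # job_selector. Jobs are taken in reward order while the remaining
+    # core capacity b allows it.
+    names = ["a", "b", "c"]
+    weights = [2, 3, 1]  # cores requested per job
+    rewards = [5, 4, 3]  # e.g. priority-derived rewards
+    b = 4  # available cores
+    selected = []
+    for j in sorted(range(len(names)), key=rewards.__getitem__, reverse=True):
+        if weights[j] <= b:
+            selected.append(names[j])
+            b -= weights[j]
+    print(selected)  # ['a', 'c'], using 3 of the 4 cores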
diff --git a/snakemake/shell.py b/snakemake/shell.py
new file mode 100644
index 0000000..95bc729
--- /dev/null
+++ b/snakemake/shell.py
@@ -0,0 +1,76 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import _io
+import sys
+import os
+import subprocess as sp
+
+from snakemake.utils import format
+from snakemake.logging import logger
+
+STDOUT = sys.stdout
+if not isinstance(sys.stdout, _io.TextIOWrapper):
+    # workaround for nosetest since it overwrites sys.stdout
+    # in a strange way that does not work with Popen
+    STDOUT = None
+
+
+class shell:
+    _process_args = {}
+    _process_prefix = ""
+
+    @classmethod
+    def executable(cls, cmd):
+        cls._process_args["executable"] = cmd
+
+    @classmethod
+    def prefix(cls, prefix):
+        cls._process_prefix = format(prefix, stepout=2)
+
+    def __new__(cls, cmd, *args,
+                async=False,
+                iterable=False,
+                read=False, **kwargs):
+        if "stepout" in kwargs:
+            raise KeyError("Argument stepout is not allowed in shell command.")
+        cmd = format(cmd, *args, stepout=2, **kwargs)
+
+        logger.shellcmd(cmd)
+
+        stdout = sp.PIPE if iterable or async or read else STDOUT
+
+        close_fds = sys.platform != 'win32'
+        proc = sp.Popen(cls._process_prefix + cmd,
+                        bufsize=-1,
+                        shell=True,
+                        stdout=stdout,
+                        close_fds=close_fds, **cls._process_args)
+
+        ret = None
+        if iterable:
+            return cls.iter_stdout(proc, cmd)
+        if read:
+            ret = proc.stdout.read()
+        elif async:
+            return proc
+        retcode = proc.wait()
+        if retcode:
+            raise sp.CalledProcessError(retcode, cmd)
+        return ret
+
+    @staticmethod
+    def iter_stdout(proc, cmd):
+        for l in proc.stdout:
+            yield l[:-1].decode()
+        retcode = proc.wait()
+        if retcode:
+            raise sp.CalledProcessError(retcode, cmd)
+
+
+if "SHELL" in os.environ:
+    shell.executable(os.environ["SHELL"])
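+
+
+if __name__ == "__main__":
+    # Editor's sketch (illustrative only, not upstream code): the three
+    # result modes of shell().
+    print(shell("echo hello", read=True))  # captured output: b'hello\n'
+    for line in shell("printf 'a\\nb\\n'", iterable=True):
+        print(line)  # decoded lines without the trailing newline: a, b
+    shell("true")  # no capture; raises CalledProcessError on failure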
diff --git a/snakemake/stats.py b/snakemake/stats.py
new file mode 100644
index 0000000..96292c9
--- /dev/null
+++ b/snakemake/stats.py
@@ -0,0 +1,78 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import time
+import csv
+import json
+from collections import defaultdict
+
+import snakemake.jobs
+
+fmt_time = time.ctime
+
+
+class Stats:
+    def __init__(self):
+        self.starttime = dict()
+        self.endtime = dict()
+
+    def report_job_start(self, job):
+        self.starttime[job] = time.time()
+
+    def report_job_end(self, job):
+        self.endtime[job] = time.time()
+
+    @property
+    def rule_stats(self):
+        runtimes = defaultdict(list)
+        for job, t in self.starttime.items():
+            runtimes[job.rule].append(self.endtime[job] - t)
+        for rule, runtimes in runtimes.items():
+            yield (rule, sum(runtimes) / len(runtimes), min(runtimes),
+                   max(runtimes))
+
+    @property
+    def file_stats(self):
+        for job, t in self.starttime.items():
+            for f in job.expanded_output:
+                start, stop = t, self.endtime[job]
+                yield f, fmt_time(start), fmt_time(stop), stop - start, job
+
+    @property
+    def overall_runtime(self):
+        if self.starttime and self.endtime:
+            return max(self.endtime.values()) - min(self.starttime.values())
+        else:
+            return 0
+
+    def to_json(self, path):
+        rule_stats = {
+            rule.name: {
+                "mean-runtime": mean_runtime,
+                "min-runtime": min_runtime,
+                "max-runtime": max_runtime
+            }
+            for rule, mean_runtime, min_runtime, max_runtime in self.rule_stats
+        }
+        file_stats = {
+            f: {
+                "start-time": start,
+                "stop-time": stop,
+                "duration": duration,
+                "priority": job.priority
+                if job.priority != snakemake.jobs.Job.HIGHEST_PRIORITY else
+                "highest",
+                "resources": job.resources_dict
+            }
+            for f, start, stop, duration, job in self.file_stats
+        }
+
+        with open(path, "w") as f:
+            json.dump({
+                "total_runtime": self.overall_runtime,
+                "rules": rule_stats,
+                "files": file_stats
+            }, f,
+                      indent=4)
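+
+
+# Editor's note: the JSON written by to_json() has roughly this shape
+# (illustrative values):
+#
+#     {
+#         "total_runtime": 12.3,
+#         "rules": {"map_reads": {"mean-runtime": 6.1,
+#                                 "min-runtime": 5.0,
+#                                 "max-runtime": 7.2}},
+#         "files": {"out/a.bam": {"start-time": "Sun Sep 13 14:00:00 2015",
+#                                 "stop-time": "Sun Sep 13 14:00:06 2015",
+#                                 "duration": 6.1,
+#                                 "priority": 0,
+#                                 "resources": {}}}
+#     }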
diff --git a/snakemake/utils.py b/snakemake/utils.py
new file mode 100644
index 0000000..0908a8d
--- /dev/null
+++ b/snakemake/utils.py
@@ -0,0 +1,220 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import os
+import json
+import re
+import inspect
+import textwrap
+from itertools import chain
+
+from snakemake.io import regex, Namedlist
+from snakemake.logging import logger
+from snakemake.exceptions import WorkflowError
+import snakemake
+
+
+def linecount(filename):
+    """Return the number of lines of given file.
+
+    Args:
+        filename (str): the path to the file
+    """
+    with open(filename) as f:
+        return sum(1 for l in f)
+
+
+def listfiles(pattern, restriction=None, omit_value=None):
+    """Yield a tuple of existing filepaths for the given pattern.
+
+    Wildcard values are yielded as the second tuple item.
+
+    Args:
+        pattern (str):       a filepattern. Wildcards are specified in snakemake syntax, e.g. "{id}.txt"
+        restriction (dict):  restrict to wildcard values given in this dictionary
+        omit_value (str):    wildcard value to omit
+
+    Yields:
+        tuple: The next file matching the pattern, and the corresponding wildcards object
+    """
+    pattern = os.path.normpath(pattern)
+    first_wildcard = re.search("{[^{]", pattern)
+    if first_wildcard:
+        dirname = os.path.dirname(pattern[:first_wildcard.start()])
+        if not dirname:
+            dirname = "."
+    else:
+        dirname = os.path.dirname(pattern)
+    pattern = re.compile(regex(pattern))
+    for dirpath, dirnames, filenames in os.walk(dirname):
+        for f in chain(filenames, dirnames):
+            if dirpath != ".":
+                f = os.path.normpath(os.path.join(dirpath, f))
+            match = re.match(pattern, f)
+            if match:
+                wildcards = Namedlist(fromdict=match.groupdict())
+                if restriction is not None:
+                    invalid = any(omit_value not in v and v != wildcards[k]
+                                  for k, v in restriction.items())
+                    if not invalid:
+                        yield f, wildcards
+                else:
+                    yield f, wildcards
+
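+# e.g. listfiles("data/{sample}.txt") might yield
+# ("data/A.txt", <Namedlist with sample="A">) for each matching file.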
+
+def makedirs(dirnames):
+    """Recursively create the given directory or directories without
+    reporting errors if they are present.
+    """
+    if isinstance(dirnames, str):
+        dirnames = [dirnames]
+    for dirname in dirnames:
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+
+
+def report(text, path,
+           stylesheet=os.path.join(os.path.dirname(__file__), "report.css"),
+           defaultenc="utf8",
+           template=None,
+           metadata=None, **files):
+    """Create an HTML report using python docutils.
+
+    Attention: This function needs Python docutils to be installed for the
+    Python installation you use with Snakemake.
+
+    All keywords not listed below are interpreted as paths to files that
+    shall be embedded into the document. The keywords will be available as
+    link targets in the text. E.g. append a file as keyword arg via F1=input[0]
+    and put a download link in the text like this:
+
+    .. code:: python
+
+        report('''
+        ==============
+        Report for ...
+        ==============
+
+        Some text. A link to an embedded file: F1_.
+
+        Further text.
+        ''', outputpath, F1=input[0])
+
+    Instead of specifying each file as a keyword arg, you can also expand
+    the input of your rule if it is completely named, e.g.:
+
+    .. code:: python
+
+        report('''
+        Some text...
+        ''', outputpath, **input)
+
+    Args:
+        text (str):         The "restructured text" as it is expected by python docutils.
+        path (str):         The path to the desired output file
+        stylesheet (str):   An optional path to a css file that defines the style of the document. This defaults to <your snakemake install>/report.css. Use the default to get a hint on how to create your own.
+        defaultenc (str):   The encoding that is reported to the browser for embedded text files, defaults to utf8.
+        template (str):     An optional path to a docutils HTML template.
+        metadata (str):     E.g. an optional author name or email address.
+
+    """
+    try:
+        import snakemake.report
+    except ImportError:
+        raise WorkflowError(
+            "Python 3 package docutils needs to be installed to use the report function.")
+    snakemake.report.report(text, path,
+                            stylesheet=stylesheet,
+                            defaultenc=defaultenc,
+                            template=template,
+                            metadata=metadata, **files)
+
+
+def R(code):
+    """Execute R code
+
+    This function executes the R code given as a string.
+    The function requires rpy2 to be installed.
+
+    Args:
+        code (str): R code to be executed
+    """
+    try:
+        import rpy2.robjects as robjects
+    except ImportError:
+        raise WorkflowError(
+            "Python 3 package rpy2 needs to be installed to use the R function.")
+    robjects.r(format(textwrap.dedent(code), stepout=2))
+
+
+def format(_pattern, *args, stepout=1, **kwargs):
+    """Format a pattern in Snakemake style.
+
+    This means that keywords embedded in braces are replaced by any variable
+    values that are available in the current namespace.
+    """
+
+    class SequenceFormatter:
+        def __init__(self, sequence):
+            self._sequence = sequence
+
+        def __getitem__(self, i):
+            return self._sequence[i]
+
+        def __str__(self):
+            return " ".join(self._sequence)
+
+    frame = inspect.currentframe().f_back
+    while stepout > 1:
+        if not frame.f_back:
+            break
+        frame = frame.f_back
+        stepout -= 1
+
+    variables = dict(frame.f_globals)
+    # add local variables from calling rule/function
+    variables.update(frame.f_locals)
+    variables.update(kwargs)
+    for key, value in list(variables.items()):
+        if type(value) in (list, tuple, set, frozenset):
+            variables[key] = SequenceFormatter(value)
+    try:
+        return _pattern.format(*args, **variables)
+    except KeyError as ex:
+        raise NameError("The name {} is unknown in this context. Please "
+                        "make sure that you defined that variable. "
+                        "Also note that braces not used for variable access "
+                        "have to be escaped by repeating them, "
+                        "i.e. {{{{print $1}}}}".format(str(ex)))
+
+
+class Unformattable:
+    def __init__(self, errormsg="This cannot be used for formatting"):
+        self.errormsg = errormsg
+
+    def __str__(self):
+        raise ValueError(self.errormsg)
+
+
+def read_job_properties(jobscript,
+                        prefix="# properties",
+                        pattern=re.compile("# properties = (.*)")):
+    """Read the job properties defined in a snakemake jobscript.
+
+    This function is a helper for writing custom wrappers for the
+    snakemake --cluster functionality. Applying this function to a
+    jobscript will return a dict containing information about the job.
+    """
+    with open(jobscript) as jobscript:
+        for m in map(pattern.match, jobscript):
+            if m:
+                return json.loads(m.group(1))
+
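+# e.g. for a jobscript containing the line
+#     # properties = {"rule": "map_reads", "threads": 8}
+# read_job_properties(path) returns {"rule": "map_reads", "threads": 8}.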
+
+def min_version(version):
+    """Require minimum snakemake version, raise workflow error if not met."""
+    import pkg_resources
+    if pkg_resources.parse_version(
+        snakemake.__version__) < pkg_resources.parse_version(version):
+        raise WorkflowError(
+            "Expecting Snakemake version {} or higher.".format(version))
diff --git a/snakemake/version.py b/snakemake/version.py
new file mode 100644
index 0000000..31b8f46
--- /dev/null
+++ b/snakemake/version.py
@@ -0,0 +1 @@
+__version__ = "3.4"
diff --git a/snakemake/workflow.py b/snakemake/workflow.py
new file mode 100644
index 0000000..4e98568
--- /dev/null
+++ b/snakemake/workflow.py
@@ -0,0 +1,729 @@
+__author__ = "Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import re
+import os
+import sys
+import signal
+import json
+import urllib
+from collections import OrderedDict
+from itertools import filterfalse, chain
+from functools import partial
+from operator import attrgetter
+
+from snakemake.logging import logger, format_resources, format_resource_names
+from snakemake.rules import Rule, Ruleorder
+from snakemake.exceptions import RuleException, CreateRuleException, \
+    UnknownRuleException, NoRulesException, print_exception, WorkflowError
+from snakemake.shell import shell
+from snakemake.dag import DAG
+from snakemake.scheduler import JobScheduler
+from snakemake.parser import parse
+import snakemake.io
+from snakemake.io import protected, temp, temporary, expand, dynamic, glob_wildcards, flag, not_iterable, touch
+from snakemake.persistence import Persistence
+
+
+class Workflow:
+    def __init__(self,
+                 snakefile=None,
+                 snakemakepath=None,
+                 jobscript=None,
+                 overwrite_shellcmd=None,
+                 overwrite_config=dict(),
+                 overwrite_workdir=None,
+                 overwrite_configfile=None,
+                 config_args=None,
+                 debug=False):
+        """
+        Create the controller.
+        """
+        self._rules = OrderedDict()
+        self.first_rule = None
+        self._workdir = None
+        self.overwrite_workdir = overwrite_workdir
+        self.workdir_init = os.path.abspath(os.curdir)
+        self._ruleorder = Ruleorder()
+        self._localrules = set()
+        self.linemaps = dict()
+        self.rule_count = 0
+        self.basedir = os.path.dirname(snakefile)
+        self.snakefile = os.path.abspath(snakefile)
+        self.snakemakepath = snakemakepath
+        self.included = []
+        self.included_stack = []
+        self.jobscript = jobscript
+        self.persistence = None
+        self.global_resources = None
+        self.globals = globals()
+        self._subworkflows = dict()
+        self.overwrite_shellcmd = overwrite_shellcmd
+        self.overwrite_config = overwrite_config
+        self.overwrite_configfile = overwrite_configfile
+        self.config_args = config_args
+        self._onsuccess = lambda log: None
+        self._onerror = lambda log: None
+        self.debug = debug
+
+        global config
+        config = dict()
+        config.update(self.overwrite_config)
+
+        global rules
+        rules = Rules()
+
+    @property
+    def subworkflows(self):
+        return self._subworkflows.values()
+
+    @property
+    def rules(self):
+        return self._rules.values()
+
+    @property
+    def concrete_files(self):
+        return (
+            file
+            for rule in self.rules for file in chain(rule.input, rule.output)
+            if not callable(file) and not file.contains_wildcard()
+        )
+
+    def check(self):
+        for clause in self._ruleorder:
+            for rulename in clause:
+                if not self.is_rule(rulename):
+                    raise UnknownRuleException(
+                        rulename,
+                        prefix="Error in ruleorder definition.")
+
+    def add_rule(self, name=None, lineno=None, snakefile=None):
+        """
+        Add a rule.
+        """
+        if name is None:
+            name = str(len(self._rules) + 1)
+        if self.is_rule(name):
+            raise CreateRuleException(
+                "The name {} is already used by another rule".format(name))
+        rule = Rule(name, self, lineno=lineno, snakefile=snakefile)
+        self._rules[rule.name] = rule
+        self.rule_count += 1
+        if not self.first_rule:
+            self.first_rule = rule.name
+        return name
+
+    def is_rule(self, name):
+        """
+        Return True if name is the name of a rule.
+
+        Arguments
+        name -- a name
+        """
+        return name in self._rules
+
+    def get_rule(self, name):
+        """
+        Get rule by name.
+
+        Arguments
+        name -- the name of the rule
+        """
+        if not self._rules:
+            raise NoRulesException()
+        if name not in self._rules:
+            raise UnknownRuleException(name)
+        return self._rules[name]
+
+    def list_rules(self, only_targets=False):
+        rules = self.rules
+        if only_targets:
+            rules = filterfalse(Rule.has_wildcards, rules)
+        for rule in rules:
+            logger.rule_info(name=rule.name, docstring=rule.docstring)
+
+    def list_resources(self):
+        for resource in set(
+            resource for rule in self.rules for resource in rule.resources):
+            if resource not in "_cores _nodes".split():
+                logger.info(resource)
+
+    def is_local(self, rule):
+        return rule.name in self._localrules or rule.norun
+
+    def execute(self,
+                targets=None,
+                dryrun=False,
+                touch=False,
+                cores=1,
+                nodes=1,
+                forcetargets=False,
+                forceall=False,
+                forcerun=None,
+                prioritytargets=None,
+                quiet=False,
+                keepgoing=False,
+                printshellcmds=False,
+                printreason=False,
+                printdag=False,
+                cluster=None,
+                cluster_config=None,
+                cluster_sync=None,
+                jobname=None,
+                immediate_submit=False,
+                ignore_ambiguity=False,
+                printrulegraph=False,
+                printd3dag=False,
+                drmaa=None,
+                stats=None,
+                force_incomplete=False,
+                ignore_incomplete=False,
+                list_version_changes=False,
+                list_code_changes=False,
+                list_input_changes=False,
+                list_params_changes=False,
+                summary=False,
+                detailed_summary=False,
+                latency_wait=3,
+                benchmark_repeats=3,
+                wait_for_files=None,
+                nolock=False,
+                unlock=False,
+                resources=None,
+                notemp=False,
+                nodeps=False,
+                cleanup_metadata=None,
+                subsnakemake=None,
+                updated_files=None,
+                keep_target_files=False,
+                allowed_rules=None,
+                greediness=1.0):
+
+        self.global_resources = dict() if resources is None else resources
+        self.global_resources["_cores"] = cores
+        self.global_resources["_nodes"] = nodes
+
+        def rules(items):
+            return map(self._rules.__getitem__, filter(self.is_rule, items))
+
+        if keep_target_files:
+
+            def files(items):
+                return filterfalse(self.is_rule, items)
+        else:
+
+            def files(items):
+                return map(os.path.relpath, filterfalse(self.is_rule, items))
+
+        if not targets:
+            targets = ([self.first_rule]
+                       if self.first_rule is not None else list())
+        if prioritytargets is None:
+            prioritytargets = list()
+        if forcerun is None:
+            forcerun = list()
+
+        priorityrules = set(rules(prioritytargets))
+        priorityfiles = set(files(prioritytargets))
+        forcerules = set(rules(forcerun))
+        forcefiles = set(files(forcerun))
+        targetrules = set(chain(rules(targets),
+                                filterfalse(Rule.has_wildcards, priorityrules),
+                                filterfalse(Rule.has_wildcards, forcerules)))
+        targetfiles = set(chain(files(targets), priorityfiles, forcefiles))
+        if forcetargets:
+            forcefiles.update(targetfiles)
+            forcerules.update(targetrules)
+
+        rules = self.rules
+        if allowed_rules:
+            rules = [rule for rule in rules if rule.name in set(allowed_rules)]
+
+        if wait_for_files is not None:
+            try:
+                snakemake.io.wait_for_files(wait_for_files,
+                                            latency_wait=latency_wait)
+            except IOError as e:
+                logger.error(str(e))
+                return False
+
+        dag = DAG(
+            self, rules,
+            dryrun=dryrun,
+            targetfiles=targetfiles,
+            targetrules=targetrules,
+            forceall=forceall,
+            forcefiles=forcefiles,
+            forcerules=forcerules,
+            priorityfiles=priorityfiles,
+            priorityrules=priorityrules,
+            ignore_ambiguity=ignore_ambiguity,
+            force_incomplete=force_incomplete,
+            ignore_incomplete=ignore_incomplete or printdag or printrulegraph,
+            notemp=notemp)
+
+        self.persistence = Persistence(
+            nolock=nolock,
+            dag=dag,
+            warn_only=dryrun or printrulegraph or printdag or summary or
+            list_version_changes or list_code_changes or list_input_changes or
+            list_params_changes)
+
+        if cleanup_metadata:
+            for f in cleanup_metadata:
+                self.persistence.cleanup_metadata(f)
+            return True
+
+        dag.init()
+        dag.check_dynamic()
+
+        if unlock:
+            try:
+                self.persistence.cleanup_locks()
+                logger.info("Unlocking working directory.")
+                return True
+            except IOError:
+                logger.error("Error: Unlocking the directory {} failed. Maybe "
+                             "you don't have the permissions?")
+                return False
+        try:
+            self.persistence.lock()
+        except IOError:
+            logger.error(
+                "Error: Directory cannot be locked. Please make "
+                "sure that no other Snakemake process is trying to create "
+                "the same files in the following directory:\n{}\n"
+                "If you are sure that no other "
+                "instances of snakemake are running on this directory, "
+                "the remaining lock was likely caused by a kill signal or "
+                "a power loss. It can be removed with "
+                "the --unlock argument.".format(os.getcwd()))
+            return False
+
+        if self.subworkflows and not printdag and not printrulegraph:
+            # backup globals
+            globals_backup = dict(self.globals)
+            # execute subworkflows
+            for subworkflow in self.subworkflows:
+                subworkflow_targets = subworkflow.targets(dag)
+                updated = list()
+                if subworkflow_targets:
+                    logger.info(
+                        "Executing subworkflow {}.".format(subworkflow.name))
+                    if not subsnakemake(subworkflow.snakefile,
+                                        workdir=subworkflow.workdir,
+                                        targets=subworkflow_targets,
+                                        updated_files=updated):
+                        return False
+                    dag.updated_subworkflow_files.update(subworkflow.target(f)
+                                                         for f in updated)
+                else:
+                    logger.info("Subworkflow {}: Nothing to be done.".format(
+                        subworkflow.name))
+            if self.subworkflows:
+                logger.info("Executing main workflow.")
+            # restore globals
+            self.globals.update(globals_backup)
+
+        dag.check_incomplete()
+        dag.postprocess()
+
+        if nodeps:
+            missing_input = [f for job in dag.targetjobs for f in job.input
+                             if dag.needrun(job) and not os.path.exists(f)]
+            if missing_input:
+                logger.error(
+                    "Dependency resolution disabled (--nodeps) "
+                    "but missing input "
+                    "files detected. If this happens on a cluster, please make sure "
+                    "that you handle the dependencies yourself or turn of "
+                    "--immediate-submit. Missing input files:\n{}".format(
+                        "\n".join(missing_input)))
+                return False
+
+        updated_files.extend(f for job in dag.needrun_jobs for f in job.output)
+
+        if printd3dag:
+            dag.d3dag()
+            return True
+        elif printdag:
+            print(dag)
+            return True
+        elif printrulegraph:
+            print(dag.rule_dot())
+            return True
+        elif summary:
+            print("\n".join(dag.summary(detailed=False)))
+            return True
+        elif detailed_summary:
+            print("\n".join(dag.summary(detailed=True)))
+            return True
+        elif list_version_changes:
+            items = list(
+                chain(*map(self.persistence.version_changed, dag.jobs)))
+            if items:
+                print(*items, sep="\n")
+            return True
+        elif list_code_changes:
+            items = list(chain(*map(self.persistence.code_changed, dag.jobs)))
+            if items:
+                print(*items, sep="\n")
+            return True
+        elif list_input_changes:
+            items = list(chain(*map(self.persistence.input_changed, dag.jobs)))
+            if items:
+                print(*items, sep="\n")
+            return True
+        elif list_params_changes:
+            items = list(
+                chain(*map(self.persistence.params_changed, dag.jobs)))
+            if items:
+                print(*items, sep="\n")
+            return True
+
+        scheduler = JobScheduler(self, dag, cores,
+                                 dryrun=dryrun,
+                                 touch=touch,
+                                 cluster=cluster,
+                                 cluster_config=cluster_config,
+                                 cluster_sync=cluster_sync,
+                                 jobname=jobname,
+                                 immediate_submit=immediate_submit,
+                                 quiet=quiet,
+                                 keepgoing=keepgoing,
+                                 drmaa=drmaa,
+                                 printreason=printreason,
+                                 printshellcmds=printshellcmds,
+                                 latency_wait=latency_wait,
+                                 benchmark_repeats=benchmark_repeats,
+                                 greediness=greediness)
+
+        if not dryrun and not quiet:
+            if len(dag):
+                if cluster or cluster_sync or drmaa:
+                    logger.resources_info(
+                        "Provided cluster nodes: {}".format(nodes))
+                else:
+                    logger.resources_info("Provided cores: {}".format(cores))
+                    logger.resources_info("Rules claiming more threads will be scaled down.")
+                provided_resources = format_resources(resources)
+                if provided_resources:
+                    logger.resources_info(
+                        "Provided resources: " + provided_resources)
+                ignored_resources = format_resource_names(
+                    set(resource for job in dag.needrun_jobs for resource in
+                        job.resources_dict if resource not in resources))
+                if ignored_resources:
+                    logger.resources_info(
+                        "Ignored resources: " + ignored_resources)
+                logger.run_info("\n".join(dag.stats()))
+            else:
+                logger.info("Nothing to be done.")
+        if dryrun and not len(dag):
+            logger.info("Nothing to be done.")
+
+        success = scheduler.schedule()
+
+        if success:
+            if dryrun:
+                if not quiet and len(dag):
+                    logger.run_info("\n".join(dag.stats()))
+            elif stats:
+                scheduler.stats.to_json(stats)
+            if not dryrun:
+                self._onsuccess(logger.get_logfile())
+            return True
+        else:
+            if not dryrun:
+                self._onerror(logger.get_logfile())
+            return False
+
+    def include(self, snakefile,
+                overwrite_first_rule=False,
+                print_compilation=False,
+                overwrite_shellcmd=None):
+        """
+        Include a snakefile.
+        """
+        # check if snakefile is a path to the filesystem
+        if not urllib.parse.urlparse(snakefile).scheme:
+            if not os.path.isabs(snakefile) and self.included_stack:
+                current_path = os.path.dirname(self.included_stack[-1])
+                snakefile = os.path.join(current_path, snakefile)
+            snakefile = os.path.abspath(snakefile)
+        # else it could be a URL.
+        # in that case, do not modify the path, for the sake of clarity.
+
+        if snakefile in self.included:
+            logger.info("Multiple include of {} ignored".format(snakefile))
+            return
+        self.included.append(snakefile)
+        self.included_stack.append(snakefile)
+
+        global workflow
+
+        workflow = self
+
+        first_rule = self.first_rule
+        code, linemap = parse(snakefile,
+                              overwrite_shellcmd=self.overwrite_shellcmd)
+
+        if print_compilation:
+            print(code)
+
+        # insert the current directory into sys.path
+        # this allows importing modules from the workflow directory
+        sys.path.insert(0, os.path.dirname(snakefile))
+
+        self.linemaps[snakefile] = linemap
+        exec(compile(code, snakefile, "exec"), self.globals)
+        if not overwrite_first_rule:
+            self.first_rule = first_rule
+        self.included_stack.pop()
+
+    def onsuccess(self, func):
+        self._onsuccess = func
+
+    def onerror(self, func):
+        self._onerror = func
+
+    def workdir(self, workdir):
+        if self.overwrite_workdir is None:
+            if not os.path.exists(workdir):
+                os.makedirs(workdir)
+            self._workdir = workdir
+            os.chdir(workdir)
+
+    def configfile(self, jsonpath):
+        """ Update the global config with the given dictionary. """
+        global config
+        c = snakemake.io.load_configfile(jsonpath)
+        for key, val in c.items():
+            if key not in self.overwrite_config:
+                config[key] = val
+
+    def ruleorder(self, *rulenames):
+        self._ruleorder.add(*rulenames)
+
+    def subworkflow(self, name, snakefile=None, workdir=None):
+        sw = Subworkflow(self, name, snakefile, workdir)
+        self._subworkflows[name] = sw
+        self.globals[name] = sw.target
+
+    def localrules(self, *rulenames):
+        self._localrules.update(rulenames)
+
+    def rule(self, name=None, lineno=None, snakefile=None):
+        name = self.add_rule(name, lineno, snakefile)
+        rule = self.get_rule(name)
+
+        def decorate(ruleinfo):
+            if ruleinfo.input:
+                rule.set_input(*ruleinfo.input[0], **ruleinfo.input[1])
+            if ruleinfo.output:
+                rule.set_output(*ruleinfo.output[0], **ruleinfo.output[1])
+            if ruleinfo.params:
+                rule.set_params(*ruleinfo.params[0], **ruleinfo.params[1])
+            if ruleinfo.threads:
+                if not isinstance(ruleinfo.threads, int):
+                    raise RuleException("Threads value has to be an integer.",
+                                        rule=rule)
+                rule.resources["_cores"] = ruleinfo.threads
+            if ruleinfo.resources:
+                args, resources = ruleinfo.resources
+                if args:
+                    raise RuleException("Resources have to be named.")
+                if not all(map(lambda r: isinstance(r, int),
+                               resources.values())):
+                    raise RuleException(
+                        "Resources values have to be integers.",
+                        rule=rule)
+                rule.resources.update(resources)
+            if ruleinfo.priority:
+                if (not isinstance(ruleinfo.priority, int) and
+                    not isinstance(ruleinfo.priority, float)):
+                    raise RuleException("Priority values have to be numeric.",
+                                        rule=rule)
+                rule.priority = ruleinfo.priority
+            if ruleinfo.version:
+                rule.version = ruleinfo.version
+            if ruleinfo.log:
+                rule.set_log(*ruleinfo.log[0], **ruleinfo.log[1])
+            if ruleinfo.message:
+                rule.message = ruleinfo.message
+            if ruleinfo.benchmark:
+                rule.benchmark = ruleinfo.benchmark
+            rule.norun = ruleinfo.norun
+            rule.docstring = ruleinfo.docstring
+            rule.run_func = ruleinfo.func
+            rule.shellcmd = ruleinfo.shellcmd
+            ruleinfo.func.__name__ = "__{}".format(name)
+            self.globals[ruleinfo.func.__name__] = ruleinfo.func
+            setattr(rules, name, rule)
+            return ruleinfo.func
+
+        return decorate
+
+    def docstring(self, string):
+        def decorate(ruleinfo):
+            ruleinfo.docstring = string
+            return ruleinfo
+
+        return decorate
+
+    def input(self, *paths, **kwpaths):
+        def decorate(ruleinfo):
+            ruleinfo.input = (paths, kwpaths)
+            return ruleinfo
+
+        return decorate
+
+    def output(self, *paths, **kwpaths):
+        def decorate(ruleinfo):
+            ruleinfo.output = (paths, kwpaths)
+            return ruleinfo
+
+        return decorate
+
+    def params(self, *params, **kwparams):
+        def decorate(ruleinfo):
+            ruleinfo.params = (params, kwparams)
+            return ruleinfo
+
+        return decorate
+
+    def message(self, message):
+        def decorate(ruleinfo):
+            ruleinfo.message = message
+            return ruleinfo
+
+        return decorate
+
+    def benchmark(self, benchmark):
+        def decorate(ruleinfo):
+            ruleinfo.benchmark = benchmark
+            return ruleinfo
+
+        return decorate
+
+    def threads(self, threads):
+        def decorate(ruleinfo):
+            ruleinfo.threads = threads
+            return ruleinfo
+
+        return decorate
+
+    def resources(self, *args, **resources):
+        def decorate(ruleinfo):
+            ruleinfo.resources = (args, resources)
+            return ruleinfo
+
+        return decorate
+
+    def priority(self, priority):
+        def decorate(ruleinfo):
+            ruleinfo.priority = priority
+            return ruleinfo
+
+        return decorate
+
+    def version(self, version):
+        def decorate(ruleinfo):
+            ruleinfo.version = version
+            return ruleinfo
+
+        return decorate
+
+    def log(self, *logs, **kwlogs):
+        def decorate(ruleinfo):
+            ruleinfo.log = (logs, kwlogs)
+            return ruleinfo
+
+        return decorate
+
+    def shellcmd(self, cmd):
+        def decorate(ruleinfo):
+            ruleinfo.shellcmd = cmd
+            return ruleinfo
+
+        return decorate
+
+    def norun(self):
+        def decorate(ruleinfo):
+            ruleinfo.norun = True
+            return ruleinfo
+
+        return decorate
+
+    def run(self, func):
+        return RuleInfo(func)
+
+    @staticmethod
+    def _empty_decorator(f):
+        return f
+
+
+class RuleInfo:
+    def __init__(self, func):
+        self.func = func
+        self.shellcmd = None
+        self.norun = False
+        self.input = None
+        self.output = None
+        self.params = None
+        self.message = None
+        self.benchmark = None
+        self.threads = None
+        self.resources = None
+        self.priority = None
+        self.version = None
+        self.log = None
+        self.docstring = None
+
+
+class Subworkflow:
+    def __init__(self, workflow, name, snakefile, workdir):
+        self.workflow = workflow
+        self.name = name
+        self._snakefile = snakefile
+        self._workdir = workdir
+
+    @property
+    def snakefile(self):
+        if self._snakefile is None:
+            return os.path.abspath(os.path.join(self.workdir, "Snakefile"))
+        if not os.path.isabs(self._snakefile):
+            return os.path.abspath(os.path.join(self.workflow.basedir,
+                                                self._snakefile))
+        return self._snakefile
+
+    @property
+    def workdir(self):
+        workdir = "." if self._workdir is None else self._workdir
+        if not os.path.isabs(workdir):
+            return os.path.abspath(os.path.join(self.workflow.basedir,
+                                                workdir))
+        return workdir
+
+    def target(self, paths):
+        if not_iterable(paths):
+            return flag(os.path.join(self.workdir, paths), "subworkflow", self)
+        return [self.target(path) for path in paths]
+
+    def targets(self, dag):
+        return [f for job in dag.jobs for f in job.subworkflow_input
+                if job.subworkflow_input[f] is self]
+
+
+class Rules:
+    """ A namespace for rules so that they can be accessed via dot notation. """
+    pass
+
+
+def srcdir(path):
+    """Return the absolute path, relative to the source directory of the current Snakefile."""
+    if not workflow.included_stack:
+        return None
+    return os.path.join(os.path.dirname(workflow.included_stack[-1]), path)
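The decorator methods above are the target of Snakemake's Snakefile compiler: each rule in a Snakefile is translated into a chain of these decorators, applied bottom-up around the rule's run function. A minimal sketch of what a simple rule compiles to (hypothetical rule and file names; the generated code differs in detail):

    # rule copy:
    #     input: "a.txt"
    #     output: "b.txt"
    #     shell: "cp {input} {output}"
    #
    # compiles to roughly:
    @workflow.rule(name="copy", lineno=1, snakefile="Snakefile")
    @workflow.input("a.txt")
    @workflow.output("b.txt")
    @workflow.shellcmd("cp {input} {output}")
    @workflow.run
    def __copy(input, output, params, wildcards, threads, resources, log, version):
        shell("cp {input} {output}")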
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/knapsack/1.txt b/tests/knapsack/1.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/knapsack/2.txt b/tests/knapsack/2.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/knapsack/3.txt b/tests/knapsack/3.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/knapsack/Snakefile b/tests/knapsack/Snakefile
new file mode 100644
index 0000000..f25ac8b
--- /dev/null
+++ b/tests/knapsack/Snakefile
@@ -0,0 +1,25 @@
+# kate: syntax python;
+
+DATASETS = '1 2 3'.split()
+
+rule mapmature_all:
+	input: '{ds}.bam'.format(ds=ds) for ds in DATASETS
+
+rule gzip:
+	output: '{file}.gz'
+	input: '{file}'
+	shell: 'gzip < {input} > {output}'
+
+rule cutadapt:
+	output: fastq=temp('{ds}.fastq')
+	input: '{ds}.txt'
+	shell:
+		'echo hello > {output.fastq}'
+
+rule bwa_mature:
+	output: bam='{ds}.bam'
+	input: reads='{ds}.fastq.gz'
+	threads: 4
+	shell:
+		'echo starting with {threads} threads; sleep 10; touch {output.bam}'
+
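Note on the mapmature_all rule above: the bare generator expression works because it is passed as the sole argument to the input decorator. An equivalent, arguably clearer spelling uses expand():

    rule mapmature_all:
        input: expand('{ds}.bam', ds=DATASETS)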
diff --git a/tests/test01/Snakefile b/tests/test01/Snakefile
new file mode 100644
index 0000000..b943a2a
--- /dev/null
+++ b/tests/test01/Snakefile
@@ -0,0 +1,65 @@
+from snakemake.utils import min_version
+
+
+shell.executable("/bin/bash")
+shell.prefix("source ~/.bashrc; ")
+
+min_version("2.1")
+
+TEST = "abc"
+
+
+onsuccess:
+    print("Workflow finished")
+    print("Log:")
+    print(log)
+
+
+onerror:
+    print("Workflow failed")
+    print("Log:")
+    print(log)
+
+
+ruleorder: rule2 > rule4
+
+def testin(wildcards):
+	return "test.in"
+
+def version():
+	return "3.3"
+
+rule rule1:
+	# This rule creates an intermediate file.
+	input: 
+		'test.inter'
+	output: 'dir/test.out'
+	log:    a='log/logfile.log'
+	version: version()
+	threads: 4
+	shell: 
+		'if [ {threads} -ne 3 ]; then echo "This test has to be run with -j3 in order to succeed!"; exit 1; fi; ' \
+		'echo {TEST}; echo {version}; cp {input[0]} {output[0]}; ' # append a comment
+		'echo test > {log.a}'
+
+rule rule2:
+	input: testin
+	output: 'test.inter'
+#	message: 'Copying {input[0]} to {output[0]}'
+	shell: 
+		'''
+		cp {input[0]} {output[0]}
+		'''
+
+rule rule4:
+	input: "test.in"
+	output: "test.inter"
+	shell: "cp {input} {output}"
+
+
+# this should be ignored since test.in is present
+rule rule3:
+	input: "dir/{a}.out"
+	output: "{a}.in"
+	shell: "cp {input} {output}"
+
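test01 also exercises input functions: an input item may be a callable (here testin) that receives the wildcards object and returns the file name(s), resolved lazily during DAG construction. A self-contained sketch with hypothetical sample names:

    SAMPLES = {"a": "data/a.fastq", "b": "data/b.fastq"}

    def get_fastq(wildcards):
        # map the sample wildcard to its input file
        return SAMPLES[wildcards.sample]

    rule align:
        input: get_fastq
        output: "{sample}.bam"
        shell: "touch {output}"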
diff --git a/tests/test01/expected-results/dir/test.out b/tests/test01/expected-results/dir/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test01/expected-results/dir/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test01/expected-results/test.inter b/tests/test01/expected-results/test.inter
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test01/expected-results/test.inter
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test01/test.in b/tests/test01/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test01/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test02/Snakefile b/tests/test02/Snakefile
new file mode 100644
index 0000000..5bf2113
--- /dev/null
+++ b/tests/test02/Snakefile
@@ -0,0 +1,18 @@
+rule all:
+	input: 'test.out'
+
+rule rule1:
+	input: '{name}.in'
+	output: '{name}.inter'
+	#message: 'Copying {input[0]} to {output[0]}'
+	log: "logs/{name}.log"
+	shell:
+		'echo test > {log}; cp {input[0]} {output[0]}'
+
+rule rule2:
+	input: '{name}.inter'
+	output: '{name}.out'
+	#message: 'Copying {input[0]} to {output[0]}'
+	shell: 
+		'cp {input[0]} {output[0]}'
+
diff --git a/tests/test02/expected-results/test.out b/tests/test02/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test02/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test02/test.in b/tests/test02/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test02/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test03/Snakefile b/tests/test03/Snakefile
new file mode 100644
index 0000000..930484d
--- /dev/null
+++ b/tests/test03/Snakefile
@@ -0,0 +1,6 @@
+rule rule1:
+	input: '{name}.in'
+	output: '{name}.out'
+	message: 'Copying {input[0]} to {output[0]}'
+	shell:
+		'cp {input[0]} {output[0]}'
diff --git a/tests/test03/expected-results/test.out b/tests/test03/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test03/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test03/params b/tests/test03/params
new file mode 100644
index 0000000..70427a4
--- /dev/null
+++ b/tests/test03/params
@@ -0,0 +1 @@
+test.out
diff --git a/tests/test03/test.in b/tests/test03/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test03/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test04/Snakefile b/tests/test04/Snakefile
new file mode 100644
index 0000000..ecdfab1
--- /dev/null
+++ b/tests/test04/Snakefile
@@ -0,0 +1,17 @@
+rule rule1:
+	input: '{name}.bogus.in'
+	output: '{name}.out'
+	message: 'Writing junk to {output[0]}'
+	shell: 'echo "junk" > {output[0]}; cat {input}'
+
+rule rule2:
+	input: '{name}.in'
+	output: '{name}.out'
+	message: 'Copying {input[0]} to {output[0]}'
+	shell: 'cp {input[0]} {output[0]}'
+
+rule rule3:
+	input: '{name}.more.bogus.in'
+	output: '{name}.out'
+	message: 'Writing junk to {output[0]}'
+	shell: 'echo "junk" > {output[0]}; cat{input}'
diff --git a/tests/test04/expected-results/test.out b/tests/test04/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test04/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test04/params b/tests/test04/params
new file mode 100644
index 0000000..70427a4
--- /dev/null
+++ b/tests/test04/params
@@ -0,0 +1 @@
+test.out
diff --git a/tests/test04/test.in b/tests/test04/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test04/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test05/Snakefile b/tests/test05/Snakefile
new file mode 100644
index 0000000..33d8a4c
--- /dev/null
+++ b/tests/test05/Snakefile
@@ -0,0 +1,35 @@
+chromosomes = [1,2,3]
+
+#shell('rm test.*.inter 2> /dev/null | true')
+
+rule all:
+	input: 'test.predictions'
+
+rule compute1:
+	input: '{name}.in'
+	output: inter=expand('{{name}}.{chr}.inter', chr=chromosomes)
+	resources: gpu=1
+	run:
+		assert len(output.inter) > 0
+		print(output.inter)
+		for out in output:
+			shell('(cat {input[0]} && echo "Part {out}") > {out}')
+
+rule compute2:
+	input: '{name}.{chromosome}.inter', 'other.txt'
+	output: '{name}.{chromosome}.inter2'
+	threads: 2
+	resources: io=1
+	shell: 'cp {input[0]} {output[0]}'
+
+rule gather:
+	input: ['{name}.%s.inter2'%c for c in chromosomes]
+	output: '{name}.predictions'
+	run:
+		shell('cat {} > {}'.format(' '.join(input), output[0]))
+
+rule other:
+	output: 'other.txt'
+	priority: 50
+	resources: gpu=1
+	shell: 'touch other.txt'
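The resources declared above only constrain scheduling when limits are given on the command line; resources are arbitrary named integers, and jobs whose summed demand would exceed a limit are not run concurrently. A hypothetical invocation for this Snakefile:

    snakemake -j 4 --resources gpu=1 io=1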
diff --git a/tests/test05/expected-results/test.1.inter b/tests/test05/expected-results/test.1.inter
new file mode 100644
index 0000000..5cc1d91
--- /dev/null
+++ b/tests/test05/expected-results/test.1.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.1.inter
diff --git a/tests/test05/expected-results/test.1.inter2 b/tests/test05/expected-results/test.1.inter2
new file mode 100644
index 0000000..5cc1d91
--- /dev/null
+++ b/tests/test05/expected-results/test.1.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.1.inter
diff --git a/tests/test05/expected-results/test.2.inter b/tests/test05/expected-results/test.2.inter
new file mode 100644
index 0000000..8b02f7f
--- /dev/null
+++ b/tests/test05/expected-results/test.2.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.2.inter
diff --git a/tests/test05/expected-results/test.2.inter2 b/tests/test05/expected-results/test.2.inter2
new file mode 100644
index 0000000..8b02f7f
--- /dev/null
+++ b/tests/test05/expected-results/test.2.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.2.inter
diff --git a/tests/test05/expected-results/test.3.inter b/tests/test05/expected-results/test.3.inter
new file mode 100644
index 0000000..5144542
--- /dev/null
+++ b/tests/test05/expected-results/test.3.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.3.inter
diff --git a/tests/test05/expected-results/test.3.inter2 b/tests/test05/expected-results/test.3.inter2
new file mode 100644
index 0000000..5144542
--- /dev/null
+++ b/tests/test05/expected-results/test.3.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.3.inter
diff --git a/tests/test05/expected-results/test.predictions b/tests/test05/expected-results/test.predictions
new file mode 100644
index 0000000..30731f5
--- /dev/null
+++ b/tests/test05/expected-results/test.predictions
@@ -0,0 +1,6 @@
+testz0r
+Part test.1.inter
+testz0r
+Part test.2.inter
+testz0r
+Part test.3.inter
diff --git a/tests/test05/test.in b/tests/test05/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test05/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test06/Snakefile b/tests/test06/Snakefile
new file mode 100644
index 0000000..95beddd
--- /dev/null
+++ b/tests/test06/Snakefile
@@ -0,0 +1,8 @@
+rule all:
+	input: 'test.bla.out'
+
+rule wildcards:
+	input: 'test.in'
+	output: 'test.{xyz}.out'
+	message: 'Creating file {output[0]}, xyz={wildcards.xyz}'
+	shell: 'echo {wildcards.xyz} > {output[0]}'
diff --git a/tests/test06/expected-results/test.bla.out b/tests/test06/expected-results/test.bla.out
new file mode 100644
index 0000000..a7f8d9e
--- /dev/null
+++ b/tests/test06/expected-results/test.bla.out
@@ -0,0 +1 @@
+bla
diff --git a/tests/test06/test.in b/tests/test06/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test06/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test07/Snakefile b/tests/test07/Snakefile
new file mode 100644
index 0000000..6e7e657
--- /dev/null
+++ b/tests/test07/Snakefile
@@ -0,0 +1,12 @@
+
+rule rule1:
+	input: 'test.in'
+	output: 'test.out'
+	shell: 
+		'cp {input} {output}'
+
+rule rule2:
+	input: 'test.in'
+	output: 'test2.out'
+	shell:
+		'cp {input} {output}'
diff --git a/tests/test07/expected-results/test.out b/tests/test07/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test07/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test07/expected-results/test2.out b/tests/test07/expected-results/test2.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test07/expected-results/test2.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test07/test.in b/tests/test07/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test07/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test08/Snakefile b/tests/test08/Snakefile
new file mode 100644
index 0000000..81011cf
--- /dev/null
+++ b/tests/test08/Snakefile
@@ -0,0 +1,11 @@
+rule rule1:
+	input: '{file}.in'
+	output: '{file}.inter'
+	shell: 
+		'cp {input} {output}'
+
+rule rule2:
+	input: '{file}.inter'
+	output: '{file}.out'
+	shell: 
+		'cp {input} {output}'
diff --git a/tests/test08/expected-results/test.out b/tests/test08/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test08/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test08/expected-results/test2.out b/tests/test08/expected-results/test2.out
new file mode 100644
index 0000000..161829c
--- /dev/null
+++ b/tests/test08/expected-results/test2.out
@@ -0,0 +1 @@
+Hoi
diff --git a/tests/test08/test.in b/tests/test08/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test08/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test08/test2.in b/tests/test08/test2.in
new file mode 100644
index 0000000..161829c
--- /dev/null
+++ b/tests/test08/test2.in
@@ -0,0 +1 @@
+Hoi
diff --git a/tests/test09/Snakefile b/tests/test09/Snakefile
new file mode 100644
index 0000000..acacf6a
--- /dev/null
+++ b/tests/test09/Snakefile
@@ -0,0 +1,16 @@
+
+def fail(input, output):
+	shell("false && cp {input} {output}")
+
+def x(input, output):
+	fail(input, output)
+
+rule rule2:
+	input: 'test.inter'
+	output: 'test.out'
+	shell: 'cp {input} {output}'
+
+rule rule1:
+	input: 'test.in'
+	output: 'test.inter'
+	shell: "false && cp {input} {output}"
diff --git a/tests/test09/expected-results/.gitignore b/tests/test09/expected-results/.gitignore
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test09/test.in b/tests/test09/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test09/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test10/Snakefile b/tests/test10/Snakefile
new file mode 100644
index 0000000..208465e
--- /dev/null
+++ b/tests/test10/Snakefile
@@ -0,0 +1,5 @@
+rule rule1:
+	input: 'test.in'
+	output: 'test.out'
+	run:
+		shell('cp {input} {output}')
diff --git a/tests/test10/expected-results/test.out b/tests/test10/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test10/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test10/test.in b/tests/test10/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test10/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test11/Snakefile b/tests/test11/Snakefile
new file mode 100644
index 0000000..a64557d
--- /dev/null
+++ b/tests/test11/Snakefile
@@ -0,0 +1,7 @@
+include: "import.snakefile"
+
+rule:
+	input: 'test.inter'
+	output: 'test.out'
+	shell: 
+		'cp {input} {output}'
diff --git a/tests/test11/expected-results/test.inter b/tests/test11/expected-results/test.inter
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test11/expected-results/test.inter
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test11/expected-results/test.out b/tests/test11/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test11/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test11/import.snakefile b/tests/test11/import.snakefile
new file mode 100644
index 0000000..458d9be
--- /dev/null
+++ b/tests/test11/import.snakefile
@@ -0,0 +1,5 @@
+rule:
+	input: 'test.in'
+	output: 'test.inter'
+	shell: 
+		'cp {input} {output}'
diff --git a/tests/test11/test.in b/tests/test11/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test11/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test12/Snakefile b/tests/test12/Snakefile
new file mode 100644
index 0000000..26da01c
--- /dev/null
+++ b/tests/test12/Snakefile
@@ -0,0 +1,14 @@
+
+rule rule1:
+	input: 'test.inter'
+	output: 'test.out'
+	#message: 'Copying {input[0]} to {output[0]}'
+	shell: 
+		'cp {input[0]} {output[0]}'
+
+rule rule2:
+	input: 'test.in'
+	output: temp('test.inter')
+	#message: 'Copying {input[0]} to {output[0]}'
+	shell:
+		'cp {input[0]} {output[0]}'
diff --git a/tests/test12/expected-results/test.out b/tests/test12/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test12/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test12/test.in b/tests/test12/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test12/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test13/Snakefile b/tests/test13/Snakefile
new file mode 100644
index 0000000..f878f6d
--- /dev/null
+++ b/tests/test13/Snakefile
@@ -0,0 +1,12 @@
+rule all:
+	input: 'test.algo1-p7-improved.out'
+
+rule run_algo1:
+	input: '{dataset}.in'
+	output: '{dataset}.algo1-p{param,[0-9]+}.out'
+	shell: 'echo "algo1 / {wildcards.param}" > {output}'
+
+rule postprocess:
+	input: '{dataset}.{algorithm}.out'
+	output: '{dataset}.{algorithm}-improved.out'
+	shell: 'cp {input} {output} && (echo "IMPROVED" >> {output})'
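The {param,[0-9]+} syntax in run_algo1 above constrains the wildcard with a regular expression; that is what lets test.algo1-p7-improved.out be matched by postprocess rather than by run_algo1. Another small sketch of the same mechanism (hypothetical names):

    rule compress:
        # {n} may only match digits, so sample.final.txt is not produced here
        output: 'sample.{n,[0-9]+}.txt'
        shell: 'touch {output}'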
diff --git a/tests/test13/expected-results/test.algo1-p7-improved.out b/tests/test13/expected-results/test.algo1-p7-improved.out
new file mode 100644
index 0000000..74fdc73
--- /dev/null
+++ b/tests/test13/expected-results/test.algo1-p7-improved.out
@@ -0,0 +1,2 @@
+algo1 / 7
+IMPROVED
diff --git a/tests/test13/expected-results/test.algo1-p7.out b/tests/test13/expected-results/test.algo1-p7.out
new file mode 100644
index 0000000..e8ecd7a
--- /dev/null
+++ b/tests/test13/expected-results/test.algo1-p7.out
@@ -0,0 +1 @@
+algo1 / 7
diff --git a/tests/test13/test.in b/tests/test13/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test13/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test14/Snakefile.nonstandard b/tests/test14/Snakefile.nonstandard
new file mode 100644
index 0000000..0eb2605
--- /dev/null
+++ b/tests/test14/Snakefile.nonstandard
@@ -0,0 +1,27 @@
+chromosomes = [1,2,3,4,5]
+
+
+localrules: all
+
+
+rule all:
+	input: 'test.predictions', 'test.2.inter2'
+
+rule compute1:
+	input: '{name}.in'
+	output: ['{name}.%s.inter'%c for c in chromosomes]
+	run:
+		for out in output:
+			shell('(cat {input[0]} && echo "Part {out}") > {out}')
+
+rule compute2:
+	input: '{name}.{chromosome}.inter'
+	output: '{name}.{chromosome}.inter2'
+	params: test="a=b"
+	shell: 'echo copy; cp {input[0]} {output[0]}'
+
+rule gather:
+	input: ['{name}.%s.inter2'%c for c in chromosomes]
+	output: '{name}.predictions'
+	run:
+		shell('cat {} > {}'.format(' '.join(input), output[0]))
diff --git a/tests/test14/expected-results/test.1.inter b/tests/test14/expected-results/test.1.inter
new file mode 100644
index 0000000..5cc1d91
--- /dev/null
+++ b/tests/test14/expected-results/test.1.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.1.inter
diff --git a/tests/test14/expected-results/test.1.inter2 b/tests/test14/expected-results/test.1.inter2
new file mode 100644
index 0000000..5cc1d91
--- /dev/null
+++ b/tests/test14/expected-results/test.1.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.1.inter
diff --git a/tests/test14/expected-results/test.2.inter b/tests/test14/expected-results/test.2.inter
new file mode 100644
index 0000000..8b02f7f
--- /dev/null
+++ b/tests/test14/expected-results/test.2.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.2.inter
diff --git a/tests/test14/expected-results/test.2.inter2 b/tests/test14/expected-results/test.2.inter2
new file mode 100644
index 0000000..8b02f7f
--- /dev/null
+++ b/tests/test14/expected-results/test.2.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.2.inter
diff --git a/tests/test14/expected-results/test.3.inter b/tests/test14/expected-results/test.3.inter
new file mode 100644
index 0000000..5144542
--- /dev/null
+++ b/tests/test14/expected-results/test.3.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.3.inter
diff --git a/tests/test14/expected-results/test.3.inter2 b/tests/test14/expected-results/test.3.inter2
new file mode 100644
index 0000000..5144542
--- /dev/null
+++ b/tests/test14/expected-results/test.3.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.3.inter
diff --git a/tests/test14/expected-results/test.predictions b/tests/test14/expected-results/test.predictions
new file mode 100644
index 0000000..7d97db6
--- /dev/null
+++ b/tests/test14/expected-results/test.predictions
@@ -0,0 +1,10 @@
+testz0r
+Part test.1.inter
+testz0r
+Part test.2.inter
+testz0r
+Part test.3.inter
+testz0r
+Part test.4.inter
+testz0r
+Part test.5.inter
diff --git a/tests/test14/qsub b/tests/test14/qsub
new file mode 100755
index 0000000..f330628
--- /dev/null
+++ b/tests/test14/qsub
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo `date` >> qsub.log
+tail -n1 $1 >> qsub.log
+# simulate a scheduler by printing a random number as the job id
+echo $RANDOM
+sh $1
diff --git a/tests/test14/qsub.py b/tests/test14/qsub.py
new file mode 100755
index 0000000..c3d4fcb
--- /dev/null
+++ b/tests/test14/qsub.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+import sys
+import os
+import random
+
+from snakemake.utils import read_job_properties
+
+jobscript = sys.argv[1]
+job_properties = read_job_properties(jobscript)
+with open("qsub.log", "a") as log:
+    print(job_properties, file=log)
+
+print(random.randint(1, 100))
+os.system("sh {}".format(jobscript))
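qsub.py above is a cluster submission wrapper: Snakemake appends the generated jobscript as the final argument, and read_job_properties() recovers the job's properties (rule name, threads, resources and so on) from a JSON header embedded in that script. A hypothetical invocation from the test directory:

    snakemake --cluster ./qsub.py -j 2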
diff --git a/tests/test14/test.in b/tests/test14/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test14/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test15/Snakefile b/tests/test15/Snakefile
new file mode 100644
index 0000000..ca2a77f
--- /dev/null
+++ b/tests/test15/Snakefile
@@ -0,0 +1,7 @@
+
+rule a:
+	input: test = lambda wildcards: "test2.in" if os.path.exists("test2.in") else "test.in"
+	output: "test.out"
+	shell: "cp {input.test} {output}"
+
+
diff --git a/tests/test15/expected-results/test.out b/tests/test15/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test15/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test15/test.in b/tests/test15/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test15/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/testHighWorkload/Snakefile b/tests/testHighWorkload/Snakefile
new file mode 100644
index 0000000..eb234fc
--- /dev/null
+++ b/tests/testHighWorkload/Snakefile
@@ -0,0 +1,19 @@
+import os.path
+import sys
+
+instances = [os.path.basename(s[:len(s)-4]) for s in os.listdir('mfa') if s.endswith('.mfa')]
+
+rule all:
+	input: ['fisher/%s.pairs.gz'%s for s in instances] + ['ph/%s.ph'%s for s in instances]
+
+rule extract_mismatch_counts:
+	input: 'mfa/{instance}.mfa'
+	output: 'fisher/{instance}.pairs.gz'
+	message: 'Extracting the number of mismatches for each pair of sequences from {input}'
+	shell: 'sleep 2; touch {output}'
+
+rule create_tree:
+	input: 'mfa/{instance}.mfa'
+	output: 'ph/{instance}.ph', 'ph/{instance}.ph.log'
+	message: 'Running CLUSTALW to compute NJ tree from {input}'
+	shell: "sleep 2; touch {output[0]}; touch {output[1]}"
diff --git a/tests/testHighWorkload/mfa/00.mfa b/tests/testHighWorkload/mfa/00.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/01.mfa b/tests/testHighWorkload/mfa/01.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/02.mfa b/tests/testHighWorkload/mfa/02.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/03.mfa b/tests/testHighWorkload/mfa/03.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/04.mfa b/tests/testHighWorkload/mfa/04.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/05.mfa b/tests/testHighWorkload/mfa/05.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/06.mfa b/tests/testHighWorkload/mfa/06.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/07.mfa b/tests/testHighWorkload/mfa/07.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/08.mfa b/tests/testHighWorkload/mfa/08.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/09.mfa b/tests/testHighWorkload/mfa/09.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/testHighWorkload/mfa/10.mfa b/tests/testHighWorkload/mfa/10.mfa
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_benchmark/Snakefile b/tests/test_benchmark/Snakefile
new file mode 100644
index 0000000..90a4b63
--- /dev/null
+++ b/tests/test_benchmark/Snakefile
@@ -0,0 +1,11 @@
+
+
+rule all:
+    input:
+        "test.benchmark.json"
+
+rule:
+    benchmark:
+        "{v}.benchmark.json"
+    shell:
+        "sleep 1"
diff --git a/tests/test_benchmark/expected-results/test.benchmark.json b/tests/test_benchmark/expected-results/test.benchmark.json
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_cluster_dynamic/Snakefile b/tests/test_cluster_dynamic/Snakefile
new file mode 100644
index 0000000..29b7d88
--- /dev/null
+++ b/tests/test_cluster_dynamic/Snakefile
@@ -0,0 +1,26 @@
+import re, os, sys
+
+
+rule all:
+	input:
+		"out.txt"
+
+
+rule split:
+    input: 'test.txt'
+    output: dynamic('prefix{split_id}.txt')
+    run:
+        shell('split -l 2 {input} prefix')
+        for f in os.listdir(os.getcwd()):
+            if re.search('prefix[a-z][a-z]', f):
+                os.rename(f, f + '.txt')
+
+rule cut:
+    input: 'prefix{split_id,[a-z][a-z]}.txt'
+    output: '{split_id}_cut.txt'
+    shell: 'cut -f 1,2 {input} > {output}'
+
+rule merge:
+    input: dynamic('{split_id}_cut.txt')
+    output: 'out.txt'
+    shell: 'cat {input} > {output}'
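dynamic() above marks outputs whose names are unknown until runtime: once the split rule has run, the DAG is re-evaluated so that cut and merge see however many prefix*.txt chunks were actually produced. For comparison, a static sketch for the case where the chunk names are known up front (hypothetical ids):

    rule merge_static:
        input: expand('{split_id}_cut.txt', split_id=['aa', 'ab', 'ac'])
        output: 'out.txt'
        shell: 'cat {input} > {output}'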
diff --git a/tests/test_cluster_dynamic/expected-results/out.txt b/tests/test_cluster_dynamic/expected-results/out.txt
new file mode 100644
index 0000000..9e5f5f8
--- /dev/null
+++ b/tests/test_cluster_dynamic/expected-results/out.txt
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/tests/test_cluster_dynamic/qsub b/tests/test_cluster_dynamic/qsub
new file mode 100755
index 0000000..f330628
--- /dev/null
+++ b/tests/test_cluster_dynamic/qsub
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo `date` >> qsub.log
+tail -n1 $1 >> qsub.log
+# simulate a scheduler by printing a random number as the job id
+echo $RANDOM
+sh $1
diff --git a/tests/test_cluster_dynamic/test.txt b/tests/test_cluster_dynamic/test.txt
new file mode 100644
index 0000000..9e5f5f8
--- /dev/null
+++ b/tests/test_cluster_dynamic/test.txt
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/tests/test_conditional/Snakefile b/tests/test_conditional/Snakefile
new file mode 100644
index 0000000..26308ea
--- /dev/null
+++ b/tests/test_conditional/Snakefile
@@ -0,0 +1,18 @@
+
+CONDITION = False
+
+if CONDITION:
+	rule:
+		output: "test.out"
+		shell:  "echo bla; exit 1"
+else:
+	rule:
+		output: "test.out"
+		shell:  "touch {output}"
+
+
+# The following is bad style and exists only for testing. Prefer a single rule that generates any test{i}.out via a wildcard, plus one rule that collects all of them as input.
+for i in range(3):
+	rule:
+		output: "test.{i}.out".format(i=i)
+		shell:  "touch {output}"
diff --git a/tests/test_conditional/expected-results/test.0.out b/tests/test_conditional/expected-results/test.0.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_conditional/expected-results/test.1.out b/tests/test_conditional/expected-results/test.1.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_conditional/expected-results/test.2.out b/tests/test_conditional/expected-results/test.2.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_conditional/expected-results/test.out b/tests/test_conditional/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_config/Snakefile b/tests/test_config/Snakefile
new file mode 100644
index 0000000..46237e4
--- /dev/null
+++ b/tests/test_config/Snakefile
@@ -0,0 +1,13 @@
+
+
+include: "test.rules"
+
+
+configfile: "test.json"
+
+
+rule:
+    output:
+        config["outfile"]
+    shell:
+        "touch {output}"
diff --git a/tests/test_config/expected-results/test.out b/tests/test_config/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_config/test.json b/tests/test_config/test.json
new file mode 100644
index 0000000..3e51ba4
--- /dev/null
+++ b/tests/test_config/test.json
@@ -0,0 +1,3 @@
+{
+    "outfile": "test.out"
+}
diff --git a/tests/test_config/test.rules b/tests/test_config/test.rules
new file mode 100644
index 0000000..4a1cf33
--- /dev/null
+++ b/tests/test_config/test.rules
@@ -0,0 +1 @@
+configfile: "test2.json"
diff --git a/tests/test_config/test2.json b/tests/test_config/test2.json
new file mode 100644
index 0000000..7e154ae
--- /dev/null
+++ b/tests/test_config/test2.json
@@ -0,0 +1,3 @@
+{
+    "outfile": "test2.out"
+}
diff --git a/tests/test_config/test3.json b/tests/test_config/test3.json
new file mode 100644
index 0000000..35cbdd7
--- /dev/null
+++ b/tests/test_config/test3.json
@@ -0,0 +1,3 @@
+{
+    "outfile": "test3.out"
+}
diff --git a/tests/test_dynamic/Snakefile b/tests/test_dynamic/Snakefile
new file mode 100644
index 0000000..b078ba6
--- /dev/null
+++ b/tests/test_dynamic/Snakefile
@@ -0,0 +1,26 @@
+# snakemake
+
+rule all:
+	input: a=dynamic("test.{n}.{bla}.out"), b=dynamic("test.{n}.{bla}.csv")
+	run:
+		print(input.b)
+
+rule inter:
+	input: "test.{n}.{bla}.inter"
+	output: "test.{n}.{bla}.out"
+	shell: "cp {input} {output}"
+
+rule dynoutput:
+	input: "test.xy.in"
+	output: dynamic("test.{n}.{bla}.inter")
+	shell: "for i in {{0..2}}; do touch test.0$i.xy.inter; done"
+
+rule:
+	input: "test.{n}.{bla}.inter"
+	output: "test.{n}.{bla}.txt"
+	shell: "touch {output}"
+
+rule:
+	input: "test.{n}.{bla}.txt"
+	output: "test.{n}.{bla}.csv"
+	shell: "cp {input} {output}"
diff --git a/tests/test_dynamic/expected-results/test.00.xy.csv b/tests/test_dynamic/expected-results/test.00.xy.csv
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic/expected-results/test.00.xy.out b/tests/test_dynamic/expected-results/test.00.xy.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic/expected-results/test.01.xy.csv b/tests/test_dynamic/expected-results/test.01.xy.csv
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic/expected-results/test.01.xy.out b/tests/test_dynamic/expected-results/test.01.xy.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic/expected-results/test.02.xy.csv b/tests/test_dynamic/expected-results/test.02.xy.csv
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic/expected-results/test.02.xy.out b/tests/test_dynamic/expected-results/test.02.xy.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic/test.xy.in b/tests/test_dynamic/test.xy.in
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic_complex/Snakefile b/tests/test_dynamic_complex/Snakefile
new file mode 100644
index 0000000..4876c3b
--- /dev/null
+++ b/tests/test_dynamic_complex/Snakefile
@@ -0,0 +1,19 @@
+rule all:
+    input: expand("{sample}_final", sample=["a", "b", "c"])
+
+rule init:
+    output: "{sample}.init"
+    shell: "touch {output}"
+
+rule random_clusters:
+    input: "{sample}.init"
+    output: dynamic("{sample}_clusters/cluster_{clustid}")
+    run:
+        shell("mkdir -p {wildcards.sample}_clusters")
+        for i in range(3):
+            shell("touch {wildcards.sample}_clusters/cluster_" + str(i))
+
+rule concatenate_clusters:
+    input: dynamic("{sample}_clusters/cluster_{clustid}")
+    output: "{sample}_final"
+    shell: "touch {output}"
diff --git a/tests/test_dynamic_complex/expected-results/a_final b/tests/test_dynamic_complex/expected-results/a_final
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic_complex/expected-results/b_final b/tests/test_dynamic_complex/expected-results/b_final
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_dynamic_complex/expected-results/c_final b/tests/test_dynamic_complex/expected-results/c_final
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_globwildcards/Snakefile b/tests/test_globwildcards/Snakefile
new file mode 100644
index 0000000..714f464
--- /dev/null
+++ b/tests/test_globwildcards/Snakefile
@@ -0,0 +1,11 @@
+
+IDS, = glob_wildcards("test.{id}.txt")
+
+
+rule all:
+	input: expand("test.{id}.out", id=IDS)
+
+rule:
+	input: "test.{id}.txt"
+	output: "test.{id}.out"
+	shell: "touch {output}"
diff --git a/tests/test_globwildcards/expected-results/test.0.out b/tests/test_globwildcards/expected-results/test.0.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_globwildcards/expected-results/test.1.out b/tests/test_globwildcards/expected-results/test.1.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_globwildcards/expected-results/test.2.out b/tests/test_globwildcards/expected-results/test.2.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_globwildcards/test.0.txt b/tests/test_globwildcards/test.0.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_globwildcards/test.1.txt b/tests/test_globwildcards/test.1.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_globwildcards/test.2.txt b/tests/test_globwildcards/test.2.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_keyword_list/Snakefile b/tests/test_keyword_list/Snakefile
new file mode 100644
index 0000000..0deb164
--- /dev/null
+++ b/tests/test_keyword_list/Snakefile
@@ -0,0 +1,7 @@
+rule:
+	input: bla="test.in1 test.in2".split()
+	output: "test.out"
+	run:
+		print(input.bla)
+		assert len(input.bla) == 2
+		shell("touch {output}")
diff --git a/tests/test_keyword_list/expected-results/test.out b/tests/test_keyword_list/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_keyword_list/test.in1 b/tests/test_keyword_list/test.in1
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_keyword_list/test.in2 b/tests/test_keyword_list/test.in2
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_local_import/Snakefile b/tests/test_local_import/Snakefile
new file mode 100644
index 0000000..42f4c33
--- /dev/null
+++ b/tests/test_local_import/Snakefile
@@ -0,0 +1,6 @@
+import bar
+import foo
+
+rule:
+	output: "test.out"
+	shell: "touch {output}"
diff --git a/tests/test_local_import/bar.py b/tests/test_local_import/bar.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_local_import/expected-results/test.out b/tests/test_local_import/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_local_import/foo/__init__.py b/tests/test_local_import/foo/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_many_jobs/Snakefile b/tests/test_many_jobs/Snakefile
new file mode 100644
index 0000000..9b45b9d
--- /dev/null
+++ b/tests/test_many_jobs/Snakefile
@@ -0,0 +1,23 @@
+
+rule:
+	input: expand("{sample}.out", sample=range(50000))
+	
+
+rule:
+	input: "{sample}.inter2"
+	output: "{sample}.out"
+	shell: "touch {output}"
+
+rule:
+	input: "{sample}.inter1"
+	output: "{sample}.inter2"
+	shell: "touch {output}"
+
+rule:
+	input: "{sample}.in"
+	output: "{sample}.inter1"
+	shell: "touch {output}"
+
+rule:
+	output: "{sample}.in"
+	shell: "touch {output}"
diff --git a/tests/test_multiple_includes/Snakefile b/tests/test_multiple_includes/Snakefile
new file mode 100644
index 0000000..f8a661f
--- /dev/null
+++ b/tests/test_multiple_includes/Snakefile
@@ -0,0 +1,5 @@
+include: 'test_rule.smk'
+include: 'test_second_rule.smk'
+
+rule all:
+    input: rules.test_second_rule.output
\ No newline at end of file
diff --git a/tests/test_multiple_includes/expected-results/test1.txt b/tests/test_multiple_includes/expected-results/test1.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_multiple_includes/expected-results/test2.txt b/tests/test_multiple_includes/expected-results/test2.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_multiple_includes/test_rule.smk b/tests/test_multiple_includes/test_rule.smk
new file mode 100644
index 0000000..a36f2dc
--- /dev/null
+++ b/tests/test_multiple_includes/test_rule.smk
@@ -0,0 +1,3 @@
+rule test_rule: 
+    output: 'test1.txt'
+    shell: 'touch {output}'
\ No newline at end of file
diff --git a/tests/test_multiple_includes/test_second_rule.smk b/tests/test_multiple_includes/test_second_rule.smk
new file mode 100644
index 0000000..409854d
--- /dev/null
+++ b/tests/test_multiple_includes/test_second_rule.smk
@@ -0,0 +1,4 @@
+rule test_second_rule: 
+    input: rules.test_rule.output
+    output: 'test2.txt'
+    shell: 'touch {output}'
\ No newline at end of file
diff --git a/tests/test_params/Snakefile b/tests/test_params/Snakefile
new file mode 100644
index 0000000..f284def
--- /dev/null
+++ b/tests/test_params/Snakefile
@@ -0,0 +1,12 @@
+
+"""
+This is a test for the params syntax.
+"""
+
+rule:
+	input: "somedir/test.out"
+
+rule:
+	params: lambda wildcards: "-f", dir="{dir}"
+	output: "{dir}/test.out"
+	shell: "rm -r {params.dir}; mkdir -p {params.dir}; touch {params[0]} {output}"
diff --git a/tests/test_params/expected-results/somedir/test.out b/tests/test_params/expected-results/somedir/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_parser/Snakefile b/tests/test_parser/Snakefile
new file mode 100644
index 0000000..22d414f
--- /dev/null
+++ b/tests/test_parser/Snakefile
@@ -0,0 +1,13 @@
+
+
+class Test:
+	def __init__(self):
+		self.include = "test.out"
+
+
+rule:
+	output: Test().include
+	shell: 
+        "touch {output}"
+
+
diff --git a/tests/test_parser/expected-results/test.out b/tests/test_parser/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_parser/test.out b/tests/test_parser/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_persistent_dict/Snakefile b/tests/test_persistent_dict/Snakefile
new file mode 100644
index 0000000..6360a42
--- /dev/null
+++ b/tests/test_persistent_dict/Snakefile
@@ -0,0 +1,33 @@
+try:
+	from pytools.persistent_dict import PersistentDict
+	
+
+	storage = PersistentDict("mystorage")
+
+	storage.store("var1", 100)
+
+	rule all:
+		input: expand("test.{i}.out", i=range(3))
+
+
+	rule:
+		input: "test.in"
+		output: "test.{i}.out"
+		run:
+			assert storage.fetch("var1") == 100
+			with open(output[0], "w") as out:
+				v = storage.fetch("var2")
+				assert v == 1
+				print(v, file=out)
+
+
+	rule:
+		output: temp("test.in")  # mark output as temp, so this rule reruns and var2 is stored on every run
+		run:
+			storage.store("var2", 1)
+			shell("touch {output}")
+
+
+except ImportError:
+	# do not run the test if pytools is not installed
+	pass
diff --git a/tests/test_persistent_dict/expected-results/.gitignore b/tests/test_persistent_dict/expected-results/.gitignore
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_report/Snakefile b/tests/test_report/Snakefile
new file mode 100644
index 0000000..8587d45
--- /dev/null
+++ b/tests/test_report/Snakefile
@@ -0,0 +1,31 @@
+
+from snakemake.utils import report
+
+rule report:
+    input: "Snakefile", fig1="fig.png", fig2="fig2.png"
+    output: "report.html"
+    run:
+        report("""
+        ======================
+        Report of some project
+        ======================
+
+
+        Here is an embedded image:
+
+        .. embeddedimage:: {input.fig1}
+            :width: 200px
+
+
+        Here is an example embedded figure:
+
+        .. embeddedfigure:: {input.fig2}
+
+            Figure title goes here
+
+            Descriptive figure legend goes here
+
+
+        Embedded data F1_ and F2_.
+
+        """, output[0], F1=input[0], F2=input[0])
diff --git a/tests/test_report/expected-results/report.html b/tests/test_report/expected-results/report.html
new file mode 100644
index 0000000..e3ca2e2
--- /dev/null
+++ b/tests/test_report/expected-results/report.html
@@ -0,0 +1,171 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="Docutils 0.11: http://docutils.sourceforge.net/" />
+<title>Report of some project</title>
+<style type="text/css">
+
+/**
+Credits for the colors and font selection go to the Twitter Bootstrap framework.
+*/
+
+
+body {
+    color: rgb(51, 51, 51);
+    font-size: 10pt;
+    padding-top: 10px;
+    font-family: "Helvetica Neue",Helvetica,Arial,sans-serif;
+}
+
+h1 {
+    font-size: 150%;
+}
+
+h2 {
+    font-size: 140%;
+}
+
+h3 {
+    font-size: 130%;
+}
+
+h4 {
+    font-size: 120%;
+}
+
+h5 {
+    font-size: 110%;
+}
+
+h6 {
+    font-size: 100%;
+}
+
+div#attachments {
+    color: gray;
+    padding: 0px;
+    border: 1px solid white;
+    border-radius: 4px 4px 4px 4px;
+    padding-top: 20px;
+}
+
+div#attachments :target a {
+    color: rgb(70, 136, 71);
+    border: 1px solid rgb(221, 221, 221);
+    border-radius: 4px 4px 4px 4px;
+}
+
+h1.title {
+    text-align: center;
+    font-size: 180%;
+}
+
+div.document {
+    position: relative;
+    background: white;
+    max-width: 800px;
+    margin: auto;
+    padding: 20px;
+    border: 1px solid rgb(221, 221, 221);
+    border-radius: 4px 4px 4px 4px;
+}
+
+div.document:after {
+    content: "snakemake report";
+    position: absolute;
+    top: -1px;
+    right: -1px;
+    padding: 3px 7px;
+    background-color: #f5f5f5;
+    border: 1px solid rgb(221, 221, 221);
+    color: #9da0a4;
+    font-weight: bold;
+    font-size: 12pt;
+    border-radius: 0 0 0 4px;
+}
+
+div.document p {
+    text-align: justify;
+}
+
+div#metadata {
+    text-align: right;
+}
+
+table.docutils {
+    border: none;
+    border-collapse: collapse;
+    border-top: 2px solid gray;
+    border-bottom: 2px solid gray;
+    text-align: center;
+}
+
+table.docutils th {
+    border: none;
+    border-top: 2px solid gray;
+    border-bottom: 2px solid gray;
+    padding: 5px;
+}
+
+table.docutils td {
+    border: none;
+    padding: 5px;
+}
+
+table.docutils th:last-child, td:last-child {
+    text-align: left;
+}
+
+table.docutils th:first-child, td:first-child {
+    text-align: right;
+}
+
+table.docutils th:only-child, td:only-child {
+    text-align: center;
+}
+
+table.docutils.footnote {
+    border: none;
+    text-align: left;
+}
+
+a {
+    color: rgb(0, 136, 204);
+    text-decoration: none;
+}
+
+a:hover {
+    color: rgb(0, 85, 128);
+    text-decoration: underline;
+}
+
+
+</style>
+</head>
+<body>
+<div class="document" id="report-of-some-project">
+<h1 class="title">Report of some project</h1>
+
+<p>Here is an embedded image:</p>
+<img alt="fig.png" src="fig.png" style="width: 200px;" />
+<p>Here is an example embedded figure:</p>
+<div class="figure">
+<img alt="fig.png" src="fig.png" />
+<p class="caption">Figure title goes here</p>
+<div class="legend">
+Descriptive figure legend goes here</div>
+</div>
+<p>Embedded data <a class="reference internal" href="#f1">F1</a> and <a class="reference internal" href="#f2">F2</a>.</p>
+<div class="container" id="attachments">
+<div class="container" id="f1">
+[F1] <span class="raw-html"><a href="data:text/plain;charset=utf8;filename=Snakefile;base64,CmZyb20gc25ha2VtYWtlLnV0aWxzIGltcG9ydCByZXBvcnQKCnJ1bGUgcmVwb3J0OgogICAgaW5wdXQ6ICJTbmFrZWZpbGUiCiAgICBvdXRwdXQ6ICJyZXBvcnQuaHRtbCIKICAgIHJ1bjoKICAgICAgICByZXBvcnQoIiIiCiAgICAgICAgPT09PT09PT09PT09PT09PT09PT09PQogICAgICAgIFJlcG9ydCBvZiBzb21lIHByb2plY3QKICAgICAgICA9PT09PT09PT09PT09PT09PT09PT09CgoKICAgICAgICBIZXJlIGlzIGFuIGVtYmVkZGVkIGltYWdlOgoKICAgICAgICAuLiBlbWJlZGRlZGltYWdlOjogZmlnLnBuZwogICAgICAg [...]
+<div class="container" id="f2">
+[F2] <span class="raw-html"><a href="data:text/plain;charset=utf8;filename=Snakefile;base64,CmZyb20gc25ha2VtYWtlLnV0aWxzIGltcG9ydCByZXBvcnQKCnJ1bGUgcmVwb3J0OgogICAgaW5wdXQ6ICJTbmFrZWZpbGUiCiAgICBvdXRwdXQ6ICJyZXBvcnQuaHRtbCIKICAgIHJ1bjoKICAgICAgICByZXBvcnQoIiIiCiAgICAgICAgPT09PT09PT09PT09PT09PT09PT09PQogICAgICAgIFJlcG9ydCBvZiBzb21lIHByb2plY3QKICAgICAgICA9PT09PT09PT09PT09PT09PT09PT09CgoKICAgICAgICBIZXJlIGlzIGFuIGVtYmVkZGVkIGltYWdlOgoKICAgICAgICAuLiBlbWJlZGRlZGltYWdlOjogZmlnLnBuZwogICAgICAg [...]
+</div>
+<div class="container" id="metadata">
+None | 2014-08-12</div>
+</div>
+</body>
+</html>
diff --git a/tests/test_report/fig.png b/tests/test_report/fig.png
new file mode 100644
index 0000000..3e00068
Binary files /dev/null and b/tests/test_report/fig.png differ
diff --git a/tests/test_report/fig2.png b/tests/test_report/fig2.png
new file mode 100644
index 0000000..ce97c85
Binary files /dev/null and b/tests/test_report/fig2.png differ
diff --git a/tests/test_ruledag/1.a b/tests/test_ruledag/1.a
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_ruledag/2.a b/tests/test_ruledag/2.a
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_ruledag/3.a b/tests/test_ruledag/3.a
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_ruledag/4.a b/tests/test_ruledag/4.a
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_ruledag/5.a b/tests/test_ruledag/5.a
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_ruledag/Snakefile b/tests/test_ruledag/Snakefile
new file mode 100644
index 0000000..05cee5a
--- /dev/null
+++ b/tests/test_ruledag/Snakefile
@@ -0,0 +1,18 @@
+
+rule all:
+	input: expand("{id}.d", id="1 2 3 4 5".split())
+
+rule rule1:
+	input: "{id}.c"
+	output: "{id}.d"
+	shell: "touch {output}"
+
+rule rule2:
+	input: lambda wildcards: (expand("{id}.b", id="1 2 3".split()) if wildcards.id == "1" else expand("{id}.b", id=wildcards.id))
+	output: "{id}.c"
+	shell: "touch {output}"
+
+rule rule3:
+	input: "{id}.a"
+	output: "{id}.b"
+	shell: "touch {output}"
diff --git a/tests/test_ruledeps/Snakefile b/tests/test_ruledeps/Snakefile
new file mode 100644
index 0000000..e566cbc
--- /dev/null
+++ b/tests/test_ruledeps/Snakefile
@@ -0,0 +1,27 @@
+
+
+rule all:
+    input: "test.out"
+
+
+rule a:
+    output: "test.in"
+    shell:  "touch {output}"
+
+
+rule b:
+    input:  rules.a.output
+    output: "test.inter"
+    shell:  "touch {output}"
+
+
+rule c:
+    input:  rules.a.output
+    output: "test.inter"
+    shell:  "exit 1"
+
+
+rule d:
+    input:  rules.b.output
+    output: "test.out"
+    shell:  "touch {output}"
diff --git a/tests/test_ruledeps/expected-results/test.out b/tests/test_ruledeps/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_same_wildcard/Snakefile b/tests/test_same_wildcard/Snakefile
new file mode 100644
index 0000000..788093a
--- /dev/null
+++ b/tests/test_same_wildcard/Snakefile
@@ -0,0 +1,8 @@
+
+rule:
+	input:  "test_test.out"
+
+rule:
+	input: "{name}_{name}.in"
+	output: "{name}_{name}.out"
+	shell: "echo {wildcards.name} > {output}"
diff --git a/tests/test_same_wildcard/expected-results/test_test.out b/tests/test_same_wildcard/expected-results/test_test.out
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/test_same_wildcard/expected-results/test_test.out
@@ -0,0 +1 @@
+test
diff --git a/tests/test_same_wildcard/test_test.in b/tests/test_same_wildcard/test_test.in
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/test_same_wildcard/test_test.in
@@ -0,0 +1 @@
+test
diff --git a/tests/test_shell/Snakefile b/tests/test_shell/Snakefile
new file mode 100644
index 0000000..a710559
--- /dev/null
+++ b/tests/test_shell/Snakefile
@@ -0,0 +1,10 @@
+
+rule:
+	input: "test.in"
+	output: "test.out"
+	run:
+		test = shell("echo 42;", read=True)
+		assert int(test) == 42
+		with open(output[0], "w") as f:
+			for l in shell("cat {input}", iterable=True):
+				print(l, file=f)
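The shell() helper used above can capture command output: read=True returns the command's stdout, and iterable=True yields it line by line with trailing newlines stripped. A standalone sketch:

    from snakemake.shell import shell

    out = shell("echo 42", read=True)          # captured stdout
    assert int(out) == 42
    for line in shell("cat test.in", iterable=True):
        print(line)                            # one input line at a time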
diff --git a/tests/test_shell/expected-results/test.out b/tests/test_shell/expected-results/test.out
new file mode 100644
index 0000000..3bd1f0e
--- /dev/null
+++ b/tests/test_shell/expected-results/test.out
@@ -0,0 +1,2 @@
+foo
+bar
diff --git a/tests/test_shell/test.in b/tests/test_shell/test.in
new file mode 100644
index 0000000..3bd1f0e
--- /dev/null
+++ b/tests/test_shell/test.in
@@ -0,0 +1,2 @@
+foo
+bar
diff --git a/tests/test_srcdir/Snakefile b/tests/test_srcdir/Snakefile
new file mode 100644
index 0000000..4e0c978
--- /dev/null
+++ b/tests/test_srcdir/Snakefile
@@ -0,0 +1,7 @@
+rule:
+    output:
+        "test.out"
+    params:
+        srcdir("script.sh")
+    shell:
+        "sh {params} > {output}"
diff --git a/tests/test_srcdir/expected-results/test.out b/tests/test_srcdir/expected-results/test.out
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/tests/test_srcdir/expected-results/test.out
@@ -0,0 +1 @@
+test
diff --git a/tests/test_srcdir/script.sh b/tests/test_srcdir/script.sh
new file mode 100644
index 0000000..7e182fd
--- /dev/null
+++ b/tests/test_srcdir/script.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo test
diff --git a/tests/test_subworkflows/Snakefile b/tests/test_subworkflows/Snakefile
new file mode 100644
index 0000000..4eff116
--- /dev/null
+++ b/tests/test_subworkflows/Snakefile
@@ -0,0 +1,12 @@
+import os.path
+
+subworkflow test02:
+    workdir: config["subworkdir"]
+
+rule:
+    input: "test.out"
+
+rule:
+    input: test02("test.out")
+    output: "test.out"
+    shell: "cp {input} {output}"
diff --git a/tests/test_subworkflows/expected-results/test.out b/tests/test_subworkflows/expected-results/test.out
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test_subworkflows/expected-results/test.out
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test_temp/Snakefile b/tests/test_temp/Snakefile
new file mode 100644
index 0000000..5e8d286
--- /dev/null
+++ b/tests/test_temp/Snakefile
@@ -0,0 +1,21 @@
+rule indelrealigner:
+    input: bam="{base}.bam",intervals="{base}.intervals",bai='{base}.bam.bai'
+    output: bam=temp("{base}.realigned.bam")
+    params: batch="-q ccr -l nodes=1:gpfs"
+    threads: 1
+    shell: "touch {output}"
+
+rule realignertargetcreator:
+    input: "{base}.bam", "{base}.bam.bai"
+    output: temp("{base}.intervals")
+    params: batch="-q ccr -l nodes=1:gpfs"
+    threads: 32
+    shell: "touch {output}"
+
+
+rule indexbam:
+    threads: 1
+    input: '{base}.bam'
+    output: temp('{base}.bam.bai')
+    params: batch="-q ccr -l nodes=1:gpfs"
+    shell: 'touch {output}'
diff --git a/tests/test_temp/expected-results/test.realigned.bam b/tests/test_temp/expected-results/test.realigned.bam
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_temp/qsub b/tests/test_temp/qsub
new file mode 100755
index 0000000..63a46f9
--- /dev/null
+++ b/tests/test_temp/qsub
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo `date` >> qsub.log
+tail -n1 $1 >> qsub.log
+# simulate a scheduler by printing a random number as the job id
+echo $RANDOM
+$1
diff --git a/tests/test_temp/test.bam b/tests/test_temp/test.bam
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_temp_expand/Snakefile b/tests/test_temp_expand/Snakefile
new file mode 100644
index 0000000..02f155a
--- /dev/null
+++ b/tests/test_temp_expand/Snakefile
@@ -0,0 +1,16 @@
+
+
+rule:
+    input:
+        "a.txt"
+    output:
+        "test.txt"
+    shell:
+        "touch {output}"
+
+
+rule:
+    output:
+        temp(expand("{ds}.txt", ds="a b c".split()))
+    shell:
+        "touch {output}"
diff --git a/tests/test_temp_expand/expected-results/test.txt b/tests/test_temp_expand/expected-results/test.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_touch/Snakefile b/tests/test_touch/Snakefile
new file mode 100644
index 0000000..52e81d7
--- /dev/null
+++ b/tests/test_touch/Snakefile
@@ -0,0 +1,5 @@
+rule:
+    output:
+        touch("test.out")
+    shell:
+        "exit 0"
diff --git a/tests/test_touch/expected-results/test.out b/tests/test_touch/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_url_include/Snakefile b/tests/test_url_include/Snakefile
new file mode 100644
index 0000000..cb72233
--- /dev/null
+++ b/tests/test_url_include/Snakefile
@@ -0,0 +1,6 @@
+
+
+include: "https://bitbucket.org/johanneskoester/snakemake/raw/master/tests/test05/Snakefile"
+
+rule:
+	input: "test.predictions"
diff --git a/tests/test_url_include/expected-results/test.1.inter b/tests/test_url_include/expected-results/test.1.inter
new file mode 100644
index 0000000..5cc1d91
--- /dev/null
+++ b/tests/test_url_include/expected-results/test.1.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.1.inter
diff --git a/tests/test_url_include/expected-results/test.1.inter2 b/tests/test_url_include/expected-results/test.1.inter2
new file mode 100644
index 0000000..5cc1d91
--- /dev/null
+++ b/tests/test_url_include/expected-results/test.1.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.1.inter
diff --git a/tests/test_url_include/expected-results/test.2.inter b/tests/test_url_include/expected-results/test.2.inter
new file mode 100644
index 0000000..8b02f7f
--- /dev/null
+++ b/tests/test_url_include/expected-results/test.2.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.2.inter
diff --git a/tests/test_url_include/expected-results/test.2.inter2 b/tests/test_url_include/expected-results/test.2.inter2
new file mode 100644
index 0000000..8b02f7f
--- /dev/null
+++ b/tests/test_url_include/expected-results/test.2.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.2.inter
diff --git a/tests/test_url_include/expected-results/test.3.inter b/tests/test_url_include/expected-results/test.3.inter
new file mode 100644
index 0000000..5144542
--- /dev/null
+++ b/tests/test_url_include/expected-results/test.3.inter
@@ -0,0 +1,2 @@
+testz0r
+Part test.3.inter
diff --git a/tests/test_url_include/expected-results/test.3.inter2 b/tests/test_url_include/expected-results/test.3.inter2
new file mode 100644
index 0000000..5144542
--- /dev/null
+++ b/tests/test_url_include/expected-results/test.3.inter2
@@ -0,0 +1,2 @@
+testz0r
+Part test.3.inter
diff --git a/tests/test_url_include/expected-results/test.predictions b/tests/test_url_include/expected-results/test.predictions
new file mode 100644
index 0000000..30731f5
--- /dev/null
+++ b/tests/test_url_include/expected-results/test.predictions
@@ -0,0 +1,6 @@
+testz0r
+Part test.1.inter
+testz0r
+Part test.2.inter
+testz0r
+Part test.3.inter
diff --git a/tests/test_url_include/test.in b/tests/test_url_include/test.in
new file mode 100644
index 0000000..ce66783
--- /dev/null
+++ b/tests/test_url_include/test.in
@@ -0,0 +1 @@
+testz0r
diff --git a/tests/test_wildcard_count_ambiguity/Snakefile b/tests/test_wildcard_count_ambiguity/Snakefile
new file mode 100644
index 0000000..35997e9
--- /dev/null
+++ b/tests/test_wildcard_count_ambiguity/Snakefile
@@ -0,0 +1,14 @@
+
+
+rule all:
+    input: "test.out"
+
+
+rule a:
+    output: "{prefix}.out"
+    shell: "touch {output}"
+
+
+rule b:
+    output: "test.out"
+    shell: "touch {output}"
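
Note: both rule a ({prefix}.out) and rule b (test.out) can produce
test.out; Snakemake resolves the ambiguity in favour of the rule whose
output matches with fewer wildcards, i.e. the more specific rule b,
rather than raising an AmbiguousRuleException. The same preference
could be stated explicitly with a ruleorder directive:

    # explicit equivalent of the wildcard-count heuristic tested here
    ruleorder: b > a
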
diff --git a/tests/test_wildcard_count_ambiguity/expected-results/test.out b/tests/test_wildcard_count_ambiguity/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_yaml_config/Snakefile b/tests/test_yaml_config/Snakefile
new file mode 100644
index 0000000..577a5c7
--- /dev/null
+++ b/tests/test_yaml_config/Snakefile
@@ -0,0 +1,9 @@
+
+
+configfile: "test.yaml"
+
+rule:
+    output:
+        config["outfile"]
+    shell:
+        "touch {output}"
diff --git a/tests/test_yaml_config/expected-results/test.out b/tests/test_yaml_config/expected-results/test.out
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_yaml_config/test.yaml b/tests/test_yaml_config/test.yaml
new file mode 100644
index 0000000..6cb9def
--- /dev/null
+++ b/tests/test_yaml_config/test.yaml
@@ -0,0 +1,2 @@
+---
+outfile: test.out
\ No newline at end of file
diff --git a/tests/tests.py b/tests/tests.py
new file mode 100644
index 0000000..5cc9be6
--- /dev/null
+++ b/tests/tests.py
@@ -0,0 +1,268 @@
+__author__ = "Tobias Marschall, Marcel Martin, Johannes Köster"
+__copyright__ = "Copyright 2015, Johannes Köster"
+__email__ = "koester at jimmy.harvard.edu"
+__license__ = "MIT"
+
+import sys
+import os
+from os.path import join
+from subprocess import call
+from tempfile import mkdtemp
+import hashlib
+import urllib.request
+from shutil import rmtree
+
+from snakemake import snakemake
+
+
+def dpath(path):
+    """get path to a data file (relative to the directory this
+	test lives in)"""
+    return join(os.path.dirname(__file__), path)
+
+
+SCRIPTPATH = dpath("../bin/snakemake")
+
+
+def md5sum(filename):
+    with open(filename, 'rb') as f:
+        return hashlib.md5(f.read()).hexdigest()
+
+
+def is_connected():
+    try:
+        urllib.request.urlopen("http://www.google.com", timeout=1)
+        return True
+    except urllib.request.URLError:
+        return False
+
+
+def run(path,
+        shouldfail=False,
+        needs_connection=False,
+        snakefile="Snakefile",
+        subpath=None,
+        check_md5=True, **params):
+    """
+    Test the Snakefile in path.
+    There must be a Snakefile in the path and a subdirectory named
+    expected-results.
+    """
+    if needs_connection and not is_connected():
+        print("Skipping test because of missing internet connection",
+              file=sys.stderr)
+        return False
+
+    results_dir = join(path, 'expected-results')
+    snakefile = join(path, snakefile)
+    assert os.path.exists(snakefile)
+    assert os.path.exists(results_dir) and os.path.isdir(
+        results_dir), '{} does not exist'.format(results_dir)
+    tmpdir = mkdtemp()
+    try:
+        config = {}
+        if subpath is not None:
+            # set up a working directory for the subworkflow and pass it in `config`
+            # for now, only one subworkflow is supported
+            assert os.path.exists(subpath) and os.path.isdir(
+                subpath), '{} does not exist'.format(subpath)
+            subworkdir = os.path.join(tmpdir, "subworkdir")
+            os.mkdir(subworkdir)
+            call('cp `find {} -maxdepth 1 -type f` {}'.format(subpath,
+                                                              subworkdir),
+                 shell=True)
+            config['subworkdir'] = subworkdir
+
+        call('cp `find {} -maxdepth 1 -type f` {}'.format(path, tmpdir),
+             shell=True)
+        success = snakemake(snakefile,
+                            cores=3,
+                            workdir=tmpdir,
+                            stats="stats.txt",
+                            snakemakepath=SCRIPTPATH,
+                            config=config, **params)
+        if shouldfail:
+            assert not success, "expected error on execution"
+        else:
+            assert success, "expected successful execution"
+            for resultfile in os.listdir(results_dir):
+                if resultfile == ".gitignore" or not os.path.isfile(
+                    os.path.join(results_dir, resultfile)):
+                    # this means tests cannot use directories as output files
+                    continue
+                targetfile = join(tmpdir, resultfile)
+                expectedfile = join(results_dir, resultfile)
+                assert os.path.exists(
+                    targetfile), 'expected file "{}" not produced'.format(
+                        resultfile)
+                if check_md5:
+                    assert md5sum(targetfile) == md5sum(
+                        expectedfile), 'wrong result produced for file "{}"'.format(
+                            resultfile)
+    finally:
+        rmtree(tmpdir)
+
+
+def test01():
+    run(dpath("test01"))
+
+
+def test02():
+    run(dpath("test02"))
+
+
+def test03():
+    run(dpath("test03"), targets=['test.out'])
+
+
+def test04():
+    run(dpath("test04"), targets=['test.out'])
+
+
+def test05():
+    run(dpath("test05"))
+
+
+def test06():
+    run(dpath("test06"), targets=['test.bla.out'])
+
+
+def test07():
+    run(dpath("test07"), targets=['test.out', 'test2.out'])
+
+
+def test08():
+    run(dpath("test08"), targets=['test.out', 'test2.out'])
+
+
+def test09():
+    run(dpath("test09"), shouldfail=True)
+
+
+def test10():
+    run(dpath("test10"))
+
+
+def test11():
+    run(dpath("test11"))
+
+
+def test12():
+    run(dpath("test12"))
+
+
+def test13():
+    run(dpath("test13"))
+
+
+def test14():
+    run(dpath("test14"), snakefile="Snakefile.nonstandard", cluster="./qsub")
+
+
+def test15():
+    run(dpath("test15"))
+
+
+def test_report():
+    run(dpath("test_report"), check_md5=False)
+
+
+def test_dynamic():
+    run(dpath("test_dynamic"))
+
+
+def test_params():
+    run(dpath("test_params"))
+
+
+def test_same_wildcard():
+    run(dpath("test_same_wildcard"))
+
+
+def test_conditional():
+    run(dpath("test_conditional"),
+        targets="test.out test.0.out test.1.out test.2.out".split())
+
+
+def test_shell():
+    run(dpath("test_shell"))
+
+
+def test_temp():
+    run(dpath("test_temp"),
+        cluster="./qsub",
+        targets="test.realigned.bam".split())
+
+
+def test_keyword_list():
+    run(dpath("test_keyword_list"))
+
+
+def test_subworkflows():
+    run(dpath("test_subworkflows"), subpath=dpath("test02"))
+
+
+def test_globwildcards():
+    run(dpath("test_globwildcards"))
+
+
+def test_local_import():
+    run(dpath("test_local_import"))
+
+
+def test_ruledeps():
+    run(dpath("test_ruledeps"))
+
+
+def test_persistent_dict():
+    run(dpath("test_persistent_dict"))
+
+
+def test_url_include():
+    run(dpath("test_url_include"), needs_connection=True)
+
+
+def test_touch():
+    run(dpath("test_touch"))
+
+
+def test_config():
+    run(dpath("test_config"))
+
+
+def test_benchmark():
+    run(dpath("test_benchmark"), check_md5=False)
+
+
+def test_temp_expand():
+    run(dpath("test_temp_expand"))
+
+
+def test_wildcard_count_ambiguity():
+    run(dpath("test_wildcard_count_ambiguity"))
+
+
+def test_cluster_dynamic():
+    run(dpath("test_cluster_dynamic"), cluster="./qsub")
+
+
+def test_dynamic_complex():
+    run(dpath("test_dynamic_complex"))
+
+
+def test_srcdir():
+    run(dpath("test_srcdir"))
+
+
+def test_multiple_includes():
+    run(dpath("test_multiple_includes"))
+
+
+def test_yaml_config():
+    run(dpath("test_yaml_config"))
+
+
+def test_cluster_sync():
+    run(dpath("test14"),
+        snakefile="Snakefile.nonstandard",
+        cluster_sync="./qsub")
