[Python-modules-commits] [apscheduler] 01/08: import apscheduler_2.1.2.orig.tar.gz

Brian May bam at moszumanska.debian.org
Thu Mar 24 01:44:51 UTC 2016


This is an automated email from the git hooks/post-receive script.

bam pushed a commit to branch master
in repository apscheduler.

commit 3c013787238f5af500e22d90bf48b897a2d1e909
Author: Brian May <brian at linuxpenguins.xyz>
Date:   Thu Mar 24 10:16:12 2016 +1100

    import apscheduler_2.1.2.orig.tar.gz
---
 APScheduler.egg-info/PKG-INFO             |  84 +++++
 APScheduler.egg-info/SOURCES.txt          |  53 +++
 APScheduler.egg-info/dependency_links.txt |   1 +
 APScheduler.egg-info/top_level.txt        |   1 +
 MANIFEST.in                               |   4 +
 PKG-INFO                                  |  84 +++++
 README.rst                                |  63 ++++
 apscheduler/__init__.py                   |   3 +
 apscheduler/events.py                     |  64 ++++
 apscheduler/job.py                        | 137 +++++++
 apscheduler/jobstores/__init__.py         |   0
 apscheduler/jobstores/base.py             |  25 ++
 apscheduler/jobstores/mongodb_store.py    |  84 +++++
 apscheduler/jobstores/ram_store.py        |  25 ++
 apscheduler/jobstores/redis_store.py      |  91 +++++
 apscheduler/jobstores/shelve_store.py     |  74 ++++
 apscheduler/jobstores/sqlalchemy_store.py |  91 +++++
 apscheduler/scheduler.py                  | 607 ++++++++++++++++++++++++++++++
 apscheduler/threadpool.py                 | 133 +++++++
 apscheduler/triggers/__init__.py          |   3 +
 apscheduler/triggers/cron/__init__.py     | 144 +++++++
 apscheduler/triggers/cron/expressions.py  | 194 ++++++++++
 apscheduler/triggers/cron/fields.py       | 100 +++++
 apscheduler/triggers/interval.py          |  39 ++
 apscheduler/triggers/simple.py            |  17 +
 apscheduler/util.py                       | 230 +++++++++++
 docs/conf.py                              | 196 ++++++++++
 docs/cronschedule.rst                     | 118 ++++++
 docs/dateschedule.rst                     |  39 ++
 docs/extending.rst                        |  46 +++
 docs/index.rst                            | 449 ++++++++++++++++++++++
 docs/intervalschedule.rst                 |  55 +++
 docs/migration.rst                        |  34 ++
 docs/modules/events.rst                   |  16 +
 docs/modules/job.rst                      |   9 +
 docs/modules/jobstores/mongodb.rst        |  10 +
 docs/modules/jobstores/ram.rst            |  10 +
 docs/modules/jobstores/redis.rst          |  10 +
 docs/modules/jobstores/shelve.rst         |  10 +
 docs/modules/jobstores/sqlalchemy.rst     |  10 +
 docs/modules/scheduler.rst                |  11 +
 examples/interval.py                      |  22 ++
 examples/persistent.py                    |  29 ++
 examples/threaded.py                      |  26 ++
 setup.cfg                                 |  15 +
 setup.py                                  |  45 +++
 tests/testexpressions.py                  | 177 +++++++++
 tests/testintegration.py                  | 164 ++++++++
 tests/testjob.py                          | 164 ++++++++
 tests/testjobstores.py                    | 223 +++++++++++
 tests/testscheduler.py                    | 458 ++++++++++++++++++++++
 tests/testthreadpool.py                   |  54 +++
 tests/testtriggers.py                     | 207 ++++++++++
 tests/testutil.py                         | 217 +++++++++++
 54 files changed, 5175 insertions(+)

diff --git a/APScheduler.egg-info/PKG-INFO b/APScheduler.egg-info/PKG-INFO
new file mode 100644
index 0000000..3a9b409
--- /dev/null
+++ b/APScheduler.egg-info/PKG-INFO
@@ -0,0 +1,84 @@
+Metadata-Version: 1.0
+Name: APScheduler
+Version: 2.1.2
+Summary: In-process task scheduler with Cron-like capabilities
+Home-page: http://pypi.python.org/pypi/APScheduler/
+Author: Alex Gronholm
+Author-email: apscheduler at nextday.fi
+License: MIT
+Description: Advanced Python Scheduler (APScheduler) is a light but powerful in-process task
+        scheduler that lets you schedule jobs (functions or any python callables) to be
+        executed at times of your choosing.
+        
+        This can be a far better alternative to externally run cron scripts for
+        long-running applications (e.g. web applications), as it is platform neutral
+        and can directly access your application's variables and functions.
+        
+        The development of APScheduler was heavily influenced by the `Quartz
+        <http://www.quartz-scheduler.org/>`_ task scheduler written in Java.
+        APScheduler provides most of the major features that Quartz does, but it also
+        provides features not present in Quartz (such as multiple job stores).
+        
+        
+        Features
+        ========
+        
+        * No (hard) external dependencies
+        * Thread-safe API
+        * Excellent test coverage (tested on CPython 2.5 - 2.7, 3.2 - 3.3, Jython 2.5.3, PyPy 2.2)
+        * Configurable scheduling mechanisms (triggers):
+        
+          * Cron-like scheduling
+          * Delayed scheduling of single run jobs (like the UNIX "at" command)
+          * Interval-based (run a job at specified time intervals)
+        * Multiple, simultaneously active job stores:
+        
+          * RAM 
+          * File-based simple database (shelve)
+          * `SQLAlchemy <http://www.sqlalchemy.org/>`_ (any supported RDBMS works)
+          * `MongoDB <http://www.mongodb.org/>`_
+          * `Redis <http://redis.io/>`_
+        
+        
+        Documentation
+        =============
+        
+        Documentation can be found `here <http://readthedocs.org/docs/apscheduler/en/latest/>`_.
+        
+        
+        Source
+        ======
+        
+        The source can be browsed at `Bitbucket
+        <http://bitbucket.org/agronholm/apscheduler/src/>`_.
+        
+        
+        Reporting bugs
+        ==============
+        
+        A `bug tracker <http://bitbucket.org/agronholm/apscheduler/issues/>`_
+        is provided by bitbucket.org.
+        
+        
+        Getting help
+        ============
+        
+        If you have problems or other questions, you can either:
+        
+        * Ask on the `APScheduler Google group
+          <http://groups.google.com/group/apscheduler>`_, or
+        * Ask on the ``#apscheduler`` channel on
+          `Freenode IRC <http://freenode.net/irc_servers.shtml>`_
+        
+Keywords: scheduling cron
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
diff --git a/APScheduler.egg-info/SOURCES.txt b/APScheduler.egg-info/SOURCES.txt
new file mode 100644
index 0000000..25869a1
--- /dev/null
+++ b/APScheduler.egg-info/SOURCES.txt
@@ -0,0 +1,53 @@
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+APScheduler.egg-info/PKG-INFO
+APScheduler.egg-info/SOURCES.txt
+APScheduler.egg-info/dependency_links.txt
+APScheduler.egg-info/top_level.txt
+apscheduler/__init__.py
+apscheduler/events.py
+apscheduler/job.py
+apscheduler/scheduler.py
+apscheduler/threadpool.py
+apscheduler/util.py
+apscheduler/jobstores/__init__.py
+apscheduler/jobstores/base.py
+apscheduler/jobstores/mongodb_store.py
+apscheduler/jobstores/ram_store.py
+apscheduler/jobstores/redis_store.py
+apscheduler/jobstores/shelve_store.py
+apscheduler/jobstores/sqlalchemy_store.py
+apscheduler/triggers/__init__.py
+apscheduler/triggers/interval.py
+apscheduler/triggers/simple.py
+apscheduler/triggers/cron/__init__.py
+apscheduler/triggers/cron/expressions.py
+apscheduler/triggers/cron/fields.py
+docs/conf.py
+docs/cronschedule.rst
+docs/dateschedule.rst
+docs/extending.rst
+docs/index.rst
+docs/intervalschedule.rst
+docs/migration.rst
+docs/modules/events.rst
+docs/modules/job.rst
+docs/modules/scheduler.rst
+docs/modules/jobstores/mongodb.rst
+docs/modules/jobstores/ram.rst
+docs/modules/jobstores/redis.rst
+docs/modules/jobstores/shelve.rst
+docs/modules/jobstores/sqlalchemy.rst
+examples/interval.py
+examples/persistent.py
+examples/threaded.py
+tests/testexpressions.py
+tests/testintegration.py
+tests/testjob.py
+tests/testjobstores.py
+tests/testscheduler.py
+tests/testthreadpool.py
+tests/testtriggers.py
+tests/testutil.py
\ No newline at end of file
diff --git a/APScheduler.egg-info/dependency_links.txt b/APScheduler.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/APScheduler.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/APScheduler.egg-info/top_level.txt b/APScheduler.egg-info/top_level.txt
new file mode 100644
index 0000000..d31d10d
--- /dev/null
+++ b/APScheduler.egg-info/top_level.txt
@@ -0,0 +1 @@
+apscheduler
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..31b7e02
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+include README.rst
+recursive-include tests *.py
+recursive-include examples *.py
+recursive-include docs *.rst *.py
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..3a9b409
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,84 @@
+Metadata-Version: 1.0
+Name: APScheduler
+Version: 2.1.2
+Summary: In-process task scheduler with Cron-like capabilities
+Home-page: http://pypi.python.org/pypi/APScheduler/
+Author: Alex Gronholm
+Author-email: apscheduler at nextday.fi
+License: MIT
+Description: Advanced Python Scheduler (APScheduler) is a light but powerful in-process task
+        scheduler that lets you schedule jobs (functions or any python callables) to be
+        executed at times of your choosing.
+        
+        This can be a far better alternative to externally run cron scripts for
+        long-running applications (e.g. web applications), as it is platform neutral
+        and can directly access your application's variables and functions.
+        
+        The development of APScheduler was heavily influenced by the `Quartz
+        <http://www.quartz-scheduler.org/>`_ task scheduler written in Java.
+        APScheduler provides most of the major features that Quartz does, but it also
+        provides features not present in Quartz (such as multiple job stores).
+        
+        
+        Features
+        ========
+        
+        * No (hard) external dependencies
+        * Thread-safe API
+        * Excellent test coverage (tested on CPython 2.5 - 2.7, 3.2 - 3.3, Jython 2.5.3, PyPy 2.2)
+        * Configurable scheduling mechanisms (triggers):
+        
+          * Cron-like scheduling
+          * Delayed scheduling of single run jobs (like the UNIX "at" command)
+          * Interval-based (run a job at specified time intervals)
+        * Multiple, simultaneously active job stores:
+        
+          * RAM 
+          * File-based simple database (shelve)
+          * `SQLAlchemy <http://www.sqlalchemy.org/>`_ (any supported RDBMS works)
+          * `MongoDB <http://www.mongodb.org/>`_
+          * `Redis <http://redis.io/>`_
+        
+        
+        Documentation
+        =============
+        
+        Documentation can be found `here <http://readthedocs.org/docs/apscheduler/en/latest/>`_.
+        
+        
+        Source
+        ======
+        
+        The source can be browsed at `Bitbucket
+        <http://bitbucket.org/agronholm/apscheduler/src/>`_.
+        
+        
+        Reporting bugs
+        ==============
+        
+        A `bug tracker <http://bitbucket.org/agronholm/apscheduler/issues/>`_
+        is provided by bitbucket.org.
+        
+        
+        Getting help
+        ============
+        
+        If you have problems or other questions, you can either:
+        
+        * Ask on the `APScheduler Google group
+          <http://groups.google.com/group/apscheduler>`_, or
+        * Ask on the ``#apscheduler`` channel on
+          `Freenode IRC <http://freenode.net/irc_servers.shtml>`_
+        
+Keywords: scheduling cron
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..871ba9c
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,63 @@
+Advanced Python Scheduler (APScheduler) is a light but powerful in-process task
+scheduler that lets you schedule jobs (functions or any python callables) to be
+executed at times of your choosing.
+
+This can be a far better alternative to externally run cron scripts for
+long-running applications (e.g. web applications), as it is platform neutral
+and can directly access your application's variables and functions.
+
+The development of APScheduler was heavily influenced by the `Quartz
+<http://www.quartz-scheduler.org/>`_ task scheduler written in Java.
+APScheduler provides most of the major features that Quartz does, but it also
+provides features not present in Quartz (such as multiple job stores).
+
+
+Features
+========
+
+* No (hard) external dependencies
+* Thread-safe API
+* Excellent test coverage (tested on CPython 2.5 - 2.7, 3.2 - 3.3, Jython 2.5.3, PyPy 2.2)
+* Configurable scheduling mechanisms (triggers):
+
+  * Cron-like scheduling
+  * Delayed scheduling of single run jobs (like the UNIX "at" command)
+  * Interval-based (run a job at specified time intervals)
+* Multiple, simultaneously active job stores:
+
+  * RAM 
+  * File-based simple database (shelve)
+  * `SQLAlchemy <http://www.sqlalchemy.org/>`_ (any supported RDBMS works)
+  * `MongoDB <http://www.mongodb.org/>`_
+  * `Redis <http://redis.io/>`_
+
+
+Documentation
+=============
+
+Documentation can be found `here <http://readthedocs.org/docs/apscheduler/en/latest/>`_.
+
+
+Source
+======
+
+The source can be browsed at `Bitbucket
+<http://bitbucket.org/agronholm/apscheduler/src/>`_.
+
+
+Reporting bugs
+==============
+
+A `bug tracker <http://bitbucket.org/agronholm/apscheduler/issues/>`_
+is provided by bitbucket.org.
+
+
+Getting help
+============
+
+If you have problems or other questions, you can either:
+
+* Ask on the `APScheduler Google group
+  <http://groups.google.com/group/apscheduler>`_, or
+* Ask on the ``#apscheduler`` channel on
+  `Freenode IRC <http://freenode.net/irc_servers.shtml>`_
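
For context, a minimal usage sketch of the 2.x API summarized in this README. The
method names (Scheduler, add_interval_job, add_cron_job) follow the upstream
documentation and are not part of this diff, so treat them as assumptions:

from datetime import datetime
import time

from apscheduler.scheduler import Scheduler

def tick():
    print('Tick! The time is %s' % datetime.now())

sched = Scheduler()
sched.add_interval_job(tick, seconds=10)      # run every 10 seconds
sched.add_cron_job(tick, hour=0, minute=30)   # run every day at 00:30
sched.start()                                 # runs in a background thread

try:
    # Keep the main thread alive; the scheduler thread is daemonic by default.
    while True:
        time.sleep(60)
except (KeyboardInterrupt, SystemExit):
    sched.shutdown()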
diff --git a/apscheduler/__init__.py b/apscheduler/__init__.py
new file mode 100644
index 0000000..71cc53d
--- /dev/null
+++ b/apscheduler/__init__.py
@@ -0,0 +1,3 @@
+version_info = (2, 1, 2)
+version = '.'.join(str(n) for n in version_info[:3])
+release = '.'.join(str(n) for n in version_info)
diff --git a/apscheduler/events.py b/apscheduler/events.py
new file mode 100644
index 0000000..80bde8e
--- /dev/null
+++ b/apscheduler/events.py
@@ -0,0 +1,64 @@
+__all__ = ('EVENT_SCHEDULER_START', 'EVENT_SCHEDULER_SHUTDOWN',
+           'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED',
+           'EVENT_JOBSTORE_JOB_ADDED', 'EVENT_JOBSTORE_JOB_REMOVED',
+           'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED',
+           'EVENT_ALL', 'SchedulerEvent', 'JobStoreEvent', 'JobEvent')
+
+
+EVENT_SCHEDULER_START = 1        # The scheduler was started
+EVENT_SCHEDULER_SHUTDOWN = 2     # The scheduler was shut down
+EVENT_JOBSTORE_ADDED = 4         # A job store was added to the scheduler
+EVENT_JOBSTORE_REMOVED = 8       # A job store was removed from the scheduler
+EVENT_JOBSTORE_JOB_ADDED = 16    # A job was added to a job store
+EVENT_JOBSTORE_JOB_REMOVED = 32  # A job was removed from a job store
+EVENT_JOB_EXECUTED = 64          # A job was executed successfully
+EVENT_JOB_ERROR = 128            # A job raised an exception during execution
+EVENT_JOB_MISSED = 256           # A job's execution was missed
+EVENT_ALL = (EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN |
+             EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED |
+             EVENT_JOBSTORE_JOB_ADDED | EVENT_JOBSTORE_JOB_REMOVED |
+             EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)
+
+
+class SchedulerEvent(object):
+    """
+    An event that concerns the scheduler itself.
+
+    :var code: the type code of this event
+    """
+    def __init__(self, code):
+        self.code = code
+
+
+class JobStoreEvent(SchedulerEvent):
+    """
+    An event that concerns job stores.
+
+    :var alias: the alias of the job store involved
+    :var job: the new job if a job was added
+    """
+    def __init__(self, code, alias, job=None):
+        SchedulerEvent.__init__(self, code)
+        self.alias = alias
+        if job:
+            self.job = job
+
+
+class JobEvent(SchedulerEvent):
+    """
+    An event that concerns the execution of individual jobs.
+
+    :var job: the job instance in question
+    :var scheduled_run_time: the time when the job was scheduled to be run
+    :var retval: the return value of the successfully executed job
+    :var exception: the exception raised by the job
+    :var traceback: the traceback object associated with the exception
+    """
+    def __init__(self, code, job, scheduled_run_time, retval=None,
+                 exception=None, traceback=None):
+        SchedulerEvent.__init__(self, code)
+        self.job = job
+        self.scheduled_run_time = scheduled_run_time
+        self.retval = retval
+        self.exception = exception
+        self.traceback = traceback
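
The event codes above are meant to be OR'ed together into a listener mask. A
hedged sketch of a listener, assuming Scheduler.add_listener from the 2.x API
(the scheduler itself is not shown in this excerpt):

from apscheduler.scheduler import Scheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR

def job_listener(event):
    # JobEvent carries .job and .retval on success, .exception on failure.
    if event.exception:
        print('Job "%s" raised %r' % (event.job.name, event.exception))
    else:
        print('Job "%s" returned %r' % (event.job.name, event.retval))

sched = Scheduler()
# The mask is a bitwise OR of the EVENT_* codes the listener cares about.
sched.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)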
diff --git a/apscheduler/job.py b/apscheduler/job.py
new file mode 100644
index 0000000..cfc09a2
--- /dev/null
+++ b/apscheduler/job.py
@@ -0,0 +1,137 @@
+"""
+Jobs represent scheduled tasks.
+"""
+
+from threading import Lock
+from datetime import timedelta
+
+from apscheduler.util import to_unicode, ref_to_obj, get_callable_name,\
+    obj_to_ref
+
+
+class MaxInstancesReachedError(Exception):
+    pass
+
+
+class Job(object):
+    """
+    Encapsulates the actual Job along with its metadata. Job instances
+    are created by the scheduler when adding jobs, and should not be
+    directly instantiated. These options can be set when adding jobs
+    to the scheduler (see :ref:`job_options`).
+
+    :var trigger: trigger that determines the execution times
+    :var func: callable to call when the trigger is triggered
+    :var args: list of positional arguments to call func with
+    :var kwargs: dict of keyword arguments to call func with
+    :var name: name of the job
+    :var misfire_grace_time: seconds after the designated run time that
+        the job is still allowed to be run
+    :var coalesce: run once instead of many times if the scheduler determines
+        that the job should be run more than once in succession
+    :var max_runs: maximum number of times this job is allowed to be
+        triggered
+    :var max_instances: maximum number of concurrently running
+        instances allowed for this job
+    :var runs: number of times this job has been triggered
+    :var instances: number of concurrently running instances of this job
+    """
+    id = None
+    next_run_time = None
+
+    def __init__(self, trigger, func, args, kwargs, misfire_grace_time,
+                 coalesce, name=None, max_runs=None, max_instances=1):
+        if not trigger:
+            raise ValueError('The trigger must not be None')
+        if not hasattr(func, '__call__'):
+            raise TypeError('func must be callable')
+        if not hasattr(args, '__getitem__'):
+            raise TypeError('args must be a list-like object')
+        if not hasattr(kwargs, '__getitem__'):
+            raise TypeError('kwargs must be a dict-like object')
+        if misfire_grace_time <= 0:
+            raise ValueError('misfire_grace_time must be a positive value')
+        if max_runs is not None and max_runs <= 0:
+            raise ValueError('max_runs must be a positive value')
+        if max_instances <= 0:
+            raise ValueError('max_instances must be a positive value')
+
+        self._lock = Lock()
+
+        self.trigger = trigger
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+        self.name = to_unicode(name or get_callable_name(func))
+        self.misfire_grace_time = misfire_grace_time
+        self.coalesce = coalesce
+        self.max_runs = max_runs
+        self.max_instances = max_instances
+        self.runs = 0
+        self.instances = 0
+
+    def compute_next_run_time(self, now):
+        if self.runs == self.max_runs:
+            self.next_run_time = None
+        else:
+            self.next_run_time = self.trigger.get_next_fire_time(now)
+
+        return self.next_run_time
+
+    def get_run_times(self, now):
+        """
+        Computes the scheduled run times between ``next_run_time`` and ``now``.
+        """
+        run_times = []
+        run_time = self.next_run_time
+        increment = timedelta(microseconds=1)
+        while ((not self.max_runs or self.runs < self.max_runs) and
+               run_time and run_time <= now):
+            run_times.append(run_time)
+            run_time = self.trigger.get_next_fire_time(run_time + increment)
+
+        return run_times
+
+    def add_instance(self):
+        self._lock.acquire()
+        try:
+            if self.instances == self.max_instances:
+                raise MaxInstancesReachedError
+            self.instances += 1
+        finally:
+            self._lock.release()
+
+    def remove_instance(self):
+        self._lock.acquire()
+        try:
+            assert self.instances > 0, 'Already at 0 instances'
+            self.instances -= 1
+        finally:
+            self._lock.release()
+
+    def __getstate__(self):
+        # Prevents the unwanted pickling of transient or unpicklable variables
+        state = self.__dict__.copy()
+        state.pop('instances', None)
+        state.pop('func', None)
+        state.pop('_lock', None)
+        state['func_ref'] = obj_to_ref(self.func)
+        return state
+
+    def __setstate__(self, state):
+        state['instances'] = 0
+        state['func'] = ref_to_obj(state.pop('func_ref'))
+        state['_lock'] = Lock()
+        self.__dict__ = state
+
+    def __eq__(self, other):
+        if isinstance(other, Job):
+            return self.id is not None and other.id == self.id or self is other
+        return NotImplemented
+
+    def __repr__(self):
+        return '<Job (name=%s, trigger=%s)>' % (self.name, repr(self.trigger))
+
+    def __str__(self):
+        return '%s (trigger: %s, next run at: %s)' % (
+            self.name, str(self.trigger), str(self.next_run_time))
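
For context, a small sketch of how compute_next_run_time and get_run_times
interact. The ListTrigger class below is purely illustrative; the real triggers
live under apscheduler/triggers and are not shown in this excerpt:

from datetime import datetime, timedelta

from apscheduler.job import Job

class ListTrigger(object):
    """Stand-in trigger that fires at a fixed list of datetimes."""
    def __init__(self, fire_times):
        self.fire_times = sorted(fire_times)

    def get_next_fire_time(self, start_date):
        for fire_time in self.fire_times:
            if fire_time >= start_date:
                return fire_time
        return None  # no more fire times -> the job is finished

start = datetime(2016, 3, 24, 10, 0, 0)
fire_times = [start + timedelta(minutes=m) for m in range(5)]
job = Job(ListTrigger(fire_times), lambda: None, args=[], kwargs={},
          misfire_grace_time=60, coalesce=True)
job.compute_next_run_time(start)

# If the scheduler wakes up three minutes late, four run times are due
# (10:00-10:03). With coalesce=True the scheduler runs the job once for the
# whole batch; with coalesce=False it runs once per missed time, subject to
# misfire_grace_time.
print(job.get_run_times(start + timedelta(minutes=3)))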
diff --git a/apscheduler/jobstores/__init__.py b/apscheduler/jobstores/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/apscheduler/jobstores/base.py b/apscheduler/jobstores/base.py
new file mode 100644
index 0000000..f0a16dd
--- /dev/null
+++ b/apscheduler/jobstores/base.py
@@ -0,0 +1,25 @@
+"""
+Abstract base class that provides the interface needed by all job stores.
+Job store methods are also documented here.
+"""
+
+
+class JobStore(object):
+    def add_job(self, job):
+        """Adds the given job from this store."""
+        raise NotImplementedError
+
+    def update_job(self, job):
+        """Persists the running state of the given job."""
+        raise NotImplementedError
+
+    def remove_job(self, job):
+        """Removes the given jobs from this store."""
+        raise NotImplementedError
+
+    def load_jobs(self):
+        """Loads jobs from this store into memory."""
+        raise NotImplementedError
+
+    def close(self):
+        """Frees any resources still bound to this job store."""
diff --git a/apscheduler/jobstores/mongodb_store.py b/apscheduler/jobstores/mongodb_store.py
new file mode 100644
index 0000000..3f522c2
--- /dev/null
+++ b/apscheduler/jobstores/mongodb_store.py
@@ -0,0 +1,84 @@
+"""
+Stores jobs in a MongoDB database.
+"""
+import logging
+
+from apscheduler.jobstores.base import JobStore
+from apscheduler.job import Job
+
+try:
+    import cPickle as pickle
+except ImportError:  # pragma: nocover
+    import pickle
+
+try:
+    from bson.binary import Binary
+    from pymongo.connection import Connection
+except ImportError:  # pragma: nocover
+    raise ImportError('MongoDBJobStore requires PyMongo installed')
+
+logger = logging.getLogger(__name__)
+
+
+class MongoDBJobStore(JobStore):
+    def __init__(self, database='apscheduler', collection='jobs',
+                 connection=None, pickle_protocol=pickle.HIGHEST_PROTOCOL,
+                 **connect_args):
+        self.jobs = []
+        self.pickle_protocol = pickle_protocol
+
+        if not database:
+            raise ValueError('The "database" parameter must not be empty')
+        if not collection:
+            raise ValueError('The "collection" parameter must not be empty')
+
+        if connection:
+            self.connection = connection
+        else:
+            self.connection = Connection(**connect_args)
+
+        self.collection = self.connection[database][collection]
+
+    def add_job(self, job):
+        job_dict = job.__getstate__()
+        job_dict['trigger'] = Binary(pickle.dumps(job.trigger,
+                                                  self.pickle_protocol))
+        job_dict['args'] = Binary(pickle.dumps(job.args,
+                                               self.pickle_protocol))
+        job_dict['kwargs'] = Binary(pickle.dumps(job.kwargs,
+                                                 self.pickle_protocol))
+        job.id = self.collection.insert(job_dict)
+        self.jobs.append(job)
+
+    def remove_job(self, job):
+        self.collection.remove(job.id)
+        self.jobs.remove(job)
+
+    def load_jobs(self):
+        jobs = []
+        for job_dict in self.collection.find():
+            try:
+                job = Job.__new__(Job)
+                job_dict['id'] = job_dict.pop('_id')
+                job_dict['trigger'] = pickle.loads(job_dict['trigger'])
+                job_dict['args'] = pickle.loads(job_dict['args'])
+                job_dict['kwargs'] = pickle.loads(job_dict['kwargs'])
+                job.__setstate__(job_dict)
+                jobs.append(job)
+            except Exception:
+                job_name = job_dict.get('name', '(unknown)')
+                logger.exception('Unable to restore job "%s"', job_name)
+        self.jobs = jobs
+
+    def update_job(self, job):
+        spec = {'_id': job.id}
+        document = {'$set': {'next_run_time': job.next_run_time},
+                    '$inc': {'runs': 1}}
+        self.collection.update(spec, document)
+
+    def close(self):
+        self.connection.disconnect()
+
+    def __repr__(self):
+        connection = self.collection.database.connection
+        return '<%s (connection=%s)>' % (self.__class__.__name__, connection)
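
A hedged sketch of plugging this store into a scheduler. The constructor
arguments are the ones visible above; Scheduler.add_jobstore is assumed from
the 2.x API:

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.mongodb_store import MongoDBJobStore

sched = Scheduler()
# Defaults to the 'apscheduler' database and 'jobs' collection; any extra
# keyword arguments are passed straight to pymongo's Connection.
sched.add_jobstore(MongoDBJobStore(database='apscheduler', collection='jobs'),
                   'mongo')
sched.start()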
diff --git a/apscheduler/jobstores/ram_store.py b/apscheduler/jobstores/ram_store.py
new file mode 100644
index 0000000..60458fb
--- /dev/null
+++ b/apscheduler/jobstores/ram_store.py
@@ -0,0 +1,25 @@
+"""
+Stores jobs in an array in RAM. Provides no persistence support.
+"""
+
+from apscheduler.jobstores.base import JobStore
+
+
+class RAMJobStore(JobStore):
+    def __init__(self):
+        self.jobs = []
+
+    def add_job(self, job):
+        self.jobs.append(job)
+
+    def update_job(self, job):
+        pass
+
+    def remove_job(self, job):
+        self.jobs.remove(job)
+
+    def load_jobs(self):
+        pass
+
+    def __repr__(self):
+        return '<%s>' % (self.__class__.__name__)
diff --git a/apscheduler/jobstores/redis_store.py b/apscheduler/jobstores/redis_store.py
new file mode 100644
index 0000000..5eabf4b
--- /dev/null
+++ b/apscheduler/jobstores/redis_store.py
@@ -0,0 +1,91 @@
+"""
+Stores jobs in a Redis database.
+"""
+from uuid import uuid4
+from datetime import datetime
+import logging
+
+from apscheduler.jobstores.base import JobStore
+from apscheduler.job import Job
+
+try:
+    import cPickle as pickle
+except ImportError:  # pragma: nocover
+    import pickle
+
+try:
+    from redis import StrictRedis
+except ImportError:  # pragma: nocover
+    raise ImportError('RedisJobStore requires redis installed')
+
+try:
+    long = long
+except NameError:
+    long = int
+
+logger = logging.getLogger(__name__)
+
+
+class RedisJobStore(JobStore):
+    def __init__(self, db=0, key_prefix='jobs.',
+                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
+        self.jobs = []
+        self.pickle_protocol = pickle_protocol
+        self.key_prefix = key_prefix
+
+        if db is None:
+            raise ValueError('The "db" parameter must not be empty')
+        if not key_prefix:
+            raise ValueError('The "key_prefix" parameter must not be empty')
+
+        self.redis = StrictRedis(db=db, **connect_args)
+
+    def add_job(self, job):
+        job.id = str(uuid4())
+        job_state = job.__getstate__()
+        job_dict = {
+            'job_state': pickle.dumps(job_state, self.pickle_protocol),
+            'runs': '0',
+            'next_run_time': job_state.pop('next_run_time').isoformat()}
+        self.redis.hmset(self.key_prefix + job.id, job_dict)
+        self.jobs.append(job)
+
+    def remove_job(self, job):
+        self.redis.delete(self.key_prefix + job.id)
+        self.jobs.remove(job)
+
+    def load_jobs(self):
+        jobs = []
+        keys = self.redis.keys(self.key_prefix + '*')
+        pipeline = self.redis.pipeline()
+        for key in keys:
+            pipeline.hgetall(key)
+        results = pipeline.execute()
+
+        for job_dict in results:
+            job_state = {}
+            try:
+                job = Job.__new__(Job)
+                job_state = pickle.loads(job_dict['job_state'.encode()])
+                job_state['runs'] = long(job_dict['runs'.encode()])
+                dateval = job_dict['next_run_time'.encode()].decode()
+                job_state['next_run_time'] = datetime.strptime(
+                    dateval, '%Y-%m-%dT%H:%M:%S')
+                job.__setstate__(job_state)
+                jobs.append(job)
+            except Exception:
+                job_name = job_state.get('name', '(unknown)')
+                logger.exception('Unable to restore job "%s"', job_name)
+        self.jobs = jobs
+
+    def update_job(self, job):
+        attrs = {
+            'next_run_time': job.next_run_time.isoformat(),
+            'runs': job.runs}
+        self.redis.hmset(self.key_prefix + job.id, attrs)
+
+    def close(self):
+        self.redis.connection_pool.disconnect()
+
+    def __repr__(self):
+        return '<%s>' % self.__class__.__name__
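
Likewise for the Redis store, which keeps each job as a hash under
key_prefix + job.id (fields job_state, runs and next_run_time, per the code
above). Scheduler.add_jobstore is again assumed from the 2.x API:

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.redis_store import RedisJobStore

sched = Scheduler()
# db and key_prefix match the constructor defaults shown above; any extra
# keyword arguments go to StrictRedis.
sched.add_jobstore(RedisJobStore(db=0, key_prefix='jobs.'), 'redis')
sched.start()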
diff --git a/apscheduler/jobstores/shelve_store.py b/apscheduler/jobstores/shelve_store.py
new file mode 100644
index 0000000..d1be58f
--- /dev/null
+++ b/apscheduler/jobstores/shelve_store.py
@@ -0,0 +1,74 @@
+"""
+Stores jobs in a file governed by the :mod:`shelve` module.
+"""
+
+import shelve
+import pickle
+import random
+import logging
+
+from apscheduler.jobstores.base import JobStore
+from apscheduler.job import Job
+from apscheduler.util import itervalues
+
+logger = logging.getLogger(__name__)
+
+
+class ShelveJobStore(JobStore):
+    MAX_ID = 1000000
+
+    def __init__(self, path, pickle_protocol=pickle.HIGHEST_PROTOCOL):
+        self.jobs = []
+        self.path = path
+        self.pickle_protocol = pickle_protocol
+        self._open_store()
+
+    def _open_store(self):
+        self.store = shelve.open(self.path, 'c', self.pickle_protocol)
+
+    def _generate_id(self):
+        while True:
+            id = str(random.randint(1, self.MAX_ID))
+            if id not in self.store:
+                return id
+
+    def add_job(self, job):
+        job.id = self._generate_id()
+        self.store[job.id] = job.__getstate__()
+        self.store.close()
+        self._open_store()
+        self.jobs.append(job)
+
+    def update_job(self, job):
+        job_dict = self.store[job.id]
+        job_dict['next_run_time'] = job.next_run_time
+        job_dict['runs'] = job.runs
+        self.store[job.id] = job_dict
+        self.store.close()
+        self._open_store()
+
+    def remove_job(self, job):
+        del self.store[job.id]
+        self.store.close()
+        self._open_store()
+        self.jobs.remove(job)
+
+    def load_jobs(self):
+        jobs = []
+        for job_dict in itervalues(self.store):
+            try:
+                job = Job.__new__(Job)
+                job.__setstate__(job_dict)
+                jobs.append(job)
+            except Exception:
+                job_name = job_dict.get('name', '(unknown)')
+                logger.exception('Unable to restore job "%s"', job_name)
+
+        self.jobs = jobs
+
+    def close(self):
+        self.store.close()
+
+    def __repr__(self):
+        return '<%s (path=%s)>' % (self.__class__.__name__, self.path)
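
A hedged sketch of file-based persistence with this store, roughly along the
lines of examples/persistent.py from the diffstat (whose actual contents are
not shown here); add_date_job and the jobstore option are assumed from the
2.x API:

from datetime import datetime, timedelta

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore

def alarm(time):
    print('Alarm! This alarm was scheduled at %s.' % time)

sched = Scheduler()
sched.add_jobstore(ShelveJobStore('/tmp/example.db'), 'shelve')
# Jobs added to the 'shelve' store survive scheduler restarts.
sched.add_date_job(alarm, datetime.now() + timedelta(minutes=1),
                   args=[datetime.now()], jobstore='shelve')
sched.start()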
diff --git a/apscheduler/jobstores/sqlalchemy_store.py b/apscheduler/jobstores/sqlalchemy_store.py
new file mode 100644
index 0000000..5b64a35
--- /dev/null
+++ b/apscheduler/jobstores/sqlalchemy_store.py
@@ -0,0 +1,91 @@
+"""
+Stores jobs in a database table using SQLAlchemy.
+"""
+import pickle
+import logging
+
+import sqlalchemy
+
+from apscheduler.jobstores.base import JobStore
+from apscheduler.job import Job
+
+try:
+    from sqlalchemy import *
+except ImportError:  # pragma: nocover
+    raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
+
+logger = logging.getLogger(__name__)
+
+
+class SQLAlchemyJobStore(JobStore):
+    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs',
+                 metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL):
+        self.jobs = []
+        self.pickle_protocol = pickle_protocol
+
+        if engine:
+            self.engine = engine
+        elif url:
+            self.engine = create_engine(url)
+        else:
+            raise ValueError('Need either "engine" or "url" defined')
+
+        if sqlalchemy.__version__ < '0.7':
+            pickle_coltype = PickleType(pickle_protocol, mutable=False)
+        else:
+            pickle_coltype = PickleType(pickle_protocol)
+        self.jobs_t = Table(
+            tablename, metadata or MetaData(),
+            Column('id', Integer,
+                   Sequence(tablename + '_id_seq', optional=True),
+                   primary_key=True),
+            Column('trigger', pickle_coltype, nullable=False),
+            Column('func_ref', String(1024), nullable=False),
+            Column('args', pickle_coltype, nullable=False),
+            Column('kwargs', pickle_coltype, nullable=False),
... 4559 lines suppressed ...
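The SQLAlchemy store whose definition starts above (the remainder of the diff
is suppressed) accepts either a ready-made engine or a database URL. A hedged
usage sketch with an illustrative SQLite path; Scheduler.add_jobstore is
assumed from the 2.x API:

from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore

sched = Scheduler()
sched.add_jobstore(SQLAlchemyJobStore(url='sqlite:////tmp/jobs.sqlite'), 'db')
sched.start()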

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/apscheduler.git


