[Pkg-privacy-commits] [onionbalance] 21/117: Major refactor to restructure and tidy the codebase

Donncha O'Cearbhaill donncha-guest at moszumanska.debian.org
Wed Dec 16 23:18:42 UTC 2015


This is an automated email from the git hooks/post-receive script.

donncha-guest pushed a commit to branch debian/sid
in repository onionbalance.

commit c788e2717a6477818c0cd2661536ea21b658e502
Author: Donncha O'Cearbhaill <donncha at donncha.is>
Date:   Wed Jun 17 15:18:21 2015 +0100

    Major refactor to restructure and tidy the codebase
---
 MANIFEST.in                                     |   2 +
 config.yaml.example => data/config.yaml.example |   0
 data/torrc.local                                |  13 ++
 docs/conf.py                                    |   2 +-
 onionbalance/__main__.py                        |   4 +-
 onionbalance/config.py                          |   1 +
 onionbalance/descriptor.py                      | 100 +++++---
 onionbalance/eventhandler.py                    |  39 ++--
 onionbalance/hiddenservice.py                   | 293 ------------------------
 onionbalance/instance.py                        | 107 +++++++++
 onionbalance/manager.py                         | 129 ++++++-----
 onionbalance/service.py                         | 209 +++++++++++++++++
 onionbalance/util.py                            |  15 +-
 requirements.txt                                |   2 +-
 setup.py                                        |  46 ++--
 tox.ini                                         |  15 +-
 16 files changed, 532 insertions(+), 445 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index 9491f80..a9dc1ac 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,2 +1,4 @@
 include README.rst
 include COPYING
+include requirements.txt
+include data/
diff --git a/config.yaml.example b/data/config.yaml.example
similarity index 100%
rename from config.yaml.example
rename to data/config.yaml.example
diff --git a/data/torrc.local b/data/torrc.local
new file mode 100644
index 0000000..a9ea0ba
--- /dev/null
+++ b/data/torrc.local
@@ -0,0 +1,13 @@
+ControlPort 9251
+SocksPort 9250
+CookieAuthentication 0
+DataDirectory ./tor-data
+# SafeLogging 0
+Log notice stdout
+
+FascistFirewall 1
+
+FetchDirInfoEarly 1
+FetchDirInfoExtraEarly 1
+FetchUselessDescriptors 1
+UseMicrodescriptors 0
diff --git a/docs/conf.py b/docs/conf.py
index c837abf..3521c29 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -88,7 +88,7 @@ language = 'en'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ['_build', 'modules.rst']
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
diff --git a/onionbalance/__main__.py b/onionbalance/__main__.py
index 7d38406..120122b 100644
--- a/onionbalance/__main__.py
+++ b/onionbalance/__main__.py
@@ -2,4 +2,6 @@
 
 from onionbalance.manager import main
 
-main()
+
+if __name__ == "__main__":
+    main()
diff --git a/onionbalance/config.py b/onionbalance/config.py
index 79880d2..89c3d0b 100644
--- a/onionbalance/config.py
+++ b/onionbalance/config.py
@@ -8,6 +8,7 @@ DESCRIPTOR_VALIDITY_PERIOD = 24 * 60 * 60
 DESCRIPTOR_OVERLAP_PERIOD = 60 * 60
 DESCRIPTOR_UPLOAD_PERIOD = 60 * 60  # Re-upload descriptor every hour
 REFRESH_INTERVAL = 10 * 60
+PUBLISH_CHECK_INTERVAL = 5 * 60
 
 # Store global data about onion services and their instance nodes.
 services = []
diff --git a/onionbalance/descriptor.py b/onionbalance/descriptor.py
index ae221b0..ca77e32 100644
--- a/onionbalance/descriptor.py
+++ b/onionbalance/descriptor.py
@@ -3,24 +3,21 @@ import hashlib
 import base64
 import textwrap
 import datetime
-import sys
 
 import Crypto.Util.number
 import stem
 
 from onionbalance import util
 from onionbalance import log
+from onionbalance import config
 
 logger = log.get_logger()
 
 
-def generate_hs_descriptor(permanent_key, introduction_point_list=None,
-                           replica=0, timestamp=None):
+def generate_service_descriptor(permanent_key, introduction_point_list=None,
+                                replica=0, timestamp=None, deviation=0):
     """
     High-level interface for generating a signed HS descriptor
-
-    .. todo:: Allow generation of descriptors for future time periods
-          to help clients which have a skewed clock.
     """
 
     if not timestamp:
@@ -31,17 +28,20 @@ def generate_hs_descriptor(permanent_key, introduction_point_list=None,
     permanent_id = util.calc_permanent_id(permanent_key)
 
     # Calculate the current secret-id-part for this hidden service
-    time_period = util.get_time_period(unix_timestamp, permanent_id)
+    # Deviation allows the generation of a descriptor for a different time
+    # period.
+    time_period = (util.get_time_period(unix_timestamp, permanent_id)
+                   + int(deviation))
+
     secret_id_part = util.calc_secret_id_part(time_period, None, replica)
     descriptor_id = util.calc_descriptor_id(permanent_id, secret_id_part)
 
-    # If we have no introduction
     if not introduction_point_list:
-        logger.warning("No introduction points for service '%s'. "
-                       "Skipping upload." % util.calc_onion_address(
-                           permanent_key))
-        return None
+        onion_address = util.calc_onion_address(permanent_key)
+        raise ValueError("No introduction points for service '%s'.",
+                         onion_address)
 
+    # Generate the introduction point section of the descriptor
     intro_section = make_introduction_points_part(
         introduction_point_list
     )
@@ -153,14 +153,14 @@ def sign_descriptor(descriptor, service_key):
     """
     Sign or resign a provided hidden service descriptor
     """
-    TOKEN_HSDESCRIPTOR_SIGNATURE = '\nsignature\n'
+    token_descriptor_signature = '\nsignature\n'
 
     # Remove signature block if it exists
-    if TOKEN_HSDESCRIPTOR_SIGNATURE in descriptor:
-        descriptor = descriptor[:descriptor.find(TOKEN_HSDESCRIPTOR_SIGNATURE)
-                                + len(TOKEN_HSDESCRIPTOR_SIGNATURE)]
+    if token_descriptor_signature in descriptor:
+        descriptor = descriptor[:descriptor.find(token_descriptor_signature)
+                                + len(token_descriptor_signature)]
     else:
-        descriptor = descriptor.strip() + TOKEN_HSDESCRIPTOR_SIGNATURE
+        descriptor = descriptor.strip() + token_descriptor_signature
 
     descriptor_digest = hashlib.sha1(descriptor.encode('utf-8')).digest()
     signature_with_headers = sign_digest(descriptor_digest, service_key)
@@ -169,24 +169,59 @@ def sign_descriptor(descriptor, service_key):
 
 def fetch_descriptor(controller, onion_address, hsdir=None):
     """
-    Try fetch a HS descriptor from any of the responsible HSDirs
-
-    .. todo:: Allow a custom HSDir to be specified
+    Try fetch a HS descriptor from any of the responsible HSDirs. Initiate
+    fetch from hsdir only if it is specified.
     """
-    logger.info("Sending HS descriptor fetch for %s.onion" % onion_address)
-    response = controller.msg("HSFETCH %s" % (onion_address))
-    (response_code, divider, response_content) = response.content()[0]
+
+    if hsdir:
+        server_arg = " SERVER={}".format(hsdir)
+    else:
+        server_arg = ""
+
+    logger.debug("Sending descriptor fetch for %s.onion.", onion_address)
+    response = controller.msg("HSFETCH %s%s" % (onion_address, server_arg))
+    (response_code, _, response_content) = response.content()[0]
     if not response.is_ok():
-        if response_code == "510":
-            logger.error("This version of Tor does not support HSFETCH "
-                         "command.")
-            sys.exit(1)
         if response_code == "552":
             raise stem.InvalidRequest(response_code, response_content)
         else:
             raise stem.ProtocolError("HSFETCH returned unexpected "
-                                     "response code: %s" % response_code)
-        pass
+                                     "response code: %s", response_code)
+
+
+def descriptor_received(descriptor_content):
+    """
+    Process onion service descriptors retrieved from the HSDir system or
+    received directly over the metadata channel.
+    """
+
+    try:
+        parsed_descriptor = stem.descriptor.hidden_service_descriptor.\
+            HiddenServiceDescriptor(descriptor_content, validate=True)
+    except ValueError:
+        logger.exception("Received an invalid service descriptor.")
+
+    # Ensure the received descriptor matches the requested descriptor
+    permanent_key = Crypto.PublicKey.RSA.importKey(
+        parsed_descriptor.permanent_key)
+    descriptor_onion_address = util.calc_onion_address(permanent_key)
+
+    # Find the HS instance for this descriptor
+    for service in config.services:
+        for instance in service.instances:
+            if instance.onion_address == descriptor_onion_address:
+                # Update the descriptor and exit
+                instance.update_descriptor(parsed_descriptor)
+                return None
+
+    # No matching service instance was found for the descriptor
+    logger.debug("Received a descriptor for an unknown service:\n%s",
+                 descriptor_content)
+    logger.warning("Received a descriptor with address %s.onion that "
+                   "did not match any configured service instances.",
+                   descriptor_onion_address)
+
+    return None
 
 
 def upload_descriptor(controller, signed_descriptor, hsdirs=None):
@@ -196,11 +231,12 @@ def upload_descriptor(controller, signed_descriptor, hsdirs=None):
     If no HSDir's are specified, Tor will upload to what it thinks are the
     responsible directories
     """
-    logger.debug("Sending HS descriptor upload")
+    logger.debug("Beginning service descriptor upload.")
 
     # Provide server fingerprints to control command if HSDirs are specified.
     if hsdirs:
-        server_args = ' '.join([("SERVER=%s" % hsdir) for hsdir in hsdirs])
+        server_args = ' '.join([("SERVER={}".format(hsdir))
+                                for hsdir in hsdirs])
     else:
         server_args = ""
 
@@ -213,4 +249,4 @@ def upload_descriptor(controller, signed_descriptor, hsdirs=None):
             raise stem.InvalidRequest(response_code, response_content)
         else:
             raise stem.ProtocolError("HSPOST returned unexpected response "
-                                     "code: %s" % response_code)
+                                     "code: %s", response_code)
diff --git a/onionbalance/eventhandler.py b/onionbalance/eventhandler.py
index 07b2c9b..541cf8a 100644
--- a/onionbalance/eventhandler.py
+++ b/onionbalance/eventhandler.py
@@ -2,7 +2,7 @@
 import stem
 
 from onionbalance import log
-from onionbalance import config
+from onionbalance import descriptor
 
 logger = log.get_logger()
 
@@ -13,49 +13,44 @@ class EventHandler(object):
     Handles asynchronous Tor events.
     """
 
-    def __init__(self, controller):
-
-        self.controller = controller
-
-    def new_desc(self, desc_event):
+    @staticmethod
+    def _new_desc(desc_event):
         """
         Parse HS_DESC response events
         """
-        logger.debug("Received new HS_DESC event: %s" %
-                     str(desc_event))
+        logger.debug("Received new HS_DESC event: %s", str(desc_event))
 
-    def new_desc_content(self, desc_content_event):
+    @staticmethod
+    def _new_desc_content(desc_content_event):
         """
         Parse HS_DESC_CONTENT response events for descriptor content
 
         Update the HS instance object with the data from the new descriptor.
         """
-        logger.debug("Received new HS_DESC_CONTENT event for %s.onion" %
+        logger.debug("Received new HS_DESC_CONTENT event for %s.onion",
                      desc_content_event.address)
 
-        # Make sure the descriptor is not empty
+        #  Check that the HSDir returned a descriptor that is not empty
         descriptor_text = str(desc_content_event.descriptor).encode('utf-8')
         if len(descriptor_text) < 5:
-            logger.debug("Empty descriptor received for %s.onion" %
+            logger.debug("Empty descriptor received for %s.onion",
                          desc_content_event.address)
-            return
+            return None
+
+        # Send content to callback function which will process the descriptor
+        descriptor.descriptor_received(descriptor_text)
 
-        # Find the HS instance for this descriptor
-        for service in config.services:
-            for instance in service.instances:
-                if instance.onion_address == desc_content_event.address:
-                    instance.update_descriptor(descriptor_text)
-                    return
+        return None
 
     def new_event(self, event):
         """
         Dispatches new Tor controller events to the appropriate handlers.
         """
         if isinstance(event, stem.response.events.HSDescEvent):
-            self.new_desc(event)
+            self._new_desc(event)
 
         elif isinstance(event, stem.response.events.HSDescContentEvent):
-            self.new_desc_content(event)
+            self._new_desc_content(event)
 
         else:
-            logger.warning("Received unexpected event %s." % str(event))
+            logger.warning("Received unexpected event %s.", str(event))
diff --git a/onionbalance/hiddenservice.py b/onionbalance/hiddenservice.py
deleted file mode 100644
index 7658b5a..0000000
--- a/onionbalance/hiddenservice.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# -*- coding: utf-8 -*-
-import datetime
-import random
-import time
-import base64
-
-import Crypto.PublicKey.RSA
-import stem.descriptor.hidden_service_descriptor
-
-from onionbalance import descriptor
-from onionbalance import util
-from onionbalance import log
-from onionbalance import config
-
-logger = log.get_logger()
-
-
-def fetch_all_descriptors(controller):
-    """
-    Try fetch fresh descriptors for all HS instances
-    """
-    logger.info("Running periodic descriptor fetch")
-
-    # Clear Tor descriptor cache before fetches by sending NEWNYM
-    controller.signal(stem.control.Signal.NEWNYM)
-    time.sleep(5)
-
-    for service in config.services:
-        for instance in service.instances:
-                instance.fetch_descriptor()
-
-
-def publish_all_descriptors():
-    """
-    Called periodically to upload new super-descriptors if needed
-    """
-    logger.info("Running periodic descriptor publish")
-    for service in config.services:
-        service.publish()
-
-
-class HiddenService(object):
-    """
-    HiddenService represents a front-facing hidden service which should
-    be load-balanced.
-    """
-
-    def __init__(self, controller, service_key=None, instances=None):
-        """
-        Initialise a HiddenService object.
-        """
-        self.controller = controller
-
-        # Service key must be a valid PyCrypto RSA key object
-        if not isinstance(service_key, Crypto.PublicKey.RSA._RSAobj):
-            raise ValueError("Service key is not a valid RSA object")
-        else:
-            self.service_key = service_key
-
-        # List of load balancing Instances for this hidden service
-        if not instances:
-            instances = []
-        self.instances = instances
-
-        self.onion_address = util.calc_onion_address(self.service_key)
-        self.last_uploaded = None
-
-    def _intro_points_modified(self):
-        """
-        Check if the introduction point set has changed since last
-        publish.
-        """
-        for instance in self.instances:
-            if instance.changed_since_published:
-                return True
-
-        # No introduction points have changed
-        return False
-
-    def _descriptor_expiring(self):
-        """
-        Check if the last uploaded super descriptor is old and should be
-        uploaded again.
-        """
-        if not self.last_uploaded:
-            # No descriptor uploaded yet, we should publish.
-            return True
-
-        descriptor_age = (datetime.datetime.utcnow() - self.last_uploaded)
-        if (descriptor_age.total_seconds() > config.DESCRIPTOR_UPLOAD_PERIOD):
-            return True
-
-        return False
-
-    def _descriptor_id_changing_soon(self):
-        """
-        If the descriptor ID will change soon, upload under both descriptor IDs
-        """
-        seconds_valid = util.get_seconds_valid(
-            time.time(), base64.b32decode(self.onion_address, 1))
-
-        # Check if descriptor ID will be changing within the overlap period.
-        if seconds_valid < config.DESCRIPTOR_OVERLAP_PERIOD:
-            return True
-
-        return False
-
-    def _get_combined_introduction_points(self):
-        """
-        Choose set of introduction points from all fresh descriptors
-
-        .. todo:: There are probably better algorithms for choosing which
-        introduction points to include. If we have more than
-        `max_intro_points` introduction points, we will need to exclude
-        some.  It probably sensible to prefer IPs which are new and
-        haven't been included in any previous descriptors. Clients with
-        an old descriptor will continue trying previously published IPs
-        if they still work.
-        """
-
-        combined_intro_points = []
-        for instance in self.instances:
-
-            if not instance.last_fetched:
-                logger.debug("No descriptor fetched for instance %s.onion "
-                            "yet. Skipping!" % instance.onion_address)
-                continue
-
-            # Check if the intro points are too old
-            intro_age = datetime.datetime.utcnow() - instance.last_fetched
-            if intro_age.total_seconds() > 60 * 60:
-                logger.info("Our introduction points for instance %s.onion "
-                            "are too old. Skipping!" % instance.onion_address)
-                continue
-
-            # Our IP's are recent enough, include them
-            instance.changed_since_published = False
-            combined_intro_points.extend(instance.introduction_points)
-
-        # Choose up to `MAX_INTRO_POINTS` IPs from the combined set
-        max_introduction_points = min(
-            len(combined_intro_points), config.MAX_INTRO_POINTS)
-
-        choosen_intro_points = random.sample(combined_intro_points,
-                                             max_introduction_points)
-        # Shuffle IP's to try reveal less information about which
-        # instances are online and have introduction points included.
-        random.shuffle(choosen_intro_points)
-
-        logger.debug("Selected %d IPs (of %d) for service %s.onion" %
-                     (len(choosen_intro_points), len(combined_intro_points),
-                      self.onion_address))
-
-        return choosen_intro_points
-
-    def _get_signed_descriptor(self, replica=0, timestamp=None):
-        """
-        Generate a signed HS descriptor for this hidden service
-        """
-
-        # Select a set of introduction points from this HS's load
-        # balancing instances.
-        introduction_points = self._get_combined_introduction_points()
-
-        signed_descriptor = descriptor.generate_hs_descriptor(
-            self.service_key,
-            introduction_point_list=introduction_points,
-            replica=replica,
-            timestamp=timestamp
-        )
-        return signed_descriptor
-
-    def _upload_descriptor(self, timestamp=None):
-        """
-        Create, sign and upload a super-descriptors for this HS
-        """
-        for replica in range(0, config.REPLICAS):
-            signed_descriptor = self._get_signed_descriptor(
-                replica=replica, timestamp=timestamp)
-
-            # Upload if a signed descriptor was generated successfully
-            if signed_descriptor:
-                descriptor.upload_descriptor(self.controller,
-                                             signed_descriptor)
-
-        self.last_uploaded = datetime.datetime.utcnow()
-
-    def publish(self, force=False):
-        """
-        Publish descriptor if have new IP's or if descriptor has expired
-        """
-
-        if (self._intro_points_modified() or
-            self._descriptor_expiring() or
-                force):
-            logger.info("Publishing new descriptor for service %s.onion" %
-                        self.onion_address)
-
-            self._upload_descriptor()
-
-            # If the descriptor ID will change soon, need to upload under
-            # the new ID too.
-            if self._descriptor_id_changing_soon():
-                logger.info("Publishing new descriptor for service %s.onion "
-                            "under next descriptor ID" % self.onion_address)
-                next_time = datetime.datetime.utcnow() + datetime.timedelta(1)
-                self._upload_descriptor(timestamp=next_time)
-
-        else:
-            logger.debug("Not publishing a descriptor for %s.onion" %
-                         self.onion_address)
-
-
-class Instance(object):
-    """
-    Instance represents a back-end load balancing hidden service.
-    """
-
-    def __init__(self, controller, onion_address, authentication_cookie):
-        """
-        Initialise an Instance object.
-        """
-        self.controller = controller
-        self.onion_address = onion_address
-        self.authentication_cookie = authentication_cookie
-
-        self.introduction_points = []
-        self.last_fetched = None
-        self.last_descriptor_timestamp = None
-        self.changed_since_published = False
-
-    def fetch_descriptor(self):
-        """
-        Try fetch a fresh descriptor for this back-end HS instance.
-        """
-        logger.debug("Trying to fetch a descriptor for instance %s.onion" %
-                     self.onion_address)
-        return descriptor.fetch_descriptor(self.controller, self.onion_address)
-
-    def update_descriptor(self, descriptor_content):
-        """
-        Update introduction points when a new HS descriptor is received
-
-        Parse the descriptor content and update the set of introduction
-        points for this HS instance.
-        """
-
-        logger.info("Received a descriptor for instance %s.onion" %
-                    self.onion_address)
-
-        self.last_fetched = datetime.datetime.utcnow()
-
-        # Parse and store this descriptors introduction points.
-        parsed_descriptor = stem.descriptor.hidden_service_descriptor.\
-            HiddenServiceDescriptor(descriptor_content, validate=True)
-
-        # Ensure the received descriptor matches the requested descriptor
-        permanent_key = Crypto.PublicKey.RSA.importKey(
-            parsed_descriptor.permanent_key)
-        descriptor_onion_address = util.calc_onion_address(permanent_key)
-
-        if self.onion_address != descriptor_onion_address:
-            logger.error("Received a descriptor with address %s.onion that "
-                        " did not match the expected onion address %s.onion" %
-                         (descriptor_onion_address, self.onion_address))
-            return None
-
-        # Reject descriptor if it timestamp is older than the latest
-        # descriptor. Prevents HSDir's replaying old, expired descriptors
-        if (self.last_descriptor_timestamp and
-                parsed_descriptor.published < self.last_descriptor_timestamp):
-            logger.error("Received descriptor for instance (%s) with "
-                         "publication timestamp older than the last received "
-                         "descriptor. Skipping descriptor." %
-                         self.onion_address)
-            return
-        else:
-            self.last_descriptor_timestamp = parsed_descriptor.published
-
-        # Parse the introduction point list, decrypting if necessary
-        introduction_points = parsed_descriptor.introduction_points(
-            authentication_cookie=self.authentication_cookie)
-
-        # If the new intro point set is different, flag this HS instance
-        # as modified.
-        if introduction_points != self.introduction_points:
-            logger.info("New IPs found for '%s'" % self.onion_address)
-            self.changed_since_publish = True
-            self.introduction_points = introduction_points
-
-        else:
-            logger.info("IPs for '%s' matched the cached set" %
-                        self.onion_address)
diff --git a/onionbalance/instance.py b/onionbalance/instance.py
new file mode 100644
index 0000000..0f3b452
--- /dev/null
+++ b/onionbalance/instance.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+import datetime
+import time
+
+import stem.control
+
+from onionbalance import descriptor
+from onionbalance import log
+from onionbalance import config
+
+logger = log.get_logger()
+
+
+def fetch_instance_descriptors(controller):
+    """
+    Try fetch fresh descriptors for all HS instances
+    """
+    logger.info("Initiating fetch of descriptors for all service instances.")
+
+    # Clear Tor descriptor cache before making fetches by sending NEWNYM
+    # pylint: disable=no-member
+    controller.signal(stem.control.Signal.NEWNYM)
+    time.sleep(5)
+
+    for service in config.services:
+        for instance in service.instances:
+            instance.fetch_descriptor()
+
+
+class Instance(object):
+    """
+    Instance represents a back-end load balancing hidden service.
+    """
+
+    def __init__(self, controller, onion_address, authentication_cookie=None):
+        """
+        Initialise an Instance object.
+        """
+        self.controller = controller
+
+        # Onion address for the service instance.
+        self.onion_address = onion_address
+        self.authentication_cookie = authentication_cookie
+
+        # Store the latest set of introduction points for this instance
+        self.introduction_points = []
+
+        # Timestamp when last received a descriptor for this instance
+        self.received = None
+
+        # Timestamp of the currently loaded descriptor
+        self.timestamp = None
+
+        # Flag this instance with it's introduction points change. A new
+        # master descriptor will then be published as the introduction
+        # points have changed.
+        self.changed_since_published = False
+
+    def fetch_descriptor(self):
+        """
+        Try fetch a fresh descriptor for this service instance from the HSDirs
+        """
+        logger.debug("Trying to fetch a descriptor for instance %s.onion.",
+                     self.onion_address)
+        return descriptor.fetch_descriptor(self.controller, self.onion_address)
+
+    def update_descriptor(self, parsed_descriptor):
+        """
+        Update introduction points when a new HS descriptor is received
+
+        Parse the descriptor content and update the set of introduction
+        points for this HS instance.
+        """
+
+        self.received = datetime.datetime.utcnow()
+
+        logger.debug("Received a descriptor for instance %s.onion.",
+                     self.onion_address)
+
+        # Reject descriptor if it's timestamp is older than the current
+        # descriptor. Prevent's HSDir's replaying old, expired descriptors.
+        if self.timestamp and parsed_descriptor.published < self.timestamp:
+            logger.error("Received descriptor for instance %s.onion with "
+                         "publication timestamp older than the latest "
+                         "descriptor. Ignoring the descriptor.",
+                         self.onion_address)
+            return
+        else:
+            self.timestamp = parsed_descriptor.published
+
+        # Parse the introduction point list, decrypting if necessary
+        introduction_points = parsed_descriptor.introduction_points(
+            authentication_cookie=self.authentication_cookie
+        )
+
+        # If the new introduction points are different, flag this instance
+        # as modified.
+        if not all(ip in self.introduction_points
+                   for ip in introduction_points):
+            logger.info("Found new introduction points for instance "
+                        "%s.onion.", self.onion_address)
+            self.changed_since_published = True
+            self.introduction_points = introduction_points
+
+        else:
+            logger.debug("Introduction points for instance %s.onion matched "
+                         "the cached set.", self.onion_address)
diff --git a/onionbalance/manager.py b/onionbalance/manager.py
index b62a8b4..16dba17 100644
--- a/onionbalance/manager.py
+++ b/onionbalance/manager.py
@@ -19,9 +19,11 @@ import schedule
 from onionbalance import log
 from onionbalance import util
 from onionbalance import config
-from onionbalance import hiddenservice
 from onionbalance import eventhandler
 
+import onionbalance.service
+import onionbalance.instance
+
 logger = log.get_logger()
 
 
@@ -55,18 +57,62 @@ def parse_config_file(config_file):
     """
     Parse config file contain load balancing node configuration
     """
-    if os.path.exists(config_file):
+    config_path = os.path.abspath(config_file)
+    if os.path.exists(config_path):
         with open(config_file, 'r') as handle:
             config_data = yaml.load(handle.read())
-            logger.info("Loaded the config file '%s'" % config_file)
+            logger.info("Loaded the config file '%s'.", config_path)
     else:
-        logger.error("The specified config file does not exist (%s)" %
-                     config_file)
+        logger.error("The specified config file '%s' does not exist.",
+                     config_path)
         sys.exit(1)
 
     return config_data
 
 
+def initialize_services(controller, services_config):
+    """
+    Load keys for services listed in the config
+    """
+
+    # Load the keys and config for each onion service
+    for service in services_config:
+        service_key = util.key_decrypt_prompt(service.get("key"))
+        if not service_key:
+            logger.error("Private key %s could not be loaded.",
+                         service.get("key"))
+            sys.exit(0)
+        else:
+            # Successfully imported the private key
+            onion_address = util.calc_onion_address(service_key)
+            logger.debug("Loaded private key for service %s.onion.",
+                         onion_address)
+
+        # Load all instances for the current onion service
+        instance_config = service.get("instances", [])
+        if not instance_config:
+            logger.error("Could not load and instances for service "
+                         "%s.onion.", onion_address)
+            sys.exit(1)
+        else:
+            instances = []
+            for instance in instance_config:
+                instances.append(onionbalance.instance.Instance(
+                    controller=controller,
+                    onion_address=instance.get("address"),
+                    authentication_cookie=instance.get("auth")
+                ))
+
+            logger.info("Loaded %d instances for service %s.onion.",
+                        len(instances), onion_address)
+
+        config.services.append(onionbalance.service.Service(
+            controller=controller,
+            service_key=service_key,
+            instances=instances
+        ))
+
+
 def main():
     """
     Entry point when invoked over the command line.
@@ -85,88 +131,55 @@ def main():
     try:
         controller = Controller.from_port(address=args.ip, port=args.port)
     except stem.SocketError as exc:
-        logger.error("Unable to connect to Tor control port: %s"
-                     % exc)
+        logger.error("Unable to connect to Tor control port: %s", exc)
         sys.exit(1)
     else:
-        logger.debug("Successfully connected to the Tor control port")
+        logger.debug("Successfully connected to the Tor control port.")
 
     try:
         controller.authenticate()
     except stem.connection.AuthenticationFailure as exc:
-        logger.error("Unable to authenticate to Tor control port: %s"
-                     % exc)
+        logger.error("Unable to authenticate to Tor control port: %s", exc)
         sys.exit(1)
     else:
-        logger.debug("Successfully authenticated to the Tor control port")
+        logger.debug("Successfully authenticated to the Tor control port.")
+
+    # Disable no-member due to bug with "Instance of 'Enum' has no * member"
+    # pylint: disable=no-member
 
     # Check that the Tor client supports the HSPOST control port command
     if not controller.get_version() >= stem.version.Requirement.HSPOST:
-        logger.error("A Tor version >= {} is required. You may need to "
+        logger.error("A Tor version >= %s is required. You may need to "
                      "compile Tor from source or install a package from "
-                     "the experimental Tor repository.".format(
-                        stem.version.Requirement.HSPOST))
+                     "the experimental Tor repository.",
+                     stem.version.Requirement.HSPOST)
         sys.exit(1)
 
     # Load the keys and config for each onion service
-    for service in config_file_options.get('services'):
-        service_key = util.key_decrypt_prompt(service.get("key"))
-        if not service_key:
-            logger.error("Private key %s could not be loaded" %
-                         args.key)
-            sys.exit(0)
-        else:
-            # Successfully imported the private key
-            onion_address = util.calc_onion_address(service_key)
-            logger.debug("Loaded private key for service %s.onion" %
-                         onion_address)
+    initialize_services(controller, config_file_options.get('services'))
 
-        # Load all instances for the current onion service
-        instance_config = service.get("instances", [])
-        if not instance_config:
-            logger.error("Could not load and instances for service "
-                         "%s.onion" % onion_address)
-            sys.exit(1)
-        else:
-            instances = []
-            for instance in instance_config:
-                instances.append(hiddenservice.Instance(
-                    controller=controller,
-                    onion_address=instance.get("address"),
-                    authentication_cookie=instance.get("auth")
-                ))
-
-            logger.info("Loaded {} instances for service {}.onion".format(
-                        len(instances), onion_address))
-
-        config.services.append(hiddenservice.HiddenService(
-            controller=controller,
-            service_key=service_key,
-            instances=instances
-        ))
     # Finished parsing all the config file.
 
-    handler = eventhandler.EventHandler(controller)
+    handler = eventhandler.EventHandler()
     controller.add_event_listener(handler.new_event,
                                   EventType.HS_DESC,
                                   EventType.HS_DESC_CONTENT)
 
     # Schedule descriptor fetch and upload events
     schedule.every(config.REFRESH_INTERVAL).seconds.do(
-        hiddenservice.fetch_all_descriptors, controller)
-    schedule.every(config.REFRESH_INTERVAL).seconds.do(
-        hiddenservice.publish_all_descriptors)
+        onionbalance.instance.fetch_instance_descriptors, controller)
+    schedule.every(config.PUBLISH_CHECK_INTERVAL).seconds.do(
+        onionbalance.service.publish_all_descriptors)
 
-    # Run initial fetch of HS instance descriptors
-    schedule.run_all(delay_seconds=15)
-
-    # Begin main loop to poll for HS descriptors
     try:
+        # Run initial fetch of HS instance descriptors
+        schedule.run_all(delay_seconds=30)
+
+        # Begin main loop to poll for HS descriptors
         while True:
             schedule.run_pending()
             time.sleep(1)
     except KeyboardInterrupt:
         logger.info("Keyboard interrupt received. Stopping the "
-                    "management server")
-
+                    "management server.")
     return 0
diff --git a/onionbalance/service.py b/onionbalance/service.py
new file mode 100644
index 0000000..87c6b6e
--- /dev/null
+++ b/onionbalance/service.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+import datetime
+import random
+import time
+import base64
+
+import Crypto.PublicKey.RSA
+
+from onionbalance import descriptor
+from onionbalance import util
+from onionbalance import log
+from onionbalance import config
+
+logger = log.get_logger()
+
+
+def publish_all_descriptors():
+    """
+    Called periodically to upload new super-descriptors if needed
+
+    .. todo:: Publishing descriptors for different services at the same time
+              will leak that they are related. Descriptors should
+              be published individually at a random interval to avoid
+              correlation.
+    """
+    logger.debug("Checking if any master descriptors should be published.")
+    for service in config.services:
+        service.descriptor_publish()
+
+
+class Service(object):
+    """
+    Service represents a front-facing hidden service which should
+    be load-balanced.
+    """
+
+    def __init__(self, controller, service_key=None, instances=None):
+        """
+        Initialise a Service object.
+        """
+        self.controller = controller
+
+        # Service key must be a valid PyCrypto RSA key object
+        if isinstance(service_key, Crypto.PublicKey.RSA._RSAobj):
+            self.service_key = service_key
+        else:
+            raise ValueError("Service key is not a valid RSA object.")
+
+        # List of instances for this onion service
+        if not instances:
+            instances = []
+        self.instances = instances
+
+        # Calculate the onion address for this service
+        self.onion_address = util.calc_onion_address(self.service_key)
+
+        # Timestamp when this descriptor was last attempted
+        self.uploaded = None
+
+    def _intro_points_modified(self):
+        """
+        Check if the introduction point set has changed since last
+        publish.
+        """
+        return any(instance.changed_since_published
+                   for instance in self.instances)
+
+    def _descriptor_not_uploaded_recently(self):
+        """
+        Check if the master descriptor hasn't been uploaded recently
+        """
+        if not self.uploaded:
+            # Descriptor never uploaded
+            return True
+
+        descriptor_age = (datetime.datetime.utcnow() - self.uploaded)
+        if (descriptor_age.total_seconds() > config.DESCRIPTOR_UPLOAD_PERIOD):
+            return True
+        else:
+            return False
+
+    def _descriptor_id_changing_soon(self):
+        """
+        If the descriptor ID will change soon, upload under both descriptor IDs
+        """
+        permanent_id = base64.b32decode(self.onion_address, 1)
+        seconds_valid = util.get_seconds_valid(time.time(), permanent_id)
+
+        # Check if descriptor ID will be changing within the overlap period.
+        if seconds_valid < config.DESCRIPTOR_OVERLAP_PERIOD:
+            return True
+        else:
+            return False
+
+    def _select_introduction_points(self):
+        """
+        Choose set of introduction points from all fresh descriptors
+        """
+        available_intro_points = []
+
+        # Loop through each instance and determine fresh intro points
+        for instance in self.instances:
+            if not instance.received:
+                logger.debug("No descriptor received for instance %s.onion "
+                             "yet.", instance.onion_address)
+                continue
+
+            # The instance may be offline if no descriptor has been received
+            # for it recently or if the received descriptor's timestamp is
+            # too old
+            received_age = datetime.datetime.utcnow() - instance.received
+            timestamp_age = datetime.datetime.utcnow() - instance.timestamp
+            received_age = received_age.total_seconds()
+            timestamp_age = timestamp_age.total_seconds()
+
+            if (received_age > config.DESCRIPTOR_UPLOAD_PERIOD or
+                    timestamp_age > (4 * 60 * 60)):
+                logger.info("Our descriptor for instance %s.onion is too old. "
+                            "The instance may be offline. Its introduction "
+                            "points will not be included in the master "
+                            "descriptor.", instance.onion_address)
+                continue
+            else:
+                # Include this instance's introduction points
+                instance.changed_since_published = False
+                random.shuffle(instance.introduction_points)
+                available_intro_points.append(instance.introduction_points)
+
+        # Choose up to `MAX_INTRO_POINTS` IPs from the service instances.
+        num_instances = len(self.instances)
+        num_intro_points = sum(
+            len(ips) for ips in available_intro_points
+        )
+        max_introduction_points = min(num_intro_points,
+                                      config.MAX_INTRO_POINTS)
+
+        # Choose intro points from a maximum number of instances
+        intro_selection = [0] * num_instances
+        for count in range(0, max_introduction_points):
+            intro_selection[count % num_instances] += 1
+        random.shuffle(intro_selection)
+
+        choosen_intro_points = []
+        for count, intros in zip(intro_selection, available_intro_points):
+            choosen_intro_points.extend(random.sample(intros, count))
+
+        # Shuffle IPs to try to reveal less information about which
+        # instances are online and have introduction points included.
+        random.shuffle(choosen_intro_points)
+
+        logger.debug("Selected %d IPs of %d for service %s.onion.",
+                     len(choosen_intro_points), num_intro_points,
+                     self.onion_address)
+
+        return choosen_intro_points
+
+    def _publish_descriptor(self, deviation=0):
+        """
+        Create, sign and upload a master descriptor for this service
+        """
+        introduction_points = self._select_introduction_points()
+        for replica in range(0, config.REPLICAS):
+            try:
+                signed_descriptor = descriptor.generate_service_descriptor(
+                    self.service_key,
+                    introduction_point_list=introduction_points,
+                    replica=replica,
+                    deviation=deviation
+                )
+            except ValueError as exc:
+                logger.warning("Error generating master descriptor: %s", exc)
+            else:
+                # Signed descriptor was generated successfully, upload it
+                descriptor.upload_descriptor(self.controller,
+                                             signed_descriptor)
+                logger.info("Published a descriptor for service %s.onion "
+                            "under replica %d.", self.onion_address, replica)
+
+        # It would be better to set self.uploaded when an upload succeeds and
+        # not when an upload is just attempted. Unfortunately the HS_DESC
+        # UPLOADED event does not provide information about the service and
+        # so it can't be used to determine when descriptor upload succeeds.
+        self.uploaded = datetime.datetime.utcnow()
+
+    def descriptor_publish(self, force_publish=False):
+        """
+        Publish descriptor if we have new IPs or if the descriptor has expired
+        """
+
+        # A descriptor should be published if any of the following conditions
+        # are True
+        if any([self._intro_points_modified(),  # If any IPs have changed
+                self._descriptor_not_uploaded_recently(),
+                force_publish]):
+
+            logger.debug("Publishing a descriptor for service %s.onion.",
+                         self.onion_address)
+            self._publish_descriptor()
+
+            # If the descriptor ID will change soon, need to upload under
+            # the new ID too.
+            if self._descriptor_id_changing_soon():
+                logger.debug("Publishing a descriptor for service %s.onion "
+                             "under next descriptor ID.", self.onion_address)
+                self._publish_descriptor(deviation=1)
+
+        else:
+            logger.debug("Not publishing a new descriptor for service "
+                         "%s.onion.", self.onion_address)
diff --git a/onionbalance/util.py b/onionbalance/util.py
index 567eaee..8f4d571 100644
--- a/onionbalance/util.py
+++ b/onionbalance/util.py
@@ -15,11 +15,10 @@ def add_pkcs1_padding(message):
     padding = b''
     typeinfo = b'\x00\x01'
     separator = b'\x00'
-    for x in range(125 - len(message)):
-        padding += b'\xFF'
-    PKCS1paddedMessage = typeinfo + padding + separator + message
-    assert len(PKCS1paddedMessage) == 128
-    return PKCS1paddedMessage
+    padding = b'\xFF' * (125 - len(message))
+    padded_message = typeinfo + padding + separator + message
+    assert len(padded_message) == 128
+    return padded_message
 
 
 def get_asn1_sequence(rsa_key):
@@ -85,11 +84,11 @@ def rounded_timestamp(timestamp=None):
     return timestamp.strftime('%Y-%m-%d %H:%M:%S')
 
 
-def base32_encode_str(bytes):
+def base32_encode_str(byte_str):
     """
     Encode bytes as lowercase base32 string
     """
-    return base64.b32encode(bytes).lower().decode('utf-8')
+    return base64.b32encode(byte_str).lower().decode('utf-8')
 
 
 def key_decrypt_prompt(key_file, retries=3):
@@ -116,4 +115,4 @@ def key_decrypt_prompt(key_file, retries=3):
                 return permanent_key
 
     # No private key was imported
-    raise ValueError("Could not import RSA key")
+    raise ValueError("Could not import RSA key.")
diff --git a/requirements.txt b/requirements.txt
index 19392be..67bd0a3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-stem>=1.3.0-dev
+stem>=1.4.0-dev
 PyYAML>=3.11
 pycrypto>=2.6.1
 schedule>=0.3.1
diff --git a/setup.py b/setup.py
index 8f31618..3483609 100644
--- a/setup.py
+++ b/setup.py
@@ -2,28 +2,24 @@
 
 """setup.py: setuptools control."""
 
-import sys
+import io
 import os
 
 from setuptools import setup
-from setuptools.command.test import test as TestCommand
 
-import onionbalance
+# Read version and other info from the package's __init__.py file
+module_info = {}
+init_path = os.path.join(os.path.dirname(__file__), 'onionbalance',
+                         '__init__.py')
+with open(init_path) as init_file:
+    exec(init_file.read(), module_info)
 
-with open(os.path.join(os.path.dirname(__file__), "README.rst"), "rb") as f:
-    long_descr = f.read().decode("utf-8")
 
-
-class Tox(TestCommand):
-    def finalize_options(self):
-        TestCommand.finalize_options(self)
-        self.test_args = []
-        self.test_suite = True
-
-    def run_tests(self):
-        import tox
-        errcode = tox.cmdline(self.test_args)
-        sys.exit(errcode)
+def read(*names, **kwargs):
+    return io.open(
+        os.path.join(os.path.dirname(__file__), *names),
+        encoding=kwargs.get("encoding", "utf8")
+    ).read()
 
 setup(
     name = "OnionBalance",
@@ -33,19 +29,17 @@ setup(
         },
     description = "Tool for distributing Tor onion services connections to "
                   "multiple backend Tor instances",
-    long_description = long_descr,
-    version = onionbalance.__version__,
-    author = onionbalance.__author__,
-    author_email = onionbalance.__contact__,
-    url = onionbalance.__url__,
-    license = onionbalance.__license__,
+    long_description = read('README.rst'),
+    version = module_info.get('__version__'),
+    author = module_info.get('__author__'),
+    author_email = module_info.get('__contact__'),
+    url = module_info.get('__url__'),
+    license = module_info.get('__license__'),
     keywords = 'tor',
     install_requires = [
-        'stem>=1.3.0-dev',
+        'stem>=1.4.0-dev',
         'PyYAML>=3.11',
         'pycrypto>=2.6.1',
         'schedule>=0.3.1',
-        ],
-    tests_require = ['tox'],
-    cmdclass = {'test': Tox},
+        ]
 )
diff --git a/tox.ini b/tox.ini
index 4e19e29..1c543ab 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,10 +4,11 @@
 # and then run "tox" from this directory.
 
 [tox]
-envlist = py27, py34
+envlist = style, py27, py34, docs
 
 [testenv]
-deps = pytest
+deps = -rrequirements.txt
+       pytest
 commands = py.test
 
 [testenv:docs]
@@ -15,4 +16,12 @@ basepython=python
 changedir=docs
 deps=sphinx
 commands=
-    sphinx-build -W -b html -d {envtmpdir}/doctrees .  {envtmpdir}/html
+    sphinx-apidoc -e -f -o . ../onionbalance/
+    sphinx-build -W -b html -d {envtmpdir}/docs . {envtmpdir}/html
+
+[testenv:style]
+basepython=python
+deps=pylint
+     flake8
+commands=pylint onionbalance {posargs: -E}
+         flake8 onionbalance

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-privacy/packages/onionbalance.git



More information about the Pkg-privacy-commits mailing list