[Pkg-nagios-changes] [SCM] UNNAMED PROJECT branch, debian/master, updated. 810edbdd3feedbfe37f4a65bee50b57b2f60fa2a

Gerhard Lausser gerhard.lausser at consol.de
Tue Feb 28 22:09:04 UTC 2012


The following commit has been merged in the debian/master branch:
commit 9d96ece2a6ee586a44954403e9dec2f5cf56f867
Author: Gerhard Lausser <gerhard.lausser at consol.de>
Date:   Sat Dec 17 01:45:54 2011 +0100

    Enh: better performance for livestatus "GET logs" through an SQL preselect

diff --git a/shinken/modules/livestatus_broker/__init__.py b/shinken/modules/livestatus_broker/__init__.py
index 0fbc78d..4713498 100644
--- a/shinken/modules/livestatus_broker/__init__.py
+++ b/shinken/modules/livestatus_broker/__init__.py
@@ -99,10 +99,11 @@ def get_instance(plugin):
     else:
         pnp_path = ''
 
+    use_aggressive_sql = getattr(plugin, 'use_aggressive_sql', True)
     debug = getattr(plugin, 'debug', None)
     debug_queries = (getattr(plugin, 'debug_queries', '0') == '1')
 
-    instance = Livestatus_broker(plugin, host, port, socket, allowed_hosts, database_file, archive_path, max_logs_age, pnp_path, debug, debug_queries)
+    instance = Livestatus_broker(plugin, host, port, socket, allowed_hosts, database_file, archive_path, max_logs_age, pnp_path, use_aggressive_sql, debug, debug_queries)
     return instance
 
 
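For reference, get_instance() now reads the flag from the module configuration object with getattr() and defaults to True. Below is a minimal sketch of how a deployment could normalize the value, assuming the option arrives as a string the way debug_queries is handled above (the commit itself passes the raw attribute through unchanged, and this helper is not part of it):

    def read_use_aggressive_sql(plugin):
        # Hypothetical helper: coerce the module parameter to a real boolean,
        # mirroring the debug_queries handling in get_instance().
        value = getattr(plugin, 'use_aggressive_sql', True)
        if isinstance(value, str):
            return value == '1'
        return bool(value)
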
diff --git a/shinken/modules/livestatus_broker/livestatus.py b/shinken/modules/livestatus_broker/livestatus.py
index 2a5a5bf..50709e4 100644
--- a/shinken/modules/livestatus_broker/livestatus.py
+++ b/shinken/modules/livestatus_broker/livestatus.py
@@ -41,7 +41,7 @@ class LiveStatus(object, Hooker):
     # Use out_map from the mapping.py file
     out_map = out_map
 
-    def __init__(self, configs, hosts, services, contacts, hostgroups, servicegroups, contactgroups, timeperiods, commands, schedulers, pollers, reactionners, brokers, db, pnp_path, return_queue):
+    def __init__(self, configs, hosts, services, contacts, hostgroups, servicegroups, contactgroups, timeperiods, commands, schedulers, pollers, reactionners, brokers, db, use_aggressive_sql, pnp_path, return_queue):
         self.configs = configs
         self.hosts = hosts
         self.services = services
@@ -56,6 +56,7 @@ class LiveStatus(object, Hooker):
         self.reactionners = reactionners
         self.brokers = brokers
         self.db = db
+        self.use_aggressive_sql = use_aggressive_sql
         LiveStatus.pnp_path = pnp_path
         self.debuglevel = 2
         self.return_queue = return_queue
@@ -83,7 +84,7 @@ class LiveStatus(object, Hooker):
         """
         request = LiveStatusRequest(data, self.configs, self.hosts, self.services, 
             self.contacts, self.hostgroups, self.servicegroups, self.contactgroups, self.timeperiods, self.commands, 
-            self.schedulers, self.pollers, self.reactionners, self.brokers, self.db, self.pnp_path, self.return_queue, self.counters)
+            self.schedulers, self.pollers, self.reactionners, self.brokers, self.db, self.use_aggressive_sql, self.pnp_path, self.return_queue, self.counters)
         request.parse_input(data)
         #print "REQUEST\n%s\n" % data
         to_del = []
diff --git a/shinken/modules/livestatus_broker/livestatus_broker.py b/shinken/modules/livestatus_broker/livestatus_broker.py
index 0a83dee..d36eebc 100644
--- a/shinken/modules/livestatus_broker/livestatus_broker.py
+++ b/shinken/modules/livestatus_broker/livestatus_broker.py
@@ -69,7 +69,7 @@ properties = {
 #Class for the Livestatus Broker
 #Get broks and listen to livestatus query language requests
 class Livestatus_broker(BaseModule):
-    def __init__(self, mod_conf, host, port, socket, allowed_hosts, database_file, archive_path, max_logs_age, pnp_path, debug=None, debug_queries=False):
+    def __init__(self, mod_conf, host, port, socket, allowed_hosts, database_file, archive_path, max_logs_age, pnp_path, use_aggressive_sql=False, debug=None, debug_queries=False):
         BaseModule.__init__(self, mod_conf)
         self.host = host
         self.port = port
@@ -79,6 +79,7 @@ class Livestatus_broker(BaseModule):
         self.archive_path = archive_path
         self.max_logs_age = max_logs_age
         self.pnp_path = pnp_path
+        self.use_aggressive_sql = use_aggressive_sql
         self.debug = debug
         self.debug_queries = debug_queries
 
@@ -804,7 +805,7 @@ class Livestatus_broker(BaseModule):
         self.db.log_db_do_archive()
 
         # This is the main object of this broker where the action takes place
-        self.livestatus = LiveStatus(self.configs, self.hosts, self.services, self.contacts, self.hostgroups, self.servicegroups, self.contactgroups, self.timeperiods, self.commands, self.schedulers, self.pollers, self.reactionners, self.brokers, self.db, self.pnp_path, self.from_q)
+        self.livestatus = LiveStatus(self.configs, self.hosts, self.services, self.contacts, self.hostgroups, self.servicegroups, self.contactgroups, self.timeperiods, self.commands, self.schedulers, self.pollers, self.reactionners, self.brokers, self.db, self.use_aggressive_sql, self.pnp_path, self.from_q)
 
         last_number_of_objects = 0
         backlog = 5
diff --git a/shinken/modules/livestatus_broker/livestatus_query.py b/shinken/modules/livestatus_broker/livestatus_query.py
index 2fd8521..69f4a6d 100644
--- a/shinken/modules/livestatus_broker/livestatus_query.py
+++ b/shinken/modules/livestatus_broker/livestatus_query.py
@@ -51,7 +51,7 @@ class LiveStatusQuery(Hooker):
 
     my_type = 'query'
 
-    def __init__(self, configs, hosts, services, contacts, hostgroups, servicegroups, contactgroups, timeperiods, commands, schedulers, pollers, reactionners, brokers, db, pnp_path, return_queue, counters):
+    def __init__(self, configs, hosts, services, contacts, hostgroups, servicegroups, contactgroups, timeperiods, commands, schedulers, pollers, reactionners, brokers, db, use_aggressive_sql, pnp_path, return_queue, counters):
         # Runtime data form the global LiveStatus object
         self.configs = configs
         self.hosts = hosts
@@ -67,6 +67,7 @@ class LiveStatusQuery(Hooker):
         self.reactionners = reactionners
         self.brokers = brokers
         self.db = db
+        self.use_aggressive_sql = use_aggressive_sql
         self.pnp_path = pnp_path
         self.return_queue = return_queue
         self.counters = counters
@@ -87,8 +88,13 @@ class LiveStatusQuery(Hooker):
         # Initialize the stacks which are needed for the Filter: and Stats:
         # filter- and count-operations
         self.filter_stack = LiveStatusStack()
+        # This stack is used to create a full-blown select-statement
         self.sql_filter_stack = LiveStatusStack()
         self.sql_filter_stack.type = 'sql'
+        # This stack is used to create a minimal select-statement which
+        # selects only by time >= and time <=
+        self.sql_time_filter_stack = LiveStatusStack()
+        self.sql_time_filter_stack.type = 'sql'
         self.stats_filter_stack = LiveStatusStack()
         self.stats_postprocess_stack = LiveStatusStack()
         self.stats_request = False
@@ -262,7 +268,8 @@ class LiveStatusQuery(Hooker):
                     self.filter_stack.put(self.make_filter(operator, attribute, reference))
                     if self.table == 'log':
                         if attribute == 'time':
-                            self.sql_filter_stack.put(self.make_sql_filter(operator, attribute, reference))
+                            self.sql_time_filter_stack.put(self.make_sql_filter(operator, attribute, reference))
+                        self.sql_filter_stack.put(self.make_sql_filter(operator, attribute, reference))
                 else:
                     print "illegal operation", operator
                     pass # illegal operation
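To see what the double bookkeeping amounts to: for the log table, time bounds are now recorded twice, once on the time-only stack and once on the full stack, while every other Filter: line only reaches the full stack. A rough, self-contained illustration (plain lists stand in for LiveStatusStack and the fragment format is only approximate):

    filters = [('>=', 'time', 1324080000),
               ('<=', 'time', 1324083600),
               ('=',  'host_name', 'test_host_005')]
    sql_time_fragments = []   # feeds sql_time_filter_stack
    sql_fragments = []        # feeds sql_filter_stack
    for operator, attribute, reference in filters:
        fragment = ('%s %s ?' % (attribute, operator), (reference,))
        if attribute == 'time':
            sql_time_fragments.append(fragment)
        sql_fragments.append(fragment)
    # sql_time_fragments -> only the two time bounds
    # sql_fragments      -> time bounds plus the host_name clause
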
@@ -272,12 +279,18 @@ class LiveStatusQuery(Hooker):
                 # Construct a new function which makes a logical and
                 # Put the function back onto the stack
                 self.filter_stack.and_elements(andnum)
+                if self.table == 'log':
+                    #self.sql_time_filter_stack.and_elements(andnum)
+                    self.sql_filter_stack.and_elements(andnum)
             elif keyword == 'Or':
                 cmd, ornum = self.split_option(line)
                 # Take the last ornum functions from the stack
                 # Construct a new function which makes a logical or
                 # Put the function back onto the stack
                 self.filter_stack.or_elements(ornum)
+                if self.table == 'log':
+                    #self.sql_time_filter_stack.or_elements(ornum)
+                    self.sql_filter_stack.or_elements(ornum)
             elif keyword == 'StatsGroupBy':
                 cmd, stats_group_by = self.split_option_with_columns(line)
                 self.filtercolumns.extend(stats_group_by)
@@ -364,6 +377,7 @@ class LiveStatusQuery(Hooker):
             # But we need to ask now, because get_live_data() will empty the stack
             num_stats_filters = self.stats_filter_stack.qsize()
             if self.table == 'log':
+                self.sql_time_filter_stack.and_elements(self.sql_time_filter_stack.qsize())
                 self.sql_filter_stack.and_elements(self.sql_filter_stack.qsize())
                 result = self.get_live_data_log()
             else:
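and_elements() collapses whatever is on a stack into a single clause before get_live_data_log() runs. The LiveStatusStack code itself is not part of this diff; the following is only a sketch of the effect for SQL-type stacks, assuming fragments of the form (clause, values):

    def and_fragments(fragments):
        # Hypothetical equivalent of and_elements() for sql fragments:
        # join the clauses with AND and concatenate their bind values.
        clauses = [clause for clause, values in fragments]
        bind_values = tuple(v for clause, values in fragments for v in values)
        return '(' + ' AND '.join(clauses) + ')', bind_values
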
@@ -638,7 +652,17 @@ member_key: the key to be used to sort each resulting element of a group member.
     def get_live_data_log(self):
         """Like get_live_data, but for log objects"""
         filter_func = self.filter_stack.get_stack()
-        sql_filter_func = self.sql_filter_stack.get_stack()
+        if self.use_aggressive_sql:
+            # Be aggressive, get preselected data from sqlite and do less
+            # filtering in python. But: only a subset of Filter:-attributes
+            # can be mapped to columns in the logs-table, for the others
+            # we must use "always-true"-clauses. This can result in
+            # funny and potentially ineffective sql-statements
+            sql_filter_func = self.sql_filter_stack.get_stack()
+        else:
+            # Be conservative, get everything from the database between
+            # two dates and apply the Filter:-clauses in python
+            sql_filter_func = self.sql_time_filter_stack.get_stack()
         out_map = self.out_map[self.out_map_name]
         filter_map = dict([(k, out_map.get(k)) for k in self.filtercolumns])
         output_map = dict([(k, out_map.get(k)) for k in self.columns]) or out_map
@@ -669,6 +693,7 @@ member_key: the key to be used to sort each resulting element of a group member.
             totime = int(lepat.group(3))
         # now find the list of datafiles
         filtresult = []
+        #print filter_clause, filter_values
         for dateobj, handle, archive, fromtime, totime in self.db.log_db_relevant_files(fromtime, totime):
             dbresult = self.select_live_data_log(filter_clause, filter_values, handle, archive, fromtime, totime)
             prefiltresult = [y for y in (x.fill(self.hosts, self.services, set(self.columns + self.filtercolumns)) for x in dbresult) if (without_filter or filter_func(self.create_output(filter_map, y)))]
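For a query filtering on a time window plus a host_name, the two strategies end up with roughly these WHERE clauses (approximate text; the exact statement depends on make_sql_filter and the stack code):

    # With use_aggressive_sql: everything that maps to a logs column is
    # pushed into sqlite, so fewer rows come back to python.
    aggressive = ('(time >= ? AND time <= ? AND host_name = ?)',
                  (1324080000, 1324083600, 'test_host_005'))
    # Without it: only the time window is preselected and the remaining
    # Filter: lines are applied to the rows in python afterwards.
    conservative = ('(time >= ? AND time <= ?)',
                    (1324080000, 1324083600))
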
@@ -930,6 +955,10 @@ member_key: the key to be used to sort each resulting element of a group member.
     def make_sql_filter(self, operator, attribute, reference):
         # The filters are text fragments which are put together to form a sql where-condition finally.
         # Add parameter Class (Host, Service), lookup datatype (default string), convert reference
+        # which attributes are suitable for a sql statement
+        good_attributes = ['time', 'attempt', 'class', 'command_name', 'comment', 'contact_name', 'host_name', 'plugin_output', 'service_description', 'state', 'state_type', 'type']
+        good_operators = ['=', '!=']
+
         def eq_filter():
             if reference == '':
                 return ['%s IS NULL' % attribute, ()]
@@ -950,6 +979,10 @@ member_key: the key to be used to sort each resulting element of a group member.
             return ['%s <= ?' % attribute, (reference, )]
         def match_filter():
             return ['%s LIKE ?' % attribute, ('%'+reference+'%', )]
+        def no_filter():
+            return ['1 = 1', ()]
+        if attribute not in good_attributes:
+            return no_filter
         if operator == '=':
             return eq_filter
         if operator == '>':
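The fallback in make_sql_filter() keeps the generated statement syntactically valid: attributes without a matching column in the logs table become an always-true clause and are filtered later in python. Condensed into a standalone sketch (the real method returns closures and handles more operators than shown here):

    GOOD_ATTRIBUTES = ('time', 'attempt', 'class', 'command_name', 'comment',
                       'contact_name', 'host_name', 'plugin_output',
                       'service_description', 'state', 'state_type', 'type')

    def sql_fragment(operator, attribute, reference):
        if attribute not in GOOD_ATTRIBUTES:
            return '1 = 1', ()                  # cannot be preselected in sqlite
        if operator == '=' and reference == '':
            return '%s IS NULL' % attribute, ()
        if operator in ('=', '!=', '>=', '<=', '>', '<'):
            return '%s %s ?' % (attribute, operator), (reference,)
        return '1 = 1', ()                      # other operators (the real code also maps ~ to LIKE)
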
diff --git a/shinken/modules/livestatus_broker/livestatus_request.py b/shinken/modules/livestatus_broker/livestatus_request.py
index cffe366..3c18347 100644
--- a/shinken/modules/livestatus_broker/livestatus_request.py
+++ b/shinken/modules/livestatus_broker/livestatus_request.py
@@ -28,7 +28,7 @@ class LiveStatusRequest:
    
     """A class describing a livestatus request."""
     
-    def __init__(self, data, configs, hosts, services, contacts, hostgroups, servicegroups, contactgroups, timeperiods, commands, schedulers, pollers, reactionners, brokers, db, pnp_path, return_queue, counters):
+    def __init__(self, data, configs, hosts, services, contacts, hostgroups, servicegroups, contactgroups, timeperiods, commands, schedulers, pollers, reactionners, brokers, db, use_aggressive_sql, pnp_path, return_queue, counters):
         self.data = data
         # Runtime data form the global LiveStatus object
         self.configs = configs
@@ -45,6 +45,7 @@ class LiveStatusRequest:
         self.reactionners = reactionners
         self.brokers = brokers
         self.db = db
+        self.use_aggressive_sql = use_aggressive_sql
         self.pnp_path = pnp_path
         self.return_queue = return_queue
         self.counters = counters
@@ -92,7 +93,7 @@ class LiveStatusRequest:
             query.parse_input('\n'.join(wait_cmds))
             self.queries.append(query)
         if len(query_cmds) > 0:
-            query = LiveStatusQuery(self.configs, self.hosts, self.services, self.contacts, self.hostgroups, self.servicegroups, self.contactgroups, self.timeperiods, self.commands, self.schedulers, self.pollers, self.reactionners, self.brokers, self.db, self.pnp_path, self.return_queue, self.counters)
+            query = LiveStatusQuery(self.configs, self.hosts, self.services, self.contacts, self.hostgroups, self.servicegroups, self.contactgroups, self.timeperiods, self.commands, self.schedulers, self.pollers, self.reactionners, self.brokers, self.db, self.use_aggressive_sql, self.pnp_path, self.return_queue, self.counters)
             query.parse_input('\n'.join(query_cmds))
             self.queries.append(query)
 
diff --git a/test/shinken_test.py b/test/shinken_test.py
index 215126d..9c43fdb 100755
--- a/test/shinken_test.py
+++ b/test/shinken_test.py
@@ -335,7 +335,7 @@ class ShinkenTest(unittest.TestCase):
         self.livelogs = 'tmp/livelogs.db' + self.testid
         self.db_archives = os.path.join(os.path.dirname(self.livelogs), 'archives')
         self.pnp4nagios = 'tmp/pnp4nagios_test' + self.testid
-        self.livestatus_broker = Livestatus_broker(livestatus_modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, self.db_archives, 365, self.pnp4nagios)
+        self.livestatus_broker = Livestatus_broker(livestatus_modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, self.db_archives, 365, self.pnp4nagios, True)
         self.livestatus_broker.create_queues()
         #self.livestatus_broker.properties = {
         #    'to_queue' : 0,
@@ -344,7 +344,7 @@ class ShinkenTest(unittest.TestCase):
         #    }
         self.livestatus_broker.init()
         self.livestatus_broker.db = LiveStatusDb(self.livestatus_broker.database_file, self.livestatus_broker.archive_path, self.livestatus_broker.max_logs_age)
-        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.configs, self.livestatus_broker.hosts, self.livestatus_broker.services, self.livestatus_broker.contacts, self.livestatus_broker.hostgroups, self.livestatus_broker.servicegroups, self.livestatus_broker.contactgroups, self.livestatus_broker.timeperiods, self.livestatus_broker.commands, self.livestatus_broker.schedulers, self.livestatus_broker.pollers, self.livestatus_broker.reactionners, self.livestatus_broker.brokers, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
+        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.configs, self.livestatus_broker.hosts, self.livestatus_broker.services, self.livestatus_broker.contacts, self.livestatus_broker.hostgroups, self.livestatus_broker.servicegroups, self.livestatus_broker.contactgroups, self.livestatus_broker.timeperiods, self.livestatus_broker.commands, self.livestatus_broker.schedulers, self.livestatus_broker.pollers, self.livestatus_broker.reactionners, self.livestatus_broker.brokers, self.livestatus_broker.db, self.livestatus_broker.use_aggressive_sql, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
 
 
 if __name__ == '__main__':
diff --git a/test/test_livestatus.py b/test/test_livestatus.py
index 463efcc..524704a 100755
--- a/test/test_livestatus.py
+++ b/test/test_livestatus.py
@@ -307,6 +307,8 @@ class TestConfigSmall(TestConfig):
         self.livestatus_broker.db.close()
         if os.path.exists(self.livelogs):
             os.remove(self.livelogs)
+        if os.path.exists(self.livelogs+"-journal"):
+            os.remove(self.livelogs+"-journal")
         if os.path.exists(self.pnp4nagios):
             shutil.rmtree(self.pnp4nagios)
         if os.path.exists('var/nagios.log'):
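The extra cleanup removes sqlite's rollback journal: while a transaction is open, sqlite keeps a "<database>-journal" file next to the database file, and an interrupted test can leave it behind. The idiom repeated in the tearDown methods below could be captured in a small helper (hypothetical, not part of the commit):

    import os

    def remove_sqlite_files(path):
        # Remove the database and its rollback journal so the next
        # test run starts from a clean state.
        for name in (path, path + '-journal'):
            if os.path.exists(name):
                os.remove(name)
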
@@ -2277,6 +2279,8 @@ class TestConfigBig(TestConfig):
         self.livestatus_broker.db.close()
         if os.path.exists(self.livelogs):
             os.remove(self.livelogs)
+        if os.path.exists(self.livelogs+"-journal"):
+            os.remove(self.livelogs+"-journal")
         if os.path.exists(self.pnp4nagios):
             shutil.rmtree(self.pnp4nagios)
         if os.path.exists('var/nagios.log'):
@@ -3282,6 +3286,8 @@ class TestConfigComplex(TestConfig):
         self.livestatus_broker.db.close()
         if os.path.exists(self.livelogs):
             os.remove(self.livelogs)
+        if os.path.exists(self.livelogs+"-journal"):
+            os.remove(self.livelogs+"-journal")
         if os.path.exists(self.pnp4nagios):
             shutil.rmtree(self.pnp4nagios)
         to_del = [attr for attr in self.livestatus_broker.livestatus.__class__.out_map['Host'].keys() if attr.startswith('host_')]
@@ -3350,6 +3356,8 @@ class TestConfigCrazy(TestConfig):
         self.livestatus_broker.db.close()
         if os.path.exists(self.livelogs):
             os.remove(self.livelogs)
+        if os.path.exists(self.livelogs+"-journal"):
+            os.remove(self.livelogs+"-journal")
         if os.path.exists(self.pnp4nagios):
             shutil.rmtree(self.pnp4nagios)
         to_del = [attr for attr in self.livestatus_broker.livestatus.__class__.out_map['Host'].keys() if attr.startswith('host_')]
diff --git a/test/test_livestatus_db.py b/test/test_livestatus_db.py
index f651e51..c96272b 100755
--- a/test/test_livestatus_db.py
+++ b/test/test_livestatus_db.py
@@ -93,6 +93,8 @@ class TestConfigSmall(TestConfig):
         self.livestatus_broker.db.close()
         if os.path.exists(self.livelogs):
             os.remove(self.livelogs)
+        if os.path.exists(self.livelogs+"-journal"):
+            os.remove(self.livelogs+"-journal")
         if os.path.exists(self.pnp4nagios):
             shutil.rmtree(self.pnp4nagios)
         if os.path.exists('var/nagios.log'):
@@ -415,7 +417,8 @@ class TestConfigBig(TestConfig):
 
 
     def init_livestatus(self):
-        self.livelogs = 'tmp/livelogs.db' + self.testid
+        #self.livelogs = 'tmp/livelogs.db' + self.testid
+        self.livelogs = 'tmp/livelogs.db' + "wrumm"
         self.db_archives = os.path.join(os.path.dirname(self.livelogs), 'archives')
         self.pnp4nagios = 'tmp/pnp4nagios_test' + self.testid
         self.livestatus_broker = Livestatus_broker(livestatus_modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, self.db_archives, 365, self.pnp4nagios)
@@ -428,29 +431,12 @@ class TestConfigBig(TestConfig):
         self.livestatus_broker.init()
         self.livestatus_broker.db = LiveStatusDb(self.livestatus_broker.database_file, self.livestatus_broker.archive_path, self.livestatus_broker.max_logs_age)
 
-        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.configs, self.livestatus_broker.hosts, self.livestatus_broker.services, self.livestatus_broker.contacts, self.livestatus_broker.hostgroups, self.livestatus_broker.servicegroups, self.livestatus_broker.contactgroups, self.livestatus_broker.timeperiods, self.livestatus_broker.commands, self.livestatus_broker.schedulers, self.livestatus_broker.pollers, self.livestatus_broker.reactionners, self.livestatus_broker.brokers, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
+        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.configs, self.livestatus_broker.hosts, self.livestatus_broker.services, self.livestatus_broker.contacts, self.livestatus_broker.hostgroups, self.livestatus_broker.servicegroups, self.livestatus_broker.contactgroups, self.livestatus_broker.timeperiods, self.livestatus_broker.commands, self.livestatus_broker.schedulers, self.livestatus_broker.pollers, self.livestatus_broker.reactionners, self.livestatus_broker.brokers, self.livestatus_broker.db, self.livestatus_broker.use_aggressive_sql, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
 
 
 
-    def tearDown(self):
-        self.livestatus_broker.db.commit()
-        self.livestatus_broker.db.close()
-        if os.path.exists(self.pnp4nagios):
-            shutil.rmtree(self.pnp4nagios)
-        if os.path.exists('var/nagios.log'):
-            os.remove('var/nagios.log')
-        if os.path.exists('var/retention.dat'):
-            os.remove('var/retention.dat')
-        if os.path.exists('var/status.dat'):
-            os.remove('var/status.dat')
-        to_del = [attr for attr in self.livestatus_broker.livestatus.__class__.out_map['Host'].keys() if attr.startswith('host_')]
-        for attr in to_del:
-            del self.livestatus_broker.livestatus.__class__.out_map['Host'][attr]
-        self.livestatus_broker = None
-
-
-    def x_test_a_long_history(self):
-        return
+    def test_a_long_history(self):
+        #return
         test_host_005 = self.sched.hosts.find_by_name("test_host_005")
         test_host_099 = self.sched.hosts.find_by_name("test_host_099")
         test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
@@ -459,76 +445,93 @@ class TestConfigBig(TestConfig):
         test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
         test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
         starttime = time.time()
-        # run silently
-        old_stdout = sys.stdout
-        sys.stdout = open(os.devnull, "w")
-        self.scheduler_loop(2, [
-            [test_ok_00, 0, "OK"],
-            [test_ok_01, 0, "OK"],
-            [test_ok_04, 0, "OK"],
-            [test_ok_16, 0, "OK"],
-            [test_ok_99, 0, "OK"],
-        ])
-        self.update_broker()
-        should_be = 0
-        #for i in xrange(3600 * 24 * 7):
-        for i in xrange(100000): 
-            if i % 399 == 0:
-                self.scheduler_loop(3, [
-                    [test_ok_00, 1, "WARN"],
-                    [test_ok_01, 2, "CRIT"],
-                    [test_ok_04, 3, "UNKN"],
-                    [test_ok_16, 1, "WARN"],
-                    [test_ok_99, 2, "CRIT"],
-                ])
-                should_be += 3
-            time.sleep(62)
-            if i % 399 == 0:
-                self.scheduler_loop(1, [
-                    [test_ok_00, 0, "OK"],
-                    [test_ok_01, 0, "OK"],
-                    [test_ok_04, 0, "OK"],
-                    [test_ok_16, 0, "OK"],
-                    [test_ok_99, 0, "OK"],
-                ])
-                should_be += 1
-            time.sleep(2)
-            if i % 199 == 0:
-                self.scheduler_loop(3, [
-                    [test_ok_00, 1, "WARN"],
-                    [test_ok_01, 2, "CRIT"],
-                ])
-            time.sleep(62)
-            if i % 199 == 0:
-                self.scheduler_loop(1, [
-                    [test_ok_00, 0, "OK"],
-                    [test_ok_01, 0, "OK"],
-                ])
-            time.sleep(2)
-            if i % 299 == 0:
-                self.scheduler_loop(3, [
-                    [test_host_005, 2, "DOWN"],
-                ])
-            if i % 19 == 0:
-                self.scheduler_loop(3, [
-                    [test_host_099, 2, "DOWN"],
-                ])
-            time.sleep(62)
-            if i % 299 == 0:
-                self.scheduler_loop(3, [
-                    [test_host_005, 0, "UP"],
-                ])
-            if i % 19 == 0:
-                self.scheduler_loop(3, [
-                    [test_host_099, 0, "UP"],
-                ])
-            time.sleep(2)
-        self.update_broker()
-        endtime = time.time()
-        sys.stdout.close()
-        sys.stdout = old_stdout
+        numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs")
+        if numlogs[0][0] == 0:
+            # run silently
+            old_stdout = sys.stdout
+            sys.stdout = open(os.devnull, "w")
+            self.scheduler_loop(2, [
+                [test_ok_00, 0, "OK"],
+                [test_ok_01, 0, "OK"],
+                [test_ok_04, 0, "OK"],
+                [test_ok_16, 0, "OK"],
+                [test_ok_99, 0, "OK"],
+            ])
+            self.update_broker()
+            should_be = 0
+            #for i in xrange(3600 * 24 * 7):
+            for i in xrange(10000): 
+                if i % 10000 == 0:
+                    sys.stderr.write(str(i))
+                if i % 399 == 0:
+                    self.scheduler_loop(3, [
+                        [test_ok_00, 1, "WARN"],
+                        [test_ok_01, 2, "CRIT"],
+                        [test_ok_04, 3, "UNKN"],
+                        [test_ok_16, 1, "WARN"],
+                        [test_ok_99, 2, "CRIT"],
+                    ])
+                    should_be += 3
+                time.sleep(62)
+                if i % 399 == 0:
+                    self.scheduler_loop(1, [
+                        [test_ok_00, 0, "OK"],
+                        [test_ok_01, 0, "OK"],
+                        [test_ok_04, 0, "OK"],
+                        [test_ok_16, 0, "OK"],
+                        [test_ok_99, 0, "OK"],
+                    ])
+                    should_be += 1
+                time.sleep(2)
+                if i % 199 == 0:
+                    self.scheduler_loop(3, [
+                        [test_ok_00, 1, "WARN"],
+                        [test_ok_01, 2, "CRIT"],
+                    ])
+                time.sleep(62)
+                if i % 199 == 0:
+                    self.scheduler_loop(1, [
+                        [test_ok_00, 0, "OK"],
+                        [test_ok_01, 0, "OK"],
+                    ])
+                time.sleep(2)
+                if i % 299 == 0:
+                    self.scheduler_loop(3, [
+                        [test_host_005, 2, "DOWN"],
+                    ])
+                if i % 19 == 0:
+                    self.scheduler_loop(3, [
+                        [test_host_099, 2, "DOWN"],
+                    ])
+                time.sleep(62)
+                if i % 299 == 0:
+                    self.scheduler_loop(3, [
+                        [test_host_005, 0, "UP"],
+                    ])
+                if i % 19 == 0:
+                    self.scheduler_loop(3, [
+                        [test_host_099, 0, "UP"],
+                    ])
+                time.sleep(2)
+                self.update_broker()
+                if i % 1000 == 0:
+                    self.livestatus_broker.db.commit()
+            endtime = time.time()
+            sys.stdout.close()
+            sys.stdout = old_stdout
+            self.livestatus_broker.db.commit()
+        else:
+            should_be = numlogs[0][0]
+            xxx = self.livestatus_broker.db.execute("SELECT min(time), max(time) FROM logs")
+            print xxx
+            starttime, endtime = [self.livestatus_broker.db.execute("SELECT min(time), max(time) FROM logs")][0][0]
+            
+        
         # now we have a lot of events
         # find type = HOST ALERT for test_host_005
+        q = int((endtime - starttime) / 8)
+        starttime += q
+        endtime -= q
         request = """GET log
 Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
 Filter: time >= """ + str(int(starttime)) + """
@@ -548,18 +551,56 @@ Filter: host_name = test_host_099
 Filter: service_description = test_ok_01
 And: 5
 OutputFormat: json"""
+        # switch back to realtime. we want to know how long it takes
+        fake_time_time = time.time
+        fake_time_sleep = time.sleep
         time.time = original_time_time
         time.sleep = original_time_sleep
+        print request
         response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
         pyresponse = eval(response)
         print "number of records", len(pyresponse)
         print "should be", should_be
         numlogs = self.livestatus_broker.db.execute("SELECT min(time), max(time) FROM logs")
         print starttime, endtime, numlogs
-        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
-        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
-        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
-        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+        self.livestatus_broker.livestatus.use_aggressive_sql = True
+        print "aggrosql", self.livestatus_broker.livestatus.use_aggressive_sql
+        response2, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+        self.assert_(response2 == response)
+        print "aggrosql", self.livestatus_broker.livestatus.use_aggressive_sql
+        response2, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+        self.assert_(response2 == response)
+        self.livestatus_broker.livestatus.use_aggressive_sql = False
+        print "aggrosql", self.livestatus_broker.livestatus.use_aggressive_sql
+        response2, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+        self.assert_(response2 == response)
+        print "aggrosql", self.livestatus_broker.livestatus.use_aggressive_sql
+        response2, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+        self.assert_(response2 == response)
+        # back to fake time for the other tests can run faster
+        time.time = fake_time_time
+        time.sleep = fake_time_sleep
+
+
+    def tearDown(self):
+        self.livestatus_broker.db.commit()
+        self.livestatus_broker.db.close()
+        if os.path.exists(self.livelogs):
+            os.remove(self.livelogs)
+        if os.path.exists(self.livelogs+"-journal"):
+            os.remove(self.livelogs+"-journal")
+        if os.path.exists(self.pnp4nagios):
+            shutil.rmtree(self.pnp4nagios)
+        if os.path.exists('var/nagios.log'):
+            os.remove('var/nagios.log')
+        if os.path.exists('var/retention.dat'):
+            os.remove('var/retention.dat')
+        if os.path.exists('var/status.dat'):
+            os.remove('var/status.dat')
+        to_del = [attr for attr in self.livestatus_broker.livestatus.__class__.out_map['Host'].keys() if attr.startswith('host_')]
+        for attr in to_del:
+            del self.livestatus_broker.livestatus.__class__.out_map['Host'][attr]
+        self.livestatus_broker = None
 
 
 
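The reenabled test_a_long_history boils down to an equivalence check: the same GET log request must produce identical output with and without the aggressive SQL preselection. Restated as a minimal, hypothetical helper against the objects used in the test:

    def assert_same_answer(livestatus, request):
        # Toggle the strategy on the running LiveStatus object and compare
        # the raw responses; both code paths must agree byte for byte.
        livestatus.use_aggressive_sql = True
        aggressive_response, _ = livestatus.handle_request(request)
        livestatus.use_aggressive_sql = False
        conservative_response, _ = livestatus.handle_request(request)
        assert aggressive_response == conservative_response
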

-- 
UNNAMED PROJECT


