[Pkg-nagios-changes] [SCM] UNNAMED PROJECT branch, debian/master, updated. 810edbdd3feedbfe37f4a65bee50b57b2f60fa2a
Gerhard Lausser
gerhard.lausser at consol.de
Tue Feb 28 22:20:16 UTC 2012
The following commit has been merged into the debian/master branch:
commit 56c030863ef645d8d016e1b562ae174bd28592ea
Author: Gerhard Lausser <gerhard.lausser at consol.de>
Date: Sat Feb 11 02:48:05 2012 +0100
Fix a minor bug in livestatus cache
diff --git a/shinken/modules/livestatus_broker/livestatus_query_cache.py b/shinken/modules/livestatus_broker/livestatus_query_cache.py
index 3aa478c..1ab0b57 100644
--- a/shinken/modules/livestatus_broker/livestatus_query_cache.py
+++ b/shinken/modules/livestatus_broker/livestatus_query_cache.py
@@ -134,14 +134,14 @@ class QueryData(object):
self.client_localtime = int(time.time())
self.stats_columns = [f[1] for f in self.structured_data if f[0] == 'Stats']
self.filter_columns = [f[1] for f in self.structured_data if f[0] == 'Filter']
+ self.columns = [f[1] for f in self.structured_data if f[0] == 'Columns'][0]
self.categorize()
- print self
- print self.category
def __str__(self):
text = "table %s\n" % self.table
text += "columns %s\n" % self.columns
text += "stats_columns %s\n" % self.stats_columns
+ text += "filter_columns %s\n" % self.filter_columns
text += "is_stats %s\n" % self.is_stats
text += "is_cacheable %s\n" % str(self.category != CACHE_IMPOSSIBLE)
return text
@@ -265,8 +265,10 @@ class QueryData(object):
can not change over time. (ex. current_host_num_critical_services)
"""
logline_elements = ['attempt', 'class', 'command_name', 'comment', 'contact_name', 'host_name', 'message', 'options', 'plugin_output', 'service_description', 'state', 'state_type', 'time', 'type']
+ logline_elements.extend(['current_host_groups', 'current_service_groups'])
if self.table == 'log':
- limits = sorted([(f[2], f[3]) for f in self.structured_data if f[0] == 'Filter' and f[1] == 'time'], key=lambda x: x[1])
+ limits = sorted([(f[2], int(f[3])) for f in self.structured_data if f[0] == 'Filter' and f[1] == 'time'], key=lambda x: x[1])
+
if len(limits) == 2 and limits[1][1] <= int(time.time()) and limits[0][0].startswith('>') and limits[1][0].startswith('<'):
if has_not_more_than(self.columns, logline_elements):
return True
@@ -339,8 +341,8 @@ class LiveStatusQueryCache(object):
def get_cached_query(self, data):
if not self.enabled:
return (False, [])
- print "I SEARCH THE CACHE FOR", data
query = QueryData(data)
+ print "I SEARCH THE CACHE FOR", query.category, query.key, data
if self.categories[query.category].get(query.key):
print "CACHE HIT"
return (query.category != CACHE_IMPOSSIBLE, self.categories[query.category].get(query.key))
@@ -351,7 +353,7 @@ class LiveStatusQueryCache(object):
if not self.enabled:
return
query = QueryData(data)
- print "I PUT IN THE CACHE FOR", query.key
+ print "I PUT IN THE CACHE FOR", query.category, query.key
self.categories[query.category].put(query.key, result)
def impact_assessment(self, brok, obj):
diff --git a/shinken/modules/logstore_sqlite.py b/shinken/modules/logstore_sqlite.py
index 1e13776..6c7be8f 100644
--- a/shinken/modules/logstore_sqlite.py
+++ b/shinken/modules/logstore_sqlite.py
@@ -413,8 +413,6 @@ class LiveStatusLogStoreSqlite(BaseModule):
# We can apply the filterstack here as well. we have columns and filtercolumns.
# the only additional step is to enrich log lines with host/service-attributes
# A timerange can be useful for a faster preselection of lines
- x = sql_filter_func
- print "x is", x
filter_clause, filter_values = sql_filter_func()
full_filter_clause = filter_clause
matchcount = 0
@@ -515,12 +513,10 @@ class LiveStatusSqlStack(LiveStatusStack):
negate_clause = '(NOT ' + top_filter()[0] + ')'
negate_values = top_filter()[1]
negate_filter = lambda: [negate_clause, negate_values]
- print "not_element", negate_clause
self.put_stack(negate_filter)
def and_elements(self, num):
"""Take num filters from the stack, and them and put the result back"""
- print "this is sql and_elements", num, self.qsize()
if num > 1:
filters = []
for _ in range(num):
diff --git a/test/test_livestatus_cache.py b/test/test_livestatus_cache.py
index 8565b4b..ac63459 100644
--- a/test/test_livestatus_cache.py
+++ b/test/test_livestatus_cache.py
@@ -147,6 +147,141 @@ Stats: state = 3"""
print 'query_6_______________\n%s\n%s\n' % (request, response)
self.assert_(response == '2000;1994;3;3;0\n')
+ def test_a_long_history(self):
+ #return
+ test_host_005 = self.sched.hosts.find_by_name("test_host_005")
+ test_host_099 = self.sched.hosts.find_by_name("test_host_099")
+ test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
+ test_ok_01 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_01")
+ test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
+ test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
+ test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
+ time_warp(-1 * 20 * 24 * 3600)
+ starttime = time.time()
+ numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs")
+ if numlogs[0][0] == 0:
+ # run silently
+ old_stdout = sys.stdout
+ sys.stdout = open(os.devnull, "w")
+ self.scheduler_loop(2, [
+ [test_ok_00, 0, "OK"],
+ [test_ok_01, 0, "OK"],
+ [test_ok_04, 0, "OK"],
+ [test_ok_16, 0, "OK"],
+ [test_ok_99, 0, "OK"],
+ ])
+ self.update_broker()
+ should_be = 0
+ #for i in xrange(3600 * 24 * 7):
+ for i in xrange(10000):
+ if i % 10000 == 0:
+ sys.stderr.write(str(i))
+ if i % 399 == 0:
+ self.scheduler_loop(3, [
+ [test_ok_00, 1, "WARN"],
+ [test_ok_01, 2, "CRIT"],
+ [test_ok_04, 3, "UNKN"],
+ [test_ok_16, 1, "WARN"],
+ [test_ok_99, 2, "CRIT"],
+ ])
+ should_be += 3
+ time.sleep(62)
+ if i % 399 == 0:
+ self.scheduler_loop(1, [
+ [test_ok_00, 0, "OK"],
+ [test_ok_01, 0, "OK"],
+ [test_ok_04, 0, "OK"],
+ [test_ok_16, 0, "OK"],
+ [test_ok_99, 0, "OK"],
+ ])
+ should_be += 1
+ time.sleep(2)
+ if i % 199 == 0:
+ self.scheduler_loop(3, [
+ [test_ok_00, 1, "WARN"],
+ [test_ok_01, 2, "CRIT"],
+ ])
+ time.sleep(62)
+ if i % 199 == 0:
+ self.scheduler_loop(1, [
+ [test_ok_00, 0, "OK"],
+ [test_ok_01, 0, "OK"],
+ ])
+ time.sleep(2)
+ if i % 299 == 0:
+ self.scheduler_loop(3, [
+ [test_host_005, 2, "DOWN"],
+ ])
+ if i % 19 == 0:
+ self.scheduler_loop(3, [
+ [test_host_099, 2, "DOWN"],
+ ])
+ time.sleep(62)
+ if i % 299 == 0:
+ self.scheduler_loop(3, [
+ [test_host_005, 0, "UP"],
+ ])
+ if i % 19 == 0:
+ self.scheduler_loop(3, [
+ [test_host_099, 0, "UP"],
+ ])
+ time.sleep(2)
+ self.update_broker()
+ if i % 1000 == 0:
+ self.livestatus_broker.db.commit()
+ endtime = time.time()
+ sys.stdout.close()
+ sys.stdout = old_stdout
+ self.livestatus_broker.db.commit()
+ else:
+ should_be = numlogs[0][0]
+ xxx = self.livestatus_broker.db.execute("SELECT min(time), max(time) FROM logs")
+ print xxx
+ starttime, endtime = [self.livestatus_broker.db.execute("SELECT min(time), max(time) FROM logs")][0][0]
+
+
+ # now we have a lot of events
+ # find type = HOST ALERT for test_host_005
+ q = int((endtime - starttime) / 8)
+ starttime += q
+ endtime -= q
+ request = """GET log
+Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
+Filter: time >= """ + str(int(starttime)) + """
+Filter: time <= """ + str(int(endtime)) + """
+Filter: type = SERVICE ALERT
+And: 1
+Filter: type = HOST ALERT
+And: 1
+Filter: type = SERVICE FLAPPING ALERT
+Filter: type = HOST FLAPPING ALERT
+Filter: type = SERVICE DOWNTIME ALERT
+Filter: type = HOST DOWNTIME ALERT
+Filter: type ~ starting...
+Filter: type ~ shutting down...
+Or: 8
+Filter: host_name = test_host_099
+Filter: service_description = test_ok_01
+And: 5
+OutputFormat: json"""
+ # switch back to realtime. we want to know how long it takes
+ fake_time_time = time.time
+ fake_time_sleep = time.sleep
+ time.time = original_time_time
+ time.sleep = original_time_sleep
+ print request
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ pyresponse = eval(response)
+ print "number of records", len(pyresponse)
+ print "should be", should_be
+ print "now with cache"
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ pyresponse = eval(response)
+ print "number of records", len(pyresponse)
+ print "should be", should_be
+ time.time = fake_time_time
+ time.sleep = fake_time_sleep
+
if __name__ == '__main__':
#import cProfile
command = """unittest.main()"""
--
UNNAMED PROJECT
More information about the Pkg-nagios-changes
mailing list