[Pkg-nagios-changes] [SCM] UNNAMED PROJECT branch, debian/master, updated. 810edbdd3feedbfe37f4a65bee50b57b2f60fa2a
Gerhard Lausser
gerhard.lausser at consol.de
Tue Feb 28 22:20:31 UTC 2012
The following commit has been merged in the debian/master branch:
commit 312663274a260d71d25651ecaea71fbb4cffb9af
Author: Gerhard Lausser <gerhard.lausser at consol.de>
Date: Sun Feb 12 21:41:49 2012 +0100
Add the Negate: statement to the livestatus module
Fix minor bugs and make the db test cases easier to understand
diff --git a/shinken/modules/livestatus_broker/livestatus_query.py b/shinken/modules/livestatus_broker/livestatus_query.py
index c38c919..ee00388 100644
--- a/shinken/modules/livestatus_broker/livestatus_query.py
+++ b/shinken/modules/livestatus_broker/livestatus_query.py
@@ -176,6 +176,10 @@ class LiveStatusQuery(object):
self.filter_stack.or_elements(ornum)
if self.table == 'log':
self.db.add_filter_or(ornum)
+ elif keyword == 'Negate':
+ self.filter_stack.not_elements()
+ if self.table == 'log':
+ self.db.add_filter_not()
elif keyword == 'StatsGroupBy':
_, stats_group_by = self.split_option_with_columns(line)
self.filtercolumns.extend(stats_group_by)
diff --git a/shinken/modules/livestatus_broker/livestatus_query_cache.py b/shinken/modules/livestatus_broker/livestatus_query_cache.py
index e87a0a5..bbc6487 100644
--- a/shinken/modules/livestatus_broker/livestatus_query_cache.py
+++ b/shinken/modules/livestatus_broker/livestatus_query_cache.py
@@ -210,6 +210,8 @@ class QueryData(object):
elif keyword == 'Or':
_, ornum = self.split_option(line)
self.structured_data.append((keyword, ornum))
+ elif keyword == 'Negate':
+ self.structured_data.append((keyword, ))
elif keyword == 'StatsGroupBy':
_, columns = self.split_option_with_columns(line)
self.structured_data.append((keyword, columns))
diff --git a/shinken/modules/livestatus_broker/mapping.py b/shinken/modules/livestatus_broker/mapping.py
index beedc1c..6e9f7ae 100644
--- a/shinken/modules/livestatus_broker/mapping.py
+++ b/shinken/modules/livestatus_broker/mapping.py
@@ -89,7 +89,6 @@ def worst_service_state(state_1, state_2):
def find_pnp_perfdata_xml(name, request):
"""Check if a pnp xml file exists for a given host or service name."""
- print "find_pnp_perfdata_xml", name
if request.pnp_path_readable:
if '/' in name:
# It is a service
diff --git a/shinken/modules/logstore_mongodb.py b/shinken/modules/logstore_mongodb.py
index 1339072..a34af93 100644
--- a/shinken/modules/logstore_mongodb.py
+++ b/shinken/modules/logstore_mongodb.py
@@ -78,7 +78,7 @@ class LiveStatusLogStoreMongoDB(BaseModule):
self.max_logs_age = int(maxmatch.group(1)) * 31
elif maxmatch.group(2) == 'y':
self.max_logs_age = int(maxmatch.group(1)) * 365
-
+ self.use_aggressive_sql = (getattr(modconf, 'use_aggressive_sql', '1') == '1')
# This stack is used to create a full-blown select-statement
self.mongo_filter_stack = LiveStatusMongoStack()
# This stack is used to create a minimal select-statement which
@@ -206,12 +206,14 @@ class LiveStatusLogStoreMongoDB(BaseModule):
def add_filter_or(self, ornum):
self.mongo_filter_stack.or_elements(ornum)
+ def add_filter_not(self):
+ self.mongo_filter_stack.not_elements()
+
def get_live_data_log(self):
"""Like get_live_data, but for log objects"""
# finalize the filter stacks
self.mongo_time_filter_stack.and_elements(self.mongo_time_filter_stack.qsize())
self.mongo_filter_stack.and_elements(self.mongo_filter_stack.qsize())
- self.use_aggressive_sql = True
if self.use_aggressive_sql:
# Be aggressive, get preselected data from sqlite and do less
# filtering in python. But: only a subset of Filter:-attributes
@@ -271,7 +273,7 @@ class LiveStatusLogStoreMongoDB(BaseModule):
def match_filter():
return '\'%s\' : { \'$regex\' : %s }' % (attribute, reference)
def no_filter():
- return '\'%s\' : { \'$exists\' : true }' % (attribute,)
+ return '\'time\' : { \'$exists\' : True }'
if attribute not in good_attributes:
return no_filter
if operator == '=':
@@ -313,6 +315,19 @@ class LiveStatusMongoStack(LiveStatusStack):
self.type = 'mongo'
self.__class__.__bases__[0].__init__(self, *args, **kw)
+ def not_elements(self):
+ top_filter = self.get_stack()
+ #negate_filter = lambda: '\'$not\' : { %s }' % top_filter()
+ # mongodb doesn't have the not-operator like sql, which can negate
+ # a complete expression. Mongodb $not can only reverse one operator
+ # at a time. This would require rewriting of the whole expression.
+ # So instead of deciding whether a record can pass the filter or not,
+ # we let it pass in any case. That's no problem, because the result
+ # of the database query will have to go through the in-memory-objects
+ # filter too.
+ negate_filter = lambda: '\'time\' : { \'$exists\' : True }'
+ self.put_stack(negate_filter)
+
def and_elements(self, num):
"""Take num filters from the stack, and them and put the result back"""
if num > 1:
diff --git a/shinken/modules/logstore_sqlite.py b/shinken/modules/logstore_sqlite.py
index ad3a0e6..f7330ed 100644
--- a/shinken/modules/logstore_sqlite.py
+++ b/shinken/modules/logstore_sqlite.py
@@ -391,6 +391,9 @@ class LiveStatusLogStoreSqlite(BaseModule):
def add_filter_or(self, ornum):
self.sql_filter_stack.or_elements(ornum)
+ def add_filter_not(self):
+ self.sql_filter_stack.not_elements()
+
def get_live_data_log(self):
"""Like get_live_data, but for log objects"""
diff --git a/test/test_livestatus.py b/test/test_livestatus.py
index 1f85a8d..e4599ae 100755
--- a/test/test_livestatus.py
+++ b/test/test_livestatus.py
@@ -2377,6 +2377,62 @@ class TestConfigBig(TestConfig):
self.livestatus_broker = None
+ def test_negate(self):
+ # test_host_005 is in hostgroup_01
+ # 20 services from 400 services
+ hostgroup_01 = self.sched.hostgroups.find_by_name("hostgroup_01")
+ host_005 = self.sched.hosts.find_by_name("test_host_005")
+ test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
+ query = """GET services
+Columns: host_name description
+Filter: host_name = test_host_005
+Filter: description = test_ok_00
+OutputFormat: python
+"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(query)
+ pyresponse = eval(response)
+ print len(pyresponse)
+ query = """GET services
+Columns: host_name description
+OutputFormat: python
+"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(query)
+ allpyresponse = eval(response)
+ print len(allpyresponse)
+ query = """GET services
+Columns: host_name description
+Filter: host_name = test_host_005
+Filter: description = test_ok_00
+And: 2
+Negate:
+OutputFormat: python
+"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(query)
+ negpyresponse = eval(response)
+ print len(negpyresponse)
+ # only test_ok_00 + without test_ok_00 must be all services
+ self.assert_(len(allpyresponse) == len(pyresponse) + len(negpyresponse))
+
+ query = """GET hosts
+Columns: host_name num_services
+Filter: host_name = test_host_005
+OutputFormat: python
+"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(query)
+ numsvc = eval(response)
+ print response, numsvc
+
+ query = """GET services
+Columns: host_name description
+Filter: host_name = test_host_005
+Filter: description = test_ok_00
+Negate:
+OutputFormat: python
+"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(query)
+ numsvcwithout = eval(response)
+ self.assert_(numsvc[0][1] - 1 == len(numsvcwithout))
+
def test_worst_service_state(self):
# test_host_005 is in hostgroup_01
# 20 services from 400 services
diff --git a/test/test_livestatus_db.py b/test/test_livestatus_db.py
index 8e5f57e..4a28292 100755
--- a/test/test_livestatus_db.py
+++ b/test/test_livestatus_db.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.6
+ #!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
#Copyright (C) 2009-2010 :
@@ -69,6 +69,29 @@ class TestConfig(ShinkenTest):
self.livestatus_broker.manage_brok(brok)
self.sched.broks = {}
+ def tearDown(self):
+ self.livestatus_broker.db.commit()
+ self.livestatus_broker.db.close()
+ if os.path.exists(self.livelogs):
+ os.remove(self.livelogs)
+ if os.path.exists(self.livelogs+"-journal"):
+ os.remove(self.livelogs+"-journal")
+ print "i clean up"
+ print "i clean up"
+ print "i clean up"
+ print "i clean up"
+ if os.path.exists("tmp/archives"):
+ for db in os.listdir("tmp/archives"):
+ print "cleanup", db
+ os.remove(os.path.join("tmp/archives", db))
+ if os.path.exists('var/nagios.log'):
+ os.remove('var/nagios.log')
+ if os.path.exists('var/retention.dat'):
+ os.remove('var/retention.dat')
+ if os.path.exists('var/status.dat'):
+ os.remove('var/status.dat')
+ self.livestatus_broker = None
+
class TestConfigSmall(TestConfig):
@@ -88,29 +111,6 @@ class TestConfigSmall(TestConfig):
host = self.sched.hosts.find_by_name("test_host_0")
host.__class__.use_aggressive_host_checking = 1
-
-
-
- def tearDown(self):
- self.livestatus_broker.db.commit()
- self.livestatus_broker.db.close()
- #if os.path.exists(self.livelogs):
- # os.remove(self.livelogs)
- #if os.path.exists(self.livelogs+"-journal"):
- # os.remove(self.livelogs+"-journal")
- if os.path.exists('var/nagios.log'):
- os.remove('var/nagios.log')
- if os.path.exists('var/retention.dat'):
- os.remove('var/retention.dat')
- if os.path.exists('var/status.dat'):
- os.remove('var/status.dat')
- self.livestatus_broker = None
-
- if os.path.exists("tmp/archives"):
- for db in os.listdir("tmp/archives"):
- os.remove(os.path.join("tmp/archives", db))
-
-
def write_logs(self, host, loops=0):
for loop in range(0, loops):
host.state = 'DOWN'
@@ -506,12 +506,42 @@ class TestConfigBig(TestConfig):
test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
- starttime = time.time()
- numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs")
- if numlogs[0][0] == 0:
- # run silently
- old_stdout = sys.stdout
- sys.stdout = open(os.devnull, "w")
+
+ days = 4
+ etime = time.time()
+ print "now it is", time.ctime(etime)
+ print "now it is", time.gmtime(etime)
+ etime_midnight = (etime - (etime % 86400)) + time.altzone
+ print "midnight was", time.ctime(etime_midnight)
+ print "midnight was", time.gmtime(etime_midnight)
+ query_start = etime_midnight - (days - 1) * 86400
+ query_end = etime_midnight
+ print "query_start", time.ctime(query_start)
+ print "query_end ", time.ctime(query_end)
+
+ # |----------|----------|----------|----------|----------|---x
+ # etime
+ # etime_midnight
+ # ---x------
+ # etime - 4 days
+ # |---
+ # query_start
+ #
+ # ............................................
+ # events in the log database ranging till now
+ #
+ # |________________________________|
+ # events which will be read from db
+ #
+ loops = int(86400 / 192)
+ time_warp(-1 * days * 86400)
+ print "warp back to", time.ctime(time.time())
+ # run silently
+ old_stdout = sys.stdout
+ sys.stdout = open(os.devnull, "w")
+ should_be = 0
+ for day in xrange(days):
+ sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
self.scheduler_loop(2, [
[test_ok_00, 0, "OK"],
[test_ok_01, 0, "OK"],
@@ -520,9 +550,8 @@ class TestConfigBig(TestConfig):
[test_ok_99, 0, "OK"],
])
self.update_broker()
- should_be = 0
#for i in xrange(3600 * 24 * 7):
- for i in xrange(10000):
+ for i in xrange(loops):
if i % 10000 == 0:
sys.stderr.write(str(i))
if i % 399 == 0:
@@ -533,7 +562,9 @@ class TestConfigBig(TestConfig):
[test_ok_16, 1, "WARN"],
[test_ok_99, 2, "CRIT"],
])
- should_be += 3
+ if int(time.time()) >= query_start and int(time.time()) <= query_end:
+ should_be += 3
+ sys.stderr.write("now it should be %s\n" % should_be)
time.sleep(62)
if i % 399 == 0:
self.scheduler_loop(1, [
@@ -543,34 +574,37 @@ class TestConfigBig(TestConfig):
[test_ok_16, 0, "OK"],
[test_ok_99, 0, "OK"],
])
- should_be += 1
+ if int(time.time()) >= query_start and int(time.time()) <= query_end:
+ should_be += 1
+ sys.stderr.write("now it should be %s\n" % should_be)
time.sleep(2)
- if i % 199 == 0:
+ if i % 17 == 0:
self.scheduler_loop(3, [
[test_ok_00, 1, "WARN"],
[test_ok_01, 2, "CRIT"],
])
+
time.sleep(62)
- if i % 199 == 0:
+ if i % 17 == 0:
self.scheduler_loop(1, [
[test_ok_00, 0, "OK"],
[test_ok_01, 0, "OK"],
])
time.sleep(2)
- if i % 299 == 0:
+ if i % 14 == 0:
self.scheduler_loop(3, [
[test_host_005, 2, "DOWN"],
])
- if i % 19 == 0:
+ if i % 12 == 0:
self.scheduler_loop(3, [
[test_host_099, 2, "DOWN"],
])
time.sleep(62)
- if i % 299 == 0:
+ if i % 14 == 0:
self.scheduler_loop(3, [
[test_host_005, 0, "UP"],
])
- if i % 19 == 0:
+ if i % 12 == 0:
self.scheduler_loop(3, [
[test_host_099, 0, "UP"],
])
@@ -579,25 +613,20 @@ class TestConfigBig(TestConfig):
if i % 1000 == 0:
self.livestatus_broker.db.commit()
endtime = time.time()
- sys.stdout.close()
- sys.stdout = old_stdout
self.livestatus_broker.db.commit()
- else:
- should_be = numlogs[0][0]
- xxx = self.livestatus_broker.db.execute("SELECT min(time), max(time) FROM logs")
- print xxx
- starttime, endtime = [self.livestatus_broker.db.execute("SELECT min(time), max(time) FROM logs")][0][0]
-
-
+ sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
+ sys.stdout.close()
+ sys.stdout = old_stdout
+ self.livestatus_broker.db.commit_and_rotate_log_db()
+ numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs")
+ print "numlogs is", numlogs
+
# now we have a lot of events
# find type = HOST ALERT for test_host_005
- q = int((endtime - starttime) / 8)
- starttime += q
- endtime -= q
request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
-Filter: time >= """ + str(int(starttime)) + """
-Filter: time <= """ + str(int(endtime)) + """
+Filter: time >= """ + str(int(query_start)) + """
+Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
@@ -619,30 +648,67 @@ OutputFormat: json"""
time.time = original_time_time
time.sleep = original_time_sleep
print request
+ print "query 1 --------------------------------------------------"
+ tic = time.time()
response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ tac = time.time()
pyresponse = eval(response)
- print "number of records", len(pyresponse)
- print "should be", should_be
+ print "number of records with test_ok_01", len(pyresponse)
+ self.assert_(len(pyresponse) == should_be)
+
+ # and now test Negate:
+ request = """GET log
+Filter: time >= """ + str(int(query_start)) + """
+Filter: time <= """ + str(int(query_end)) + """
+Filter: type = SERVICE ALERT
+And: 1
+Filter: type = HOST ALERT
+And: 1
+Filter: type = SERVICE FLAPPING ALERT
+Filter: type = HOST FLAPPING ALERT
+Filter: type = SERVICE DOWNTIME ALERT
+Filter: type = HOST DOWNTIME ALERT
+Filter: type ~ starting...
+Filter: type ~ shutting down...
+Or: 8
+Filter: host_name = test_host_099
+Filter: service_description = test_ok_01
+And: 2
+Negate:
+And: 2
+OutputFormat: json"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ print "got response with true instead of negate"
+ notpyresponse = eval(response)
+ print "number of records without test_ok_01", len(notpyresponse)
+
+ request = """GET log
+Filter: time >= """ + str(int(query_start)) + """
+Filter: time <= """ + str(int(query_end)) + """
+Filter: type = SERVICE ALERT
+And: 1
+Filter: type = HOST ALERT
+And: 1
+Filter: type = SERVICE FLAPPING ALERT
+Filter: type = HOST FLAPPING ALERT
+Filter: type = SERVICE DOWNTIME ALERT
+Filter: type = HOST DOWNTIME ALERT
+Filter: type ~ starting...
+Filter: type ~ shutting down...
+Or: 8
+OutputFormat: json"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ allpyresponse = eval(response)
+ print "all records", len(allpyresponse)
+ self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
+ # the numlogs above only counts records in the currently attached db
+ numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs WHERE time >= %d AND time <= %d" %(int(query_start), int(query_end)))
+ print "numlogs is", numlogs
+
time.time = fake_time_time
time.sleep = fake_time_sleep
- def tearDown(self):
- self.livestatus_broker.db.commit()
- self.livestatus_broker.db.close()
- if os.path.exists(self.livelogs):
- os.remove(self.livelogs)
- if os.path.exists(self.livelogs+"-journal"):
- os.remove(self.livelogs+"-journal")
- if os.path.exists(self.livestatus_broker.pnp_path):
- shutil.rmtree(self.livestatus_broker.pnp_path)
- if os.path.exists('var/nagios.log'):
- os.remove('var/nagios.log')
- if os.path.exists('var/retention.dat'):
- os.remove('var/retention.dat')
- if os.path.exists('var/status.dat'):
- os.remove('var/status.dat')
- self.livestatus_broker = None
class TestConfigNoLogstore(TestConfig):
diff --git a/test/test_livestatus_mongodb.py b/test/test_livestatus_mongodb.py
index 14608a5..9c67914 100755
--- a/test/test_livestatus_mongodb.py
+++ b/test/test_livestatus_mongodb.py
@@ -296,7 +296,7 @@ class TestConfigBig(TestConfig):
'mongodb_uri' : "mongodb://127.0.0.1:27017",
#'mongodb_uri' : "mongodb://10.0.12.50:27017,10.0.12.51:27017",
# 'replica_set' : 'livestatus',
- 'max_logs_age' : '14',
+ 'max_logs_age' : '7',
})
modconf.modules = [dbmodconf]
self.livestatus_broker = LiveStatus_broker(modconf)
@@ -323,11 +323,14 @@ class TestConfigBig(TestConfig):
self.livestatus_broker.rg = LiveStatusRegenerator()
self.livestatus_broker.datamgr = datamgr
datamgr.load(self.livestatus_broker.rg)
+ self.livestatus_broker.query_cache = LiveStatusQueryCache()
+ self.livestatus_broker.query_cache.disable()
+ self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache)
#--- livestatus_broker.main
self.livestatus_broker.init()
self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
- self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
+ self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.query_cache, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
#--- livestatus_broker.do_main
self.livestatus_broker.db.open()
@@ -340,6 +343,7 @@ class TestConfigBig(TestConfig):
def test_a_long_history(self):
if not has_pymongo:
return
+ # copied from test_livestatus_cache
test_host_005 = self.sched.hosts.find_by_name("test_host_005")
test_host_099 = self.sched.hosts.find_by_name("test_host_099")
test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
@@ -347,17 +351,42 @@ class TestConfigBig(TestConfig):
test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
- starttime = time.time()
-
- num_log_broks = 0
- try:
- numlogs = self.livestatus_broker.db.conn.bigbigbig.find().count()
- except Exception:
- numlogs = 0
- if numlogs == 0:
- # run silently
- old_stdout = sys.stdout
- sys.stdout = open(os.devnull, "w")
+
+ days = 4
+ etime = time.time()
+ print "now it is", time.ctime(etime)
+ print "now it is", time.gmtime(etime)
+ etime_midnight = (etime - (etime % 86400)) + time.altzone
+ print "midnight was", time.ctime(etime_midnight)
+ print "midnight was", time.gmtime(etime_midnight)
+ query_start = etime_midnight - (days - 1) * 86400
+ query_end = etime_midnight
+ print "query_start", time.ctime(query_start)
+ print "query_end ", time.ctime(query_end)
+
+ # |----------|----------|----------|----------|----------|---x
+ # etime
+ # etime_midnight
+ # ---x------
+ # etime - 4 days
+ # |---
+ # query_start
+ #
+ # ............................................
+ # events in the log database ranging till now
+ #
+ # |________________________________|
+ # events which will be read from db
+ #
+ loops = int(86400 / 192)
+ time_warp(-1 * days * 86400)
+ print "warp back to", time.ctime(time.time())
+ # run silently
+ old_stdout = sys.stdout
+ sys.stdout = open(os.devnull, "w")
+ should_be = 0
+ for day in xrange(days):
+ sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
self.scheduler_loop(2, [
[test_ok_00, 0, "OK"],
[test_ok_01, 0, "OK"],
@@ -365,26 +394,22 @@ class TestConfigBig(TestConfig):
[test_ok_16, 0, "OK"],
[test_ok_99, 0, "OK"],
])
- num_log_broks += self.count_log_broks()
self.update_broker()
- should_be = 0
- should_be_huhu = 0
- huhuhus = []
#for i in xrange(3600 * 24 * 7):
- for i in xrange(10000):
- if i % 1000 == 0:
- sys.stderr.write("loop "+str(i))
+ for i in xrange(loops):
+ if i % 10000 == 0:
+ sys.stderr.write(str(i))
if i % 399 == 0:
self.scheduler_loop(3, [
[test_ok_00, 1, "WARN"],
[test_ok_01, 2, "CRIT"],
[test_ok_04, 3, "UNKN"],
[test_ok_16, 1, "WARN"],
- [test_ok_99, 2, "HUHU"+str(i)],
+ [test_ok_99, 2, "CRIT"],
])
- should_be += 3
- should_be_huhu += 3
- huhuhus.append(i)
+ if int(time.time()) >= query_start and int(time.time()) <= query_end:
+ should_be += 3
+ sys.stderr.write("now it should be %s\n" % should_be)
time.sleep(62)
if i % 399 == 0:
self.scheduler_loop(1, [
@@ -394,66 +419,59 @@ class TestConfigBig(TestConfig):
[test_ok_16, 0, "OK"],
[test_ok_99, 0, "OK"],
])
- should_be += 1
+ if int(time.time()) >= query_start and int(time.time()) <= query_end:
+ should_be += 1
+ sys.stderr.write("now it should be %s\n" % should_be)
time.sleep(2)
- if i % 199 == 0:
+ if i % 17 == 0:
self.scheduler_loop(3, [
[test_ok_00, 1, "WARN"],
[test_ok_01, 2, "CRIT"],
])
+
time.sleep(62)
- if i % 199 == 0:
+ if i % 17 == 0:
self.scheduler_loop(1, [
[test_ok_00, 0, "OK"],
[test_ok_01, 0, "OK"],
])
time.sleep(2)
- if i % 299 == 0:
+ if i % 14 == 0:
self.scheduler_loop(3, [
[test_host_005, 2, "DOWN"],
])
- if i % 19 == 0:
+ if i % 12 == 0:
self.scheduler_loop(3, [
[test_host_099, 2, "DOWN"],
])
time.sleep(62)
- if i % 299 == 0:
+ if i % 14 == 0:
self.scheduler_loop(3, [
[test_host_005, 0, "UP"],
])
- if i % 19 == 0:
+ if i % 12 == 0:
self.scheduler_loop(3, [
[test_host_099, 0, "UP"],
])
time.sleep(2)
- num_log_broks += self.count_log_broks()
self.update_broker()
if i % 1000 == 0:
self.livestatus_broker.db.commit()
endtime = time.time()
- sys.stdout.close()
- sys.stdout = old_stdout
self.livestatus_broker.db.commit()
- else:
- should_be = numlogs
- starttime = int(time.time())
- endtime = 0
- for doc in self.livestatus_broker.db.conn.bigbigbig.logs.find():
- if doc['time'] < starttime:
- starttime = doc['time']
- if doc['time'] > endtime:
- endtime = doc['time']
- print "starttime, endtime", starttime, endtime
-
+ sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
+ sys.stdout.close()
+ sys.stdout = old_stdout
+ self.livestatus_broker.db.commit_and_rotate_log_db()
+ numlogs = self.livestatus_broker.db.conn.bigbigbig.logs.find().count()
+ print "numlogs is", numlogs
+
# now we have a lot of events
# find type = HOST ALERT for test_host_005
- q = int((endtime - starttime) / 8)
- starttime += q
- endtime -= q
request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
-Filter: time >= """ + str(int(starttime)) + """
-Filter: time <= """ + str(int(endtime)) + """
+Filter: time >= """ + str(int(query_start)) + """
+Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
@@ -468,7 +486,6 @@ Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
-Filter: plugin_output ~ HUHU
OutputFormat: json"""
# switch back to realtime. we want to know how long it takes
fake_time_time = time.time
@@ -476,22 +493,58 @@ OutputFormat: json"""
time.time = original_time_time
time.sleep = original_time_sleep
print request
+ print "query 1 --------------------------------------------------"
+ tic = time.time()
response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ tac = time.time()
pyresponse = eval(response)
- print "number of all documents", self.livestatus_broker.db.conn.bigbigbig.logs.find().count()
- print "number of log broks sent", num_log_broks
- print "number of lines in the response", len(pyresponse)
- print "should be", should_be
- time.time = fake_time_time
- time.sleep = fake_time_sleep
- hosts = set([h[4] for h in pyresponse])
- services = set([h[5] for h in pyresponse])
- print "found hosts", hosts
- print "found services", services
- alldocs = [d for d in self.livestatus_broker.db.conn.bigbigbig.logs.find()]
- clientselected = [d for d in alldocs if (d['time'] >= int(starttime) and d['time'] <= int(endtime) and d['host_name'] == 'test_host_099' and d['service_description'] == 'test_ok_01' and 'HUHU' in d['plugin_output'])]
- print "clientselected", len(clientselected)
- self.assert_(len(pyresponse) == len(clientselected))
+ print "number of records with test_ok_01", len(pyresponse)
+ self.assert_(len(pyresponse) == should_be)
+
+ # and now test Negate:
+ request = """GET log
+Filter: time >= """ + str(int(query_start)) + """
+Filter: time <= """ + str(int(query_end)) + """
+Filter: type = SERVICE ALERT
+And: 1
+Filter: type = HOST ALERT
+And: 1
+Filter: type = SERVICE FLAPPING ALERT
+Filter: type = HOST FLAPPING ALERT
+Filter: type = SERVICE DOWNTIME ALERT
+Filter: type = HOST DOWNTIME ALERT
+Filter: type ~ starting...
+Filter: type ~ shutting down...
+Or: 8
+Filter: host_name = test_host_099
+Filter: service_description = test_ok_01
+And: 2
+Negate:
+And: 2
+OutputFormat: json"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ notpyresponse = eval(response)
+ print "number of records without test_ok_01", len(notpyresponse)
+
+ request = """GET log
+Filter: time >= """ + str(int(query_start)) + """
+Filter: time <= """ + str(int(query_end)) + """
+Filter: type = SERVICE ALERT
+And: 1
+Filter: type = HOST ALERT
+And: 1
+Filter: type = SERVICE FLAPPING ALERT
+Filter: type = HOST FLAPPING ALERT
+Filter: type = SERVICE DOWNTIME ALERT
+Filter: type = HOST DOWNTIME ALERT
+Filter: type ~ starting...
+Filter: type ~ shutting down...
+Or: 8
+OutputFormat: json"""
+ response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
+ allpyresponse = eval(response)
+ print "all records", len(allpyresponse)
+ self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
# now delete too old entries from the database (> 14days)
# that's the job of commit_and_rotate_log_db()
@@ -531,8 +584,11 @@ OutputFormat: json"""
# simply an estimation. the cleanup-routine in the mongodb logstore
# cuts off the old data at midnight, but here in the test we have
# only accuracy of a day.
- self.assert_(numlogs >= sum(daycount[:14]))
- self.assert_(numlogs <= sum(daycount[:15]))
+ self.assert_(numlogs >= sum(daycount[:7]))
+ self.assert_(numlogs <= sum(daycount[:8]))
+
+ time.time = fake_time_time
+ time.sleep = fake_time_sleep
if __name__ == '__main__':
--
UNNAMED PROJECT
More information about the Pkg-nagios-changes
mailing list