[Pkg-nagios-changes] [SCM] UNNAMED PROJECT branch, debian/master, updated. 810edbdd3feedbfe37f4a65bee50b57b2f60fa2a

Sebastien Coavoux s.coavoux at free.fr
Tue Feb 28 22:05:46 UTC 2012


The following commit has been merged in the debian/master branch:
commit a6cee17b0a7937a08c0772b4b7083724a35c1dcb
Merge: fd04aa96d47ab00c62e852d995fcf0a841c388ff f6845c7961014337252b525611530516ac93ee61
Author: Sebastien Coavoux <s.coavoux at free.fr>
Date:   Mon Nov 14 10:48:02 2011 +0100

    Merge branch 'master' of git://github.com/naparuba/shinken

diff --combined etc/shinken-specific.cfg
index b20d109,bf675b2..791f753
mode 100755,100644..100755
--- a/etc/shinken-specific.cfg
+++ b/etc/shinken-specific.cfg
@@@ -1,18 -1,18 +1,18 @@@
  #This config file defines Shinken specific objects like
  #satellites or Realms
  #
- #This file can be used for defining a simple environement :
+ #This file can be used for defining a simple environment :
  #*one scheduler that schedules the checks (but doesn't launch them)
  #*one poller (that launches the checks)
- #*one reactionner (that sends the notifiations)
- #*one broker (that gives jobs to modules. Modules export data like logs, status.dat, mysql export, etc etc)
- #*some of the brokers modules (that do the jobs)
+ #*one reactionner (that sends the notifications)
+ #*one broker (that gives jobs to modules. Modules export data such as logs, status.dat, mysql export, etc etc)
+ #*some of the broker modules (that do the jobs)
  #*one arbiter (that reads the configuration and dispatches it to all others)
  
  #So there is no high availability here, just a simple "Nagios equivalent" (but with
  #more perf and less code! )
  
- #The scheduler is a "Host manager". It gets the hosts and theirs
+ #The scheduler is a "Host manager". It gets the hosts and their
  #services and it schedules the checks for the pollers.
  define scheduler{
         scheduler_name	scheduler-1   ; just the name
@@@ -27,6 -27,7 +27,7 @@@
         max_check_attempts	3     ;  if at least max_check_attempts pings fail, the node is DEAD
         check_interval		60    ; ping it every minute
  #       modules			PickleRetention
+ 
  # Interesting modules :
  #   PickleRetention    :        Save data before exiting in flat file
  #   MemcacheRetention  :        Same but in a memcache server
@@@ -46,10 -47,10 +47,10 @@@ define poller
  
         #optional
         manage_sub_realms 0	; optional and advanced: does it take jobs from schedulers of sub realms?
-        min_workers	 4	; optional : starts with N processes workers. 0 means : "number of cpus"
-        max_workers	 4	; optional : no more than N processes workers. 0 means : "number of cpus"
-        processes_by_worker	256	   ; optional : each workers manage 256 checks
-        polling_interval		1       ; optional : take jobs from schedulers each 1 second
+        min_workers	 0	; optional : starts with N worker processes. 0 means : "number of cpus"
+        max_workers	 0	; optional : no more than N worker processes. 0 means : "number of cpus"
+        processes_by_worker	256	   ; optional : each worker manages 256 checks
+        polling_interval		1       ; optional : get jobs from schedulers each 1 second
         timeout		3	      ; 'ping' timeout 
         data_timeout	120	      ; 'data send' timeout
         check_interval		60    ; ping it every minute
@@@ -92,12 -93,18 +93,18 @@@ define reactionner
         check_interval		60    ; ping it every minute
         max_check_attempts	3     ;  if at least max_check_attempts pings fail, the node is DEAD
  
+        # Modules
+        # modules		AndroidSMS
+ 
         #optional
         realm 			All
         }
  
+ 
+ 
+ 
  #The broker manages data export (in flat file or in database)
- #with it's modules
+ #with its modules
  #Here just log files and status.dat file modules
  define broker{
         broker_name	broker-1
@@@ -112,12 -119,13 +119,13 @@@
  #      Other interesting modules to add :
  #      PickleRetentionBroker    :  save data when quitting
  #      ToNdodb_Mysql            :  NDO database support
- #      NPCD			: Use the PNP addon
+ #      NPCDMOD			: Use the PNP addon
+ #      Graphite-Perfdata        : Use the Graphite backend for perfdata
  #      WebUI			: Shinken Web interface
  
-        # Optionnal
-        manage_sub_realms 1   ; optionnal, like for poller
-        manage_arbiters	 1   ; optionnal : take data from Arbiter. There should be
+        # Optional
+        manage_sub_realms 1   ; optional, like for poller
+        manage_arbiters	 1   ; optional : take data from Arbiter. There should be
         			     ;only one broker for the arbiter
  
         check_interval		60    ; ping it every minute
@@@ -132,7 -140,7 +140,7 @@@
  
  ##Now the modules for the broker. The first 2 are the ones used here, then all the others :)
  
- #The log managment for ALL daemons (all in one log, cool isn't it? ).
+ #The log management for ALL daemons (all in one log, cool isn't it? ).
  define module{
         module_name      Simple-log
         module_type      simple_log
@@@ -151,7 -159,7 +159,7 @@@ define module
         status_update_interval   15 ; update status.dat every 15s
  }
  
- #The log managment for ALL daemons (all in one log, cool isn't it? ).
+ #The WebUI module : the Shinken web interface.
  define module{
         module_name      WebUI
         module_type      webui
@@@ -159,23 -167,34 +167,34 @@@
         host		0.0.0.0       ; means all interfaces
         port		7767
  
-        # CHANGE THIS VALUE!!!!!!!
+        # CHANGE THIS VALUE or someone may forge cookies !!!!!!!
         auth_secret	CHANGE_ME
  
+        # Allow (or not) HTML characters in plugin output
+        # WARNING : for now, this can be a security issue
+        allow_html_output  0
+ 
+        # Uncomment to display a text in the login form
+        # login_text     	Welcome to Shinken WebUI.
  
         # Advanced options. Do not touch them if you don't
         # know what you are doing
  
-        #http_backend    wsgiref
-        # ; can be also : cherrypy, paste, tornado, twisted
-        # ; or gevent
+        #http_backend    auto
+        # ; can also be : wsgiref, cherrypy, paste, tornado, twisted
+        # ; or gevent. auto means the best one found on the system.
  
         modules		Apache_passwd,ActiveDir_UI,Cfg_password
         # Modules for the WebUI.
+        # Apache_passwd : use an Apache htpasswd file for auth
+        # ActiveDir_UI : use AD for auth and photo collection
+        # Cfg_password : use passwords in contacts configuration for auth
+        # PNP_UI : Use PNP graphs in the UI
+        # GRAPHITE_UI : Use graphs from Graphite
  
  }
  
- #The log managment for ALL daemons (all in one log, cool isn't it? ).
+ #The Active Directory module for the WebUI (authentication and photo collection).
  define module{
         module_name      ActiveDir_UI
         module_type      ad_webui
@@@ -190,7 -209,7 +209,7 @@@
  }
  
  
- #The log managment for ALL daemons (all in one log, cool isn't it? ).
+ #The Apache htpasswd module for the WebUI (authentication).
  define module{
         module_name      Apache_passwd
         module_type      passwd_webui
@@@ -201,7 -220,7 +220,7 @@@
  }
  
  
- #The log managment for ALL daemons (all in one log, cool isn't it? ).
+ #The contacts-password module for the WebUI (authentication).
  define module{
         module_name      Cfg_password
         module_type      cfg_password_webui
@@@ -224,16 -243,14 +243,16 @@@ define module
         character_set	utf8      ;optional, UTF8 is the default
  
         # If you want to mix Shinken AND Nagios/icinga in the same db
 -       # you need to offset shinken instance id so they will not
 -       # override/delete other ones. Here for 5 nagios box.
 -       # nagios_mix_offset	  5 
 +       # you need to synchronize the shinken instance_id, otherwise it
 +       # will override/delete the other ones.
 +       # Warning : this decreases performance because it queries the db
 +       # for every new instance id needed (one per scheduler)
 +       synchronise_database_id    1
  }
  
  
  #Here a NDO/Oracle module. For Icinga web connection
- #Or for DBA that do not like MySQL
+ #Or for DBAs who don't like MySQL
  define module{
         module_name	ToNdodb_Oracle
         module_type	ndodb_oracle
@@@ -253,13 -270,13 +272,13 @@@ define module
         user             root     ; ? .. yes, the user of the database...
         password         root     ; wtf? you ask?
         host		localhost ; host of the database
-        character_set    utf8     ;optionnal, UTF8 is the default
+        character_set    utf8     ;optional, UTF8 is the default
  }
  
  
  #Here the Merlin/Sqlite module. No one uses it for now :)
  #You may notice something : it's also the merlindb module, like the previous one,
- #it's the same code, it's just the backend parameter that change (and path).
+ #it's the same code, only the backend parameter (and the path, of course ;-)) has changed.
  define module{
         module_name      ToMerlindb_Sqlite
         module_type      merlindb
@@@ -268,11 -285,11 +287,11 @@@
  }
  
  
- #Here the couchdb export. Maybe use one day...
- #I should do a mangodb too one day...
+ #Here the couchdb export. Maybe use it someday...
+ #I should do a mongodb one too some other day...
  #and cassandra...
  #and voldemort...
- #and all other NoSQL database in fact :)
+ #and all other NoSQL databases in fact :)
  define module{
         module_name      ToCouchdb
         module_type      couchdb
@@@ -305,7 -322,25 +324,25 @@@ define module
  }
  
  
- # You know livestatus? Yes, there a Livestatus module for shinken too :)
+ # Graphite is a Graph backend
+ # http://graphite.wikidot.com/start
+ define module{
+        module_name      Graphite-Perfdata
+        module_type      graphite_perfdata
+        host		localhost
+        port 		2003
+ }
+ 
+ 
+ # Use Graphite graphs in the WebUI
+ define module{
+  module_name  GRAPHITE_UI
+  module_type  graphite_webui
+  uri 	      http://YOURSERVERNAME/   ; put the real Graphite uri here. YOURSERVERNAME will be changed
+                                           ; by the localname of the server
+ }
+ 
+ # You know Livestatus? Yes, there is a Livestatus module for Shinken too :)
  define module{
         module_name      Livestatus
         module_type      livestatus
@@@ -313,7 -348,7 +350,7 @@@
         port		50000   ; port to listen
         database_file    livestatus.db
  
-        # Only set debug if you got problem with this module
+        # Only set debug if you're having problems with this module
         # debug		/tmp/ls.debug
         # Set to 1 if you want to dump queries/responses too
         # warning : it's very verbose
@@@ -329,11 -364,30 +366,30 @@@ define module
  
  #Use with the PNP interface
  define module{
-  module_name  NPCD
+  module_name  NPCDMOD
   module_type  npcdmod
   config_file  /usr/local/pnp4nagios/etc/npcd.cfg
  }
  
+ # Use PNP graphs in the WebUI
+ define module{
+  module_name  PNP_UI
+  module_type  pnp_webui
+  uri 	      http://YOURSERVERNAME/pnp4nagios/   ; put the real PNP uri here. YOURSERVERNAME will be changed
+                                               ; by the localname of the server
+ }
+ 
+ # Send data into the GLPI DB
+ # =============== Work with Plugin Monitoring of GLPI =============== 
+ define module{
+   module_name  glpidb
+   module_type  glpidb
+   database		glpi       ; database name
+   user		   root      ; database user
+   password		root      ; must be changed
+   host		   localhost ; host to connect to
+ }
+ 
  
  
  ############################# For the schedulers
@@@ -407,13 -461,14 +463,14 @@@ define module
         uri 		http://localhost/glpi/plugins/webservices/xmlrpc.php
         login_name	glpi
         login_password	glpi
- #       use_property       otherserial   ;optionnal. Will take use value from the otherserial
+ #       use_property       otherserial   ;optional. Will use the value from the otherserial
  #			   field in GLPI (Inventory number:  in the web interface)
  }
  
- #You know GLPI? You can load all configuration from this app (
+ #You know GLPI? You can load all configuration from this app (
  #with the webservices plugins for GLPI, in xmlrpc mode
- # and with plugin monitoring for GLPI)
+ #and with plugin monitoring for GLPI)
+ # =============== Work with Plugin Monitoring of GLPI =============== 
  #All configuration read from this will be added to the ones from the
  #standard flat file
  define module{
@@@ -425,7 -480,7 +482,7 @@@
  }
  
  
- #You know NSCA? You can send check results to shinken
+ #You know NSCA? You can send check results to Shinken
  #using the send_nsca command
  define module{
         module_name       NSCA
@@@ -436,7 -491,7 +493,7 @@@
         password          helloworld
  }
  
- #This module implement TSCA, a thrift interface to submit checks result
+ #This module implements TSCA, a thrift interface to submit check results
  define module{
          module_name     TSCA
          module_type     tsca_server
@@@ -445,8 -500,8 +502,8 @@@
  }
  
  # You know VMWare? It's cool to VMotion a VM, but afterwards it's hard to
- # follow host dependencies when it move. With this module, you can
- # just lookup at the vcenter from times to times and update dependencies
+ # follow host dependencies when it moves. With this module, you can
+ # just query the vcenter from time to time and update the dependencies
  define module{
         module_name	 VMWare_auto_linking
         module_type       hot_dependencies
@@@ -460,9 -515,20 +517,20 @@@
         # debug 	    1
  }
  
+ # Another way to update dependencies is to update a flat file
+ # See some examples of how to do that in the python script
+ define module{
+       module_name      External_auto_linking
+       module_type      hot_dependencies
+       mapping_file     /tmp/external_mapping_file.json
+       mapping_command  /usr/local/shinken/libexec/external_mapping.py -i /tmp/shinken_flat_mapping -o /tmp/external_mapping_file.json
+       mapping_command_interval  60   ; optional
+       mapping_command_timeout   300 ; optional
+ }
+ 
  # Arbiter module to change on the fly the poller tag of a
  # command to another one.
- # Useful when you use a fixed configuration tool that do not allow you
+ # Useful when you use a fixed configuration tool that doesn't allow you
  # to configure poller_tag.
  define module{
         module_name	HackCommandsPollerTag
@@@ -471,6 -537,19 +539,19 @@@
         poller_tag	esx3
  }
  
+ 
+ # Arbiter module to change on the fly the poller tag of hosts
+ # and services by searching for a custom macro
+ # Useful when you use a fixed configuration tool that doesn't allow you
+ # to configure poller_tag.
+ define module{
+        module_name	   HackPollerTagByMacros
+        module_type         hack_poller_tag_by_macros 
+        host_macro_name     _poller_tag
+        service_macro_name  _poller_tag
+ }
+ 
+ 
  #Hosts and Services configuration can be pulled from a MySQL database
  #All hosts and services read from the database will be added to the ones from the
  #standard flat file
@@@ -489,7 -568,7 +570,7 @@@ define module
         reqcontacts	SELECT contact_name, email, template AS 'use' FROM contacts
  }
  
- #The arbiter definition is optionnal
+ #The arbiter definition is optional
  #WARNING : You must replace host_name with the
  #hostname of your machine !!!!
  define arbiter{
@@@ -498,11 -577,11 +579,11 @@@
         address		localhost   ;IP or DNS address
         port		7770
         spare		0
- #      uncomment the line below if you want to use the GLPI module and the NSCA one
+ #      uncomment the line below if you want to use the GLPI and NSCA modules
  #      modules		CommandFile,GLPIImport, NSCA, VMWare_auto_linking, TSCA
  
- #      Uncomment theses lines in a HA architecture so the master
- #      and slaves know how much time to wait the other
+ #      Uncomment these lines in a HA architecture so the master
+ #      and slaves know how long they may wait for each other
  #       check_interval          60    ; ping it every minute
  #       timeout         3             ; 'ping' timeout
  #       data_timeout    120           ; 'data send' timeout
@@@ -521,7 -600,7 +602,7 @@@
  
  
  # The receiver manages passive information. It's just a "buffer" that
- # will be readfrom the arbiter to dispatch data
+ # will be read from the arbiter to dispatch data
  define receiver{
         receiver_name	receiver-1
         address		localhost
@@@ -543,8 -622,8 +624,8 @@@
  
  
  
- #Very advanced feature for multisite managment.
- #Consider to look at the doc before touching it :)
+ #Very advanced feature for multisite management.
+ #Read the docs VERY CAREFULLY before changing these settings :)
  define realm{
         realm_name  All
         default		1
@@@ -552,3 -631,42 +633,42 @@@
  
  
  
+ 
+ 
+ # Sample of an Android SMS reactionner.
+ # 2 requirements :
+ # * modules AndroidSMS  : so the SMS sending code is loaded
+ # * reactionner_tags android_sms : so ONLY commands with this tag will
+ #   be sent to this reactionner, not the mail ones.
+ #define reactionner{
+ #       reactionner_name		reactionner-Android
+ #       address			IPOFYOURANDROIDPHONE
+ #       port			7769
+ #       spare			0
+ #
+ #       timeout		3	      ; 'ping' timeout 
+ #       data_timeout	120	      ; 'data send' timeout
+ #       check_interval		60    ; ping it every minute
+ #       max_check_attempts	3     ;  if at least max_check_attempts pings fail, the node is DEAD
+ #
+ #       # Modules
+ #       modules		AndroidSMS
+ #
+ #	reactionner_tags	android_sms
+ #
+ #       #optional
+ #       realm 			All
+ #       }
+ 
+ 
+ # A reactionner can be launched on an Android device
+ # and can use this module to send SMS
+ define module{
+        module_name	AndroidSMS
+        module_type	android_sms
+ }
+ 
+ 
+ 
+ 
+ 
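
For reference, the Graphite-Perfdata module defined above only needs a host and a
port because Graphite's plaintext listener (port 2003 in that definition) accepts
one "metric value timestamp" line per datapoint, which is presumably what the
module emits for each performance-data metric. A minimal illustrative sketch of
that protocol in Python (the function and metric name are made up, not the
module's actual code):

    import socket
    import time

    def send_to_graphite(metric, value, host="localhost", port=2003):
        # Graphite plaintext protocol: "<metric path> <value> <unix timestamp>\n"
        line = "%s %s %d\n" % (metric, value, int(time.time()))
        sock = socket.create_connection((host, port), timeout=3)
        try:
            sock.sendall(line)
        finally:
            sock.close()

    # e.g. push a host's load perfdata under an arbitrary prefix
    send_to_graphite("shinken.server-1.load", 0.42)
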
diff --combined shinken/modules/ndodb_mysql_broker/ndodb_mysql_broker.py
index e90e005,16509b7..f758676
--- a/shinken/modules/ndodb_mysql_broker/ndodb_mysql_broker.py
+++ b/shinken/modules/ndodb_mysql_broker/ndodb_mysql_broker.py
@@@ -37,8 -37,6 +37,8 @@@ properties = 
  
  from shinken.db_mysql import DBMysql
  from shinken.basemodule import BaseModule
 +#Do we need this import? (used below to catch MySQL connection errors)
 +import _mysql_exceptions
  
  def de_unixify(t):
      return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))
@@@ -64,7 -62,7 +64,7 @@@ class Ndodb_Mysql_broker(BaseModule)
          self.password = conf.password
          self.database = conf.database
          self.character_set = conf.character_set
 -        self.nagios_mix_offset = int(conf.nagios_mix_offset)
 +        self.synchronise_database_id = int(conf.synchronise_database_id)
  
  
      #Called by Broker so we can do init stuff
@@@ -81,112 -79,33 +81,112 @@@
          self.services_cache = {}
          self.hosts_cache = {}
  
 +        #Cache for database id
 +        #In order not to query the database every time
 +        self.database_id_cache={}
 +
 +
 +        #Todo list of broks waiting to be processed
 +        self.todo=[]
  
      #Get a brok, parse it, and put it in the database
      #We call functions like manage_ TYPEOFBROK _brok that return us queries
      def manage_brok(self, b):
          # We need to do some brok mod, so we copy it
          new_b = copy.deepcopy(b)
 -
 -        # We've got problem with instance_id == 0 so we add 1 every where
 +    
          if 'instance_id' in new_b.data:
 -            #For nagios mix install, move more than 1
 -            if self.nagios_mix_offset != 0:
 -                new_b.data['instance_id'] = new_b.data['instance_id'] + self.nagios_mix_offset
 -            else:
 +            
 +            if self.synchronise_database_id != 1:
 +                # We've got a problem with instance_id == 0 so we add 1 everywhere
                  new_b.data['instance_id'] = new_b.data['instance_id'] + 1
 -
 -        queries = BaseModule.manage_brok(self, new_b)
 +            
 +            #We have to synchronise database id
 +            #so we wait for the instance name
 +            elif 'instance_name' not in new_b.data :
 +                self.todo.append(new_b)
 +                #print("No instance name for %s : " % new_b.data)
 +                return  
 +                  
 +            #We convert the id so that we write properly to the database, using the
 +            #instance_name to reuse the instance_id already in the database.
 +            else:
 +                new_b.data['instance_id'] = self.convert_id(new_b.data['instance_id'],new_b.data['instance_name'])
 +                self.todo.append(new_b)
 +                for brok in self.todo :
 +                    #We have to set the right instance ID on all broks waiting
 +                    #in the list, then execute the queries
 +                    brok.data['instance_id']=new_b.data['instance_id']
 +                    queries = BaseModule.manage_brok(self, brok)
 +                    if queries is not None:
 +                        for q in queries :
 +                            self.db.execute_query(q)
 +
 +                self.todo=[]
 +                return
 +
 +        #Executed if we don't synchronise or there is no instance_id
 +        queries = BaseModule.manage_brok(self,new_b)
 +        
          if queries is not None:
              for q in queries :
                  self.db.execute_query(q)
              return
 -        #print "(ndodb)I don't manage this brok type", b
 +
 +
 +        
  
  
      #Create the database connection
      #TODO : finish (begin :) ) error catch and conf parameters...
      def connect_database(self):
 -        self.db.connect_database()
 +    
 +        try :
 +            self.db.connect_database()
 +            
 +        except _mysql_exceptions.OperationalError as exp:
 +
 +            #TODO : Stop the module properly
 +            #Otherwise the module will keep running and fail with
 +            #an incomprehensible exception
 +
 +            print "[MysqlDB] Module raised an exception: %s. Please check the arguments!" % exp
 +            #Do we need to exit here?
 +            #exit 
 +
 +
 +    def get_instance_id(self,name):
 +        query1 = u"SELECT  max(instance_id) + 1 from nagios_instances"
 +        query2 = u"SELECT instance_id from nagios_instances where instance_name = '%s';" % name
 +
 +        self.db.execute_query(query1)
 +        row1  = self.db.fetchone()
 +
 +        self.db.execute_query(query2)
 +        row2 = self.db.fetchone()
 +
 +        if len(row1)<1 :
 +            return -1
 +        #We are the first process writing to the database
 +        elif row1[0] is None:
 +            return 1
 +        #No previous instance found, return the max
 +        elif row2 is None :
 +            return row1[0]
 +        #Return the previous instance
 +        else:
 +            return row2[0]
 +
 +
 +
 +    def convert_id(self,id,name):
 +        #Look if we have already encountered this id
 +        if id in self.database_id_cache :
 +            return self.database_id_cache[id]
 +        else :
 +            data_id = self.get_instance_id(name)
 +            self.database_id_cache[id]=data_id
 +            return data_id
  
  
      def get_host_object_id_by_name(self, host_name):
@@@ -583,7 -502,7 +583,7 @@@
          #Only the host is impacted
          where_clause = {'host_object_id' : host_id}
  
 -        #Just update the host status
 +        #Just update the host status
          hoststatus_data = {'next_check' : de_unixify(data['next_chk'])}
          hoststatus_query = self.db.create_update_query('hoststatus' , hoststatus_data, where_clause)
  
@@@ -807,7 -726,6 +807,6 @@@
          return res
  
  
- 
      #A notification has just been created, we INSERT it
      def manage_notification_raise_brok(self, b):
  

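The net effect of the synchronise_database_id path added above: when the option is
set, the module reuses the instance_id already stored in nagios_instances for a
given instance_name (caching the result in database_id_cache and queueing broks in
self.todo until the instance_name is known), instead of simply offsetting the
Shinken id. A rough standalone sketch of that lookup-and-cache flow, written here
against a plain MySQLdb cursor rather than the module's DBMysql wrapper
(illustrative only, not the module's actual code):

    # Illustrative sketch of the instance id synchronisation logic.
    _id_cache = {}

    def resolve_instance_id(cursor, shinken_id, instance_name):
        # Reuse the id we already resolved for this scheduler, if any
        if shinken_id in _id_cache:
            return _id_cache[shinken_id]
        cursor.execute("SELECT instance_id FROM nagios_instances"
                       " WHERE instance_name = %s", (instance_name,))
        row = cursor.fetchone()
        if row is not None:
            db_id = row[0]        # instance already known in the db: reuse its id
        else:
            cursor.execute("SELECT MAX(instance_id) + 1 FROM nagios_instances")
            nxt = cursor.fetchone()[0]
            db_id = nxt if nxt is not None else 1   # empty table: we are the first writer
        _id_cache[shinken_id] = db_id
        return db_id
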
-- 
UNNAMED PROJECT


