[med-svn] [Git][med-team/gnumed-server][master] 4 commits: New upstream version 22.2
Andreas Tille
gitlab at salsa.debian.org
Mon May 28 20:27:38 BST 2018
Andreas Tille pushed to branch master at Debian Med / gnumed-server
Commits:
d65ebadb by Andreas Tille at 2018-05-28T21:22:55+02:00
New upstream version 22.2
- - - - -
c3496253 by Andreas Tille at 2018-05-28T21:24:21+02:00
Update upstream source from tag 'upstream/22.2'
Update to upstream version '22.2'
with Debian dir 40054f24bb05eb0b18e93822f11d6475f2f904bb
- - - - -
c7209fb9 by Andreas Tille at 2018-05-28T21:24:22+02:00
New upstream version
- - - - -
416b4709 by Andreas Tille at 2018-05-28T21:25:48+02:00
Upload to unstable
- - - - -
9 changed files:
- debian/changelog
- + server/bootstrap/bootstrap_gm_db_system.py.bak
- server/bootstrap/fixup_db-v22.conf
- + server/bootstrap/gmAuditSchemaGenerator.py.bak
- server/bootstrap/update_db-v21_v22.conf
- server/doc/schema/gnumed-entire_schema.html
- server/sql/v21-v22/data/v22-Begleitbrief.tex
- server/sql/v21-v22/fixups/v22-release_notes-fixup.sql
- + server/sql/v21-v22/python/v22-2-fixup-form-templates.py
Changes:
=====================================
debian/changelog
=====================================
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+gnumed-server (22.2-1) unstable; urgency=medium
+
+ * New upstream version
+
+ -- Andreas Tille <tille@debian.org>  Mon, 28 May 2018 21:24:34 +0200
+
gnumed-server (22.1-1) unstable; urgency=medium
* New upstream version
=====================================
server/bootstrap/bootstrap_gm_db_system.py.bak
=====================================
--- /dev/null
+++ b/server/bootstrap/bootstrap_gm_db_system.py.bak
@@ -0,0 +1,1888 @@
+#!/usr/bin/python3
+##!/usr/bin/env python
+##!/usr/bin/python2.7-dbg
+
+__doc__="""GNUmed schema installation.
+
+This script bootstraps a GNUmed database system.
+
+This will set up databases, tables, groups, permissions and
+possibly users. Most of this will be handled via SQL
+scripts, not directly in the bootstrapper itself.
+
+There's a special user called "gm-dbo" who owns all the
+database objects.
+
+For all this to work you must be able to access the database
+server as the standard "postgres" superuser.
+
+This script does NOT set up user specific configuration options.
+
+All definitions are loaded from a config file.
+
+Please consult the User Manual in the GNUmed CVS for
+further details.
+
+--quiet
+--log-file=
+--conf-file=
+
+Requires psycopg 2.7.4 !
+"""
+#==================================================================
+# TODO
+# - perhaps create PGPASSFILE
+# - warn if empty password
+# - verify that pre-created database is owned by "gm-dbo"
+# - rework under assumption that there is only one DB
+#==================================================================
+__author__ = "Karsten.Hilbert at gmx.net"
+__license__ = "GPL v2 or later"
+
+# standard library
+import sys
+import os.path
+import fileinput
+import os
+import getpass
+import glob
+import re as regex
+import tempfile
+import io
+import logging
+import faulthandler
+
+
+faulthandler.enable()
+
+
+# adjust Python path
+local_python_base_dir = os.path.dirname (
+ os.path.abspath(os.path.join(sys.argv[0], '..', '..'))
+)
+
+# does the GNUmed import path exist at all, physically ?
+# (*broken* links are reported as False)
+if not os.path.exists(os.path.join(local_python_base_dir, 'Gnumed')):
+ real_dir = os.path.join(local_python_base_dir, 'server')
+ is_useful_import_dir = (
+ os.path.exists(os.path.join(real_dir, 'pycommon'))
+ and
+ os.path.exists(os.path.join(real_dir, '__init__.py'))
+ )
+ if not is_useful_import_dir:
+ real_dir = os.path.join(local_python_base_dir, 'client') # CVS tree
+ link_name = os.path.join(local_python_base_dir, 'Gnumed')
+ print("Creating module import symlink ...")
+ print(' real dir:', real_dir)
+ print(' link:', link_name)
+ os.symlink(real_dir, link_name)
+
+print("Adjusting PYTHONPATH ...")
+sys.path.insert(0, local_python_base_dir)
+
+
+# GNUmed imports
+try:
+ from Gnumed.pycommon import gmLog2
+except ImportError:
+ print("Please make sure the GNUmed Python modules are in the Python path !")
+ raise
+from Gnumed.pycommon import gmCfg2
+from Gnumed.pycommon import gmPsql
+from Gnumed.pycommon import gmPG2
+from Gnumed.pycommon import gmTools
+from Gnumed.pycommon import gmI18N
+from Gnumed.pycommon.gmExceptions import ConstructorError
+
+
+# local imports
+import gmAuditSchemaGenerator
+aud_gen = gmAuditSchemaGenerator
+
+
+_log = logging.getLogger('gm.bootstrapper')
+#faulthandler.enable(file = gmLog2._logfile)
+
+
+_cfg = gmCfg2.gmCfgData()
+
+
+_interactive = None
+_bootstrapped_servers = {}
+_bootstrapped_dbs = {}
+_dbowner = None
+cached_host = None
+cached_passwd = {}
+_keep_temp_files = False
+
+conn_ref_count = []
+#==================================================================
+pg_hba_sermon = """
+I have found a connection to the database, but I am forbidden
+to connect due to the settings in pg_hba.conf. This is a
+PostgreSQL configuration file that controls who can connect
+to the database.
+
+Depending on your setup, it can be found in
+/etc/postgresql/pg_hba.conf (Debian)
+/usr/local/pgsql/pgdata/pg_hba.conf (FreeBSD, ?? Mac OS X)
+FIXME: where do RedHat & friends put it
+ or in whichever directory your database files are located.
+
+For gnumed, pg_hba.conf must allow password authentication.
+For development systems, I suggest the following:
+
+local template1 postgres ident sameuser
+local gnumed all md5
+host gnumed all 127.0.0.1 255.255.255.255 md5
+
+For production systems, a different configuration will be
+required, but gnumed is not production ready.
+There is also a pg_hba.conf.example in this directory.
+
+You must then restart (or SIGHUP) your PostgreSQL server.
+"""
+
+no_server_sermon = """
+I cannot find a PostgreSQL server running on this machine.
+
+Try (as root):
+/etc/init.d/postgresql start
+
+if that fails, you can build a database from scratch:
+
+PGDATA=some directory you can use
+initdb
+cp pg_hba.conf.example $PGDATA/pg_hba.conf
+pg_ctl start
+
+if none of these commands work, or you don't know what PostgreSQL
+is, download a version for your OS from the project website:
+
+http://www.postgresql.org/
+
+On the other hand, if you have a PostgreSQL server
+running somewhere strange, type hostname[:port]
+below, or press RETURN to quit.
+"""
+
+superuser_sermon = """
+I can't log on as the PostgreSQL database owner.
+Try running this script as the system administrator (user "root")
+to get the necessary permissions.
+
+NOTE: I expect the PostgreSQL database owner to be called "%s"
+If for some reason it is not, you need to adjust my configuration
+script and run me again as that user.
+"""
+
+no_clues = """
+Logging on to the PostgreSQL database returned this error
+%s
+on %s
+
+Please contact the GNUmed development team on gnumed-devel@gnu.org.
+Make sure you include this error message in your mail.
+"""
+
+welcome_sermon = """
+Welcome to the GNUmed server installation script.
+
+You must have a PostgreSQL server running and
+administrator access.
+
+Please select a database configuration from the list below.
+"""
+
+SQL_add_foreign_key = u"""
+ALTER TABLE %(src_schema)s.%(src_tbl)s
+ ADD FOREIGN KEY (%(src_col)s)
+ REFERENCES %(target_schema)s.%(target_tbl)s(%(target_col)s)
+ ON UPDATE CASCADE
+ ON DELETE RESTRICT
+;"""
+
+SQL_add_index = u"""
+-- idempotent:
+DROP INDEX IF EXISTS %(idx_schema)s.%(idx_name)s CASCADE;
+
+CREATE INDEX %(idx_name)s ON %(idx_schema)s.%(idx_table)s(%(idx_col)s);
+"""
+
+
+#==================================================================
+def user_exists(cursor=None, user=None):
+ cmd = "SELECT usename FROM pg_user WHERE usename = %(usr)s"
+ args = {'usr': user}
+ try:
+ cursor.execute(cmd, args)
+ except:
+ _log.exception(u">>>[%s]<<< failed for user [%s]", cmd, user)
+ return None
+ res = cursor.fetchone()
+ if cursor.rowcount == 1:
+ _log.info(u"user [%s] exists", user)
+ return True
+ _log.info(u"user [%s] does not exist", user)
+ return None
+#------------------------------------------------------------------
+def db_group_exists(cursor=None, group=None):
+ cmd = 'SELECT groname FROM pg_group WHERE groname = %(grp)s'
+ args = {'grp': group}
+ try:
+ cursor.execute(cmd, args)
+ except:
+ _log.exception(u">>>[%s]<<< failed for group [%s]", cmd, group)
+ return False
+ rows = cursor.fetchall()
+ if len(rows) > 0:
+ _log.info(u"group [%s] exists" % group)
+ return True
+ _log.info(u"group [%s] does not exist" % group)
+ return False
+#------------------------------------------------------------------
+def create_db_group(cursor=None, group=None):
+
+ # does this group already exist ?
+ if db_group_exists(cursor, group):
+ return True
+
+ cmd = 'create group "%s"' % group
+ try:
+ cursor.execute(cmd)
+ except:
+ _log.exception(u">>>[%s]<<< failed for group [%s]", cmd, group)
+ return False
+
+ # paranoia is good
+ if not db_group_exists(cursor, group):
+ return False
+
+ return True
+#==================================================================
+def connect(host, port, db, user, passwd, conn_name=None):
+ """
+ This is a wrapper around the database connect function.
+ It will try to recover gracefully from connection errors where possible.
+ """
+ global cached_host
+ if len(host) == 0 or host == 'localhost':
+ if cached_host:
+ host, port = cached_host
+ else:
+ host = ''
+ if passwd == 'blank' or passwd is None or len(passwd) == 0:
+ if user in cached_passwd:
+ passwd = cached_passwd[user]
+ else:
+ passwd = ''
+
+ dsn = gmPG2.make_psycopg2_dsn(database=db, host=host, port=port, user=user, password=passwd)
+ _log.info(u"trying DB connection to %s on %s as %s", db, host or 'localhost', user)
+ try:
+ conn = gmPG2.get_connection(dsn=dsn, readonly=False, pooled=False, verbose=True, connection_name = conn_name)
+ except:
+ _log.exception(u'connection failed')
+ raise
+
+ cached_host = (host, port) # learn from past successes
+ cached_passwd[user] = passwd
+ conn_ref_count.append(conn)
+
+ _log.info(u'successfully connected')
+ return conn
+
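+# illustrative call (actual host/port/user values come from the config file):
+#   conn = connect('localhost', 5432, 'template1', 'postgres', '', conn_name = u'root@template.server')
+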
+#==================================================================
+class user:
+ def __init__(self, anAlias = None, aPassword = None):
+ if anAlias is None:
+ raise ConstructorError("need user alias")
+ self.alias = anAlias
+ self.group = "user %s" % self.alias
+
+ self.name = cfg_get(self.group, "name")
+ if self.name is None:
+ raise ConstructorError("cannot get user name")
+
+ self.password = aPassword
+
+ # password not passed in, try to get it from elsewhere
+ if self.password is None:
+ # look into config file
+ self.password = cfg_get(self.group, "password")
+ # undefined or commented out:
+ # this means the user does not need a password
+ # but connects via IDENT or TRUST
+ if self.password is None:
+ _log.info(u'password not defined, assuming connect via IDENT/TRUST')
+ # defined but empty:
+ # this means to ask the user if interactive
+ elif self.password == '':
+ if _interactive:
+ print("I need the password for the database user [%s]." % self.name)
+ self.password = getpass.getpass("Please type the password: ")
+ else:
+ _log.warning('cannot get password for database user [%s]', self.name)
+ raise ValueError('no password for user %s' % self.name)
+
+ return None
+
+#==================================================================
+class db_server:
+ def __init__(self, aSrv_alias, auth_group):
+ _log.info(u"bootstrapping server [%s]" % aSrv_alias)
+
+ global _bootstrapped_servers
+
+ if aSrv_alias in _bootstrapped_servers:
+ _log.info(u"server [%s] already bootstrapped" % aSrv_alias)
+ return None
+
+ self.alias = aSrv_alias
+ self.section = "server %s" % self.alias
+ self.auth_group = auth_group
+ self.conn = None
+
+ if not self.__bootstrap():
+ raise ConstructorError("db_server.__init__(): Cannot bootstrap db server.")
+
+ _bootstrapped_servers[self.alias] = self
+
+ _log.info(u'done bootstrapping server [%s]', aSrv_alias)
+ #--------------------------------------------------------------
+ def __bootstrap(self):
+ self.superuser = user(anAlias = cfg_get(self.section, "super user alias"))
+
+ # connect to server level template database
+ if not self.__connect_superuser_to_srv_template():
+ _log.error(u"Cannot connect to server template database.")
+ return None
+
+ # add users/groups
+ if not self.__bootstrap_db_users():
+ _log.error(u"Cannot bootstrap database users.")
+ return None
+
+ self.conn.close()
+ return True
+ #--------------------------------------------------------------
+ def __connect_superuser_to_srv_template(self):
+ _log.info(u"connecting to server template database")
+
+ # sanity checks
+ self.template_db = cfg_get(self.section, "template database")
+ if self.template_db is None:
+ _log.error(u"Need to know the template database name.")
+ return None
+
+ self.name = cfg_get(self.section, "name")
+ if self.name is None:
+ _log.error(u"Need to know the server name.")
+ return None
+
+ env_var = 'GM_DB_PORT'
+ self.port = os.getenv(env_var)
+ if self.port is None:
+ _log.info(u'environment variable [%s] is not set, using database port from config file' % env_var)
+ self.port = cfg_get(self.section, "port")
+ else:
+ _log.info(u'using database port [%s] from environment variable [%s]' % (self.port, env_var))
+ if self.port is None:
+ _log.error(u"Need to know the database server port address.")
+ return None
+
+ if self.conn is not None:
+ if self.conn.closed == 0:
+ self.conn.close()
+
+ self.conn = connect(self.name, self.port, self.template_db, self.superuser.name, self.superuser.password, conn_name = u'root@template.server')
+ if self.conn is None:
+ _log.error(u'Cannot connect.')
+ return None
+
+ self.conn.cookie = 'db_server.__connect_superuser_to_srv_template'
+
+ # verify encoding
+ curs = self.conn.cursor()
+ curs.execute(u"select setting from pg_settings where name = 'lc_ctype'")
+ data = curs.fetchall()
+ lc_ctype = data[0][0]
+ _log.info(u'template database LC_CTYPE is [%s]', lc_ctype)
+ lc_ctype = lc_ctype.lower()
+ if lc_ctype in ['c', 'posix']:
+ _log.warning('while this cluster setting allows storing databases')
+ _log.warning('in any encoding, it does not allow for locale-aware')
+ _log.warning('sorting etc, hence it is not recommended for use')
+ _log.warning('(although it will, technically, work)')
+ elif not (lc_ctype.endswith('.utf-8') or lc_ctype.endswith('.utf8')):
+ _log.error(u'LC_CTYPE does not end in .UTF-8 or .UTF8')
+ curs.execute(u"show server_encoding")
+ data = curs.fetchall()
+ srv_enc = data[0][0]
+ _log.info(u'server_encoding is [%s]', srv_enc)
+ srv_enc = srv_enc.lower()
+ if not srv_enc in ['utf8', 'utf-8']:
+ _log.error(u'cluster encoding incompatible with utf8 encoded databases but')
+ _log.error(u'for GNUmed installation the cluster must accept this encoding')
+ _log.error(u'you may need to re-initdb or create a new cluster')
+ return None
+ _log.info(u'server encoding seems compatible despite not being reported in LC_CTYPE')
+
+ # make sure we get english messages
+ curs.execute(u"set lc_messages to 'C'")
+ curs.close()
+
+ _log.info(u"successfully connected to template database [%s]" % self.template_db)
+ return True
+ #--------------------------------------------------------------
+ # user and group related
+ #--------------------------------------------------------------
+ def __bootstrap_db_users(self):
+ _log.info(u"bootstrapping database users and groups")
+
+ # insert standard groups
+ if not self.__create_groups():
+ _log.error(u"Cannot create GNUmed standard groups.")
+ return None
+
+ # create GNUmed owner
+ if self.__create_dbowner() is None:
+ _log.error(u"Cannot install GNUmed database owner.")
+ return None
+
+# if not _import_schema(group=self.section, schema_opt='schema', conn=self.conn):
+# _log.error(u"Cannot import schema definition for server [%s] into database [%s]." % (self.name, self.template_db))
+# return None
+
+ return True
+ #--------------------------------------------------------------
+ def __create_dbowner(self):
+ global _dbowner
+
+ dbowner_alias = cfg_get("GnuMed defaults", "database owner alias")
+ if dbowner_alias is None:
+ _log.error(u"Cannot load GNUmed database owner name from config file.")
+ return None
+
+ cursor = self.conn.cursor()
+ # does this user already exist ?
+ name = cfg_get('user %s' % dbowner_alias, 'name')
+ if user_exists(cursor, name):
+ cmd = (
+ 'alter group "gm-logins" add user "%s";' # postgres
+ 'alter group "gm-logins" add user "%s";' # gm-dbo
+ 'alter group "%s" add user "%s";'
+ 'alter role "%s" createdb createrole;'
+ ) % (
+ self.superuser.name,
+ name,
+ self.auth_group, name,
+ name,
+ )
+ try:
+ cursor.execute(cmd)
+ except:
+ _log.error(u">>>[%s]<<< failed." % cmd)
+ _log.exception(u"Cannot add GNUmed database owner [%s] to groups [gm-logins] and [%s]." % (name, self.auth_group))
+ cursor.close()
+ return False
+ self.conn.commit()
+ cursor.close()
+ _dbowner = user(anAlias = dbowner_alias, aPassword = 'should not matter')
+ return True
+
+ print_msg ((
+u"""The database owner [%s] will be created.
+
+You will have to provide a new password for it
+unless it is pre-defined in the configuration file.
+
+Make sure to remember the password for later use !
+""") % name)
+ _dbowner = user(anAlias = dbowner_alias)
+
+ cmd = 'create user "%s" with password \'%s\' createdb createrole in group "%s", "gm-logins"' % (_dbowner.name, _dbowner.password, self.auth_group)
+ try:
+ cursor.execute(cmd)
+ except:
+ _log.error(u">>>[%s]<<< failed." % cmd)
+ _log.exception(u"Cannot create GNUmed database owner [%s]." % _dbowner.name)
+ cursor.close()
+ return None
+
+ # paranoia is good
+ if not user_exists(cursor, _dbowner.name):
+ cursor.close()
+ return None
+
+ self.conn.commit()
+ cursor.close()
+ return True
+ #--------------------------------------------------------------
+ def __create_groups(self, aSection = None):
+
+ if aSection is None:
+ section = "GnuMed defaults"
+ else:
+ section = aSection
+
+ groups = cfg_get(section, "groups")
+ if groups is None:
+ _log.error(u"Cannot load GNUmed group names from config file (section [%s])." % section)
+ groups = [self.auth_group]
+ else:
+ groups.append(self.auth_group)
+
+ cursor = self.conn.cursor()
+ for group in groups:
+ if not create_db_group(cursor, group):
+ cursor.close()
+ return False
+
+ self.conn.commit()
+ cursor.close()
+ return True
+#==================================================================
+class database:
+ def __init__(self, aDB_alias):
+ _log.info(u"bootstrapping database [%s]" % aDB_alias)
+
+ self.section = "database %s" % aDB_alias
+
+ # find database name
+ overrider = cfg_get(self.section, 'override name by')
+ if overrider is not None:
+ self.name = os.getenv(overrider)
+ if self.name is None:
+ _log.info(u'environment variable [%s] is not set, using database name from config file' % overrider)
+ self.name = cfg_get(self.section, 'name')
+ else:
+ self.name = cfg_get(self.section, 'name')
+
+ if self.name is None or str(self.name).strip() == '':
+ _log.error(u"Need to know database name.")
+ raise ConstructorError("database.__init__(): Cannot bootstrap database.")
+
+ # already bootstrapped ?
+ global _bootstrapped_dbs
+ if aDB_alias in _bootstrapped_dbs:
+ if _bootstrapped_dbs[aDB_alias].name == self.name:
+ _log.info(u"database [%s] already bootstrapped", self.name)
+ return None
+
+ # no, so bootstrap from scratch
+ _log.info(u'bootstrapping database [%s] alias "%s"', self.name, aDB_alias)
+
+ for db in _bootstrapped_dbs.values():
+ if db.conn.closed == 0:
+ db.conn.close()
+ _bootstrapped_dbs = {}
+ self.conn = None
+
+ self.server_alias = cfg_get(self.section, "server alias")
+ if self.server_alias is None:
+ _log.error(u"Server alias missing.")
+ raise ConstructorError("database.__init__(): Cannot bootstrap database.")
+
+ self.template_db = cfg_get(self.section, "template database")
+ if self.template_db is None:
+ _log.error(u"Template database name missing.")
+ raise ConstructorError("database.__init__(): Cannot bootstrap database.")
+
+ # make sure server is bootstrapped
+ db_server(self.server_alias, auth_group = self.name)
+ self.server = _bootstrapped_servers[self.server_alias]
+
+ if not self.__bootstrap():
+ raise ConstructorError("database.__init__(): Cannot bootstrap database.")
+
+ _bootstrapped_dbs[aDB_alias] = self
+
+ return None
+ #--------------------------------------------------------------
+ def __bootstrap(self):
+
+ global _dbowner
+
+ # get owner
+ if _dbowner is None:
+ _dbowner = user(anAlias = cfg_get("GnuMed defaults", "database owner alias"))
+
+ if _dbowner is None:
+ _log.error(u"Cannot load GNUmed database owner name from config file.")
+ return None
+
+ self.owner = _dbowner
+
+ # connect as owner to template
+ if not self.__connect_superuser_to_template():
+ _log.error(u"Cannot connect to template database.")
+ return False
+
+ # make sure db exists
+ if not self.__create_db():
+ _log.error(u"Cannot create database.")
+ return False
+
+ # reconnect as superuser to db
+ if not self.__connect_superuser_to_db():
+ _log.error(u"Cannot connect to database.")
+ return None
+
+ # create authentication group
+ _log.info(u'creating database-specific authentication group role')
+ curs = self.conn.cursor()
+ if not create_db_group(cursor = curs, group = self.name):
+ curs.close()
+ _log.error(u'cannot create authentication group role')
+ return False
+ self.conn.commit()
+ curs.close()
+
+ # paranoia check
+ curs = self.conn.cursor()
+ if not db_group_exists(cursor = curs, group = self.name):
+ curs.close()
+ _log.error(u'cannot find authentication group role')
+ return False
+ curs.close()
+
+ # reindex db so upgrade doesn't fail on broken index
+ if not self.reindex_all():
+ _log.error(u'cannot REINDEX cloned target database')
+ return False
+
+ if not self.revalidate_constraints():
+ _log.error(u'cannot VALIDATE CONSTRAINTs in cloned target database')
+ return False
+
+ tmp = cfg_get(self.section, 'superuser schema')
+ if tmp is not None:
+ if not _import_schema(group=self.section, schema_opt='superuser schema', conn=self.conn):
+ _log.error(u"cannot import schema definition for database [%s]" % (self.name))
+ return False
+ del tmp
+
+ # transfer users
+ if not self.transfer_users():
+ _log.error(u"Cannot transfer users from old to new database.")
+ return False
+
+ # reconnect as owner to db
+ if not self.__connect_owner_to_db():
+ _log.error(u"Cannot connect to database.")
+ return None
+ if not _import_schema(group=self.section, schema_opt='schema', conn=self.conn):
+ _log.error(u"cannot import schema definition for database [%s]" % (self.name))
+ return None
+
+ # don't close this here, the connection will
+ # be reused later by check_data*/import_data etc.
+ #self.conn.close()
+
+ return True
+
+ #--------------------------------------------------------------
+ def __connect_superuser_to_template(self):
+ if self.conn is not None:
+ if self.conn.closed == 0:
+ self.conn.close()
+
+ self.conn = connect (
+ self.server.name,
+ self.server.port,
+ self.template_db,
+ self.server.superuser.name,
+ self.server.superuser.password,
+ conn_name = u'postgres@template.db'
+ )
+
+ self.conn.cookie = 'database.__connect_superuser_to_template'
+
+ curs = self.conn.cursor()
+ curs.execute(u"set lc_messages to 'C'")
+ curs.close()
+
+ return self.conn and 1
+
+ #--------------------------------------------------------------
+ def __connect_superuser_to_db(self):
+ if self.conn is not None:
+ if self.conn.closed == 0:
+ self.conn.close()
+
+ self.conn = connect (
+ self.server.name,
+ self.server.port,
+ self.name,
+ self.server.superuser.name,
+ self.server.superuser.password,
+ conn_name = u'postgres@gnumed_vX'
+ )
+
+ self.conn.cookie = 'database.__connect_superuser_to_db'
+
+ curs = self.conn.cursor()
+ curs.execute(u'set default_transaction_read_only to off')
+ # we need English messages to detect errors
+ curs.execute(u"set lc_messages to 'C'")
+ curs.execute(u"alter database %s set lc_messages to 'C'" % self.name)
+ # we want READ ONLY default transactions for maximum patient data safety
+ curs.execute("alter database %s set default_transaction_read_only to on" % self.name)
+ # we want checking of function bodies
+ curs.execute("alter database %s set check_function_bodies to on" % self.name)
+ # we want checking of data checksums if available
+ curs.execute("alter database %s set ignore_checksum_failure to off" % self.name)
+ curs.close()
+ self.conn.commit()
+
+ # we need inheritance or else things will fail miserably, but:
+ # the default is now ON, and PG 10.0 has it hardwired to ON,
+ # so remove any database-specific setting
+ curs = self.conn.cursor()
+ try:
+ curs.execute("alter database %s set sql_inheritance to DEFAULT" % self.name)
+ except:
+ _log.exception('PostgreSQL 10 onwards: <sql_inheritance> hardwired')
+ curs.close()
+ self.conn.commit()
+
+ # we want to track commit timestamps if available
+ # remove exception handler when 9.5 is default
+ curs = self.conn.cursor()
+ try:
+ curs.execute("alter database %s set track_commit_timestamp to on" % self.name)
+ except:
+ _log.exception(u'PostgreSQL version < 9.5 does not support <track_commit_timestamp> OR <track_commit_timestamp> cannot be set at runtime')
+ curs.close()
+ self.conn.commit()
+
+ curs = self.conn.cursor()
+ gmPG2._log_PG_settings(curs = curs)
+ curs.close()
+ self.conn.commit()
+
+ return self.conn and 1
+ #--------------------------------------------------------------
+ def __connect_owner_to_db(self):
+
+ _log.debug(u'__connect_owner_to_db')
+
+ # reconnect as superuser to db
+ if not self.__connect_superuser_to_db():
+ _log.error(u"Cannot connect to database.")
+ return False
+
+ self.conn.cookie = 'database.__connect_owner_to_db via database.__connect_superuser_to_db'
+
+ _log.debug(u'setting session authorization to user %s', self.owner.name)
+
+ curs = self.conn.cursor()
+ cmd = "set session authorization %(usr)s"
+ curs.execute(cmd, {'usr': self.owner.name})
+ curs.close()
+
+ return self.conn and 1
+ #--------------------------------------------------------------
+ def __db_exists(self):
+ #cmd = "BEGIN; SELECT datname FROM pg_database WHERE datname='%s'" % self.name
+ cmd = "SELECT datname FROM pg_database WHERE datname='%s'" % self.name
+
+ aCursor = self.conn.cursor()
+ try:
+ aCursor.execute(cmd)
+ except:
+ _log.exception(u">>>[%s]<<< failed." % cmd)
+ return None
+
+ res = aCursor.fetchall()
+ tmp = aCursor.rowcount
+ aCursor.close()
+ if tmp == 1:
+ _log.info(u"Database [%s] exists." % self.name)
+ return True
+
+ _log.info(u"Database [%s] does not exist." % self.name)
+ return None
+
+ #--------------------------------------------------------------
+ def __create_db(self):
+
+ # verify template database hash
+ template_version = cfg_get(self.section, 'template version')
+ if template_version is None:
+ _log.warning('cannot check template database identity hash, no version specified')
+ else:
+ converted, version = gmTools.input2int(template_version.lstrip('v'), 0)
+ if not converted:
+ _log.error(u'invalid template database definition: %s', template_version)
+ return False
+ if not gmPG2.database_schema_compatible(link_obj = self.conn, version = version):
+ _log.error(u'invalid [%s] schema structure in GNUmed template database [%s]', template_version, self.template_db)
+ return False
+
+ # check for target database
+ if self.__db_exists():
+ drop_existing = bool(int(cfg_get(self.section, 'drop target database')))
+ if drop_existing:
+ print_msg("==> dropping pre-existing target database [%s] ..." % self.name)
+ _log.info(u'trying to drop target database')
+ cmd = 'DROP DATABASE "%s"' % self.name
+ # DROP DATABASE must be run outside transactions
+ self.conn.commit()
+ self.conn.set_session(readonly = False, autocommit = True)
+ cursor = self.conn.cursor()
+ try:
+ _log.debug(u'running SQL: %s', cmd)
+ cursor.execute(cmd)
+ except:
+ _log.exception(u">>>[%s]<<< failed" % cmd)
+ _log.debug(u'conn state after failed DROP: %s', gmPG2.capture_conn_state(self.conn))
+ return False
+ finally:
+ cursor.close()
+ self.conn.set_session(readonly = False, autocommit = False)
+ else:
+ use_existing = bool(int(cfg_get(self.section, 'use existing target database')))
+ if use_existing:
+ # FIXME: verify that database is owned by "gm-dbo"
+ print_msg("==> using pre-existing target database [%s] ..." % self.name)
+ _log.info(u'using existing database [%s]', self.name)
+ return True
+ else:
+ _log.info(u'not using existing database [%s]', self.name)
+ return False
+
+ tablespace = cfg_get(self.section, 'tablespace')
+ if tablespace is None:
+ create_db_cmd = """
+ CREATE DATABASE \"%s\" with
+ owner = \"%s\"
+ template = \"%s\"
+ encoding = 'unicode'
+ ;""" % (self.name, self.owner.name, self.template_db)
+ else:
+ create_db_cmd = """
+ CREATE DATABASE \"%s\" with
+ owner = \"%s\"
+ template = \"%s\"
+ encoding = 'unicode'
+ tablespace = '%s'
+ ;""" % (self.name, self.owner.name, self.template_db, tablespace)
+
+ # get size
+ cursor = self.conn.cursor()
+ size_cmd = "SELECT pg_size_pretty(pg_database_size('%s'))" % self.template_db
+ cursor.execute(size_cmd)
+ size = cursor.fetchone()[0]
+ cursor.close()
+
+ # create database by cloning
+ print_msg("==> cloning [%s] (%s) as target database [%s] ..." % (self.template_db, size, self.name))
+ # CREATE DATABASE must be run outside transactions
+ self.conn.commit()
+ self.conn.set_session(readonly = False, autocommit = True)
+ cursor = self.conn.cursor()
+ try:
+ cursor.execute(create_db_cmd)
+ except:
+ _log.exception(u">>>[%s]<<< failed" % create_db_cmd)
+ return False
+ finally:
+ cursor.close()
+ self.conn.set_session(readonly = False, autocommit = False)
+
+ if not self.__db_exists():
+ return None
+ _log.info(u"Successfully created GNUmed database [%s]." % self.name)
+
+ return True
+
+ #--------------------------------------------------------------
+ def check_data_plausibility(self):
+
+ print_msg("==> checking migrated data for plausibility ...")
+
+ plausibility_queries = cfg_get(self.section, 'upgrade plausibility checks')
+ if plausibility_queries is None:
+ _log.warning('no plausibility checks defined')
+ print_msg(" ... skipped (no checks defined)")
+ return True
+
+ no_of_queries, remainder = divmod(len(plausibility_queries), 2)
+ if remainder != 0:
+ _log.error(u'odd number of plausibility queries defined, aborting')
+ print_msg(" ... failed (configuration error)")
+ return False
+
+ template_conn = connect (
+ self.server.name,
+ self.server.port,
+ self.template_db,
+ self.server.superuser.name,
+ self.server.superuser.password
+ )
+ template_conn.cookie = 'check_data_plausibility: template'
+
+ target_conn = connect (
+ self.server.name,
+ self.server.port,
+ self.name,
+ self.server.superuser.name,
+ self.server.superuser.password
+ )
+ target_conn.cookie = 'check_data_plausibility: target'
+
+ all_tests_successful = True
+
+ for idx in range(no_of_queries):
+ check_def = plausibility_queries[idx*2]
+ if check_def.startswith('--'):
+ _log.debug(u'skipped: %s', check_def)
+ continue
+
+ tag = u'?'
+ old_query = u'?'
+ try:
+ tag, old_query = check_def.split('::::')
+ except:
+ _log.exception(u'error in plausibility check, aborting')
+ _log.error(u'check definition: %s', check_def)
+ print_msg(" ... failed (check definition error)")
+ all_tests_successful = False
+ continue
+ new_query = plausibility_queries[(idx*2) + 1]
+
+ try:
+ rows, idx = gmPG2.run_ro_queries (
+ link_obj = template_conn,
+ queries = [{'cmd': str(old_query)}]
+ )
+ old_val = rows[0][0]
+ except:
+ _log.exception(u'error in plausibility check [%s] (old), aborting' % tag)
+ _log.error(u'SQL: %s', old_query)
+ print_msg(" ... failed (SQL error)")
+ all_tests_successful = False
+ continue
+
+ try:
+ rows, idx = gmPG2.run_ro_queries (
+ link_obj = target_conn,
+ queries = [{'cmd': str(new_query)}]
+ )
+ new_val = rows[0][0]
+ except:
+ _log.exception(u'error in plausibility check [%s] (new), aborting' % tag)
+ _log.error(u'SQL: %s', new_query)
+ print_msg(" ... failed (SQL error)")
+ all_tests_successful = False
+ continue
+
+ if new_val != old_val:
+ _log.error(u'plausibility check [%s] failed, expected: %s (in old DB), found: %s (in new DB)' % (tag, old_val, new_val))
+ _log.error(u'SQL (old DB): %s', old_query)
+ _log.error(u'SQL (new DB): %s', new_query)
+ print_msg(" ... failed (data error, check [%s])" % tag)
+ all_tests_successful = False
+ continue
+
+ _log.info(u'plausibility check [%s] succeeded' % tag)
+
+ template_conn.close()
+ target_conn.close()
+
+ return all_tests_successful
+
+ #--------------------------------------------------------------
+ def check_holy_auth_line(self):
+
+ holy_pattern = r'local.*samerole.*\+gm-logins'
+ holy_pattern_inactive = r'#\s*local.*samerole.*\+gm-logins'
+
+ conn = connect (
+ self.server.name,
+ self.server.port,
+ self.name,
+ self.server.superuser.name,
+ self.server.superuser.password
+ )
+ conn.cookie = u'holy auth check connection'
+
+ cmd = u"select setting from pg_settings where name = 'hba_file'"
+ rows, idx = gmPG2.run_ro_queries(link_obj = conn, queries = [{'cmd': cmd}])
+ conn.close()
+ if len(rows) == 0:
+ _log.info(u'cannot check pg_hba.conf for authentication information - not detectable in pg_settings')
+ return
+
+ hba_file = rows[0][0]
+ _log.info(u'hba file: %s', hba_file)
+
+ try:
+ io.open(hba_file, mode = 'rt').close()
+ except Exception:
+ _log.exception(u'cannot check pg_hba.conf for authentication information - not readable')
+ return
+
+ found_holy_line = False
+ for line in fileinput.input(hba_file):
+ if regex.match(holy_pattern, line) is not None:
+ found_holy_line = True
+ _log.info(u'found standard GNUmed authentication directive in pg_hba.conf')
+ _log.info(u'[%s]', line)
+ _log.info(u'it may still be in the wrong place, though, so double-check if clients cannot connect')
+ break
+
+ if not found_holy_line:
+ _log.info(u'did not find active standard GNUmed authentication directive in pg_hba.conf')
+ _log.info(u'regex: %s' % holy_pattern)
+
+ found_holy_line_inactive = False
+ for line in fileinput.input(hba_file):
+ if regex.match(holy_pattern_inactive, line) is not None:
+ found_holy_line_inactive = True
+ _log.info(u'found inactive standard GNUmed authentication directive in pg_hba.conf')
+ _log.info(u'[%s]', line)
+ _log.info(u'it may still be in the wrong place, though, so double-check if clients cannot connect')
+ break
+ if not found_holy_line_inactive:
+ _log.info(u'did not find inactive standard GNUmed authentication directive in pg_hba.conf either')
+ _log.info(u'regex: %s' % holy_pattern_inactive)
+
+ _log.info(u'bootstrapping is likely to have succeeded but clients probably cannot connect yet')
+ print_msg('==> sanity checking PostgreSQL authentication settings ...')
+ print_msg('')
+ print_msg('Note that even after successfully bootstrapping the GNUmed ')
+ print_msg('database PostgreSQL may still need to be configured to')
+ print_msg('allow GNUmed clients to connect to it.')
+ print_msg('')
+ print_msg('In many standard PostgreSQL installations this amounts to')
+ print_msg('adding (or uncommenting) the authentication directive:')
+ print_msg('')
+ print_msg(' "local samerole +gm-logins md5"')
+ print_msg('')
+ print_msg('in the proper place of the file:')
+ print_msg('')
+ print_msg(' %s' % hba_file)
+ print_msg('')
+ print_msg('For details refer to the GNUmed documentation at:')
+ print_msg('')
+ print_msg(' http://wiki.gnumed.de/bin/view/Gnumed/ConfigurePostgreSQL')
+ print_msg('')
+
+ #--------------------------------------------------------------
+ def import_data(self):
+
+ print_msg("==> upgrading reference data sets ...")
+
+ import_scripts = cfg_get(self.section, "data import scripts")
+ if (import_scripts is None) or (len(import_scripts) == 0):
+ _log.info(u'skipped data import: no scripts to run')
+ print_msg(" ... skipped (no scripts to run)")
+ return True
+
+ script_base_dir = cfg_get(self.section, "script base directory")
+ script_base_dir = os.path.expanduser(script_base_dir)
+ # doesn't work on MacOSX:
+ #script_base_dir = os.path.abspath(os.path.expanduser(script_base_dir))
+ script_base_dir = os.path.normcase(os.path.normpath(os.path.join('.', script_base_dir)))
+
+ for import_script in import_scripts:
+ try:
+ script = gmTools.import_module_from_directory(module_path = script_base_dir, module_name = import_script, always_remove_path = True)
+ except ImportError:
+ print_msg(" ... failed (cannot load script [%s])" % import_script)
+ _log.error(u'cannot load data set import script [%s/%s]' % (script_base_dir, import_script))
+ return False
+
+ try:
+ script.run(conn = self.conn)
+ except:
+ print_msg(" ... failed (cannot run script [%s])" % import_script)
+ _log.exception(u'cannot run import script [%s]' % import_script)
+ return False
+
+ if import_script.endswith('.py'):
+ import_script = import_script[:-3]
+ import gc
+ try:
+ del sys.modules[import_script]
+ del script
+ gc.collect()
+ except:
+ _log.exception(u'cannot remove data import script module [%s], hoping for the best', import_script)
+
+ return True
+
+ #--------------------------------------------------------------
+ def verify_result_hash(self):
+
+ print_msg("==> verifying target database schema ...")
+
+ target_version = cfg_get(self.section, 'target version')
+ if target_version == 'devel':
+ print_msg(" ... skipped (devel version)")
+ _log.info(u'result schema hash: %s', gmPG2.get_schema_hash(link_obj = self.conn))
+ _log.warning('testing/development only, not failing due to invalid target database identity hash')
+ return True
+ converted, version = gmTools.input2int(target_version.lstrip('v'), 2)
+ if not converted:
+ _log.error(u'cannot convert target database version: %s', target_version)
+ print_msg(" ... failed (invalid target version specification)")
+ return False
+ if gmPG2.database_schema_compatible(link_obj = self.conn, version = version):
+ _log.info(u'database identity hash properly verified')
+ return True
+ _log.error(u'target database identity hash invalid')
+ print_msg(" ... failed (hash mismatch)")
+ return False
+
+ #--------------------------------------------------------------
+ def reindex_all(self):
+
+ print_msg("==> reindexing target database (can take a while) ...")
+
+ do_reindex = cfg_get(self.section, 'reindex')
+ if do_reindex is None:
+ do_reindex = True
+ else:
+ do_reindex = (int(do_reindex) == 1)
+ if not do_reindex:
+ _log.warning('skipping REINDEXing')
+ print_msg(" ... skipped")
+ return True
+
+ _log.info(u'REINDEXing cloned target database so upgrade does not fail in case of a broken index')
+ _log.info(u'this may potentially take "quite a long time" depending on how much data there is in the database')
+ _log.info(u'you may want to monitor the PostgreSQL log for signs of progress')
+
+ # REINDEX must be run outside transactions
+ self.conn.commit()
+ self.conn.set_session(readonly = False, autocommit = True)
+ curs_outer = self.conn.cursor()
+ cmd = 'REINDEX (VERBOSE) DATABASE %s' % self.name
+ try:
+ curs_outer.execute(cmd)
+ except:
+ _log.exception(u">>>[%s]<<< failed" % cmd)
+ # re-attempt w/o VERBOSE
+ _log.info(u'attempting REINDEXing without VERBOSE')
+ curs_inner = self.conn.cursor()
+ cmd = 'REINDEX DATABASE %s' % self.name
+ try:
+ curs_inner.execute(cmd)
+ except:
+ _log.exception(u">>>[%s]<<< failed" % cmd)
+ return False
+ finally:
+ curs_inner.close()
+ self.conn.set_session(readonly = False, autocommit = False)
+ finally:
+ curs_outer.close()
+ self.conn.set_session(readonly = False, autocommit = False)
+
+ return True
+
+ #--------------------------------------------------------------
+ def revalidate_constraints(self):
+
+ print_msg("==> revalidating constraints in target database (can take a while) ...")
+
+ do_revalidate = cfg_get(self.section, 'revalidate')
+ if do_revalidate is None:
+ do_revalidate = True # default: do it
+ else:
+ do_revalidate = (int(do_revalidate) == 1)
+ if not do_revalidate:
+ _log.warning('skipping VALIDATE CONSTRAINT')
+ print_msg(" ... skipped")
+ return True
+
+ _log.info(u'reVALIDATing CONSTRAINTs in cloned target database so upgrade does not fail due to broken data')
+ _log.info(u'this may potentially take "quite a long time" depending on how much data there is in the database')
+ _log.info(u'you may want to monitor the PostgreSQL log for signs of progress')
+
+ curs = self.conn.cursor()
+ cmd = u"""do $$
+ DECLARE
+ r record;
+ BEGIN
+ FOR r IN (
+ select con.connamespace, nsp.nspname, con.conname, con.conrelid, rel.relname
+ from pg_constraint con
+ join pg_namespace nsp on nsp.oid = con.connamespace
+ join pg_class rel on rel.oid = con.conrelid
+ where contype in ('c','f')
+ ) LOOP
+ RAISE NOTICE 'validating [%] on [%.%]', r.conname, r.nspname, r.relname;
+ EXECUTE 'UPDATE pg_constraint SET convalidated=false WHERE conname=$1 AND connamespace=$2 AND conrelid=$3' USING r.conname, r.connamespace, r.conrelid;
+ EXECUTE 'ALTER TABLE ' || r.nspname || '.' || r.relname || ' VALIDATE CONSTRAINT "' || r.conname || '"';
+ END LOOP;
+ END
+ $$;"""
+ try:
+ curs.execute(cmd)
+ except:
+ _log.exception(u">>>[VALIDATE CONSTRAINT]<<< failed")
+ return False
+ finally:
+ curs.close()
+ return True
+
+ #--------------------------------------------------------------
+ def transfer_users(self):
+ print_msg("==> transferring users ...")
+ do_user_transfer = cfg_get(self.section, 'transfer users')
+ if do_user_transfer is None:
+ _log.info(u'user transfer not defined')
+ print_msg(" ... skipped (unconfigured)")
+ return True
+ do_user_transfer = int(do_user_transfer)
+ if not do_user_transfer:
+ _log.info(u'configured to not transfer users')
+ print_msg(" ... skipped (disabled)")
+ return True
+
+ cmd = u"select gm.transfer_users('%s'::text)" % self.template_db
+ try:
+ rows, idx = gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}], end_tx = True, return_data = True)
+ except gmPG2.dbapi.ProgrammingError:
+ # maybe an old database
+ _log.info(u'problem running gm.transfer_users(), trying gm_transfer_users()')
+ cmd = u"select gm_transfer_users('%s'::text)" % self.template_db
+ rows, idx = gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}], end_tx = True, return_data = True)
+
+ if rows[0][0]:
+ _log.info(u'users properly transferred from [%s] to [%s]' % (self.template_db, self.name))
+ return True
+ _log.error(u'error transferring user from [%s] to [%s]' % (self.template_db, self.name))
+ print_msg(" ... failed")
+ return False
+
+ #--------------------------------------------------------------
+ def bootstrap_auditing(self):
+ print_msg("==> setting up auditing ...")
+ # get audit trail configuration
+ tmp = cfg_get(self.section, 'audit disable')
+ # if this option is not given, assume we want auditing
+ if tmp is not None:
+ # if we don't want auditing on these tables, return without error
+ if int(tmp) == 1:
+ print_msg(' ... skipped (disabled)')
+ return True
+
+ tmp = cfg_get(self.section, 'audit trail parent table')
+ if tmp is None:
+ return None
+ aud_gen.audit_trail_parent_table = tmp
+
+ tmp = cfg_get(self.section, 'audit trail table prefix')
+ if tmp is None:
+ return None
+ aud_gen.audit_trail_table_prefix = tmp
+
+ tmp = cfg_get(self.section, 'audit fields table')
+ if tmp is None:
+ return None
+ aud_gen.audit_fields_table = tmp
+
+ # create auditing schema
+ curs = self.conn.cursor()
+ audit_schema = gmAuditSchemaGenerator.create_audit_ddl(curs)
+ curs.close()
+ if audit_schema is None:
+ _log.error(u'cannot generate audit trail schema for GNUmed database [%s]' % self.name)
+ return None
+ # write schema to file
+ tmpfile = os.path.join(tempfile.gettempdir(), 'audit-trail-schema.sql')
+ f = io.open(tmpfile, mode = 'wt', encoding = 'utf8')
+ for line in audit_schema:
+ f.write(u'%s;\n' % line)
+ f.close()
+
+ # import auditing schema
+ psql = gmPsql.Psql(self.conn)
+ if psql.run(tmpfile) != 0:
+ _log.error(u"cannot import audit schema definition for database [%s]" % (self.name))
+ return None
+
+ if _keep_temp_files:
+ return True
+
+ try:
+ os.remove(tmpfile)
+ except Exception:
+ _log.exception(u'cannot remove audit trail schema file [%s]' % tmpfile)
+ return True
+
+ #--------------------------------------------------------------
+ def bootstrap_notifications(self):
+
+ # setup clin.clin_root_item child tables FK's
+ print_msg("==> setting up encounter/episode FKs and IDXs ...")
+ child_tables = gmPG2.get_child_tables(link_obj = self.conn, schema = 'clin', table = 'clin_root_item')
+ _log.info(u'clin.clin_root_item child tables:')
+ for child in child_tables:
+ _log.info(u'%s.%s', child['namespace'], child['table'])
+ for child in child_tables:
+ # .fk_episode
+ FKs = gmPG2.get_foreign_key_names (
+ link_obj = self.conn,
+ src_schema = child['namespace'],
+ src_table = child['table'],
+ src_column = 'fk_episode',
+ target_schema = 'clin',
+ target_table = 'episode',
+ target_column = 'pk',
+ )
+ if len(FKs) > 0:
+ _log.info(u'%s FK(s) exist:', len(FKs))
+ for idx in range(len(FKs)):
+ FK = FKs[idx]
+ _log.info(u' #%s = %s.%s: %s.%s.%s -> %s.%s.%s', idx + 1, FK['constraint_schema'], FK['constraint_name'], FK['source_schema'], FK['source_table'], FK['source_column'], FK['target_schema'], FK['target_table'], FK['target_column'])
+ else:
+ _log.info(u'adding FK: %s.%s.fk_episode -> clin.episode.pk', child['namespace'], child['table'])
+ cmd = SQL_add_foreign_key % {
+ 'src_schema': child['namespace'],
+ 'src_tbl': child['table'],
+ 'src_col': 'fk_episode',
+ 'target_schema': 'clin',
+ 'target_tbl': 'episode',
+ 'target_col': 'pk'
+ }
+ gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}])
+ # index on .fk_episode
+ idx_defs = gmPG2.get_index_name(indexed_table = u'%s.%s' % (child['namespace'], child['table']), indexed_column = u'fk_episode', link_obj = self.conn)
+ # drop any existing
+ for idx_def in idx_defs:
+ _log.info(u'dropping index %s.%s', idx_def['index_schema'], idx_def['index_name'])
+ cmd = u'DROP INDEX IF EXISTS %s.%s CASCADE' % (idx_def['index_schema'], idx_def['index_name'])
+ gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}])
+ # create
+ _log.info(u'creating index idx_%s_%s_fk_episode', child['namespace'], child['table'])
+ cmd = SQL_add_index % {
+ 'idx_schema': child['namespace'],
+ 'idx_name': u'idx_%s_%s_fk_episode' % (child['namespace'], child['table']),
+ 'idx_table': child['table'],
+ 'idx_col': u'fk_episode'
+ }
+ gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}])
+
+ # .fk_encounter
+ FKs = gmPG2.get_foreign_key_names (
+ link_obj = self.conn,
+ src_schema = child['namespace'],
+ src_table = child['table'],
+ src_column = 'fk_encounter',
+ target_schema = 'clin',
+ target_table = 'encounter',
+ target_column = 'pk'
+ )
+ if len(FKs) > 0:
+ _log.info(u'%s FK(s) exist:', len(FKs))
+ for idx in range(len(FKs)):
+ FK = FKs[idx]
+ _log.info(u' #%s = %s.%s: %s.%s.%s -> %s.%s.%s', idx + 1, FK['constraint_schema'], FK['constraint_name'], FK['source_schema'], FK['source_table'], FK['source_column'], FK['target_schema'], FK['target_table'], FK['target_column'])
+ else:
+ _log.info(u'adding FK: %s.%s.fk_encounter -> clin.encounter.pk', child['namespace'], child['table'])
+ cmd = SQL_add_foreign_key % {
+ 'src_schema': child['namespace'],
+ 'src_tbl': child['table'],
+ 'src_col': 'fk_encounter',
+ 'target_schema': 'clin',
+ 'target_tbl': 'encounter',
+ 'target_col': 'pk'
+ }
+ gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}])
+ # index on .fk_encounter
+ idx_defs = gmPG2.get_index_name(indexed_table = u'%s.%s' % (child['namespace'], child['table']), indexed_column = u'fk_encounter', link_obj = self.conn)
+ # drop any existing
+ for idx_def in idx_defs:
+ _log.info(u'dropping index %s.%s', idx_def['index_schema'], idx_def['index_name'])
+ cmd = u'DROP INDEX IF EXISTS %s.%s CASCADE' % (idx_def['index_schema'], idx_def['index_name'])
+ gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}])
+ # create
+ _log.info(u'creating index idx_%s_%s_fk_encounter', child['namespace'], child['table'])
+ cmd = SQL_add_index % {
+ 'idx_schema': child['namespace'],
+ 'idx_name': u'idx_%s_%s_fk_encounter' % (child['namespace'], child['table']),
+ 'idx_table': child['table'],
+ 'idx_col': u'fk_encounter'
+ }
+ gmPG2.run_rw_queries(link_obj = self.conn, queries = [{'cmd': cmd}])
+
+ curs = self.conn.cursor()
+
+ # re-create fk_encounter/fk_episode sanity check triggers on all tables
+ if gmPG2.function_exists(link_obj = curs, schema = u'gm', function = u'create_all_enc_epi_sanity_check_triggers'):
+ print_msg("==> setting up encounter/episode FK sanity check triggers ...")
+ _log.debug(u'attempting to set up sanity check triggers on all tables linking to encounter AND episode')
+ cmd = u'select gm.create_all_enc_epi_sanity_check_triggers()'
+ curs.execute(cmd)
+ result = curs.fetchone()
+ if result[0] is False:
+ _log.error(u'error creating sanity check triggers on all tables linking to clin.encounter AND clin.episode')
+ curs.close()
+ return None
+
+ # always re-create generic super signal (if exists)
+ if gmPG2.function_exists(link_obj = curs, schema = u'gm', function = u'create_all_table_mod_triggers'):
+ print_msg("==> setting up generic notifications ...")
+ _log.debug(u'attempting to create generic modification announcement triggers on all registered tables')
+
+ cmd = u"SELECT gm.create_all_table_mod_triggers(True::boolean)"
+ curs.execute(cmd)
+ result = curs.fetchone()
+ curs.close()
+ if result[0] is False:
+ _log.error(u'cannot create generic modification announcement triggers on all tables')
+ return None
+
+ self.conn.commit()
+ return True
+
+#==================================================================
+class gmBundle:
+ def __init__(self, aBundleAlias = None):
+ # sanity check
+ if aBundleAlias is None:
+ raise ConstructorError("Need to know bundle name to install it.")
+
+ self.alias = aBundleAlias
+ self.section = "bundle %s" % aBundleAlias
+ #--------------------------------------------------------------
+ def bootstrap(self):
+ _log.info(u"bootstrapping bundle [%s]" % self.alias)
+
+ # load bundle definition
+ database_alias = cfg_get(self.section, "database alias")
+ if database_alias is None:
+ _log.error(u"Need to know database name to install bundle [%s]." % self.alias)
+ return None
+ # bootstrap database
+ try:
+ database(aDB_alias = database_alias)
+ except:
+ _log.exception(u"Cannot bootstrap bundle [%s].", self.alias)
+ return None
+ self.db = _bootstrapped_dbs[database_alias]
+
+ # check PostgreSQL version
+ if not self.__verify_pg_version():
+ _log.error(u"Wrong PostgreSQL version.")
+ return None
+
+ # import schema
+ if not _import_schema(group=self.section, schema_opt='schema', conn=self.db.conn):
+ _log.error(u"Cannot import schema definition for bundle [%s] into database [%s]." % (self.alias, database_alias))
+ return None
+
+ return True
+ #--------------------------------------------------------------
+ def __verify_pg_version(self):
+ """Verify database version information."""
+
+ required_version = cfg_get(self.section, "minimum postgresql version")
+ if required_version is None:
+ _log.error(u"Cannot load minimum required PostgreSQL version from config file.")
+ return None
+
+ _log.info(u"minimum required PostgreSQL version: %s" % required_version)
+
+ converted, pg_ver = gmTools.input2decimal(gmPG2.postgresql_version)
+ if not converted:
+ _log.error(u'error checking PostgreSQL version')
+ return None
+ converted, req_version = gmTools.input2decimal(required_version)
+ if not converted:
+ _log.error(u'error checking PostgreSQL version')
+ _log.error(u'required: %s', required_version)
+ return None
+ if pg_ver < req_version:
+ _log.error(u"Reported live PostgreSQL version [%s] is smaller than the required minimum version [%s]." % (gmPG2.postgresql_version, required_version))
+ return None
+
+ _log.info(u"installed PostgreSQL version: %s - this is fine with me" % gmPG2.postgresql_version)
+ return True
+#==================================================================
+def bootstrap_bundles():
+ # get bundle list
+ bundles = cfg_get("installation", "bundles")
+ if bundles is None:
+ exit_with_msg("Bundle list empty. Nothing to do here.")
+ # run through bundles
+ for bundle_alias in bundles:
+ print_msg('==> bootstrapping "%s" ...' % bundle_alias)
+ bundle = gmBundle(bundle_alias)
+ if not bundle.bootstrap():
+ return None
+ return True
+#--------------------------------------------------------------
+def import_data():
+ for db_key in _bootstrapped_dbs.keys():
+ db = _bootstrapped_dbs[db_key]
+ if not db.import_data():
+ return None
+ return True
+#--------------------------------------------------------------
+def bootstrap_auditing():
+ """bootstrap auditing in all bootstrapped databases"""
+ for db_key in _bootstrapped_dbs.keys():
+ db = _bootstrapped_dbs[db_key]
+ if not db.bootstrap_auditing():
+ return None
+ return True
+#--------------------------------------------------------------
+def bootstrap_notifications():
+ """bootstrap notification in all bootstrapped databases"""
+ for db_key in _bootstrapped_dbs.keys():
+ db = _bootstrapped_dbs[db_key]
+ if not db.bootstrap_notifications():
+ return None
+ return True
+#------------------------------------------------------------------
+def _run_query(aCurs, aQuery, args=None):
+ # FIXME: use gmPG2.run_rw_query()
+ if args is None:
+ try:
+ aCurs.execute(aQuery)
+ except:
+ _log.exception(u">>>%s<<< failed" % aQuery)
+ return False
+ else:
+ try:
+ aCurs.execute(aQuery, args)
+ except:
+ _log.exception(u">>>%s<<< failed" % aQuery)
+ _log.error(str(args))
+ return False
+ return True
+
+#------------------------------------------------------------------
+def ask_for_confirmation():
+ bundles = cfg_get("installation", "bundles")
+ if bundles is None:
+ return True
+ if len(bundles) == 0:
+ return True
+
+ if not _interactive:
+ print_msg("You are about to install the following parts of GNUmed:")
+ print_msg("-------------------------------------------------------")
+ for bundle in bundles:
+ db_alias = cfg_get("bundle %s" % bundle, "database alias")
+ db_name = cfg_get("database %s" % db_alias, "name")
+ srv_alias = cfg_get("database %s" % db_alias, "server alias")
+ srv_name = cfg_get("server %s" % srv_alias, "name")
+ print_msg('bundle "%s" in <%s> (or overridden) on <%s>' % (bundle, db_name, srv_name))
+ print_msg("-------------------------------------------------------")
+ desc = cfg_get("installation", "description")
+ if desc is not None:
+ for line in desc:
+ print_msg(line)
+ return True
+
+ print("You are about to install the following parts of GNUmed:")
+ print("-------------------------------------------------------")
+ for bundle in bundles:
+ db_alias = cfg_get("bundle %s" % bundle, "database alias")
+ db_name = cfg_get("database %s" % db_alias, "name")
+ srv_alias = cfg_get("database %s" % db_alias, "server alias")
+ srv_name = cfg_get("server %s" % srv_alias, "name")
+ print('bundle "%s" in <%s> (or overridden) on <%s>' % (bundle, db_name, srv_name))
+ print("-------------------------------------------------------")
+ desc = cfg_get("installation", "description")
+ if desc is not None:
+ for line in desc:
+ print(line)
+
+ print("Do you really want to install this database setup ?")
+ answer = input("Type yes or no: ")
+ if answer == "yes":
+ return True
+ return None
+
+#--------------------------------------------------------------
+def _import_schema (group=None, schema_opt="schema", conn=None):
+ # load schema
+ schema_files = cfg_get(group, schema_opt)
+ if schema_files is None:
+ _log.error(u"Need to know schema definition to install it.")
+ return None
+
+ schema_base_dir = cfg_get(group, "schema base directory")
+ if schema_base_dir is None:
+ _log.warning("no schema files base directory specified")
+ # look for base dirs for schema files
+ if os.path.exists (os.path.join ('.', 'sql')):
+ schema_base_dir = '.'
+ if os.path.exists ('../sql'):
+ schema_base_dir = '..'
+ if os.path.exists ('/usr/share/gnumed/server/sql'):
+ schema_base_dir = '/usr/share/gnumed/server'
+ if os.path.exists (os.path.expandvars('$GNUMED_DIR/server/sql')):
+ schema_base_dir = os.path.expandvars('$GNUMED_DIR/server')
+
+ # and import them
+ psql = gmPsql.Psql(conn)
+ for filename in schema_files:
+ if filename.strip() == u'':
+ continue # skip empty line
+ if filename.startswith(u'# '):
+ _log.info(filename) # log as comment
+ continue
+ full_path = os.path.join(schema_base_dir, filename)
+ if psql.run(full_path) == 0:
+ #_log.info(u'success')
+ continue
+ _log.error(u'failure')
+ return None
+
+ return True
+
+#------------------------------------------------------------------
+def exit_with_msg(aMsg = None):
+ if aMsg is not None:
+ print(aMsg)
+ print('')
+ print("Please check the log file for details:")
+ print('')
+ print(' ', gmLog2._logfile_name)
+ print('')
+
+ _log.error(aMsg)
+ _log.info(u'shutdown')
+ sys.exit(1)
+
+#------------------------------------------------------------------
+def print_msg(msg=None):
+ if quiet:
+ return
+ print(msg)
+
+#-----------------------------------------------------------------
+def become_pg_demon_user():
+ """Become "postgres" user.
+
+ On UNIX type systems, attempt to use setuid() to
+ become the postgres user if possible.
+
+ This is so we can use the IDENT method to get to
+ the database (NB by default, at least on Debian and
+ postgres source installs, this is the only way,
+ as the postgres user has no password [-- and TRUST
+ is not allowed -KH])
+ """
+ try:
+ import pwd
+ except ImportError:
+ _log.warning("running on broken OS -- can't import pwd module")
+ return None
+
+ try:
+ running_as = pwd.getpwuid(os.getuid())[0]
+ _log.info(u'running as user [%s]' % running_as)
+	except Exception:
+ running_as = None
+
+ gmPG2.log_auth_environment()
+
+ pg_demon_user_passwd_line = None
+ try:
+ pg_demon_user_passwd_line = pwd.getpwnam('postgres')
+ # make sure we actually use this name to log in
+ _cfg.set_option(group = 'user postgres', option = 'name', value = 'postgres', source = 'file')
+ except KeyError:
+ try:
+ pg_demon_user_passwd_line = pwd.getpwnam ('pgsql')
+ _cfg.set_option(group = 'user postgres', option = 'name', value = 'pgsql', source = 'file')
+ except KeyError:
+ _log.warning('cannot find postgres user')
+ return None
+
+ if os.getuid() == 0: # we are the super-user
+ _log.info(u'switching to UNIX user [%s]' % pg_demon_user_passwd_line[0])
+ os.setuid(pg_demon_user_passwd_line[2])
+ gmPG2.log_auth_environment()
+
+ elif running_as == pg_demon_user_passwd_line[0]: # we are the postgres user already
+ _log.info(u'I already am the UNIX user [%s]' % pg_demon_user_passwd_line[0])
+
+ else:
+ _log.warning('not running as root or postgres, cannot become postmaster demon user')
+ _log.warning('may have trouble connecting as gm-dbo if IDENT auth is forced upon us')
+ if _interactive:
+ print_msg("WARNING: This script may not work if not running as the system administrator.")
+
+#==============================================================================
+def cfg_get(group=None, option=None):
+ return _cfg.get (
+ group = group,
+ option = option,
+ source_order = [('file', 'return')]
+ )
+
+#==================================================================
+def handle_cfg():
+ """Bootstrap the source 'file' in _cfg."""
+
+ _log.info(u'config file: %s', _cfg.source_files['file'])
+
+ become_pg_demon_user()
+
+ global _interactive
+
+ if _interactive is None:
+ tmp = cfg_get("installation", "interactive")
+ if tmp == "no":
+ _interactive = False
+ else:
+ _interactive = True
+
+ tmp = cfg_get('installation', 'keep temp files')
+ if tmp == "yes":
+ global _keep_temp_files
+ _keep_temp_files = True
+
+ if not ask_for_confirmation():
+ exit_with_msg("Bootstrapping aborted by user.")
+
+ if not bootstrap_bundles():
+ exit_with_msg("Cannot bootstrap bundles.")
+
+ if not bootstrap_auditing():
+ exit_with_msg("Cannot bootstrap audit trail.")
+
+ if not bootstrap_notifications():
+ exit_with_msg("Cannot bootstrap notification tables.")
+
+ if not import_data():
+ exit_with_msg("Bootstrapping failed: unable to import data")
+
+#==================================================================
+def main():
+
+ _cfg.add_cli(long_options = ['conf-file=', 'log-file=', 'quiet'])
+
+ global quiet
+ quiet = bool(_cfg.get(option = '--quiet', source_order = [('cli', 'return')]))
+
+ print_msg("=======================================")
+ print_msg("Bootstrapping GNUmed database system...")
+ print_msg("=======================================")
+
+ # get initial conf file from CLI
+ cfg_file = _cfg.get(option = '--conf-file', source_order = [('cli', 'return')])
+ if cfg_file is None:
+ _log.error(u"no config file specified on command line")
+ exit_with_msg('Cannot bootstrap without config file. Use --conf-file=<FILE>.')
+
+ _log.info(u'initial config file: %s', cfg_file)
+
+ # read that conf file
+ _cfg.add_file_source (
+ source = 'file',
+ file = cfg_file
+ )
+
+ # does it point to other conf files ?
+ cfg_files = _cfg.get (
+ group = 'installation',
+ option = 'config files',
+ source_order = [('file', 'return')]
+ )
+
+ if cfg_files is None:
+ _log.info(u'single-shot config file')
+ handle_cfg()
+ else:
+ _log.info(u'aggregation of config files')
+ tmp = cfg_get("installation", "interactive")
+ global _interactive
+ if tmp == "no":
+ _interactive = False
+ else:
+ _interactive = True
+ for cfg_file in cfg_files:
+ # read that conf file
+ _cfg.add_file_source (
+ source = 'file',
+ file = cfg_file
+ )
+ handle_cfg()
+
+ global _bootstrapped_dbs
+
+	db = _bootstrapped_dbs[list(_bootstrapped_dbs)[0]]		# dict.keys() is not subscriptable under Python 3
+
+ if not db.verify_result_hash():
+ exit_with_msg("Bootstrapping failed: wrong result hash")
+
+ if not db.check_data_plausibility():
+ exit_with_msg("Bootstrapping failed: plausibility checks inconsistent")
+
+# if not db.import_data():
+# exit_with_msg("Bootstrapping failed: unable to import data")
+
+ db.check_holy_auth_line()
+
+ _log.info(u"shutdown")
+ print("Done bootstrapping GNUmed database: We very likely succeeded.")
+ print('log:', gmLog2._logfile_name)
+
+#==================================================================
+if __name__ != "__main__":
+ print("This currently is not intended to be used as a module.")
+ sys.exit(1)
+
+
+gmI18N.activate_locale()
+
+_log.info(u'startup')
+
+try:
+ main()
+except Exception:
+ _log.exception(u'unhandled exception caught, shutting down connections')
+ exit_with_msg(u'Bootstrapping failed: unhandled exception occurred')
+finally:
+ for conn in conn_ref_count:
+ if conn.closed == 0:
+ _log.warning(u'open connection detected: %s', conn.cookie)
+ _log.warning(u'%s', conn)
+ _log.warning(u'closing connection')
+ conn.close()
+
+_log.info(u'after main, before sys.exit(0)')
+
+sys.exit(0)
+
+
+#==================================================================
+# pipe = popen2.Popen3(cmd, 1==1)
+# pipe.tochild.write("%s\n" % aPassword)
+# pipe.tochild.flush()
+# pipe.tochild.close()
+
+# result = pipe.wait()
+# print result
+
+ # read any leftovers
+# pipe.fromchild.flush()
+# pipe.childerr.flush()
+# tmp = pipe.fromchild.read()
+# lines = tmp.split("\n")
+# for line in lines:
+# _log.debug(u"child stdout: [%s]" % line, gmLog.lCooked)
+# tmp = pipe.childerr.read()
+# lines = tmp.split("\n")
+# for line in lines:
+# _log.error(u"child stderr: [%s]" % line, gmLog.lCooked)
+
+# pipe.fromchild.close()
+# pipe.childerr.close()
+# del pipe
+
+#==================================================================
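
For readers following the code: _run_query() at the top of this file wraps cursor.execute() so failures are logged and reported as a boolean instead of raised. A minimal stand-alone sketch of the same pattern in plain psycopg2; the DSN and query are made-up examples, not GNUmed defaults:

import logging
import psycopg2

logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('gm.bootstrapper')

def run_query(curs, query, args=None):
	# execute <query>, log any failure, report success as a boolean
	try:
		curs.execute(query, args)		# psycopg2 skips parameter substitution when args is None
	except Exception:
		_log.exception('>>>%s<<< failed', query)
		if args is not None:
			_log.error(str(args))
		return False
	return True

conn = psycopg2.connect('dbname=gnumed_v22 user=gm-dbo')	# hypothetical DSN
curs = conn.cursor()
if run_query(curs, 'SELECT version()'):
	print(curs.fetchone()[0])
conn.close()
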
=====================================
server/bootstrap/fixup_db-v22.conf
=====================================
--- a/server/bootstrap/fixup_db-v22.conf
+++ b/server/bootstrap/fixup_db-v22.conf
@@ -57,6 +57,7 @@ $upgrade plausibility checks$
script base directory = ../sql/v21-v22/python/
data import scripts = $data import scripts$
+v22-2-fixup-form-templates.py
$data import scripts$
#----------------------------------
=====================================
server/bootstrap/gmAuditSchemaGenerator.py.bak
=====================================
--- /dev/null
+++ b/server/bootstrap/gmAuditSchemaGenerator.py.bak
@@ -0,0 +1,426 @@
+"""Automatic GNUmed audit trail generation.
+
+This module creates SQL DDL commands for the audit
+trail triggers and functions to be created in the schema "audit".
+
+Theory of operation:
+
+Any table that needs to be audited (all modifications
+logged) must be recorded in the table "audit.audited_tables".
+
+This script creates the triggers, functions and tables
+necessary to establish the audit trail. Some or all
+audit trail tables may have been created previously but
+need not contain all columns of the audited table. Do not
+put any constraints on the audit trail tables except for
+"not null" on those columns that cannot be null in the
+audited table.
+"""
+#==================================================================
+__author__ = "Horst Herb, Karsten.Hilbert at gmx.net"
+__license__ = "GPL v2 or later" # (details at http://www.gnu.org)
+
+import sys, os.path, string, logging, io
+
+
+from Gnumed.pycommon import gmPG2
+
+
+_log = logging.getLogger('gm.bootstrapper')
+
+
+LOG_TABLE_PREFIX = u'log_' # the audit trail tables start with this prefix
+AUDIT_TRAIL_PARENT_TABLE = u'audit_trail' # and inherit from this table
+AUDIT_FIELDS_TABLE = u'audit_fields' # audited tables inherit these fields
+AUDIT_SCHEMA = u'audit' # audit stuff lives in this schema
+
+#==================================================================
+# SQL statements for auditing setup script
+#------------------------------------------------------------------
+# audit triggers are named "zt_*_*" to make
+# reasonably sure they are executed last
+
+# insert
+SQL_TEMPLATE_INSERT = u"""DROP FUNCTION IF EXISTS audit.ft_ins_%(src_tbl)s() cascade;
+
+CREATE FUNCTION audit.ft_ins_%(src_tbl)s()
+ RETURNS trigger
+ LANGUAGE 'plpgsql'
+ SECURITY DEFINER
+ AS '
+DECLARE
+ _is_allowed_inserter boolean;
+BEGIN
+ -- is the session user allowed to insert data ?
+ SELECT gm.account_is_dbowner_or_staff(SESSION_USER) INTO STRICT _is_allowed_inserter;
+ IF _is_allowed_inserter IS FALSE THEN
+ RAISE EXCEPTION
+ ''INSERT: gm.account_is_dbowner_or_staff(NAME): <%%> is neither database owner, nor <postgres>, nor on staff'', SESSION_USER
+ USING ERRCODE = ''integrity_constraint_violation''
+ ;
+ return NEW;
+ END IF;
+
+ NEW.row_version := 0;
+ NEW.modified_when := CURRENT_TIMESTAMP;
+ NEW.modified_by := SESSION_USER;
+ return NEW;
+END;';
+
+CREATE TRIGGER zt_ins_%(src_tbl)s
+ BEFORE INSERT ON %(src_schema)s.%(src_tbl)s
+ FOR EACH ROW EXECUTE PROCEDURE audit.ft_ins_%(src_tbl)s();
+"""
+
+SQL_TEMPLATE_INSERT_NO_INSERTER_CHECK = u"""DROP FUNCTION IF EXISTS audit.ft_ins_%(src_tbl)s() cascade;
+
+CREATE FUNCTION audit.ft_ins_%(src_tbl)s()
+ RETURNS trigger
+ LANGUAGE 'plpgsql'
+ SECURITY DEFINER
+ AS '
+BEGIN
+ NEW.row_version := 0;
+ NEW.modified_when := CURRENT_TIMESTAMP;
+ NEW.modified_by := SESSION_USER;
+ return NEW;
+END;';
+
+CREATE TRIGGER zt_ins_%(src_tbl)s
+ BEFORE INSERT ON %(src_schema)s.%(src_tbl)s
+ FOR EACH ROW EXECUTE PROCEDURE audit.ft_ins_%(src_tbl)s();
+"""
+
+# update
+SQL_TEMPLATE_UPDATE = u"""DROP FUNCTION IF EXISTS audit.ft_upd_%(src_tbl)s() cascade;
+
+CREATE FUNCTION audit.ft_upd_%(src_tbl)s()
+ RETURNS trigger
+ LANGUAGE 'plpgsql'
+ SECURITY DEFINER
+ AS '
+DECLARE
+ _is_allowed_updater boolean;
+BEGIN
+ -- is the session user allowed to update data ?
+ SELECT gm.account_is_dbowner_or_staff(SESSION_USER) INTO STRICT _is_allowed_updater;
+ IF _is_allowed_updater IS FALSE THEN
+ RAISE EXCEPTION
+ ''UPDATE: gm.account_is_dbowner_or_staff(NAME): <%%> is neither database owner, nor <postgres>, nor on staff'', SESSION_USER
+ USING ERRCODE = ''integrity_constraint_violation''
+ ;
+ return NEW;
+ END IF;
+
+ NEW.row_version := OLD.row_version + 1;
+ NEW.modified_when := CURRENT_TIMESTAMP;
+ NEW.modified_by := SESSION_USER;
+ INSERT INTO audit.%(log_tbl)s (
+ orig_version, orig_when, orig_by, orig_tableoid, audit_action,
+ %(cols_clause)s
+ ) VALUES (
+ OLD.row_version, OLD.modified_when, OLD.modified_by, TG_RELID, TG_OP,
+ %(vals_clause)s
+ );
+ return NEW;
+END;';
+
+CREATE TRIGGER zt_upd_%(src_tbl)s
+ BEFORE UPDATE ON %(src_schema)s.%(src_tbl)s
+ FOR EACH ROW EXECUTE PROCEDURE audit.ft_upd_%(src_tbl)s();
+"""
+
+SQL_TEMPLATE_UPDATE_NO_UPDATER_CHECK = u"""DROP FUNCTION IF EXISTS audit.ft_upd_%(src_tbl)s() cascade;
+
+CREATE FUNCTION audit.ft_upd_%(src_tbl)s()
+ RETURNS trigger
+ LANGUAGE 'plpgsql'
+ SECURITY DEFINER
+ AS '
+BEGIN
+ NEW.row_version := OLD.row_version + 1;
+ NEW.modified_when := CURRENT_TIMESTAMP;
+ NEW.modified_by := SESSION_USER;
+ INSERT INTO audit.%(log_tbl)s (
+ orig_version, orig_when, orig_by, orig_tableoid, audit_action,
+ %(cols_clause)s
+ ) VALUES (
+ OLD.row_version, OLD.modified_when, OLD.modified_by, TG_RELID, TG_OP,
+ %(vals_clause)s
+ );
+ return NEW;
+END;';
+
+CREATE TRIGGER zt_upd_%(src_tbl)s
+ BEFORE UPDATE ON %(src_schema)s.%(src_tbl)s
+ FOR EACH ROW EXECUTE PROCEDURE audit.ft_upd_%(src_tbl)s();
+"""
+
+# delete
+SQL_TEMPLATE_DELETE = u"""DROP FUNCTION IF EXISTS audit.ft_del_%(src_tbl)s() cascade;
+
+CREATE FUNCTION audit.ft_del_%(src_tbl)s()
+ RETURNS trigger
+ LANGUAGE 'plpgsql'
+ SECURITY DEFINER
+ AS '
+DECLARE
+ _is_allowed_deleter boolean;
+BEGIN
+ -- is the session user allowed to delete data ?
+ SELECT gm.account_is_dbowner_or_staff(SESSION_USER) INTO STRICT _is_allowed_deleter;
+ IF _is_allowed_deleter IS FALSE THEN
+ RAISE EXCEPTION
+ ''DELETE: gm.account_is_dbowner_or_staff(NAME): <%%> is neither database owner, nor <postgres>, nor on staff'', SESSION_USER
+ USING ERRCODE = ''integrity_constraint_violation''
+ ;
+ return OLD;
+ END IF;
+
+ INSERT INTO audit.%(log_tbl)s (
+ orig_version, orig_when, orig_by, orig_tableoid, audit_action,
+ %(cols_clause)s
+ ) VALUES (
+ OLD.row_version, OLD.modified_when, OLD.modified_by, TG_RELID, TG_OP,
+ %(vals_clause)s
+ );
+ return OLD;
+END;';
+
+CREATE TRIGGER zt_del_%(src_tbl)s
+ BEFORE DELETE ON %(src_schema)s.%(src_tbl)s
+ FOR EACH ROW EXECUTE PROCEDURE audit.ft_del_%(src_tbl)s();
+"""
+
+SQL_TEMPLATE_DELETE_NO_DELETER_CHECK = u"""DROP FUNCTION IF EXISTS audit.ft_del_%(src_tbl)s() cascade;
+
+CREATE FUNCTION audit.ft_del_%(src_tbl)s()
+ RETURNS trigger
+ LANGUAGE 'plpgsql'
+ SECURITY DEFINER
+ AS '
+BEGIN
+ INSERT INTO audit.%(log_tbl)s (
+ orig_version, orig_when, orig_by, orig_tableoid, audit_action,
+ %(cols_clause)s
+ ) VALUES (
+ OLD.row_version, OLD.modified_when, OLD.modified_by, TG_RELID, TG_OP,
+ %(vals_clause)s
+ );
+ return OLD;
+END;';
+
+CREATE TRIGGER zt_del_%(src_tbl)s
+ BEFORE DELETE ON %(src_schema)s.%(src_tbl)s
+ FOR EACH ROW EXECUTE PROCEDURE audit.ft_del_%(src_tbl)s();
+"""
+
+# we cannot do this because NOT VALID only applies at the time the
+# constraint is added; the FK would still be enforced during later
+# INSERTs/UPDATEs
+#SQL_TEMPLATE_FK_MODIFIED_BY = u"""ALTER TABLE %(src_schema)s.%(src_tbl)s
+# DROP CONSTRAINT IF EXISTS fk_%(src_schema)s_%(src_tbl)s_fk_modified_by CASCADE;
+#
+#-- this is set NOT VALID because it only serves to tell pg_dump
+#-- to dump dem.staff before other tables such that we do not run
+#-- into trouble with checking gm.is_dbowner_or_staff(SESSION_USER)
+#ALTER TABLE %(src_schema)s.%(src_tbl)s
+# ADD CONSTRAINT fk_%(src_schema)s_%(src_tbl)s_fk_modified_by
+# FOREIGN KEY (modified_by)
+# REFERENCES dem.staff(db_user)
+# ON UPDATE RESTRICT
+# ON DELETE RESTRICT
+# NOT VALID;"""
+#
+#SQL_TEMPLATE_DEM_STAFF_FK = u"""
+#ALTER TABLE dem.staff
+# DROP CONSTRAINT IF EXISTS fk_dem_staff_fk_modified_by CASCADE;
+#"""
+
+SQL_TEMPLATE_CREATE_AUDIT_TRAIL_TABLE = u"""
+create table %(log_schema)s.%(log_tbl)s (
+ %(log_cols)s
+) inherits (%(log_schema)s.%(log_base_tbl)s);
+
+COMMENT ON COLUMN %(log_schema)s.%(log_tbl)s.orig_version is
+ 'the .row_version in the original row before the audited action took place, should be equal to .row_version';
+
+COMMENT ON COLUMN %(log_schema)s.%(log_tbl)s.orig_when is
+ 'the .modified_when in the original row before the audited action took place, should be equal to .modified_when';
+
+COMMENT ON COLUMN %(log_schema)s.%(log_tbl)s.orig_by is
+ 'the .modified_by in the original row before the audited action took place, should be equal to .modified_by';
+
+COMMENT ON COLUMN %(log_schema)s.%(log_tbl)s.orig_tableoid is
+ 'the TG_RELID when the audit trigger was run';
+"""
+
+#grant insert on %s.%s to group "gm-public"
+
+#------------------------------------------------------------------
+#------------------------------------------------------------------
+def audit_trail_table_ddl(aCursor=None, schema=None, table2audit=None):
+
+ audit_trail_table = '%s%s' % (LOG_TABLE_PREFIX, table2audit)
+
+ # which columns to potentially audit
+ cols2potentially_audit = gmPG2.get_col_defs(link_obj = aCursor, schema = schema, table = table2audit)
+
+ # which to skip
+ cols2skip = gmPG2.get_col_names(link_obj = aCursor, schema = AUDIT_SCHEMA, table = AUDIT_FIELDS_TABLE)
+
+ # which ones to really audit
+ cols2really_audit = []
+ for col in cols2potentially_audit[0]:
+ if col in cols2skip:
+ continue
+ cols2really_audit.append("\t%s %s" % (col, cols2potentially_audit[1][col]))
+
+ # does the audit trail target table exist ?
+ exists = gmPG2.table_exists(aCursor, AUDIT_SCHEMA, audit_trail_table)
+ if exists is None:
+ _log.error('cannot check existence of table [audit.%s]' % audit_trail_table)
+ return None
+
+ if exists:
+ _log.info('audit trail table [audit.%s] already exists' % audit_trail_table)
+ # sanity check table structure
+ currently_audited_cols = gmPG2.get_col_defs(link_obj = aCursor, schema = AUDIT_SCHEMA, table = audit_trail_table)
+ currently_audited_cols = [ '\t%s %s' % (c, currently_audited_cols[1][c]) for c in currently_audited_cols[0] ]
+ for col in cols2really_audit:
+ try:
+ currently_audited_cols.index(col)
+ except ValueError:
+ _log.error('table structure incompatible: column ".%s" not found in audit table' % col.strip())
+ _log.error('%s.%s:' % (schema, table2audit))
+ _log.error('%s' % ','.join(cols2really_audit))
+ _log.error('%s.%s:' % (AUDIT_SCHEMA, audit_trail_table))
+ _log.error('%s' % ','.join(currently_audited_cols))
+ return None
+ return []
+
+ # must create audit trail table
+ _log.info('no audit trail table found for [%s.%s]' % (schema, table2audit))
+ _log.info('creating audit trail table [audit.%s]' % audit_trail_table)
+
+ args = {
+ 'log_schema': AUDIT_SCHEMA,
+ 'log_base_tbl': AUDIT_TRAIL_PARENT_TABLE,
+ 'log_tbl': audit_trail_table,
+ 'log_cols': u',\n '.join(cols2really_audit)
+ }
+ return [SQL_TEMPLATE_CREATE_AUDIT_TRAIL_TABLE % args, '']
+
+#------------------------------------------------------------------
+def trigger_ddl(aCursor='default', schema=AUDIT_SCHEMA, audited_table=None):
+
+ target_columns = gmPG2.get_col_names(link_obj = aCursor, schema = schema, table = audited_table)
+ columns2skip = gmPG2.get_col_names(link_obj = aCursor, schema = AUDIT_SCHEMA, table = AUDIT_FIELDS_TABLE)
+ columns = []
+ values = []
+ for column in target_columns:
+ if column not in columns2skip:
+ columns.append(column)
+ values.append(u'OLD.%s' % column)
+
+ args = {
+ 'src_tbl': audited_table,
+ 'src_schema': schema,
+ 'log_tbl': u'%s%s' % (LOG_TABLE_PREFIX, audited_table),
+ 'cols_clause': u', '.join(columns),
+ 'vals_clause': u', '.join(values)
+ }
+
+ modified_by_func_exists = gmPG2.function_exists(link_obj = aCursor, schema = u'gm', function = u'account_is_dbowner_or_staff')
+
+ ddl = []
+ if modified_by_func_exists:
+ ddl.append(SQL_TEMPLATE_INSERT % args)
+ ddl.append(u'')
+ ddl.append(SQL_TEMPLATE_UPDATE % args)
+ ddl.append(u'')
+ ddl.append(SQL_TEMPLATE_DELETE % args)
+ #ddl.append(u'')
+ #ddl.append(SQL_TEMPLATE_FK_MODIFIED_BY % args)
+ else:
+ # the *_NO_*_CHECK variants are needed for pre-v21 databases
+ # where gm.account_is_dbowner_or_staff() doesn't exist yet
+ ddl.append(SQL_TEMPLATE_INSERT_NO_INSERTER_CHECK % args)
+ ddl.append(u'')
+ ddl.append(SQL_TEMPLATE_UPDATE_NO_UPDATER_CHECK % args)
+ ddl.append(u'')
+ ddl.append(SQL_TEMPLATE_DELETE_NO_DELETER_CHECK % args)
+ ddl.append(u'')
+
+ return ddl
+
+#------------------------------------------------------------------
+def create_audit_ddl(aCursor):
+ # get list of all marked tables
+ # we could also get the child tables for audit.audit_fields
+ # but we would have to potentially parse down several levels
+# of inheritance (such as with clin.clin_root_item) to find
+ # the actual leaf table to audit
+ cmd = u"select schema, table_name from audit.audited_tables"
+ rows, idx = gmPG2.run_ro_queries(link_obj = aCursor, queries = [{'cmd': cmd}])
+ if len(rows) == 0:
+ _log.info('no tables to audit')
+ return None
+ _log.debug('the following tables will be audited:')
+ _log.debug(rows)
+ ddl = []
+ ddl.append('\set check_function_bodies 1\n')
+ ddl.append('set check_function_bodies to on;\n\n')
+
+ # for each marked table
+ for row in rows:
+
+ if not gmPG2.table_exists(link_obj = aCursor, schema = row['schema'], table = row['table_name']):
+ _log.error('table to audit (%s) does not exist', row)
+ return None
+
+ # create log table if necessary
+ audit_trail_ddl = audit_trail_table_ddl(aCursor = aCursor, schema = row['schema'], table2audit = row['table_name'])
+ if audit_trail_ddl is None:
+ _log.error('cannot generate audit trail DDL for audited table [%s]' % row['table_name'])
+ return None
+ ddl.extend(audit_trail_ddl)
+ if len(audit_trail_ddl) != 0:
+ ddl.append('-- ----------------------------------------------')
+
+ # create functions and triggers on log table
+ ddl.extend(trigger_ddl(aCursor = aCursor, schema = row['schema'], audited_table = row['table_name']))
+ ddl.append('-- ----------------------------------------------')
+
+ #ddl.append(SQL_TEMPLATE_DEM_STAFF_FK)
+
+ return ddl
+
+#==================================================================
+# main
+#------------------------------------------------------------------
+if __name__ == "__main__" :
+ tmp = ''
+ try:
+ tmp = raw_input("audit trail parent table [%s]: " % AUDIT_TRAIL_PARENT_TABLE)
+	except KeyboardInterrupt:
+ pass
+ if tmp != '':
+ AUDIT_TRAIL_PARENT_TABLE = tmp
+
+ conn = gmPG2.get_connection(readonly=False, pooled=False)
+ curs = conn.cursor()
+
+ schema = create_audit_ddl(curs)
+
+ curs.close()
+ conn.close()
+
+ if schema is None:
+ print "error creating schema"
+ sys.exit(-1)
+
+	f = io.open('audit-trail-schema.sql', mode = 'wt', encoding = 'utf8')
+	for line in schema:
+		f.write(u"%s;\n" % line)
+ f.close()
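
To make the template mechanism concrete: each SQL_TEMPLATE_* constant above is expanded per audited table via %-substitution. A toy expansion, trimmed to the shared audit-log INSERT clause; 'log_allergy' and the column names are invented for the example, this is not verbatim output of the script:

# toy %-expansion of the audit-log INSERT clause used by the templates above
template = (
	'INSERT INTO audit.%(log_tbl)s (\n'
	'	orig_version, orig_when, orig_by, orig_tableoid, audit_action,\n'
	'	%(cols_clause)s\n'
	') VALUES (\n'
	'	OLD.row_version, OLD.modified_when, OLD.modified_by, TG_RELID, TG_OP,\n'
	'	%(vals_clause)s\n'
	');'
)
args = {
	'log_tbl': 'log_allergy',						# LOG_TABLE_PREFIX + audited table name (hypothetical)
	'cols_clause': 'pk, fk_patient, substance',		# made-up audited columns
	'vals_clause': 'OLD.pk, OLD.fk_patient, OLD.substance'
}
print(template % args)
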
=====================================
server/bootstrap/update_db-v21_v22.conf
=====================================
--- a/server/bootstrap/update_db-v21_v22.conf
+++ b/server/bootstrap/update_db-v21_v22.conf
@@ -195,6 +195,7 @@ $superuser schema$
script base directory = ../sql/v21-v22/python/
data import scripts = $data import scripts$
v22-import-form-templates.py
+v22-2-fixup-form-templates.py
$data import scripts$
@@ -212,9 +213,9 @@ v_all_persons::::select count(1) from dem.v_persons
v_active_persons::::select count(1) from dem.v_active_persons
select count(1) from dem.v_active_persons -- new
staff::::select count(1) from dem.staff -- old
- select count(1) from dem.v_staff where not person_is_deleted -- new
+ select count(1) from dem.v_staff -- new
v_staff::::select count(1) from dem.v_staff
- select count(1) from dem.v_staff -- new
+ select count(1) from dem.v_staff
addresses::::select count(1) from dem.address
select count(1) from dem.address -- new
unique URBs with ZIP::::select count(1) from dem.v_uniq_zipped_urbs -- old
@@ -317,10 +318,12 @@ paperwork templates::::select count(1) from ref.paperwork_templates
select count(1) from ref.paperwork_templates
automatic hints::::select count(1) from ref.auto_hint
select count(1) - 1 from ref.auto_hint
--- do NOT try to check the number of suppressed hints because
--- even though we do know the number of *hints* that will have
--- changed we simply cannot know the number of suppressions that
--- will be lifted by those changes
+-- do NOT try to check the number of suppressed hints because even though
+--
+-- we do know the number of *hints* that will have changed we simply cannot
+--
+-- know the number of suppressions that will be lifted by those changes
+--
--suppressed hints::::select count(1) from clin.suppressed_hint
-- select count(1) from clin.suppressed_hint
raw keyword expansions::::select count(1) from ref.keyword_expansion
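
A note on the plausibility-check entries above: judging by the examples, each check is a pair of lines, "label::::<count query against the old database>" followed by the count query to run against the upgraded database, and the upgrade is considered consistent when both counts agree. A toy reading of that format (inferred from the examples here, not the bootstrapper's actual parser):

# inferred format: "<label>::::<query on old DB>" followed by "<query on new DB>"
lines = [
	'staff::::select count(1) from dem.staff -- old',
	'select count(1) from dem.v_staff -- new',
]
checks = []
for old_line, new_line in zip(lines[0::2], lines[1::2]):
	label, query_old = old_line.split('::::', 1)
	checks.append((label, query_old.strip(), new_line.strip()))
print(checks)
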
=====================================
server/doc/schema/gnumed-entire_schema.html
=====================================
--- a/server/doc/schema/gnumed-entire_schema.html
+++ b/server/doc/schema/gnumed-entire_schema.html
@@ -112,7 +112,7 @@
<body>
<!-- Primary Index -->
- <p><br><br>Dumped on 2018-04-05</p>
+ <p><br><br>Dumped on 2018-05-10</p>
<h1><a name="index">Index of database - gnumed_v22</a></h1>
<ul>
=====================================
server/sql/v21-v22/data/v22-Begleitbrief.tex
=====================================
--- a/server/sql/v21-v22/data/v22-Begleitbrief.tex
+++ b/server/sql/v21-v22/data/v22-Begleitbrief.tex
@@ -73,7 +73,7 @@
$<praxis::%(praxis)s, %(branch)s::120>$\\
$<praxis_address::%(street)s %(number)s (%(subunit)s), %(postcode)s %(urb)s::60>$
}
-\setkomavar{fromlogo}{$<data_snippet::praxis-logo//\includegraphics[width=30mm]{%s}//image/png//.png::250>$}%$
+\setkomavar{fromlogo}{$<data_snippet::praxis-logo//\includegraphics[width=30mm]{%s}//image/png//.png::250>$}%$ -- this dollarsign unconfuses mcedit syntax coloring
\setkomavar{backaddress}{$<current_provider_firstnames::::1>$.$<current_provider_lastnames::::>$\\$<praxis_address::%(street)s %(number)s\\%(postcode)s %(urb)s::60>$}
@@ -86,15 +86,15 @@
% Betreff, nämlich Patientendaten
-\setkomavar{subject}[]{
- $<free_text::Betreff für den Brief::120>$\\
+\setkomavar{subject}[]{%
+ $<<free_text::Betreff für den Brief::120>>$\\
Patient: $<title::::>$ $<firstname::::>$ $<lastname::::>$ (geb $<date_of_birth::%d.%B %Y::>$)\\
Adresse: $<adr_street::home::>$ $<adr_number::home::>$, $<adr_postcode::home::>$ $<adr_location::home::>$
}
% Unterschrift
-\setkomavar{signature}{
+\setkomavar{signature}{%
\centering
$<data_snippet::autograph-$<<current_provider_lastnames::::>>$_$<<current_provider_firstnames::::>>$//\includegraphics[width=30mm]{%s}\\//image/png//.png::250>$\rule{\widthof{\tiny (Der Unterzeichner haftet nicht für unsignierte Änderungen des Inhalts.)}}{.1pt}\\
$<current_provider_name::%(title)s %(firstnames)s %(lastnames)s::>$\\
@@ -110,7 +110,7 @@
% Fußzeile 1.Seite
\setkomavar{firstfoot}{%
\rule{\textwidth}{.3pt}
- \parbox[t]{\textwidth}{
+ \parbox[t]{\textwidth}{%
\tiny
\begin{tabular}[t]{ll}%
% \multicolumn{2}{l}{Erreichbarkeit:}\\
@@ -152,22 +152,29 @@
$<receiver_street::::>$\ $<receiver_number::::>$\ $<receiver_subunit::::>$\\
$<receiver_postcode::::>$\ $<receiver_location::::>$\\
$<receiver_country::::>$
-}%
+}
% Anrede
-\opening{$<free_text::Anrede, z.B. "Sehr geehrte Frau" (wird automatisch ergänzt durch " <Name des Empfängers>,")::140>$ $<receiver_name::::>$,}
+\opening{%
+ $<<free_text::Anrede, z.B. "Sehr geehrte Frau" (wird automatisch ergänzt durch " $<receiver_name::::>$,")::140>>$ $<receiver_name::::>$,
+}
% Brieftext
\selectlanguage{ngerman}
-$<free_text::Der eigentliche Brieftext (in LaTeX !)::>$
+$<<free_text::Der eigentliche Brieftext (in LaTeX)::>>$
+
+
+\closing{%
+ $<<free_text::Grußformel, z.B. "Mit freundlichen Grüßen" (ohne Komma am Ende)::140>>$,
+}
-\closing{$<free_text::Grußformel, z.B. "Mit freundlichen Grüßen" (ohne Komma am Ende)::140>$,}
% Anlagen
%\setkomavar*{enclseparator}[Anlage(n)] % Titel für Anlagebereich
-\encl{$<free_text::Liste von Anlagen::300>$}
+\encl{$<<free_text::Liste von Anlagen::300>>$}
+
% kein Verteiler
%\cc{}
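
On the $<...>$ versus $<<...>>$ change above: GNUmed expands such placeholders inside the template, and the double-bracket form lets one placeholder carry another placeholder in its arguments without the inner '>$' terminating the outer match. A toy regex illustration of why the delimiters must differ (illustrative only, not GNUmed's actual placeholder engine):

import re

text = r'$<<data_snippet::autograph-$<lastname::::>$//image/png//.png::250>>$'
inner = re.findall(r'\$<([^<>]+?)>\$', text)	# innermost placeholders only
outer = re.findall(r'\$<<(.+?)>>\$', text)		# outer placeholder spans the inner one
print(inner)	# ['lastname::::']
print(outer)	# ['data_snippet::autograph-$<lastname::::>$//image/png//.png::250']
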
=====================================
server/sql/v21-v22/fixups/v22-release_notes-fixup.sql
=====================================
--- a/server/sql/v21-v22/fixups/v22-release_notes-fixup.sql
+++ b/server/sql/v21-v22/fixups/v22-release_notes-fixup.sql
@@ -17,8 +17,17 @@ INSERT INTO dem.message_inbox (
) VALUES (
(select pk from dem.staff where db_user = 'any-doc'),
(select pk_type from dem.v_inbox_item_type where type = 'memo' and category = 'administrative'),
- 'Release Notes for GNUmed 1.7.1 (database v22.1)',
- 'GNUmed 1.7.1 Release Notes:
+ 'Release Notes for GNUmed 1.7.2 (database v22.2)',
+ 'GNUmed 1.7.2 Release Notes:
+
+ 1.7.2
+
+FIX: GTK3 related size adjustments for PatientOverview/SimpleSoap plugins
+FIX: GTK3 related bitmap adjustments
+FIX: [Save] functionality of Export Area
+FIX: placeholders $current_provider_[title/firstnames/lastnames]$
+FIX: receiver selection address list setup
+FIX: exception on creation of duplicate patient [thanks Marc]
1.7.1
@@ -28,10 +37,15 @@ IMPROVED: make DWV optional
IMPROVED: prerequisites check tool
IMPROVED: update timeline code to 1.17.0 release
+ 22.2
+
+FIX: staff/v_staff plausibility check [thanks Marc]
+FIX: LaTeX-Template for Begleitbrief
+
22.1
IMPROVED: concurrency robustness of backup/restore scripts
');
-- --------------------------------------------------------------
-select gm.log_script_insertion('v22-release_notes-fixup.sql', '22.1');
+select gm.log_script_insertion('v22-release_notes-fixup.sql', '22.2');
=====================================
server/sql/v21-v22/python/v22-2-fixup-form-templates.py
=====================================
--- /dev/null
+++ b/server/sql/v21-v22/python/v22-2-fixup-form-templates.py
@@ -0,0 +1,31 @@
+# coding: utf8
+#==============================================================
+# GNUmed database schema change script
+#
+# License: GPL v2 or later
+# Author: karsten.hilbert at gmx.net
+#
+#==============================================================
+import os
+
+from Gnumed.pycommon import gmPG2
+
+#--------------------------------------------------------------
+
+def run(conn=None):
+
+ # Begleitbrief
+ gmPG2.file2bytea (
+ query = u"""
+ UPDATE ref.paperwork_templates SET
+ data = %(data)s::bytea,
+ external_version = '22.2'
+ WHERE
+ name_long = 'Begleitbrief ohne medizinische Daten [K.Hilbert]'""",
+ filename = os.path.join('..', 'sql', 'v21-v22', 'data', 'v22-Begleitbrief.tex'),
+ conn = conn
+ )
+
+ return True
+
+#==============================================================
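
As background, gmPG2.file2bytea() used above reads the named file and binds its bytes to the %(data)s placeholder of the UPDATE. A rough stand-alone equivalent in plain psycopg2 (simplified; GNUmed's helper adds logging and error handling that this sketch omits):

import psycopg2

def file2bytea(query, filename, conn):
	# read the file and bind its contents to the %(data)s placeholder
	with open(filename, 'rb') as f:
		blob = f.read()
	curs = conn.cursor()
	curs.execute(query, {'data': psycopg2.Binary(blob)})
	curs.close()
	conn.commit()
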
View it on GitLab: https://salsa.debian.org/med-team/gnumed-server/compare/296084e80bda8b6750bec4417e89eb2888c51af2...416b4709dee41d8b2a142ec21891dd357d9986aa