[med-svn] [gnumed-server] 02/05: New upstream version 21.11

Andreas Tille tille at debian.org
Thu Dec 29 07:30:06 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository gnumed-server.

commit 6885bbfe48c6eb029140562bf5f7dd24cb55dc25
Author: Andreas Tille <tille at debian.org>
Date:   Thu Dec 29 07:59:31 2016 +0100

    New upstream version 21.11
---
 server/bootstrap/fixup_db-v21.conf                 |   1 +
 server/bootstrap/update_db-v20_v21.conf            |   1 +
 server/doc/schema/gnumed-entire_schema.html        |   2 +-
 server/gm-backup.sh                                | 112 +++++----
 server/gm-zip+sign_backups.sh                      |  57 +++--
 server/pycommon/gmNetworkTools.py                  |   3 +-
 server/pycommon/gmPG2.py                           | 143 ++++++++---
 .../v20-v21/dynamic/v21-release_notes-dynamic.sql  |  33 ++-
 .../v20-v21/fixups/v21-notifications-dynamic.sql   | 261 +++++++++++++++++++++
 9 files changed, 499 insertions(+), 114 deletions(-)

diff --git a/server/bootstrap/fixup_db-v21.conf b/server/bootstrap/fixup_db-v21.conf
index b5160c4..ad44a82 100644
--- a/server/bootstrap/fixup_db-v21.conf
+++ b/server/bootstrap/fixup_db-v21.conf
@@ -37,6 +37,7 @@ v21-AMTS_Medikationsplan-fixup.sql
 v21-ref-auto_hint-smoking_status-fixup.sql
 v21-ref-GKV_CU-fixup.sql
 v21-clin-get_hints_for_patient-fixup.sql
+v21-notifications-dynamic.sql
 $schema$
 
 #----------------------------------
diff --git a/server/bootstrap/update_db-v20_v21.conf b/server/bootstrap/update_db-v20_v21.conf
index 9b09bb2..3ac98f0 100644
--- a/server/bootstrap/update_db-v20_v21.conf
+++ b/server/bootstrap/update_db-v20_v21.conf
@@ -128,6 +128,7 @@ v21-AMTS_Medikationsplan-fixup.sql
 v21-ref-auto_hint-smoking_status-fixup.sql
 v21-ref-GKV_CU-fixup.sql
 v21-clin-get_hints_for_patient-fixup.sql
+v21-notifications-dynamic.sql
 $schema$
 
 #----------------------------------
diff --git a/server/doc/schema/gnumed-entire_schema.html b/server/doc/schema/gnumed-entire_schema.html
index c6321db..26ae867 100644
--- a/server/doc/schema/gnumed-entire_schema.html
+++ b/server/doc/schema/gnumed-entire_schema.html
@@ -112,7 +112,7 @@
   <body>
 
     <!-- Primary Index -->
-	<p><br><br>Dumped on 2016-11-07</p>
+	<p><br><br>Dumped on 2016-12-19</p>
 <h1><a name="index">Index of database - gnumed_v21</a></h1>
 <ul>
     
diff --git a/server/gm-backup.sh b/server/gm-backup.sh
index bdb39d3..dc8ad01 100755
--- a/server/gm-backup.sh
+++ b/server/gm-backup.sh
@@ -1,7 +1,6 @@
 #!/bin/bash
 
 #==============================================================
-#
 # This script creates an uncompressed, directory-style backup
 # of the database schema, data, and roles which can be used to
 # restore a GNUmed database from scratch with pg_restore.
@@ -74,7 +73,7 @@ if test -z ${GM_HOST} ; then
 	HAS_HIGHER_VER=`sudo -u postgres psql -A -t -d ${GM_DATABASE} -p ${GM_PORT} -c "SELECT exists (select 1 from pg_database where datname like 'gnumed_v%' and substring(datname from 9 for 3)::integer > '${OUR_VER}');"`
 else
 	HAS_HIGHER_VER=`sudo -u postgres psql -A -t -h ${GM_HOST} -d ${GM_DATABASE} -p ${GM_PORT} -c "SELECT exists (select 1 from pg_database where datname like 'gnumed_v%' and substring(datname from 9 for 3)::integer > '${OUR_VER}');"`
-fi;
+fi
 
 if test "${HAS_HIGHER_VER}" = "t" ; then
 	echo "Backing up database ${GM_DATABASE}."
@@ -84,7 +83,7 @@ if test "${HAS_HIGHER_VER}" = "t" ; then
 	sudo -u postgres psql -l -p ${GM_PORT} | grep gnumed_v
 	echo ""
 	echo "Make sure you really want to backup the older database !"
-fi ;
+fi
 
 
 # generate backup file name
@@ -93,7 +92,7 @@ if test -z ${GM_HOST} ; then
 	BACKUP_BASENAME="backup-${GM_DATABASE}-${INSTANCE_OWNER}-"`hostname`
 else
 	BACKUP_BASENAME="backup-${GM_DATABASE}-${INSTANCE_OWNER}-${GM_HOST}"
-fi ;
+fi
 BACKUP_FILENAME="${BACKUP_BASENAME}-${TS}"
 
 
@@ -105,80 +104,91 @@ fi
 
 
 # create dumps
+BACKUP_DATA_DIR="${BACKUP_FILENAME}.dir"
+ROLES_FILE="${BACKUP_FILENAME}-roles.sql"
 if test -z ${GM_HOST} ; then
 	# locally
 
 	# database
-	pg_dump -v --format=directory --compress=0 --column-inserts --clean --if-exists --serializable-deferrable -p ${GM_PORT} -U ${GM_DBO} -f ${BACKUP_FILENAME}.dir ${GM_DATABASE} 2> /dev/null
+	pg_dump -v --format=directory --compress=0 --column-inserts --clean --if-exists --serializable-deferrable -p ${GM_PORT} -U ${GM_DBO} -f ${BACKUP_DATA_DIR} ${GM_DATABASE} 2> /dev/null
 
 	# roles
 	# -r -> -g for older versions
-	sudo -u postgres pg_dumpall -v --roles-only -p ${GM_PORT} > ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-
-	echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- Below find a list of database roles which were in use" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- in the GNUmed database \"${GM_DATABASE}\"."            >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "--" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- Only those need to be restored to create a working"    >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- copy of your original database. All other roles can"   >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- be commented out by prepending '-- ' to the relevant"  >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- lines above."                                          >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- In particular, you will very very likely want to"      >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- comment out the 'postgres' role."                      >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-	echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
+	sudo -u postgres pg_dumpall -v --roles-only -p ${GM_PORT} > ${ROLES_FILE} 2> /dev/null
+
+	echo "" >> ${ROLES_FILE} 2> /dev/null
+	echo "-- -----------------------------------------------------" >> ${ROLES_FILE} 2> /dev/null
+	echo "-- Below find a list of database roles which were in use" >> ${ROLES_FILE} 2> /dev/null
+	echo "-- in the GNUmed database \"${GM_DATABASE}\"."            >> ${ROLES_FILE} 2> /dev/null
+	echo "--" >> ${ROLES_FILE} 2> /dev/null
+	echo "-- Only those need to be restored to create a working"    >> ${ROLES_FILE} 2> /dev/null
+	echo "-- copy of your original database. All other roles can"   >> ${ROLES_FILE} 2> /dev/null
+	echo "-- be commented out by prepending '-- ' to the relevant"  >> ${ROLES_FILE} 2> /dev/null
+	echo "-- lines above."                                          >> ${ROLES_FILE} 2> /dev/null
+	echo "-- In particular, you will very very likely want to"      >> ${ROLES_FILE} 2> /dev/null
+	echo "-- comment out the 'postgres' role."                      >> ${ROLES_FILE} 2> /dev/null
+	echo "-- -----------------------------------------------------" >> ${ROLES_FILE} 2> /dev/null
+	echo "" >> ${ROLES_FILE} 2> /dev/null
 	ROLES=`psql -A -t -d ${GM_DATABASE} -p ${GM_PORT} -U ${GM_DBO} -c "select gm.get_users('${GM_DATABASE}');"`
-	echo "-- ${ROLES}" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
+	echo "-- ${ROLES}" >> ${ROLES_FILE} 2> /dev/null
 
 else
 	# remotely
 	if ping -c 3 -i 2 ${GM_HOST} > /dev/null; then
 
 		# database
-		pg_dump -v --format=directory --compress=0 --column-inserts --clean --if-exists --serializable-deferrable -p ${GM_PORT} -U ${GM_DBO} -f ${BACKUP_FILENAME}.dir -h ${GM_HOST} ${GM_DATABASE} 2> /dev/null
+		pg_dump -v --format=directory --compress=0 --column-inserts --clean --if-exists --serializable-deferrable -p ${GM_PORT} -U ${GM_DBO} -f ${BACKUP_DATA_DIR} -h ${GM_HOST} ${GM_DATABASE} 2> /dev/null
 
 		# roles
 		# -r -> -g for older versions
-		pg_dumpall -v --roles-only -h ${GM_HOST} -p ${GM_PORT} -U postgres > ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-
-		echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- Below find a list of database roles which were in use" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- in the GNUmed database \"${GM_DATABASE}\"."            >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "--" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- Only those need to be restored to create a working"    >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- copy of your original database. All other roles can"   >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- be commented out by prepending '-- ' to the relevant"  >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- lines above."                                          >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- In particular, you will very very likely want to"      >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- comment out the 'postgres' role."                      >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "-- -----------------------------------------------------" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
-		echo "" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
+		pg_dumpall -v --roles-only -h ${GM_HOST} -p ${GM_PORT} -U postgres > ${ROLES_FILE} 2> /dev/null
+
+		echo "" >> ${ROLES_FILE} 2> /dev/null
+		echo "-- -----------------------------------------------------" >> ${ROLES_FILE} 2> /dev/null
+		echo "-- Below find a list of database roles which were in use" >> ${ROLES_FILE} 2> /dev/null
+		echo "-- in the GNUmed database \"${GM_DATABASE}\"."            >> ${ROLES_FILE} 2> /dev/null
+		echo "--" >> ${ROLES_FILE} 2> /dev/null
+		echo "-- Only those need to be restored to create a working"    >> ${ROLES_FILE} 2> /dev/null
+		echo "-- copy of your original database. All other roles can"   >> ${ROLES_FILE} 2> /dev/null
+		echo "-- be commented out by prepending '-- ' to the relevant"  >> ${ROLES_FILE} 2> /dev/null
+		echo "-- lines above."                                          >> ${ROLES_FILE} 2> /dev/null
+		echo "-- In particular, you will very very likely want to"      >> ${ROLES_FILE} 2> /dev/null
+		echo "-- comment out the 'postgres' role."                      >> ${ROLES_FILE} 2> /dev/null
+		echo "-- -----------------------------------------------------" >> ${ROLES_FILE} 2> /dev/null
+		echo "" >> ${ROLES_FILE} 2> /dev/null
 		ROLES=`psql -A -t -d ${GM_DATABASE} -p ${GM_PORT} -U ${GM_DBO} -c "select gm.get_users('${GM_DATABASE}');"`
-		echo "-- ${ROLES}" >> ${BACKUP_FILENAME}-roles.sql 2> /dev/null
+		echo "-- ${ROLES}" >> ${ROLES_FILE} 2> /dev/null
 
 	else
 		echo "Cannot ping database host ${GM_HOST}."
 		exit 1
-	fi ;
-fi ;
+	fi
+fi
 
 
-# tar and test it
+# create tar archive
+TAR_FILE="${BACKUP_FILENAME}.tar"
+tar -cf ${TAR_FILE} ${ROLES_FILE} ${BACKUP_DATA_DIR}/
+RESULT="$?"
+if test "${RESULT}" != "0" ; then
+	echo "Creating backup tar archive [${TAR_FILE}] failed (${RESULT}). Aborting."
+	exit ${RESULT}
+fi
+
+
+# test tar archive
 if test -z ${VERIFY_TAR} ; then
-	tar -cf ${BACKUP_FILENAME}.tar ${BACKUP_FILENAME}-roles.sql ${BACKUP_FILENAME}.dir/
-else
-	tar -cWf ${BACKUP_FILENAME}.tar ${BACKUP_FILENAME}-roles.sql ${BACKUP_FILENAME}.dir/
-fi ;
-if test "$?" != "0" ; then
-	echo "Creating backup tar archive [${BACKUP_FILENAME}.tar] failed. Aborting."
-	exit 1
+	tar -xOf ${TAR_FILE} > /dev/null
+	RESULT="$?"
+	if test "${RESULT}" != "0" ; then
+		echo "Verifying backup tar archive [${TAR_FILE}] failed (${RESULT}). Aborting."
+		exit ${RESULT}
+	fi
 fi
-rm --dir --recursive --one-file-system ${BACKUP_FILENAME}.dir/
-rm -f ${BACKUP_FILENAME}-roles.sql
+rm --dir --recursive --one-file-system ${BACKUP_DATA_DIR}/
+rm -f ${ROLES_FILE}
 
 
-chown ${BACKUP_OWNER} ${BACKUP_FILENAME}.tar
+chown ${BACKUP_OWNER} ${TAR_FILE}
 
 exit 0
diff --git a/server/gm-zip+sign_backups.sh b/server/gm-zip+sign_backups.sh
index add2db2..f2dd8c7 100755
--- a/server/gm-zip+sign_backups.sh
+++ b/server/gm-zip+sign_backups.sh
@@ -52,28 +52,56 @@ fi
 shopt -s -q nullglob
 
 # zip up any backups
-for BACKUP in ${BACKUP_BASENAME}-*.tar ; do
+AGGREGATE_EXIT_CODE=0
+for TAR_FILE in ${BACKUP_BASENAME}-*.tar ; do
+
+	BZ2_FILE="${TAR_FILE}.bz2"
 
 	# are the backup and ...
-	TAR_OPEN=`lsof | grep ${BACKUP}`
+	TAR_OPEN=`lsof | grep ${TAR_FILE}`
 	# ... the corresponding bz2 both open at the moment ?
-	BZ2_OPEN=`lsof | grep ${BACKUP}.bz2`
+	BZ2_OPEN=`lsof | grep ${BZ2_FILE}`
 	if test -z "${TAR_OPEN}" -a -z "${BZ2_OPEN}" ; then
 		# no: remove the bz2 and start over compressing
-		rm -f ${BACKUP}.bz2
+		rm -f ${BZ2_FILE}
 	else
 		# yes: skip to next backup
 		continue
 	fi
 
+	# verify tar archive
+	if test -z ${VERIFY_TAR} ; then
+		tar -xOf ${TAR_FILE} > /dev/null
+		RESULT="$?"
+		if test "${RESULT}" != "0" ; then
+			echo "Verifying backup tar archive [${TAR_FILE}] failed (${RESULT}). Skipping."
+			AGGREGATE_EXIT_CODE=${RESULT}
+			continue
+		fi
+	fi
+
+	# compress tar archive
 	# I have tried "xz -9 -e" and it did not make much of
 	# a difference (48 MB in a 1.2 GB backup)
-	bzip2 -zq -${COMPRESSION_LEVEL} ${BACKUP}
-	bzip2 -tq ${BACKUP}.bz2
-	# FIXME: add check for exit code
+	bzip2 -zq -${COMPRESSION_LEVEL} ${TAR_FILE}
+	RESULT="$?"
+	if test "${RESULT}" != "0" ; then
+		echo "Compressing tar archive [${TAR_FILE}] as bz2 failed (${RESULT}). Skipping."
+		AGGREGATE_EXIT_CODE=${RESULT}
+		continue
+	fi
+	# verify compressed archive
+	bzip2 -tq ${BZ2_FILE}
+	RESULT="$?"
+	if test "${RESULT}" != "0" ; then
+		echo "Verifying compressed archive [${BZ2_FILE}] failed (${RESULT}). Removing."
+		AGGREGATE_EXIT_CODE=${RESULT}
+		rm -f ${BZ2_FILE}
+		continue
+	fi
 
-	chmod ${BACKUP_MASK} ${BACKUP}.bz2
-	chown ${BACKUP_OWNER} ${BACKUP}.bz2
+	chmod ${BACKUP_MASK} ${BZ2_FILE}
+	chown ${BACKUP_OWNER} ${BZ2_FILE}
 
 	# Reed-Solomon error protection support
 #	if test -n ${ADD_ECC} ; then
@@ -85,8 +113,8 @@ for BACKUP in ${BACKUP_BASENAME}-*.tar ; do
 		LOCAL_MAILER=`which mail`
 
 		#SHA512="SHA 512:"`sha512sum -b ${BACKUP_FILENAME}.tar.bz2`
-		SHA512=`openssl dgst -sha512 -hex ${BACKUP}.bz2`
-		RMD160=`openssl dgst -ripemd160 -hex ${BACKUP}.bz2`
+		SHA512=`openssl dgst -sha512 -hex ${BZ2_FILE}`
+		RMD160=`openssl dgst -ripemd160 -hex ${BZ2_FILE}`
 
 		export REPLYTO=${SIG_RECEIVER}
 
@@ -98,8 +126,8 @@ for BACKUP in ${BACKUP_BASENAME}-*.tar ; do
 			echo "	<tan>$GNOTARY_TAN</tan>"
 			echo "	<action>notarize</action>"
 			echo "	<hashes number=\"2\">"
-			echo "		<hash file=\"${BACKUP}.bz2\" modified=\"${TS}\" algorithm=\"SHA-512\">${SHA512}</hash>"
-			echo "		<hash file=\"${BACKUP}.bz2\" modified=\"${TS}\" algorithm=\"RIPE-MD-160\">${RMD160}</hash>"
+			echo "		<hash file=\"${BZ2_FILE}\" modified=\"${TS}\" algorithm=\"SHA-512\">${SHA512}</hash>"
+			echo "		<hash file=\"${BZ2_FILE}\" modified=\"${TS}\" algorithm=\"RIPE-MD-160\">${RMD160}</hash>"
 			echo "	</hashes>"
 			echo "</message>"
 			echo " "
@@ -108,6 +136,5 @@ for BACKUP in ${BACKUP_BASENAME}-*.tar ; do
 
 done
 
-exit 0
 
-#==============================================================
+exit ${AGGREGATE_EXIT_CODE}
diff --git a/server/pycommon/gmNetworkTools.py b/server/pycommon/gmNetworkTools.py
index 73a633d..6d36bd2 100644
--- a/server/pycommon/gmNetworkTools.py
+++ b/server/pycommon/gmNetworkTools.py
@@ -202,7 +202,8 @@ def check_for_update(url=None, current_branch=None, current_version=None, consid
 
 	try:
 		remote_file = wget.urlopen(url)
-	except (wget.URLError, ValueError, OSError):
+	except (wget.URLError, ValueError, OSError, IOError):
+		# IOError: socket.error
 		_log.exception("cannot retrieve version file from [%s]", url)
 		return (None, _('Cannot retrieve version information from:\n\n%s') % url)
 
diff --git a/server/pycommon/gmPG2.py b/server/pycommon/gmPG2.py
index acc0d09..38990dc 100644
--- a/server/pycommon/gmPG2.py
+++ b/server/pycommon/gmPG2.py
@@ -1492,6 +1492,59 @@ def sanitize_pg_regex(expression=None, escape_all=False):
 		#']', '\]',			# not needed
 
 #------------------------------------------------------------------------
+def capture_cursor_state(cursor=None):
+	conn = cursor.connection
+
+	tx_status = conn.get_transaction_status()
+	if tx_status in [ psycopg2.extensions.TRANSACTION_STATUS_INERROR, psycopg2.extensions.TRANSACTION_STATUS_UNKNOWN ]:
+		isolation_level = u'tx aborted or unknown, cannot retrieve'
+	else:
+		isolation_level = conn.isolation_level
+
+	txt = u"""Link state:
+Cursor
+  identity: %s; name: %s
+  closed: %s; scrollable: %s; with hold: %s; arraysize: %s; itersize: %s;
+  last rowcount: %s; rownumber: %s; lastrowid (OID): %s;
+  last description: %s
+  statusmessage: %s
+Connection
+  identity: %s; backend pid: %s; protocol version: %s;
+  closed: %s; autocommit: %s; isolation level: %s; encoding: %s; async: %s;
+  TX status: %s; CX status: %s; executing async op: %s;
+Query
+  %s
+""" % (
+		id(cursor),
+		cursor.name,
+		cursor.closed,
+		cursor.scrollable,
+		cursor.withhold,
+		cursor.arraysize,
+		cursor.itersize,
+		cursor.rowcount,
+		cursor.rownumber,
+		cursor.lastrowid,
+		cursor.description,
+		cursor.statusmessage,
+
+		id(conn),
+		conn.get_backend_pid(),
+		conn.protocol_version,
+		conn.closed,
+		conn.autocommit,
+		isolation_level,
+		conn.encoding,
+		conn.async,
+		tx_status,
+		conn.status,
+		conn.isexecuting(),
+
+		cursor.query,
+	)
+	return txt
+
+#------------------------------------------------------------------------
 def run_ro_queries(link_obj=None, queries=None, verbose=False, return_data=True, get_col_idx=False):
 	"""Run read-only queries.
 
@@ -1504,9 +1557,9 @@ def run_ro_queries(link_obj=None, queries=None, verbose=False, return_data=True,
 	"""
 	if isinstance(link_obj, dbapi._psycopg.cursor):
 		curs = link_obj
-		curs_close = __noop
-		tx_rollback = __noop
-		readonly_rollback_just_in_case = __noop
+		curs_close = lambda :1
+		tx_rollback = lambda :1
+		readonly_rollback_just_in_case = lambda :1
 	elif isinstance(link_obj, dbapi._psycopg.connection):
 		curs = link_obj.cursor()
 		curs_close = curs.close
@@ -1517,7 +1570,7 @@ def run_ro_queries(link_obj=None, queries=None, verbose=False, return_data=True,
 			# do not rollback readonly queries on passed-in readwrite
 			# connections just in case because they may have already
 			# seen fully legitimate write action which would get lost
-			readonly_rollback_just_in_case = __noop
+			readonly_rollback_just_in_case = lambda :1
 	elif link_obj is None:
 		conn = get_connection(readonly=True, verbose=verbose)
 		curs = conn.cursor()
@@ -1541,14 +1594,10 @@ def run_ro_queries(link_obj=None, queries=None, verbose=False, return_data=True,
 		try:
 			curs.execute(query['cmd'], args)
 			if verbose:
-				_log.debug('ran query: [%s]', curs.query)
-				if curs.statusmessage != u'':
-					_log.debug('PG status message: %s', curs.statusmessage)
-				_log.debug('cursor description: %s', str(curs.description))
+				_log.debug(capture_cursor_state(curs))
 		except dbapi.Error as pg_exc:
-			_log.error('query failed: [%s]', curs.query)
-			if curs.statusmessage != u'':
-				_log.error('PG status message: %s', curs.statusmessage)
+			_log.error('query failed in RO connection')
+			_log.error(capture_cursor_state(curs))
 			pg_exc = make_pg_exception_fields_unicode(pg_exc)
 			_log.error('PG error code: %s', pg_exc.pgcode)
 			if pg_exc.pgerror is not None:
@@ -1577,9 +1626,8 @@ def run_ro_queries(link_obj=None, queries=None, verbose=False, return_data=True,
 				)
 			raise
 		except:
-			_log.error('query failed: [%s]', curs.query)
-			if curs.statusmessage != u'':
-				_log.error('PG status message: %s', curs.statusmessage)
+			_log.exception('query failed in RO connection')
+			_log.error(capture_cursor_state(curs))
 			try:
 				curs_close()
 			except dbapi.InterfaceError:
@@ -1649,21 +1697,23 @@ def run_rw_queries(link_obj=None, queries=None, end_tx=False, return_data=None,
 			* for <index> see <get_col_idx>
 	"""
 	if isinstance(link_obj, dbapi._psycopg.cursor):
-		conn_close = __noop
-		conn_commit = __noop
-		tx_rollback = __noop
+		conn_close = lambda :1
+		conn_commit = lambda :1
+		tx_rollback = lambda :1
 		curs = link_obj
-		curs_close = __noop
+		curs_close = lambda :1
+		notices_accessor = curs.connection
 	elif isinstance(link_obj, dbapi._psycopg.connection):
-		conn_close = __noop
+		conn_close = lambda :1
 		if end_tx:
 			conn_commit = link_obj.commit
 			tx_rollback = link_obj.rollback
 		else:
-			conn_commit = __noop
-			tx_rollback = __noop
+			conn_commit = lambda :1
+			tx_rollback = lambda :1
 		curs = link_obj.cursor()
 		curs_close = curs.close
+		notices_accessor = link_obj
 	elif link_obj is None:
 		conn = get_connection(readonly=False)
 		conn_close = conn.close
@@ -1671,6 +1721,7 @@ def run_rw_queries(link_obj=None, queries=None, end_tx=False, return_data=None,
 		tx_rollback = conn.rollback
 		curs = conn.cursor()
 		curs_close = curs.close
+		notices_accessor = conn
 	else:
 		raise ValueError('link_obj must be cursor, connection or None but not [%s]' % link_obj)
 
@@ -1684,20 +1735,23 @@ def run_rw_queries(link_obj=None, queries=None, end_tx=False, return_data=None,
 			args = None
 		try:
 			curs.execute(query['cmd'], args)
+			if verbose:
+				_log.debug(capture_cursor_state(curs))
+			for notice in notices_accessor.notices:
+				_log.debug(notice.strip(u'\n').strip(u'\r'))
+			del notices_accessor.notices[:]
+		# DB related exceptions
 		except dbapi.Error as pg_exc:
-			_log.error('RW query failed: [%s]', curs.query)
-			if curs.statusmessage != u'':
-				_log.error('PG status message: %s', curs.statusmessage)
+			_log.error('query failed in RW connection')
+			_log.error(capture_cursor_state(curs))
+			for notice in notices_accessor.notices:
+				_log.error(notice.strip(u'\n').strip(u'\r'))
+			del notices_accessor.notices[:]
 			pg_exc = make_pg_exception_fields_unicode(pg_exc)
 			_log.error(u'PG error code: %s', pg_exc.pgcode)
 			if pg_exc.pgerror is not None:
 				_log.error(u'PG error message: %s', pg_exc.u_pgerror)
-			try:
-				curs_close()
-				tx_rollback()			# just for good measure
-				conn_close()
-			except dbapi.InterfaceError:
-				_log.exception('cannot cleanup')
+			# privilege problem
 			if pg_exc.pgcode == sql_error_codes.INSUFFICIENT_PRIVILEGE:
 				details = u'Query: [%s]' % curs.query.strip().strip(u'\n').strip().strip(u'\n')
 				if curs.statusmessage != u'':
@@ -1709,15 +1763,34 @@ def run_rw_queries(link_obj=None, queries=None, end_tx=False, return_data=None,
 					msg = u'[%s]' % pg_exc.pgcode
 				else:
 					msg = u'[%s]: %s' % (pg_exc.pgcode, pg_exc.u_pgerror)
+				try:
+					curs_close()
+					tx_rollback()			# just for good measure
+					conn_close()
+				except dbapi.InterfaceError:
+					_log.exception('cannot cleanup')
 				raise gmExceptions.AccessDenied (
 					msg,
 					source = u'PostgreSQL',
 					code = pg_exc.pgcode,
 					details = details
 				)
+			# other problem
+			gmLog2.log_stack_trace()
+			try:
+				curs_close()
+				tx_rollback()			# just for good measure
+				conn_close()
+			except dbapi.InterfaceError:
+				_log.exception('cannot cleanup')
 			raise
+		# other exception
 		except:
-			_log.exception('error running RW query')
+			_log.exception('error running query in RW connection')
+			_log.error(capture_cursor_state(curs))
+			for notice in notices_accessor.notices:
+				_log.debug(notice.strip(u'\n').strip(u'\r'))
+			del notices_accessor.notices[:]
 			gmLog2.log_stack_trace()
 			try:
 				curs_close()
@@ -1725,7 +1798,6 @@ def run_rw_queries(link_obj=None, queries=None, end_tx=False, return_data=None,
 				conn_close()
 			except dbapi.InterfaceError:
 				_log.exception('cannot cleanup')
-				raise
 			raise
 
 	data = None
@@ -2776,6 +2848,10 @@ SELECT to_timestamp (foofoo,'YYMMDD.HH24MI') FROM (
 		print get_index_name(indexed_table = 'clin.vaccination', indexed_column = 'fk_episode')
 
 	#--------------------------------------------------------------------
+	def test_faulty_SQL():
+		run_rw_queries(queries = [{'cmd': u'SELEC 1'}])
+
+	#--------------------------------------------------------------------
 	# run tests
 	#test_get_connection()
 	#test_exceptions()
@@ -2799,6 +2875,7 @@ SELECT to_timestamp (foofoo,'YYMMDD.HH24MI') FROM (
 	#test_file2bytea()
 	#test_file2bytea_overlay()
 	#test_file2bytea_copy_from()
-	test_file2bytea_lo()
+	#test_file2bytea_lo()
+	test_faulty_SQL()
 
 # ======================================================================
diff --git a/server/sql/v20-v21/dynamic/v21-release_notes-dynamic.sql b/server/sql/v20-v21/dynamic/v21-release_notes-dynamic.sql
index 042ba69..a5b55b8 100644
--- a/server/sql/v20-v21/dynamic/v21-release_notes-dynamic.sql
+++ b/server/sql/v20-v21/dynamic/v21-release_notes-dynamic.sql
@@ -17,25 +17,32 @@ INSERT INTO dem.message_inbox (
 ) VALUES (
 	(select pk from dem.staff where db_user = 'any-doc'),
 	(select pk_type from dem.v_inbox_item_type where type = 'memo' and category = 'administrative'),
-	'Release Notes for GNUmed 1.6.10 (database v21.10)',
-	'GNUmed 1.6.10 Release Notes:
+	'Release Notes for GNUmed 1.6.11 (database v21.11)',
+	'GNUmed 1.6.11 Release Notes:
 
-	1.6.10
+	1.6.11
 
-FIX: more faults with dynamic hint detection
-FIX: exception on verifying substance intake EA
-FIX: failure to download studies from early Orthanc versions
-FIX: failure to create BMP when no allergy check date available
+IMPROVED: edit area refresh on first setting data
+IMPROVED: DB link error logging
+IMPROVED: suppressed hints display in patient overview
+IMPROVED: sorting of Hx items in patient overview
+IMPROVED: use of pdfinfo in gm-describe_file
 
-IMPROVED: LaTeX formatting of current medications
+FIX: stall of gm-create_datamatrix in swap storm
+FIX: BMP creation without substance intakes
+FIX: missing quotes in BMP datafile [thanks Moritz]
+FIX: failure to sometimes store progress notes [thanks Marc]
+FIX: exception on double-clicking document tree label node
+FIX: exception on switching to drug database frontend [thanks a sk_SK]
+FIX: exception on saving hospital stay [thanks a sk_SK]
+FIX: exception on checking for upgrade [thanks Philipp]
 
-NEW: placeholders $<bill_adr_*>$ for accessing the address of a bill
-NEW: --wxp=2|3 command line option
+	21.11
 
-	21.10
+IMPROVED: backup scripts error checking
 
-FIX: clin.get_hints_for_patient()
+FIX: serialization failures due to table mod announcement triggers
 ');
 
 -- --------------------------------------------------------------
-select gm.log_script_insertion('v21-release_notes-dynamic.sql', '21.10');
+select gm.log_script_insertion('v21-release_notes-dynamic.sql', '21.11');
diff --git a/server/sql/v20-v21/fixups/v21-notifications-dynamic.sql b/server/sql/v20-v21/fixups/v21-notifications-dynamic.sql
new file mode 100644
index 0000000..67f906d
--- /dev/null
+++ b/server/sql/v20-v21/fixups/v21-notifications-dynamic.sql
@@ -0,0 +1,261 @@
+-- ==============================================================
+-- GNUmed database schema change script
+--
+-- License: GPL v2 or later
+-- Author: Karsten.Hilbert at gmx.net
+--
+-- ==============================================================
+\set ON_ERROR_STOP 1
+
+--set default_transaction_read_only to off;
+set check_function_bodies to on;
+
+-- --------------------------------------------------------------
+create or replace function gm.trf_announce_table_ins_upd()
+	returns trigger
+	language 'plpgsql'
+	as '
+declare
+	_payload text;
+	_pk_accessor_SQL text;
+	_pk_col_val integer;
+	_identity_accessor_SQL text;
+	_pk_identity integer;
+begin
+	_pk_accessor_SQL := TG_ARGV[1];
+	EXECUTE _pk_accessor_SQL INTO STRICT _pk_col_val USING NEW;
+	_payload := ''operation='' || TG_OP || ''::'' || TG_ARGV[0] || ''::row PK='' || coalesce(_pk_col_val::text, ''NULL'');
+
+	_identity_accessor_SQL := TG_ARGV[2];
+	if _identity_accessor_SQL <> ''<NULL>'' then
+		EXECUTE _identity_accessor_SQL INTO STRICT _pk_identity USING NEW;
+		_payload := _payload || ''::person PK='' || coalesce(_pk_identity::text, ''NULL'');
+	end if;
+
+	perform pg_notify(''gm_table_mod'', _payload);
+	return NULL;
+end;
+';
+
+
+comment on function gm.trf_announce_table_ins_upd() is
+'Trigger function announcing an INSERT or UPDATE to a table.
+
+sends signal: gm_table_mod
+payload:
+	operation=INSERT/UPDATE,
+	table=the table that is updated,
+	PK name=the name of the PK column of the table (requires single column PKs),
+	row PK=the PK of the affected row,
+	person PK=the PK of the affected person,
+';
+
+-- --------------------------------------------------------------
+create or replace function gm.trf_announce_table_del()
+	returns trigger
+	language 'plpgsql'
+	as '
+declare
+	_payload text;
+	_pk_accessor_SQL text;
+	_pk_col_val integer;
+	_identity_accessor_SQL text;
+	_pk_identity integer;
+begin
+	_pk_accessor_SQL := TG_ARGV[1];
+	EXECUTE _pk_accessor_SQL INTO STRICT _pk_col_val USING OLD;
+	_payload := TG_ARGV[0] || ''::row PK='' || coalesce(_pk_col_val::text, ''NULL'');
+
+	_identity_accessor_SQL := TG_ARGV[2];
+	if _identity_accessor_SQL <> ''<NULL>'' then
+		--raise notice ''%.%: %'', TG_TABLE_SCHEMA, TG_TABLE_NAME, _identity_accessor_SQL;
+		EXECUTE _identity_accessor_SQL INTO STRICT _pk_identity USING OLD;
+		_payload := _payload || ''::person PK='' || coalesce(_pk_identity::text, ''NULL'');
+	end if;
+
+	perform pg_notify(''gm_table_mod'', _payload);
+	return NULL;
+end;
+';
+
+
+comment on function gm.trf_announce_table_del() is
+'Trigger function announcing a DELETE on a table.
+
+sends signal: gm_table_mod
+payload:
+	operation=DELETE,
+	table=the table that is updated,
+	PK name=the name of the PK column of the table (requires single column PKs),
+	row PK=the PK of the affected row,
+	person PK=the PK of the affected person,
+';
+
+-- --------------------------------------------------------------
+create or replace function gm.create_table_mod_triggers(_schema_name name, _table_name name, _drop_old_triggers boolean)
+	returns boolean
+	language plpgsql
+	as '
+DECLARE
+	_qualified_table text;
+	_msg text;
+	_payload text;
+	_PK_col_name text;
+	_pk_accessor_SQL text;
+	_accessor_col text;
+	_col_candidate text;
+	_identity_accessor_SQL text;
+	_cmd text;
+BEGIN
+	_qualified_table := _schema_name || ''.'' || _table_name;
+	raise notice ''gm.create_table_mod_triggers(): %'', _qualified_table;
+	-- verify table exists
+	if not exists(select 1 from information_schema.tables where table_schema = _schema_name and table_name = _table_name) then
+		raise warning ''gm.create_table_mod_triggers(): table <%> does not exist'', _qualified_table;
+		raise exception undefined_table;
+		return false;
+	end if;
+
+	-- find PK column
+	select
+		pg_attribute.attname into _PK_col_name
+	from
+		pg_index, pg_class, pg_attribute
+	where
+		pg_class.oid = _qualified_table::regclass
+			AND
+		indrelid = pg_class.oid
+			AND
+		pg_attribute.attrelid = pg_class.oid
+			AND
+		pg_attribute.attnum = any(pg_index.indkey)
+			AND
+		indisprimary;
+	if _PK_col_name is NULL then
+		raise warning ''gm.create_table_mod_triggers(): table <%> lacks a primary key'', _qualified_table;
+		raise exception undefined_column;
+		return false;
+	end if;
+
+	_pk_accessor_SQL := ''select $1.'' || _PK_col_name;
+
+	-- find identity accessor
+	-- special case
+	if _qualified_table = ''dem.identity'' then
+		_identity_accessor_SQL := ''select $1.pk'';
+	else
+		-- look for columns by which to retrieve affected person
+		_accessor_col := NULL;
+		foreach _col_candidate in array array[''fk_identity'', ''fk_patient'', ''id_identity'', ''fk_encounter''] loop
+			if exists (
+				select 1 from pg_class, pg_attribute where
+				pg_class.oid = _qualified_table::regclass
+					AND
+				pg_attribute.attname = _col_candidate
+					AND
+				pg_attribute.attrelid = pg_class.oid
+			) then
+				_accessor_col := _col_candidate;
+				exit;
+			end if;
+		end loop;
+		if _accessor_col = ''fk_encounter'' then					-- retrieve identity PK via fk_encounter
+			_identity_accessor_SQL := ''select fk_patient from clin.encounter where pk = $1.fk_encounter limit 1'';
+		elsif _accessor_col = ''fk_identity'' then					-- retrieve identity PK via fk_identity
+			_identity_accessor_SQL := ''select $1.fk_identity'';
+		elsif _accessor_col = ''fk_patient'' then					-- retrieve identity PK via fk_patient
+			_identity_accessor_SQL := ''select $1.fk_patient'';
+		elsif _accessor_col = ''id_identity'' then					-- retrieve identity PK via id_identity
+			_identity_accessor_SQL := ''select $1.id_identity'';
+		else
+			_identity_accessor_SQL := ''<NULL>'';
+		end if;
+	end if;
+
+	-- drop triggers should they exist
+	-- old-name announcement triggers
+	-- remove in v22
+	_cmd := ''drop trigger if exists tr_announce_'' || _schema_name || ''_'' || _table_name || ''_ins_upd on '' || _qualified_table || '' cascade;'';
+	execute _cmd;
+	_cmd := ''drop trigger if exists tr_announce_'' || _schema_name || ''_'' || _table_name || ''_del on '' || _qualified_table || '' cascade;'';
+	execute _cmd;
+	-- new-name announcement triggers
+	_cmd := ''drop trigger if exists zzz_tr_announce_'' || _schema_name || ''_'' || _table_name || ''_ins_upd on '' || _qualified_table || '' cascade;'';
+	execute _cmd;
+	_cmd := ''drop trigger if exists zzz_tr_announce_'' || _schema_name || ''_'' || _table_name || ''_del on '' || _qualified_table || '' cascade;'';
+	execute _cmd;
+
+	-- remove in v21
+	if _drop_old_triggers is true then
+		_cmd := ''drop function if exists '' || _schema_name || ''.trf_announce_'' || _table_name || ''_mod() cascade;'';
+		execute _cmd;
+		_cmd := ''drop function if exists '' || _schema_name || ''.trf_announce_'' || _table_name || ''_mod_no_pk() cascade;'';
+		execute _cmd;
+		_cmd := ''drop function if exists '' || _schema_name || ''.trf_announce_'' || _table_name || ''_generic_mod_no_pk() cascade;'';
+		execute _cmd;
+	end if;
+
+	-- re-create triggers
+	-- 1) INSERT/UPDATE
+	_payload := ''table='' || _qualified_table || ''::PK name='' || _PK_col_name;
+	_cmd := ''create constraint trigger zzz_tr_announce_'' || _schema_name || ''_'' || _table_name || ''_ins_upd'';
+	_cmd := _cmd || '' after insert or update'';
+	_cmd := _cmd || '' on '' || _qualified_table;
+	-- needed so a SELECT inside, say, _identity_accessor_SQL running
+	-- concurrently to a "lengthy" TX does not create a serialization
+	-- failure by being a rw-dependency pivot
+	_cmd := _cmd || '' deferrable initially deferred'';
+	_cmd := _cmd || '' for each row'';
+	_cmd := _cmd || '' execute procedure gm.trf_announce_table_ins_upd('''''' || _payload || '''''', '''''' || _pk_accessor_SQL || '''''', '''''' || _identity_accessor_SQL || '''''');'';
+	execute _cmd;
+	-- 2) DELETE
+	_payload := ''operation=DELETE::'' || _payload;
+	_cmd := ''create constraint trigger zzz_tr_announce_'' || _schema_name || ''_'' || _table_name || ''_del'';
+	_cmd := _cmd || '' after delete'';
+	_cmd := _cmd || '' on '' || _qualified_table;
+	-- needed so a SELECT inside, say, _identity_accessor_SQL running
+	-- concurrently to a "lengthy" TX does not create a serialization
+	-- failure by being a rw-dependency pivot
+	_cmd := _cmd || '' deferrable initially deferred'';
+	_cmd := _cmd || '' for each row'';
+	_cmd := _cmd || '' execute procedure gm.trf_announce_table_del('''''' || _payload || '''''', '''''' || _pk_accessor_SQL || '''''', '''''' || _identity_accessor_SQL || '''''');'';
+	execute _cmd;
+
+	return True;
+END;
+';
+
+
+comment on function gm.create_table_mod_triggers(_schema_name name, _table_name name, _drop_old_triggers boolean) is
+'This function can be run on any table in order to add notification triggers to that table.';
+
+-- --------------------------------------------------------------
+create or replace function gm.create_all_table_mod_triggers(_drop_old_triggers boolean)
+	returns boolean
+	language plpgsql
+	as '
+DECLARE
+	_notify_table record;
+	_cmd text;
+	_total_success boolean;
+BEGIN
+	_total_success := True;
+	-- loop over registered tables
+	for _notify_table in select * from gm.notifying_tables loop
+		BEGIN
+			PERFORM gm.create_table_mod_triggers(_notify_table.schema_name, _notify_table.table_name, _drop_old_triggers);
+		EXCEPTION
+			WHEN undefined_table OR undefined_column THEN
+				raise warning ''gm.create_all_table_mod_triggers(): error processing <%.%>, skipping'', _notify_table.schema_name, _notify_table.table_name;
+				_total_success := False;
+		END;
+	end loop;
+	return _total_success;
+END;
+';
+
+comment on function gm.create_all_table_mod_triggers(_drop_old_triggers boolean) is
+	'(Re)create all table mod triggers for all registered tables.';
+
+-- --------------------------------------------------------------
+select gm.log_script_insertion('v21-notifications-dynamic.sql', '21.11');

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/gnumed-server.git



More information about the debian-med-commit mailing list