[med-svn] [Git][med-team/sra-sdk][upstream] New upstream version 2.10.7+dfsg

Aaron M. Ucko gitlab at salsa.debian.org
Wed Jun 3 03:11:19 BST 2020



Aaron M. Ucko pushed to branch upstream at Debian Med / sra-sdk


Commits:
6461e62e by Aaron M. Ucko at 2020-06-02T21:46:22-04:00
New upstream version 2.10.7+dfsg
- - - - -


24 changed files:

- CHANGES.md
- build/Makefile.vers
- + build/docker/Dockerfile.build-alpine
- + build/docker/Dockerfile.build-amazonlinux
- + build/docker/Dockerfile.build-ubuntu
- build/docker/Dockerfile.delite
- shared/toolkit.vers
- shared/toolkit.vers.h
- test/driver-tool/Makefile
- + test/driver-tool/expected/vdbcache.stderr
- tools/driver-tool/imp_fasterq_dump.cpp
- tools/driver-tool/proc.posix.cpp
- tools/driver-tool/proc.win32.cpp
- tools/driver-tool/run-source.cpp
- tools/driver-tool/util.hpp
- + tools/kar/DL/INSTALL.sh
- + tools/kar/DL/README.txt
- + tools/kar/DL/TEST.sh
- + tools/kar/DL/UPD.sh
- tools/kar/README.txt
- tools/kar/delite_docker.sh
- + tools/kar/delite_test_docker.sh
- tools/kar/sra_delite.kfg
- tools/kar/sra_delite.sh


Changes:

=====================================
CHANGES.md
=====================================
@@ -1,6 +1,12 @@
 # NCBI External Developer Release:
 
 
+## SRA Toolkit 2.10.7
+**May 20, 2020**
+
+  **sratools**: fixed issue with some runs not working correctly and fixed typo in fasterq-dump command line
+
+
 ## SRA Toolkit 2.10.6
 **MAY 18, 2020**
 


=====================================
build/Makefile.vers
=====================================
@@ -23,4 +23,4 @@
 # ===========================================================================
 
 # SRA-TOOLS and library version
-VERSION = 2.10.6
+VERSION = 2.10.7


=====================================
build/docker/Dockerfile.build-alpine
=====================================
@@ -0,0 +1,39 @@
+FROM alpine:latest AS build-base
+RUN apk add build-base util-linux linux-headers g++ bash perl make cmake git bison flex
+
+FROM build-base AS build
+ARG VDB_BRANCH=master
+ARG NGS_BRANCH=${VDB_BRANCH}
+ARG SRA_BRANCH=${VDB_BRANCH}
+ARG BUILD_STYLE=--without-debug
+RUN git clone -b ${NGS_BRANCH} --depth 1 https://github.com/ncbi/ngs.git
+RUN git clone -b ${VDB_BRANCH} --depth 1 https://github.com/ncbi/ncbi-vdb.git
+RUN git clone -b ${SRA_BRANCH} --depth 1 https://github.com/ncbi/sra-tools.git
+WORKDIR /ncbi-vdb
+RUN sed -e '/gnu\/libc-version.h/ s/^/\/\//' -e '/gnu_get_libc_version/s/^/\/\//' -i libs/kns/manager.c
+RUN ./configure ${BUILD_STYLE} && make -s >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+WORKDIR /ngs
+RUN ./configure ${BUILD_STYLE} && make -s -C ngs-sdk >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+WORKDIR /sra-tools
+RUN sed -e'/execinfo.h/ s/^/\/\//' -e'/backtrace/ s/^/\/\//' -i tools/driver-tool/secure/except.cpp
+RUN sed -e'/mv -b -v/ s/mv -b/mv/' -i build/install-kfg.sh
+RUN perl -pi.orig -e'BEGIN{@l=(279,383,407,415,416,420,770,907);$i=0}if ($.==$l[$i]){++$i;s/stdout/use_stdout/g unless /use_stdout/}' tools/fasterq-dump/fasterq-dump.c && rm -f tools/fasterq-dump/fasterq-dump.c.orig
+RUN ./configure ${BUILD_STYLE} && make -s >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+RUN make install
+RUN mkdir -p /root/.ncbi
+RUN printf '/LIBS/GUID = "%s"\n' `uuidgen` > /root/.ncbi/user-settings.mkfg
+# SEND CLOUD INSTANCE IDENTITY
+RUN printf '/libs/cloud/report_instance_identity = "true"\n' >> /root/.ncbi/user-settings.mkfg
+
+FROM alpine:latest
+LABEL author="Kenneth Durbrow" \
+      maintainer="kenneth.durbrow at nih.gov" \
+      vendor="gov.nih.nlm.ncbi" \
+      website="https://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?view=software" \
+      repository="https://github.com/ncbi/sra-tools/" \
+      description="The SRA Toolkit along with a working default configuration." \
+      NOTICE="WHEN USING THIS IMAGE IN A CLOUD ENVIRONMENT, YOU WILL BE SENDING YOUR CLOUD INSTANCE IDENTITY TO NCBI."
+COPY --from=build /etc/ncbi /etc/ncbi
+COPY --from=build /usr/local/ncbi /usr/local/ncbi
+COPY --from=build /root/.ncbi /root/.ncbi
+ENV PATH=/usr/local/ncbi/sra-tools/bin:${PATH}
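
A minimal build sketch for this new Dockerfile, run from an sra-tools checkout
(the image tag is illustrative, and the 2.10.7 tag is assumed to exist in all
three repositories):

    docker build -f build/docker/Dockerfile.build-alpine \
        --build-arg VDB_BRANCH=2.10.7 -t sratoolkit:alpine .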


=====================================
build/docker/Dockerfile.build-amazonlinux
=====================================
@@ -0,0 +1,35 @@
+FROM amazonlinux:latest AS build-base
+RUN yum -q -y update && yum -q -y install gcc gcc-c++ make cmake git bison flex util-linux
+
+FROM build-base AS build
+LABEL author="Kenneth Durbrow" \
+      maintainer="kenneth.durbrow at nih.gov" \
+      vendor="gov.nih.nlm.ncbi" \
+      website="https://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?view=software" \
+      repository="https://github.com/ncbi/sra-tools/" \
+      description="Builds and installs the sratoolkit along with a working default configuration"
+ARG NGS_BRANCH=master
+ARG VDB_BRANCH=master
+ARG SRA_BRANCH=master
+ARG BUILD_STYLE=--without-debug
+RUN git clone -b ${NGS_BRANCH} --depth 1 https://github.com/ncbi/ngs.git
+RUN git clone -b ${VDB_BRANCH} --depth 1 https://github.com/ncbi/ncbi-vdb.git
+RUN git clone -b ${SRA_BRANCH} --depth 1 https://github.com/ncbi/sra-tools.git
+WORKDIR /ncbi-vdb
+RUN ./configure ${BUILD_STYLE} && make -s >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+WORKDIR /ngs
+RUN ./configure ${BUILD_STYLE} && make -s -C ngs-sdk >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+WORKDIR /sra-tools
+RUN ./configure ${BUILD_STYLE} && make -s >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+RUN make install
+RUN mkdir -p /root/.ncbi
+RUN printf '/LIBS/GUID = "%s"\n' `uuidgen` > /root/.ncbi/user-settings.mkfg
+RUN printf '/libs/cloud/report_instance_identity = "true"\n' >> /root/.ncbi/user-settings.mkfg
+RUN printf '/libs/cloud/accept_aws_charges = "true"\n/libs/cloud/accept_gcp_charges = "true"\n' >> /root/.ncbi/user-settings.mkfg
+
+FROM amazonlinux:latest
+COPY --from=build /etc/ncbi /etc/ncbi
+COPY --from=build /usr/local/ncbi /usr/local/ncbi
+COPY --from=build /root/.ncbi /root/.ncbi
+ENV PATH=/usr/local/ncbi/sra-tools/bin:${PATH}
+RUN echo "BY USING THIS DOCKER IMAGE IN A CLOUD ENVIRONMENT, YOU WILL BE SENDING YOUR CLOUD INSTANCE IDENTITY TO NCBI, AND YOU AGREE TO ACCEPT ANY CHARGES WHICH MIGHT OCCUR DUE TO TRANSFERING DATA FROM NCBI."


=====================================
build/docker/Dockerfile.build-ubuntu
=====================================
@@ -0,0 +1,35 @@
+FROM ubuntu:bionic AS build-base
+RUN apt-get --quiet update && apt-get --quiet install -y make cmake git gcc g++ flex bison uuid-runtime
+
+FROM build-base AS build
+LABEL author="Kenneth Durbrow" \
+      maintainer="kenneth.durbrow at nih.gov" \
+      vendor="gov.nih.nlm.ncbi" \
+      website="https://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?view=software" \
+      repository="https://github.com/ncbi/sra-tools/" \
+      description="Builds and installs the sratoolkit along with a working default configuration"
+ARG NGS_BRANCH=master
+ARG VDB_BRANCH=master
+ARG SRA_BRANCH=master
+ARG BUILD_STYLE=--without-debug
+RUN git clone -b ${NGS_BRANCH} --depth 1 https://github.com/ncbi/ngs.git
+RUN git clone -b ${VDB_BRANCH} --depth 1 https://github.com/ncbi/ncbi-vdb.git
+RUN git clone -b ${SRA_BRANCH} --depth 1 https://github.com/ncbi/sra-tools.git
+WORKDIR /ncbi-vdb
+RUN ./configure ${BUILD_STYLE} && make -s >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+WORKDIR /ngs
+RUN ./configure ${BUILD_STYLE} && make -s -C ngs-sdk >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+WORKDIR /sra-tools
+RUN ./configure ${BUILD_STYLE} && make -s >/dev/null 2>&1 || { echo "make failed"; exit 1; }
+RUN make install
+RUN mkdir -p /root/.ncbi
+RUN printf '/LIBS/GUID = "%s"\n' `uuidgen` > /root/.ncbi/user-settings.mkfg
+RUN printf '/libs/cloud/report_instance_identity = "true"\n' >> /root/.ncbi/user-settings.mkfg
+RUN printf '/libs/cloud/accept_aws_charges = "true"\n/libs/cloud/accept_gcp_charges = "true"\n' >> /root/.ncbi/user-settings.mkfg
+
+FROM ubuntu:latest
+COPY --from=build /etc/ncbi /etc/ncbi
+COPY --from=build /usr/local/ncbi /usr/local/ncbi
+COPY --from=build /root/.ncbi /root/.ncbi
+ENV PATH=/usr/local/ncbi/sra-tools/bin:${PATH}
+RUN echo "BY USING THIS DOCKER IMAGE IN A CLOUD ENVIRONMENT, YOU WILL BE SENDING YOUR CLOUD INSTANCE IDENTITY TO NCBI, AND YOU AGREE TO ACCEPT ANY CHARGES WHICH MIGHT OCCUR DUE TO TRANSFERING DATA FROM NCBI."


=====================================
build/docker/Dockerfile.delite
=====================================
@@ -14,6 +14,8 @@
 # % mkdir ~/output
 # % docker run -v ~/output/:/output:rw --rm sratoolkit:delite delite_docker.sh SRR000001 SRR000002
 # % docker run -v ~/output/:/output:rw --rm sratoolkit:delite vdb-dump -R 1 /output/SRR000001/new.kar
+# % docker run -v ~/output/:/output:rw --rm sratoolkit:delite delite_docker.sh --skiptest SRR000001
+# % docker run -v ~/output/:/output:rw --rm sratoolkit:delite delite_test_docker.sh SRR000001
 # 
 
 # bionic is 18.04 LTS
@@ -36,8 +38,8 @@ RUN make install
 
 FROM build AS delited
 ### Install delite process binaries and script
-RUN cp -a /root/ncbi-outdir/sra-tools/*/*/*/*/bin/kar+* /root/ncbi-outdir/sra-tools/*/*/*/*/bin/make-read-filter* /sra-tools/tools/kar/sra_delite.sh /sra-tools/tools/kar/sra_delite.kfg /sra-tools/tools/kar/delite_docker.sh /usr/local/ncbi/sra-tools/bin
-RUN chmod ugo+x /usr/local/ncbi/sra-tools/bin/delite_docker.sh
+RUN cp -a /root/ncbi-outdir/sra-tools/*/*/*/*/bin/kar+* /root/ncbi-outdir/sra-tools/*/*/*/*/bin/make-read-filter* /sra-tools/tools/kar/sra_delite.sh /sra-tools/tools/kar/sra_delite.kfg /sra-tools/tools/kar/delite_docker.sh /sra-tools/tools/kar/delite_test_docker.sh /usr/local/ncbi/sra-tools/bin
+RUN chmod ugo+x /usr/local/ncbi/sra-tools/bin/delite_docker.sh /usr/local/ncbi/sra-tools/bin/delite_test_docker.sh
 ### Copy schema files
 WORKDIR /ncbi-vdb/interfaces
 RUN rm -rf csra2 sra/pevents.* ; for i in */*.vschema ; do mkdir -p /schema/`dirname $i` ; cp $i /schema/`dirname $i` ; done ; rm -f /schema/ncbi/trace.vschema


=====================================
shared/toolkit.vers
=====================================
@@ -1 +1 @@
-2.10.6
+2.10.7


=====================================
shared/toolkit.vers.h
=====================================
@@ -1 +1 @@
-#define TOOLKIT_VERS 0x020A0006
+#define TOOLKIT_VERS 0x020A0007
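
The header packs the version as 0xMMmmrrrr (major, minor, release), so 2.10.7
becomes 0x020A0007; a quick way to check the encoding:

    printf '0x%02X%02X%04X\n' 2 10 7   # -> 0x020A0007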


=====================================
test/driver-tool/Makefile
=====================================
@@ -30,7 +30,7 @@ MODULE = test/driver-tool
 
 include $(TOP)/build/Makefile.env
 
-runtests: built-in bogus container good
+runtests: built-in bogus container good vdbcache
 
 TMPDIR ?= /tmp
 TEMPDIR ?= $(TMPDIR)
@@ -96,5 +96,14 @@ $(CONTAINER): | actual mkfg
 	$(BINDIR)/sratools $@ 2>actual/$@.stderr ; \
 	diff expected/$@.stderr actual/$@.stderr
 
-.PHONY: runtests bogus container good $(CONTAINER) $(GOOD)
+vdbcache: | actual mkfg
+	@echo "testing expected output for run with vdbcache" ;\
+	NCBI_SETTINGS=$(TEMPDIR)/tmp.mkfg \
+	PATH=$(BINDIR):$$PATH \
+	SRATOOLS_TESTING=5 \
+	SRATOOLS_IMPERSONATE=vdb-dump \
+	$(BINDIR)/sratools SRR390728 2>actual/$@.stderr ; \
+	diff expected/$@.stderr actual/$@.stderr
+
+.PHONY: runtests bogus container good $(CONTAINER) $(GOOD) vdbcache
 .INTERMEDIATE: $(TEMPDIR)/tmp.mkfg
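
The new target mirrors the existing ones; a sketch of running it by hand from
test/driver-tool (BINDIR here stands for the build's binary directory, and the
output is compared against the expected file added below):

    NCBI_SETTINGS=/tmp/tmp.mkfg PATH="$BINDIR:$PATH" \
    SRATOOLS_TESTING=5 SRATOOLS_IMPERSONATE=vdb-dump \
    "$BINDIR"/sratools SRR390728 2>actual/vdbcache.stderr
    diff expected/vdbcache.stderr actual/vdbcache.stderr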


=====================================
test/driver-tool/expected/vdbcache.stderr
=====================================
@@ -0,0 +1,2 @@
+VDB_REMOTE_URL
+VDB_REMOTE_VDBCACHE


=====================================
tools/driver-tool/imp_fasterq_dump.cpp
=====================================
@@ -103,7 +103,7 @@ struct FasterqParams final : CmnOptAndAccessions
         cmdline . addOption ( Threads, &ThreadsCount, "e", "threads", "<count>",
             "how many threads to use (dflt=6)" );
 
-        cmdline . addOption ( progress, "p", "progres", "show progress (not possible if stdout used)" );
+        cmdline . addOption ( progress, "p", "progress", "show progress (not possible if stdout used)" );
         cmdline . addOption ( details, "x", "details", "print details of all options selected" );
         cmdline . addOption ( split_spot, "s", "split-spot", "split spots into reads" );
         cmdline . addOption ( split_files, "S", "split-files", "write reads into different files" );


=====================================
tools/driver-tool/proc.posix.cpp
=====================================
@@ -111,6 +111,11 @@ static void debugPrintDryRun(  char const *const toolpath
                              , char const *const *const argv)
 {
     switch (logging_state::testing_level()) {
+    case 5:
+        for (auto name : make_sequence(constants::env_var::names(), constants::env_var::END_ENUM)) {
+            debugPrintEnvVarName(name);
+        }
+        exit(0);
     case 4:
         for (auto name : make_sequence(constants::env_var::names(), constants::env_var::END_ENUM)) {
             debugPrintEnvVar(name, true);


=====================================
tools/driver-tool/proc.win32.cpp
=====================================
@@ -146,6 +146,11 @@ static bool debugPrintDryRun(char const *const toolpath
     , char const *const *const argv)
 {
     switch (logging_state::testing_level()) {
+    case 5:
+        for (auto name : make_sequence(constants::env_var::names(), constants::env_var::END_ENUM)) {
+            debugPrintEnvVarName(name);
+        }
+        exit(0);
     case 4:
         for (auto name : make_sequence(constants::env_var::names(), constants::env_var::END_ENUM)) {
             debugPrintEnvVar(name, true);


=====================================
tools/driver-tool/run-source.cpp
=====================================
@@ -375,13 +375,13 @@ Dictionary data_source::get_environment() const
         result[names[env_var::REMOTE_NEED_PMT]] = "1";
 
     if (haveVdbCache) {
-        result[names[env_var::REMOTE_URL]] = vdbcache.remoteUrl;
+        result[names[env_var::REMOTE_VDBCACHE]] = vdbcache.remoteUrl;
         if (vdbcache.haveCachePath)
-            result[names[env_var::CACHE_URL]] = vdbcache.cachePath;
+            result[names[env_var::CACHE_VDBCACHE]] = vdbcache.cachePath;
         if (vdbcache.haveLocalPath)
-            result[names[env_var::LOCAL_URL]] = vdbcache.localPath;
+            result[names[env_var::LOCAL_VDBCACHE]] = vdbcache.localPath;
         if (vdbcache.haveSize)
-            result[names[env_var::SIZE_URL]] = vdbcache.fileSize;
+            result[names[env_var::SIZE_VDBCACHE]] = vdbcache.fileSize;
         if (vdbcache.needCE)
             result[names[env_var::CACHE_NEED_CE]] = "1";
         if (haveVdbCache && vdbcache.needPmt)
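
This is the "some runs not working correctly" fix from CHANGES.md: the
vdbcache source no longer clobbers the run's own URL variables. A sketch of
the environment a child tool now sees for a run with a .vdbcache (URLs are
illustrative):

    VDB_REMOTE_URL='https://.../SRR390728'                # the run itself
    VDB_REMOTE_VDBCACHE='https://.../SRR390728.vdbcache'  # its cache file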


=====================================
tools/driver-tool/util.hpp
=====================================
@@ -138,3 +138,10 @@ static inline void debugPrintEnvVar(char const *const name, bool const continuel
     if (value)
         std::cerr << name << "='" << value << "'" << (continueline ? " \\\n" : "\n");
 }
+
+static inline void debugPrintEnvVarName(char const *const name, bool const continueline = false)
+{
+    auto const value = EnvironmentVariables::get(name);
+    if (value)
+        std::cerr << name << (continueline ? " \\\n" : "\n");
+}
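
For comparison, debugPrintEnvVar (used at testing level 4) prints the name and
value, with a trailing backslash when continueline is set, while the new
debugPrintEnvVarName (level 5) prints the name alone:

    VDB_REMOTE_URL='https://...'    <- debugPrintEnvVar
    VDB_REMOTE_URL                  <- debugPrintEnvVarName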


=====================================
tools/kar/DL/INSTALL.sh
=====================================
@@ -0,0 +1,198 @@
+#!/bin/bash
+
+#THIS is not a script ... lol
+
+usage ()
+{
+    AGA="$@"
+
+    if [ -n "$AGA" ]
+    then
+        echo
+        echo $AGA >&2
+    fi
+
+    cat <<EOF >&2
+
+This script will install or update delite binaries in the target directory
+
+Syntax: `basename $0` [-n] directory_where_to_install
+
+Where:
+    -n - files will be updated only if there is a newer version
+
+EOF
+
+    exit 1
+}
+
+
+NEW_ONLY=0
+
+case $# in
+    1)
+        TARGET_DIR=$1
+        ;;
+    2)
+        if [ $1 = "-n" ]
+        then
+            NEW_ONLY=1
+        else
+            usage ERROR: invalid argument \'$1\'
+        fi
+
+        TARGET_DIR=$2
+        ;;
+    *)
+        usage ERROR: invalid arguments
+        ;;
+esac
+
+
+if [ -z "$TARGET_DIR" ]
+then
+    echo ERROR: target directory is not defined >&2
+    exit 1
+fi
+
+##
+## Locate the place where everything is
+INSDIR=$( cd $( dirname $0 ); pwd )
+VER_DIR=$INSDIR/VER
+
+if [ ! -d "$VER_DIR" ]
+then
+    echo ERROR: can not stat version directory \'$VER_DIR\' >&2
+    exit 1
+fi
+
+##
+## Some useful helpers
+run_cmd ()
+{
+    CMD="$@"
+    if [ -z "$CMD" ]
+    then
+        echo WARNING: run_cmd called with empty arguments >&2
+        return
+    fi
+
+    echo "## $CMD"
+    eval "$CMD"
+    if [ $? -ne 0 ]
+    then
+        echo ERROR: command failed >&2
+        exit 1
+    fi
+}
+
+##
+## Looking for target directory
+
+check_make_dir ()
+{
+    if [ ! -d "$1" ]
+    then
+        echo "## Creating directory '$1'"
+        mkdir $1
+        if [ $? -ne 0 ]
+        then
+            echo ERROR: failed to create directory \'$1\'
+            exit 1
+        fi
+    fi
+}
+
+check_make_dir $TARGET_DIR
+
+##
+## Copy binaries
+BIN_SRC=$VER_DIR/BIN
+BIN_DST=$TARGET_DIR/bin
+
+check_make_dir $BIN_DST
+
+if [ "$NEW_ONLY" = "1" -a -d "$BIN_DST" ]
+then
+    echo "## Updating newer binaries"
+    for i in `ls $BIN_SRC`
+    do
+        SF=$BIN_SRC/$i
+        DF=$BIN_DST/$i
+        if [ "$SF" -nt "$DF" ]
+        then
+            run_cmd cp -pP "$SF" "$DF"
+        else
+            echo "## SKIP: $i"
+        fi
+    done
+else
+    echo "## Replacing binaries"
+
+    run_cmd rm -r $BIN_DST
+    run_cmd cp -rpP $BIN_SRC $BIN_DST
+fi
+
+##
+## Copy schemas
+SCM_SRC=$VER_DIR/SCM
+SCM_DST=$TARGET_DIR/schema
+
+check_make_dir $SCM_DST
+
+if [ "$NEW_ONLY" = "1" ]
+then
+    echo "## Updating newer schemas"
+    for SF in `find $SCM_SRC -type f -name "*.vschema"`
+    do
+        DF=$SCM_DST/`echo $SF | sed "s#$SCM_SRC##1" `
+        if [ "$SF" -nt "$DF" ]
+        then
+            FDO=$( dirname $DF )
+            if [ ! -d $FDO ]
+            then
+                run_cmd mkdir $FDO
+            fi
+            run_cmd cp -pP "$SF" "$DF"
+        else
+            echo "## SKIP: $SF"
+        fi
+    done
+else
+    echo "## Replacing schemas"
+
+    run_cmd rm -r $SCM_DST
+    run_cmd cp -rpP $SCM_SRC $SCM_DST
+fi
+
+for i in `find $SCM_DST -type f -name trace.vschema`
+do
+    run_cmd rm -f $i
+done
+
+
+UPDD=$( cd $TARGET_DIR; pwd )
+UPDS=$UPDD/UPDATE.sh
+
+echo Generating update script : $UPDS
+
+cat <<EOF > $UPDS
+#!/bin/bash
+
+echo Update: $UPDD
+$INSDIR/INSTALL.sh $UPDD
+if [ $? -ne 0 ]
+then
+    echo UPDATE FAILED>&2
+    exit 1
+fi
+
+echo ENJOY
+EOF
+
+chmod 774 $UPDS
+
+echo To update binaries in the future, please run script $UPDS
+
+echo DONE
+


=====================================
tools/kar/DL/README.txt
=====================================
@@ -0,0 +1,4 @@
+This directory contains scripts necessary for DELITE testing.
+Script UPD.sh will create a bundle of binaries and scripts, which may be distributed
+with script INSTALL.sh
+Script TEST.sh is an umbrella for sra_delite.sh


=====================================
tools/kar/DL/TEST.sh
=====================================
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#####
+## This script runs the delite sequence (import, delite, export) for one accession
+#####
+
+DD=`dirname $0`
+
+BIN_D=$( cd $( dirname $0 ); pwd )
+BASE_D=$( dirname $BIN_D )
+
+TEST_D=$BASE_D/test
+SCM_D=$BASE_D/schema
+
+run_cmd ()
+{
+    ARA="$@"
+    if [ -z "$ARA" ]
+    then
+        echo WARNING: no arguments passed to run_cmd >&2
+        return
+    fi
+
+    echo
+    echo "########"
+    echo "##  $ARA"
+    eval $ARA
+    RETC=$?
+    if [ $RETC -ne 0 ]
+    then
+        echo "ERROR: command failed ($RETC)" >&2
+        exit $RETC
+    fi
+}
+
+if [ ! -d "$SCM_D" ]
+then
+    echo ERROR: can not stat schema directory \'$SCM_D\' >&2
+    exit 1
+fi
+
+if [ ! -d "$TEST_D" ]
+then
+    run_cmd mkdir $TEST_D
+fi
+
+echo
+
+if [ $# -ne 1 ]
+then
+    echo Usage : $( basename $0 ) ACCESSION >&2
+    exit 1
+fi
+
+ACCESSION=$1
+DST_D=$TEST_D/$ACCESSION
+
+echo DELITE: $ACCESSION to $DST_D
+
+##
+## 1 Importing
+run_cmd $BIN_D/sra_delite.sh import --accession $ACCESSION --target $DST_D --force
+
+##
+## 2 Deliting
+run_cmd $BIN_D/sra_delite.sh delite --schema $SCM_D --target $DST_D
+
+##
+## 3 Exporting
+run_cmd $BIN_D/sra_delite.sh export --target $DST_D --force
+
+echo ENJOY!
+


=====================================
tools/kar/DL/UPD.sh
=====================================
@@ -0,0 +1,275 @@
+#!/bin/bash
+
+## This script will update binaries and schemas for delite in the current directory
+##
+## Syntax: UPD.sh [--help|-h]
+##
+## Directories with sources :
+## /panfs/pan1/sra-test/vdb3/git/inst
+## ~/work/Kar+/sra-tools/tools/kar
+##
+## Structure:
+## .../BIN - THAT BINARY is in here
+## .../REP - repository
+##      |-- VER - version of data
+##           |-- BIN - binaries
+##           |-- SCM - schemas
+## .../VER - link to current version
+## .../TMP - temporary directory
+##
+
+
+
+## Directories with data sources
+##
+DBIN_DIR=/panfs/pan1/sra-test/KinDzaDza/Ku/blue/bin
+
+## Other directories and links
+##
+BIN_D=$( cd $( dirname "$0" ); pwd )
+BASE_D=$( cd $( dirname "$BIN_D" ); pwd )
+REP_D=$BASE_D/REP
+VER_D=$BASE_D/VER
+TMP_D=$BASE_D/TMP
+
+for i in $REP_D $VER_D $TMP_D
+do
+    if [ ! -d "$i" ]
+    then
+        echo WARNING: directory missing \"$i\" >&2
+        mkdir $i >/dev/null 2>&1
+        if [ $? -ne 0 ]
+        then
+            echo ERROR: can not create directory \"$i\" >&2
+            exit 1
+        fi
+    fi
+done
+
+##
+## Arguments and envir
+##
+usage ()
+{
+    cat << EOF
+
+This script will update binaries and schemas for the delite project. By default, it will
+put binaries in the same location as the script, and it will put schemas in the directory
+'schemas', which has the same parent directory as the script location.
+
+Syntax: `basename $0` [--help|-h]
+
+Where:
+    --help|-h - will show this message
+
+EOF
+
+}
+
+for i in $@
+do
+    case $i in
+        --help)
+            usage
+            exit 0
+            ;;
+        -h)
+            usage
+            exit 0
+            ;;
+        *)
+            echo ERROR: invalid argument \"$i\" >&2
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+##
+## Helper to run a command, logging it and exiting on failure
+##
+run_cmd ()
+{
+    CMD="$@"
+
+    if [ -z "$CMD" ]
+    then
+        echo ERROR: invalid usage \"run_cmd\". >&2
+        exit 1
+    fi
+
+    echo "## $CMD"
+    eval "$CMD"
+    if [ $? -ne 0 ]
+    then
+        echo ERROR: command failed >&2
+        exit 1
+    fi
+}
+
+## removing data from previous builds
+##
+for i in `ls $TMP_D`
+do
+    run_cmd rm -rf $TMP_D/$( basename $i )
+done
+
+##
+## First we should make a new version
+##
+TMP_VER=$TMP_D/VER
+if [ -d "$TMP_VER" ]
+then
+    echo WARNING: removing old bundle directory \"$TMP_VER\". >&2
+    run_cmd rm -r $TMP_VER
+    if [ $? -ne 0 ]
+    then
+        echo ERROR: can not remove old bundle directory \"$TMP_VER\". >&2
+        exit 1
+    fi
+fi
+TMP_BIN=$TMP_VER/BIN
+TMP_SCM=$TMP_VER/SCM
+
+for i in $TMP_VER $TMP_BIN $TMP_SCM
+do
+    run_cmd mkdir $i
+done
+
+##
+## Copying binaries
+##
+
+copy_f ()
+{
+    for i in $@
+    do
+        FILES=`ls $SRC_D/${i}*`
+        if [ $? -ne 0 ]
+        then
+            echo ERROR: can not stat appropriate files for \'${i}*\' >&2
+            exit 1
+        fi
+
+        for u in `ls $SRC_D/${i}*`
+        do
+            FOH=`basename $u`
+            run_cmd cp -pP $SRC_D/$FOH $DST_D/$FOH
+        done
+    done
+}
+
+## Here we are copying binaries
+##
+copy_bins ()
+{
+    echo "###################################################"
+    echo "## CP BIN: $TMP_BIN"
+
+    BIN2CP="    \
+            fastq-dump  \
+            kar+  \
+            kar+meta  \
+            make-read-filter  \
+            prefetch  \
+            srapath  \
+            sratools    \
+            vdb-diff  \
+            vdb-dump  \
+            vdb-lock  \
+            vdb-unlock \
+            vdb-validate  \
+            vdb-config  \
+            "
+    SRC_D=$DBIN_DIR
+    DST_D=$TMP_BIN
+
+    copy_f $BIN2CP
+}
+
+## copying scripts
+##
+
+## copying schemas
+##
+copy_schemas ()
+{
+    echo CP SCM: $TMP_SCM
+
+    run_cmd git clone -b engineering https://github.com/ncbi/ncbi-vdb.git $TMP_D/ncbi-vdb
+
+    DSCM_DIR=$TMP_D/ncbi-vdb/interfaces
+
+
+    for i in `find $DSCM_DIR -type f -name "*.vschema"`
+    do
+        FF=$( echo $i | sed "s#$DSCM_DIR##1" )
+        DNM=$( dirname $FF )
+        DSTD=$TMP_SCM/$DNM
+        if [ ! -d "$DSTD" ]
+        then
+            run_cmd mkdir $DSTD
+        fi
+        run_cmd cp -p $i $DSTD
+    done
+
+    FF="$TMP_SCM/ncbi/trace.vschema"
+    if [ -f "$FF" ]
+    then
+        run_cmd rm $FF
+    fi
+}
+
+copy_scripts ()
+{
+    ## Here we are copying scripts
+    ##
+    echo CP SCR: $TMP_BIN
+
+    run_cmd git clone -b engineering https://github.com/ncbi/sra-tools.git $TMP_D/sra-tools
+
+    SCR2CP="    \
+            sra_delite.sh   \
+            sra_delite.kfg   \
+            README.txt   \
+            "
+    SRC_D=$TMP_D/sra-tools/tools/kar
+    DST_D=$TMP_BIN
+
+    copy_f $SCR2CP
+
+    SCR2CP="    \
+            TEST.sh  \
+            "
+    SRC_D=$TMP_D/sra-tools/tools/kar/DL
+
+    copy_f $SCR2CP
+
+    run_cmd rm -rf $TMP_D/sra-tools
+}
+
+make_prep ()
+{
+    FUFIL=$( date +%y-%m-%d_%H:%M )
+
+    echo $FUFIL >$TMP_VER/version
+
+    if [ -d "$REP_D/$FUFIL" ]
+    then
+        run_cmd rm -rf $REP_D/$FUFIL
+    fi
+    run_cmd mv $TMP_VER $REP_D/$FUFIL
+
+    run_cmd cp -rp $REP_D/$FUFIL $BASE_D/$FUFIL
+    run_cmd mv $VER_D ${VER_D}.obsolete
+    run_cmd mv $BASE_D/$FUFIL $VER_D
+    run_cmd rm -rf  ${VER_D}.obsolete
+}
+
+copy_bins
+
+copy_scripts
+
+copy_schemas
+
+make_prep


=====================================
tools/kar/README.txt
=====================================
@@ -9,6 +9,9 @@ The delite process is three stage process :
 3) packing modified KAR archive with/without reduced data, testing resulting
    KAR archive with VDB-DIFF program
 
+It should be noted here that the user can perform testing of the resulting KAR
+archive separately from stage #3.
+
 Contents:
 
 I.    Script requirements, environment and configuring.
@@ -17,6 +20,7 @@ III.  Script configuration file
 IV.   Unpacking original KAR archive
 V.    Editing resulting database
 V|.   Exporting data
+V|-|. Testing data
 VII.  Status
 VIII. Physical requirements (important, read it)
 IX.   Error codes
@@ -89,6 +93,7 @@ list of possible actions :
     import - script will download and/or unpack archive to working directory
     delite - script will perform DELITE on database content
     export - script will create 'delited' KAR archive
+      test - script will test 'delited' KAR archive
     status - script will report some status, or whatever.
 
 Options could be different for each action, many of them will be discussed
@@ -245,7 +250,7 @@ perform following:
 V|.   Exporting data
 =============================================================================
 Action 'export' will export delited data into KAR archive and test result.
-There is syntax of that command:
+There is syntax for that command:
 
 sra_delite.sh export [ --config CONFIG ] --target TARGET [--force] [--skiptest]
 
@@ -263,6 +268,18 @@ and consistency of schemas, it could take several minutes. The second test will
 be done by 'vdb-dump' utility. That test will perform record-by-record data
 comparation for both original and new KAR archives. It is longest test and can
 take more than several minutes. User can skip testing by adding flag '--skiptest'.
+Even if the user skipped the test, the resulting data can still be tested later.
+
+
+V|-|. Testing data
+=============================================================================
+Action 'test' will test the exported delited KAR archive. There is syntax for that
+command:
+
+sra_delite.sh test [ --config CONFIG ] --target TARGET
+
+The user should be ready for a lengthy procedure. It was introduced for the case
+where there are two different queues for deliting and for testing results.
 
 
 V|I.   Status
@@ -479,7 +496,7 @@ Now everything is ready for delite process.
 First step user should download docker file, or copy it. The simplest way to download
 docker file is clone sra-toolkit package from GITHUB:
 
-    git clone https://github.com/ncbi/sra-tools.git
+    git clone -b engineering https://github.com/ncbi/sra-tools.git
 
 User could copy docker file to his working directory, and delete sra-toolkit package after.
 
@@ -510,6 +527,12 @@ to script and they will be delited sequentially:
 
 In the case of error, script will return error code of failed sra_delite.sh command.
 
+If the user wants to separate the delite process from testing its results, the "delite_docker.sh"
+command can be used with the "--skiptest" flag, and testing performed afterwards with the
+"delite_test_docker.sh" command:
+
+    docker run -v ~/output/:/output:rw --rm sratoolkit:delite delite_docker.sh --skiptest SRR000001 SRR000002
+    docker run -v ~/output/:/output:rw --rm sratoolkit:delite delite_test_docker.sh SRR000001 SRR000002
+
 NOTE: if there are results of previous delite process for accession, script will exit with
       error message. User is responsible for deleting these before calling script.
 


=====================================
tools/kar/delite_docker.sh
=====================================
@@ -5,6 +5,8 @@
 ##       run only from docker container. It is umbrella for sra_delite.sh script
 ##
 
+SKIPTEST_TAG="--skiptest"
+
 usage ()
 {
     cat <<EOF >&2
@@ -13,7 +15,7 @@ That script will run delite process on SRA fun by it's accession
 
 Syntax:
 
-    `basename $0` < -h | --help | ACCESSION [ACCESSION ...] >
+    `basename $0` < -h | --help | [$SKIPTEST_TAG] ACCESSION [ACCESSION ...] >
 
 Where:
 
@@ -46,6 +48,16 @@ then
     esac
 fi
 
+unset SKIPTEST
+for i in $@
+do
+    if [ $i == "$SKIPTEST_TAG" ]
+    then
+        SKIPTEST=$i
+        break
+    fi
+done
+
 run_cmd ()
 {
     CMD="$@"
@@ -67,6 +79,23 @@ run_cmd ()
     fi
 }
 
+delite_work_dir ()
+{
+    D2R=$1
+    if [ -d "$D2R" ]
+    then
+            ## We do not care if this command fails
+        echo Removing directory $D2R
+        vdb-unlock $D2R
+        rm -r $D2R
+        if [ $? -ne 0 ]
+        then
+            echo WARNING: can not remove directory $D2R
+        fi
+    fi
+
+}
+
 delite_run ()
 {
     ACC=$1
@@ -93,23 +122,13 @@ EOF
 
     run_cmd sra_delite.sh delite --target $OUTD  --schema /etc/ncbi/schema
 
-    run_cmd sra_delite.sh export --target $OUTD
+    run_cmd sra_delite.sh export --target $OUTD $SKIPTEST
 
     ##
-    ## Removing orig directory
+    ## Removing work directory
 
-    D2R=$OUTD/orig
-    if [ -d $D2R ]
-    then
-            ## We do not care about if that command will fail
-        echo Removing directory $D2R
-        vdb-unlock $D2R
-        rm -r $D2R
-        if [ $? -ne 0 ]
-        then
-            echo WARNING: can not remove directory $D2R
-        fi
-    fi
+    delite_work_dir $OUTD/work
+    delite_work_dir $OUTD/work.vch
 
 cat <<EOF
 ## Finished delite process for $ACC
@@ -121,6 +140,11 @@ EOF
 
 for i in $@
 do
+    if [ $i = "$SKIPTEST_TAG" ]
+    then
+        continue
+    fi
+
     delite_run $i
 done
 


=====================================
tools/kar/delite_test_docker.sh
=====================================
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+##
+## NOTE: That script does not have permissions for execution, because it should
+##       run only from docker container. It is umbrella for sra_delite.sh script
+##
+
+usage ()
+{
+    cat <<EOF >&2
+
+This script will test the results of the delite process on an SRA run by its accession
+
+Syntax:
+
+    `basename $0` < -h | --help | ACCESSION [ACCESSION ...] >
+
+Where:
+
+       ACCESSION - valid SRA accession
+     -h | --help - print that help message
+
+EOF
+
+}
+
+if [ $# -eq 0 ]
+then
+    usage
+
+    echo ERROR: ACCESSION parameter is not defined >&2
+    exit 1
+fi
+
+if [ $# -eq 1 ]
+then
+    case $1 in
+        -h)
+            usage
+            exit 0
+            ;;
+        --help)
+            usage
+            exit 0
+            ;;
+    esac
+fi
+
+run_cmd ()
+{
+    CMD="$@"
+
+    if [ -z "$CMD" ]
+    then
+        echo ERROR: invalid usage of run_cmd command >&2
+        exit 1
+    fi
+
+    echo
+    echo "## $CMD"
+    eval $CMD
+    RV=$?
+    if [ $RV -ne 0 ]
+    then
+        echo ERROR: command failed with exit code $RV >&2
+        exit $RV
+    fi
+}
+
+delite_test ()
+{
+    ACC=$1
+
+    if [ -z "$ACC" ]
+    then
+        echo ERROR: invalid usage of delite_test command, ACCESSION missing >&2
+        exit 1
+    fi
+
+    cat <<EOF 
+
+#######################################################################################
+## Running test on results of delite process for $ACC
+## `date`
+EOF
+
+    OUTD=/output/$ACC
+
+    run_cmd sra_delite.sh test --target $OUTD
+
+cat <<EOF
+## Finished test on results of delite process for $ACC
+## `date`
+#######################################################################################
+
+EOF
+}
+
+for i in $@
+do
+    delite_test $i
+done
+
+echo "DONE ($@)"


=====================================
tools/kar/sra_delite.kfg
=====================================
@@ -105,5 +105,4 @@ diff-exclude MATE_EDIT_DISTANCE
 ### Please, do not allow spaces between parameters
 # DELITE_BIN_DIR=/panfs/pan1/trace_work/iskhakov/Tundra/KAR+TST/bin
 # USE_OWN_TEMPDIR=1
-DELITE_REJECT_VDBCACHE=1
 


=====================================
tools/kar/sra_delite.sh
=====================================
@@ -23,6 +23,7 @@ ORIGINAL_CMD="$0 $@"
 IMPORT_TAG="import"
 DELITE_TAG="delite"
 EXPORT_TAG="export"
+TEST_TAG="test"
 STATUS_TAG="status"
 
 ACCESSION_TAG="--accession"
@@ -71,6 +72,7 @@ Where :
                       working directory
              $DELITE_TAG - script will perform DELITE on database content
              $EXPORT_TAG - script will create 'delited' KAR archive
+               $TEST_TAG - script will test 'exported' KAR archive
              $STATUS_TAG - script will report some status, or whatever.
 
 Options:
@@ -112,6 +114,8 @@ case $ACTION in
         ;;
     $EXPORT_TAG)
         ;;
+    $TEST_TAG)
+        ;;
     $STATUS_TAG)
         ;;
     *)
@@ -316,8 +320,6 @@ diff-exclude RIGHT_SOFT_CLIP
 # USE_OWN_TEMPDIR=1
 ### That is for docker, and please do not modify it by yourself
 # DELITE_GUID=
-### That is for new version of make-read-filter
-DELITE_REJECT_VDBCACHE=1
 
 EOF
     else
@@ -566,9 +568,12 @@ set_resolve_set_dir_values ()
     fi
 
     TARGET_DIR=$TDVAL
-    DATABASE_DIR=$TARGET_DIR/orig
-    NEW_KAR_FILE=$TARGET_DIR/new.kar
-    ORIG_KAR_FILE=$TARGET_DIR/orig.kar
+    DATABASE_DIR=$TARGET_DIR/work
+    DATABASE_CACHE_DIR=$TARGET_DIR/work.vch
+    NEW_KAR_FILE=$TARGET_DIR/out
+    NEW_CACHE_FILE=$TARGET_DIR/out.vch
+    ORIG_KAR_FILE=$TARGET_DIR/in
+    ORIG_CACHE_FILE=$TARGET_DIR/in.vch
     STATUS_FILE=$TARGET_DIR/.status.txt
     VDBCFG_NAME=vdbconfig.kfg
     VDBCFG_FILE=$TARGET_DIR/$VDBCFG_NAME
@@ -581,6 +586,15 @@ set_resolve_set_dir_values
 export NCBI_SETTINGS=/
 export VDB_CONFIG=$VDBCFG_NAME
 
+if [ -z "$DELITE_GUID" ]
+then
+    UUIDGEN=$( which uuidgen )
+    if [ $? -eq 0 ]
+    then
+        DELITE_GUID=$( uuidgen )
+    fi
+fi
+
 ###############################################################################################
 ##  There will be description of status file, which is quite secret file
 ##  ...
@@ -717,13 +731,13 @@ import_proc ()
 /sra/quality_type = "raw_scores"
 EOF
 
-###
-##  In the case of AWS, we needed GUID for correct work
-#
-if [ -n "$DELITE_GUID" ]
-then
-    echo /LIBS/GUID = \"$DELITE_GUID\" >>$VDBCFG_FILE
-fi
+    ###
+    ##  In the case of AWS, we needed GUID for correct work
+    #
+    if [ -n "$DELITE_GUID" ]
+    then
+        echo /LIBS/GUID = \"$DELITE_GUID\" >>$VDBCFG_FILE
+    fi
 
     info_msg Changing directory to \'$TARGET_DIR\'
     cd $TARGET_DIR
@@ -742,24 +756,6 @@ fi
         dpec__ 105; err_exit can not stat file \'$TOUTF\'
     fi
 
-    if [ -n "$DELITE_REJECT_VDBCACHE" ]
-    then
-        VCH=$( ls ${TOUTD}/*.vdbcache 2>/dev/null )
-        if [ -n "$VCH" ]
-        then
-cat <<EOF >&2
-
-WARNING: This run will not be processed because current make-read-filter
-utility does not support vdbcache files. Please, try to DELITE that run
-later, when new version of make-read-filter utility will be available.
-Thank You for understanding.
-
-EOF
-
-            dpec__ 80; err_exit can not process VDBCACHE file \'$TOUTF\'
-        fi
-    fi
-
     info_msg Read `stat --format="%s" $TOUTF` bytes to \'$TARGET_DIR/$TOUTF\'
 
     dpec__ 61; exec_cmd_exit ln -s $TOUTF $ORIG_KAR_FILE
@@ -772,6 +768,14 @@ EOF
 
     dpec__ 62; exec_cmd_exit $ICMD --extract $ORIG_KAR_FILE --directory $DATABASE_DIR
 
+    TOUTC=${TOUTF}.vdbcache
+    if [ -f "$TOUTC" ]
+    then
+        info_msg "Found .VDBCACHE file"
+        dpec__ 61; exec_cmd_exit ln -s $TOUTC $ORIG_CACHE_FILE
+        dpec__ 62; exec_cmd_exit $ICMD --extract $ORIG_CACHE_FILE --directory $DATABASE_CACHE_DIR
+    fi
+
     ## Checking if it is colorspace run
     check_rejected_run_exit
 
@@ -797,11 +801,25 @@ check_ready_for_delite ()
         dpec__ 105; err_exit can not stat directory \'$SCHEMA_VAL\'
     fi
 
+    SCHEMA_DIR=`cd $SCHEMA_VAL; pwd`
+    if [ -z "$SCHEMA_DIR" ]
+    then
+        dpec__ 105; err_exit can not resolve directory \'$SCHEMA_VAL\'
+    fi
+
     if [ ! -d "$DATABASE_DIR" ]
     then
         dpec__ 105; err_exit can not stat database \'$DATABASE_DIR\'
     fi
 
+    if [ -e "$ORIG_CACHE_FILE" ]
+    then
+        if [ ! -d "$DATABASE_CACHE_DIR" ]
+        then
+            dpec__ 105; err_exit can not stat database VDBCACHE \'$DATABASE_CACHE_DIR\'
+        fi
+    fi
+
     if [ ! -f "$STATUS_FILE" ]
     then
         dpec__ 105; err_exit can not stat status file
@@ -993,7 +1011,7 @@ modify_object ()
     if [ -n "$NEW_SCHEMA" ]
     then
         info_msg subst $OLD_SCHEMA to $NEW_SCHEMA
-        dpec__ 63; exec_cmd_exit $KARMETA_BIN --spath $SCHEMA_VAL --updschema schema=\'$NEW_SCHEMA\' $M2D
+        dpec__ 63; exec_cmd_exit $KARMETA_BIN --spath $SCHEMA_DIR --updschema schema=\'$NEW_SCHEMA\' $M2D
     else
         warn_msg no subst found for $OLD_SCHEMA
     fi
@@ -1225,22 +1243,14 @@ check_read_and_quality_len ()
 
 test_kar ()
 {
-    F2T=$1
-
-    if [ -n "$SKIPTEST_VAL" ]
-    then
-        warn_msg skipping tests for \'$F2T\' ...
-        return
-    fi
-
-    check_read_and_quality_len $F2T
+    check_read_and_quality_len $NEW_KAR_FILE
 
     if [ ! -f $ORIG_KAR_FILE ]
     then
-        dpec__ 105; err_exit SKIPPING DIFF TESTS for \'$F2T\', can not stat original KAR file \'$ORIG_KAR_FILE\'
+        dpec__ 105; err_exit SKIPPING DIFF TESTS for \'$NEW_KAR_FILE\', can not stat original KAR file \'$ORIG_KAR_FILE\'
     fi
 
-    exec_cmd $VDBVALIDATE_BIN -x $F2T
+    exec_cmd $VDBVALIDATE_BIN -x $NEW_KAR_FILE
     if [ $? -ne 0 ]
     then
         warn_msg vdb-validate step failed, checking original KAR file
@@ -1253,7 +1263,7 @@ test_kar ()
         fi
     fi
 
-    TCMD="$VDBDIFF_BIN $ORIG_KAR_FILE $F2T -i"
+    TCMD="$VDBDIFF_BIN $ORIG_KAR_FILE $NEW_KAR_FILE -c -i"
 
     TDC="$DIFFEXCLUDE"
 
@@ -1288,18 +1298,32 @@ test_kar ()
     dpec__ 68; exec_cmd_exit $TCMD
 }
 
-kar_new ()
+check_force_remove_old_kar ()
 {
-    if [ -f "$NEW_KAR_FILE" ]
+    F2R=$1
+    MSS=$2
+
+    if [ -z "$MSS" ]
+    then
+        MSS="KAR"
+    fi
+
+    if [ -f "$F2R" ]
     then
         if [ -n "$FORCE_VAL" ]
         then
-            info_msg forcing to remove odl KAR file \'$NEW_KAR_FILE\'
-            dpec__ 107; exec_cmd_exit rm $NEW_KAR_FILE
+            info_msg forcing to remove old $MSS file \'$F2R\'
+            dpec__ 107; exec_cmd_exit rm "$F2R"
         else
-            dpec__ 106; err_exit old KAR file found \'$NEW_KAR_FILE\'
+            dpec__ 106; err_exit old $MSS file found \'$F2R\'
         fi
     fi
+}
+
+kar_new ()
+{
+    check_force_remove_old_kar $NEW_KAR_FILE KAR
+    check_force_remove_old_kar $NEW_CACHE_FILE .VDBCACHE
 
     TCMD="$KAR_BIN"
     if [ -n "$FORCE_VAL" ]
@@ -1319,7 +1343,20 @@ kar_new ()
 
     dpec__ 62; exec_cmd_exit $TCMD
 
-    test_kar $NEW_KAR_FILE
+    if [ -d "$DATABASE_CACHE_DIR" ]
+    then
+        info_msg "Creating .VDBCACHE file"
+
+        TCMD="$KAR_BIN"
+        if [ -n "$FORCE_VAL" ]
+        then
+            TCMD="$TCMD -f"
+        fi
+
+        TCMD="$TCMD --create $NEW_CACHE_FILE --directory $DATABASE_CACHE_DIR"
+
+        dpec__ 62; exec_cmd_exit $TCMD
+    fi
 }
 
 print_stats ()
@@ -1364,12 +1401,86 @@ export_proc ()
     ## writing delited kar archive
     kar_new
 
+
+    if [ -n "$SKIPTEST_VAL" ]
+    then
+        warn_msg skipping tests for \'$NEW_KAR_FILE\' ...
+        return
+    else
+        test_kar
+    fi
+
     ## just printing stats
     print_stats
 
     info_msg "DONE"
 }
 
+###############################################################################################
+###############################################################################################
+###<<>>### Test
+##############################################################################################
+check_ready_for_test ()
+{
+    if [ ! -f "$STATUS_FILE" ]
+    then
+        dpec__ 105; err_exit can not stat status file
+    fi
+
+    TVAR=`grep "$DELITED_TAG" $STATUS_FILE 2>/dev/null`
+    if [ -z "$TVAR" ]
+    then
+        dpec__ 86; err_exit status shows that object was not delited yet
+    fi
+
+    if [ ! -e "$ORIG_KAR_FILE" ]
+    then
+        dpec__ 105; err_exit can not stat original KAR file \'$ORIG_KAR_FILE\'
+    fi
+
+    if [ ! -f "$NEW_KAR_FILE" ]
+    then
+        dpec__ 105; err_exit can not stat delited KAR file \'$NEW_KAR_FILE\'
+    fi
+
+    TVAR=`$KARMETA_BIN --info SOFTWARE/delite $NEW_KAR_FILE 2>/dev/null`
+    if [ -z "$TVAR" ]
+    then
+        dpec__ 86; err_exit object was not delited yet
+    fi
+
+    if [ -h "$ORIG_CACHE_FILE" ]
+    then
+        if [ ! -e "$ORIG_CACHE_FILE" ]
+        then
+            dpec__ 105; err_exit can not stat .VDBCACHE for original KAR file \'$ORIG_CACHE_FILE\'
+        fi
+
+        if [ ! -f "$NEW_CACHE_FILE" ]
+        then
+            dpec__ 105; err_exit can not stat .VDBCACHE for delited KAR file \'$NEW_CACHE_FILE\'
+        fi
+    fi
+}
+
+test_proc ()
+{
+    ## checking if it was delited
+    check_ready_for_test
+
+    info_msg Changing directory to \'$TARGET_DIR\'
+    cd $TARGET_DIR
+
+    if [ ! -f "$VDBCFG_NAME" ]
+    then
+        dpec__ 105; err_exit can not stat file \'$VDBCFG_FILE\'
+    fi
+
+    test_kar
+
+    info_msg "DONE"
+}
+
 ###############################################################################################
 ###############################################################################################
 ###<<>>### Status
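
Taken together, the renames in set_resolve_set_dir_values give the target
directory this layout (names per the hunk above; the .vch entries appear only
when the run has a .vdbcache):

    TARGET/
        in          symlink to the downloaded KAR archive (was orig.kar)
        in.vch      symlink to its .vdbcache, when present
        work/       unpacked database (was orig/)
        work.vch/   unpacked .vdbcache
        out         delited KAR archive (was new.kar)
        out.vch     repacked .vdbcache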



View it on GitLab: https://salsa.debian.org/med-team/sra-sdk/-/commit/6461e62e68fb5d8dcde3de71bb531257330decb2
