#!/bin/bash

###################################################################################
# do_regtest some info [Joost VandeVondele, 2005-02-25]
# Major revision of the script [Marko Misic, PRACE, July - August 2013]
#
# Regression testing cp2k ...
#    - maintain code quality
#    - helps rapid development and refactoring
#
# What does a regtest do
#    - automatically update cp2k with the current SVN version (including new tests)
#    - performs a realclean build of the source
#    - executes a list of tests
#    - compares the results (outputs) with those of the last known result (reference)
#    - produces a summary
#
# How to set up a regtest
#    - you must be able to build and run cp2k on the given machine, the rest should be 'easy'
#    - decide on a directory for doing the regtest, there will be plenty of files in this dir
#      (after a while) so make it something like $HOME/rt
#    - checkout a version of cp2k into $HOME/rt.
#      If you set up your svn account so that you don't need to provide
#      a passwd for doing svn update, things will be more convenient later on.
#    - set up the arch files so that you can cleanly build cp2k (test this)
#    - cp $HOME/rt/cp2k/tools/do_regtest $HOME/rt
#    - modify the do_regtest script to match your local environment (e.g. specify
#      if you wish a sdbg or a sopt build, which compiler, gmake, ....) see below
#    - execute './do_regtest' regularly (i.e. after comitting new code)
#
# Interpretation of the results
#  test can be:
#    - 'OK' if the results match those of a previous run precisely. The execution time is also given.
#    - 'NEW' if they have not been executed previously. The reference result is generated
#      automatically in this run. Tests can also be 'NEW' if they have been reset, i.e. been newly
#      added to the TEST_FILES_RESET files.
#    - 'RUNTIME FAILURE' if they stopped unexpectedly (e.g. core dump, or stop)
#    - 'WRONG RESULT' if they produce a result that deviates (even a tiny bit) from an old reference
#  the last two options generally mean that a bug has been introduced, which requires investigation.
#  since regtesting only yields information relative to a previously known result, it is most useful
#  to do a regtest before and after you make changes. To allow per-test numerical difference higher
#  than that set as a default, add a third column in the appropriate TEST_FILES file with a relative value
#  of the difference.
#
# Adding/resetting/creating tests to the testsuite
#  this is fully controlled by the following files in the cp2k/tests directories
#  -TEST_DIRS  : is just a list of directories that contain tests. You can add your directory here.
#  -TEST_FILES : the list of input files that need to be executed. You can add your file name here.
#                adding a comment about what it tests might help later debugging problems if a regtest
#                fails
#  -TEST_FILES_RESET : you can add files for which the reference output became invalid (e.g. bug fix)
#                      to this list of files. However be absolutely sure that the change is due to
#                      a bug fix, do not reset tests that fail because of unclear reasons. Try to add
#                      a comment to the svn message and/or the file itself
#  -TEST_TYPES : this file allows you to create a new test type. I.e. to specify for which words should
#                be grepped and what field should be used in the numerical comparison.
#
# Command line switches to the do_regtest script (also configurable from within the script)
#  -nosvn   : do not access the SVN for any updating, makes regtesting fully local
#  -quick   : rebuild the code if needed, but do not perform a realclean before (noquick is not needed anymore)
#  -noreset : do not reset the reference outputs automatically
#  -svndate string : specify any string to svn update (most likely used as "2005-02-17")
#  -skipdir string : this switch can repeat, exclude certain dirs from regtesting, useful to
#                    speed-up regtesting after very localised changes (e.g. -skipdir QS/regtest)
#  -restrictdir string : this switch can repeat, restrict regtesting to certain dirs, useful to
#                        speed-up regtesting after very localised changes (e.g. -restrictdir QS/regtest)
#  -config string : loads a site/compiler/environment specific configuration
#  -retest : regtesting will be restricted only to those directories that contained failed tests
#            in the previous run
#  -noemptycheck : use this to force regression testing even if no changes exist in SVN
#  -nobuild : do not build cp2k, but use one built by the user (turns on quick option automatically)
#
# Possible exit statuses:
# -  end_test 1   : problem with svn update
# -  end_test 100 : no svn changes since last run - clean exit without testing
# -  end_test 3   : problem with realclean
# -  end_test 4   : build errors
# -  end_test 5   : problem with retest option - no TEST directory with latest test results found
# -  end_test 6   : problem with retest option - no error summary exists in the last TEST directory
# -  end_test 7   : reference directory is locked
# -  end_test 0   : clean exit with testing
#
# Script configuration. The value of the follow variables can be redefined, see below
#   dir_base, FORT_C_NAME, cp2k_version, dir_triplet, cp2k_dir, cp2k_prefix, cp2k_postfix,
#   make, awk, datum_full, datum_short, default_err_tolerance
#   nosvn, quick, svndate, noreset, ndirstoskip, skip_dirs, restrict_dirs, ndirstorestrict, doretest
#
########################################################################################################
#
# THESE VARIABLES WILL NEED CHANGING FOR YOUR LOCAL CONFIGURATION
#
# - dir_base: the base directory for testing (e.g. $HOME/rt)
# - FORT_C_NAME: compiler selection (e.g. intel)
# - cp2k_version: sopt,sdbg,popt....
# - dir_triplet: the result of tools/get_arch_name, where the executable can be found
# - cp2k_dir: normally cp2k
# - maxtasks: how many instances of cp2k should run simultaneously (~> #CPUs)
# - emptycheck: useful for automatic testers, no testing if nothing changed in SVN (YES|NO)
# - leakcheck: if using g95, set this variable to "YES" in order to get memory leak checking
#
# The following variable definitions can now be loaded from a
# site-specific configuration using the -config option. Create one
# configuration file for each architecture/compiler using these as a
# template (and don't forget to include a modified cp2k_prefix for MPI
# runs as well!).
export LC_ALL=C
export FORT_C_NAME=gfortran
# Might consider following export to be done, too
export MPI_F90=mpif90
dir_base=$PWD
cp2k_version=sopt
dir_triplet=Linux-x86-64-${FORT_C_NAME}
export ARCH=${dir_triplet}
cp2k_dir=cp2k
maxtasks=1
numprocs=1
export OMP_NUM_THREADS=1
emptycheck="NO"
leakcheck="NO"

# This is the default relative tolerance used in the numerical regression comparisons
default_err_tolerance="1.0E-14"

#
# The following variables typically need no changes on a Linux machine, but might need changes on
# another OS
#

# *** how to execute an input file [ cp2k_prefix input cp2k_postfix ]
# Leave empty for serial, uncomment for parallel
cp2k_run_prefix=""
#cp2k_run_prefix="mpiexec -np ${numprocs}"
cp2k_run_postfix=""
#cp2k_prefix="poe ${dir_base}/${cp2k_dir}/exe/${dir_triplet}/cp2k.${cp2k_version}"
#cp2k_postfix="-llfile ${dir_base}/llfile"

# *** make and awk
make=make
#make=gmake
awk=awk
#awk=nawk

# *** a short and a long version of the date, in a format that SVN understands
datum_full=$(date --iso-8601="seconds")
datum_short=$(date '+%F_%H-%M-%S')

# *** default settings for command line switches
quick="noquick"
noreset="reset"
ndirstoskip=0
skip_dirs[1]=""
ndirstorestrict=0
restrict_dirs[1]=""
doretest="no"
nosvn="svn"
nobuild="build"
svndate="$datum_full"
###################################################################################
#
# From here on no changes to the script should be needed
#
###################################################################################
#
# command line argument passing
#
###################################################################################

# wall-clock start of the whole run, reported by end_test at the end
full_time_t1=$(date +%s)

# parse the command line switches documented in the header above
while [ $# -ge 1 ]; do
 case $1 in
 # build a list of directories to skip
 -skipdir)
  ndirstoskip=$((ndirstoskip+1))
  skip_dirs[ndirstoskip]=$2
  shift;;
 # build a list of directories to restrict, i.e. only matching dirs will be run
 -restrictdir)
  ndirstorestrict=$((ndirstorestrict+1))
  restrict_dirs[ndirstorestrict]=$2
  shift;;
 # enable partial testing of only those directories that contain failed tests
 -retest)
  doretest="yes";;
 # load system-specific configuration
 -c|-config)
  [[ -f $2 ]] || { echo "ERROR: Configuration file $2 not found"; exit 1; }
  source "$2" || { echo "ERROR: Sourcing of the configuration file $2 failed" ; exit 1; }
  shift;;
 # do not update the SVN
 -nosvn)
  nosvn="nosvn";;
 # do not reset reference outputs
 -noreset)
  noreset="noreset";;
 # do not do a realclean before building
 -quick)
  quick="quick";;
 # specify the full string "-D 2005-02-01" to get a check-out of a specific date
 -svndate)
  svndate=$2
  datum_short=${svndate}
  shift;;
 # do not check if code has changed
 -noemptycheck)
  emptycheck="NO";;
 # do not build cp2k
 # turns on automatically quick option, so that do_regtest does not do realclean
 -nobuild)
  quick="quick"; nobuild="nobuild";;
 # stop on invalid flag
 -*)
  echo "ERROR: Invalid command line flag $1 found"
  exit 1;;
 # Default case
 *)
  echo "ERROR: Unknown command line string $1 found"
  exit 1;;
 esac
 shift
done

# pass ARCH to make only when it is actually set (possibly by a -config file)
if [ "${ARCH}" ]; then
    ARCH_SPEC="ARCH=${ARCH}"
fi

# *** how to execute an input file [ cp2k_prefix input cp2k_postfix ]
# Unless cp2k_prefix is overridden in the sourced -config file, the
# default is used.
cp2k_prefix=${cp2k_prefix:-"${cp2k_run_prefix} ${dir_base}/${cp2k_dir}/exe/${dir_triplet}/cp2k.${cp2k_version}"}
cp2k_postfix=${cp2k_postfix:-"${cp2k_run_postfix}"}

# allow the config file to set the maximum allowed time. Useful for valgrind runs
job_max_time=${job_max_time:-"600"}

###################################################################################
#
# set up the initial directory structures
#
###################################################################################
# fixed input/bookkeeping locations for this run
test_types_file=${dir_base}/${cp2k_dir}/tests/TEST_TYPES
dir_last=${dir_base}/LAST-${dir_triplet}-${cp2k_version}
dir_out=${dir_base}/TEST-${dir_triplet}-${cp2k_version}-${datum_short}

# ChangeLog bookkeeping files, for the src tree and the tests tree
changelog=${dir_last}/ChangeLog
changelog_diff=${changelog}.diff
changelog_new=${changelog}.new
changelog_old=${changelog}.old
changelog_tests=${changelog}-tests
changelog_tests_diff=${changelog_tests}.diff
changelog_tests_new=${changelog_tests}.new
changelog_tests_old=${changelog_tests}.old

# create the per-run output directory and the reference directory
mkdir -p ${dir_out} ${dir_last}

# log files collected during this run
svn_out=${dir_out}/svn.out
make_out=${dir_out}/make.out

# start the error and memory summaries from scratch
error_description_file=${dir_out}/error_summary
memory_description_file=${dir_out}/memory_summary
: >${error_description_file}
: >${memory_description_file}

summary=${dir_out}/summary.txt
printf "Summary of the regression tester run from ${datum_short} using ${FORT_C_NAME}\n" >${summary}

###################################################################################
#
# simple function to end the tests all in the same way
#
###################################################################################
function end_test() {
# Finish the regtest run: report total wall time, remove the lockfile
# (when one was passed) and exit the script.
#   $1 : exit status for the whole script (see header for the meaning)
#   $2 : (optional) path of the lockfile to remove
full_time_t2=$(date +%s)
# integer seconds; the +1 avoids reporting 0 for very short runs
# (shell arithmetic replaces the former external "bc" dependency)
full_timing_all=$((1 + full_time_t2 - full_time_t1))
# Remove lockfile
if [[ -n $2 ]]; then
   [[ -f $2 ]] && rm "$2"
fi
echo "--------------------------------------------------------------------------"
printf "Regtest took %0.2f seconds.\n" ${full_timing_all}
echo "--------------------------------------------------------------------------"
date
echo "*************************** testing ended ********************************"
exit $1
}

###################################################################################
#
# function to grep for changes in the output. Takes six arguments
#
###################################################################################
function do_test_grep(){
# Compare one grepped numerical value between a new and a reference output.
#   $1 : new output file
#   $2 : reference (old) output file
#   $3 : file the error description is appended to on mismatch
#   $4 : string to grep for (the LAST matching line in each file is used)
#   $5 : field (column) of the matching line holding the numeric value
#   $6 : maximum allowed relative error
# Returns 0 if the values agree within the tolerance, 1 otherwise.
 output_new=$1
 output_old=$2
 error_file=$3
 grep_string=$4
 grep_field=$5
 error_tolerance=$6
 e1=$(grep -a "${grep_string}" "${output_old}" | tail -1 | ${awk} -v f="${grep_field}" '{print $f}')
 e2=$(grep -a "${grep_string}" "${output_new}" | tail -1 | ${awk} -v f="${grep_field}" '{print $f}')
 # relative error |(old-new)/new| (absolute difference if new == 0);
 # awk prints "0" when within tolerance, the formatted error otherwise
 big=$(echo "${e1} ${e2} ${error_tolerance}" | ${awk} '{if($2==0){v=sqrt(($1-$2)^2)}else{v=sqrt((($1-$2)/$2)^2)}; if (v>$3) printf("%16.8e",v); else printf("0") ;}')
 case ${big} in
 0)
  # ok, same energy
  return 0 ;;
 *)
  # nope too large
  echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>"${error_file}"
  echo "${output_new} : " >> "${error_file}"
  echo " ${grep_string} : old = ${e1} new = ${e2}  " >> "${error_file}"
  echo " relative error : ${big} >  numerical tolerance = ${error_tolerance}  " >>"${error_file}"
  echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>"${error_file}"
  return 1 ;;
 esac
}

###################################################################################
#
# function to select which test to run
#
###################################################################################
function do_test() {
# Dispatch one test type for a pair of output files.
#   $1 : test type, an index into test_grep[]/test_col[] (0 = no numeric check)
#   $2 : numerical tolerance passed through to do_test_grep
#   $3 : new output file
#   $4 : reference (old) output file
#   $5 : error description file
# Returns the status of the underlying comparison (0 = pass).
 which_test=$1
 num_tolerance=$2
 output_new=$3
 output_old=$4
 error_file=$5
 case ${which_test} in
 0)
   # test type 0: the run only has to finish, no numerical comparison
   return 0;;
 *)
   # all other types compare the value configured in TEST_TYPES
   do_test_grep "${output_new}" "${output_old}" "${error_file}" "${test_grep[which_test]}" "${test_col[which_test]}" "${num_tolerance}"
   return $? ;;
 esac
}

# Check for lockfile: refuse to run two regtests against the same reference dir
lockfile=${dir_last}/LOCKFILE
if [[ ! -f ${lockfile} ]]; then
   # no lock present - claim the reference directory for this run
   touch ${lockfile}
else
   echo "ERROR: Directory ${dir_last} is locked"
   echo "       Check if a regtest is already running for the same directory and if not then"
   echo "       remove the lockfile ${lockfile}"
   echo "       first and retry"
   rm -rf ${dir_out}
   end_test 7
fi

# Start testing
echo "*************************** testing started ******************************"
echo " started on " `date`
echo " checking version ${svndate} "
echo " configuration: ${dir_triplet}-${cp2k_version} "
echo "--------------------------- SVN ------------------------------------------"

if [[ ${nosvn} != "nosvn" ]]; then

  #################################################################################
  # Update one subtree of the checkout via svn and refresh its GNU-style
  # ChangeLog (generated with svn2cl), printing a diff of the new entries.
  # The src and tests trees previously duplicated this logic verbatim.
  #   $1 : subdirectory of ${cp2k_dir} to update (src | tests)
  #   $2 : label used in the "Creating ..." message (ChangeLog | ChangeLog-tests)
  #   $3 : path of the ChangeLog file to maintain (.diff/.new/.old are derived)
  # Exits the whole regtest (end_test 1) if the svn update fails.
  #################################################################################
  function svn_update_tree() {
    local subdir=$1
    local label=$2
    local log=$3
    local log_diff=${log}.diff
    local log_new=${log}.new
    local log_old=${log}.old
    local line1 nline

    cd ${dir_base}/${cp2k_dir}/${subdir}

    # If the ChangeLog does not exist yet, it is created from the full history
    if [[ ! -s ${log} ]]; then
       echo "Creating ${label} file for the first time. This may take a while ..."
       ${dir_base}/${cp2k_dir}/tools/svn2cl/svn2cl.sh -i -o ${log}
    fi

    svn update -r {${svndate}} &>${svn_out}
    if (( $? )); then
       echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
       cat ${svn_out} >>${error_description_file}
       echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
       echo "ERROR: svn update of cp2k/${subdir} failed ... bailing out" >>${error_description_file}
       cat "${error_description_file}"
       end_test 1 ${lockfile}
    fi

    echo "--- svn update ${subdir} ---"
    cat ${svn_out}
    echo "svn update ${subdir} went fine"

    # Using svn2cl.pl to generate a GNU-like ChangeLog of the recent commits
    ${dir_base}/${cp2k_dir}/tools/svn2cl/svn2cl.sh --limit 100 -i -o ${log_new} >>${error_description_file} 2>&1

    # Prepend only the entries newer than the current first line of the log.
    # -F: match the first line literally, not as a regular expression.
    line1="$(head -n 1 ${log})"
    nline=$(grep -n -F "${line1}" ${log_new} | head -n 1 | cut -f 1 -d:)
    # if the old first line is not found, prepend nothing (an empty nline would
    # otherwise turn "head -n $((nline - 1))" into GNU "head -n -1")
    [[ -z ${nline} ]] && nline=1
    mv ${log} ${log_old}
    head -n $((nline - 1)) ${log_new} >${log}
    cat ${log_old} >>${log}

    diff ${log} ${log_old} >${log_diff}
    echo "------- differences --------" >>${log_diff}

    rm ${log_new} ${log_old}

    echo "---  ChangeLog diff ${subdir}  ---"
    cat ${log_diff}
    echo "----------------------------"
  }

  # SVN update src, then tests
  svn_update_tree src   ChangeLog       ${changelog}
  svn_update_tree tests ChangeLog-tests ${changelog_tests}

else

  echo "No SVN updating"

fi

# Check if there is any update or difference that thus requires a rerun
# one day, this requires improvement for speed.
if [[ ${emptycheck} == "YES" ]]; then
   # "nl FILE | awk '{print $1}'" emits the number of each non-blank line; the
   # comparison below matches the single string "1" only when the ChangeLog
   # diff contains exactly one line - the "------- differences --------"
   # footer appended above - i.e. no new entries since the last run.
   isempty_1=`nl ${changelog_diff} | awk '{print $1}'`
   isempty_2=`nl ${changelog_tests_diff} | awk '{print $1}'`

   if [[ ${isempty_1} == "1" && ${isempty_2} == "1" ]]; then
      echo "No changes since last run -- clean exit without testing"

      # cleanup of empty directories
      rm -rf ${dir_out}
      end_test 100 ${lockfile}
   else
      echo "Code has changed since last run -- continue regtest"
   fi
fi

# make realclean, unless the user asked for a quick (incremental) rebuild
if [[ ${quick} == "quick" ]]; then
   echo "Quick testing, no realclean"
else
   cd ${dir_base}/${cp2k_dir}/makefiles
   if ${make} realclean ${ARCH_SPEC} VERSION=${cp2k_version} >>${make_out} 2>&1; then
      echo "${make} realclean ${ARCH_SPEC} VERSION=${cp2k_version} went fine"
   else
      # record the make output in the error summary and abort the run
      echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
      cat ${make_out} >>${error_description_file}
      echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
      echo "ERROR: ${make} realclean ${ARCH_SPEC} VERSION=${cp2k_version} failed ... bailing out" >>${error_description_file}
      cat "${error_description_file}"
      end_test 3 ${lockfile}
   fi
fi

# from here failures are likely to be bugs in cp2k
if [[ ${nobuild} != "nobuild" ]]; then
   echo "-------------------------compiling cp2k-----------------------------------"
   echo "${make} -j $((maxtasks)) ${ARCH_SPEC} VERSION=${cp2k_version}"
   echo "(make output is written to ${make_out})"
   cd ${dir_base}/${cp2k_dir}/makefiles
   ${make} -j $((maxtasks)) ${ARCH_SPEC} VERSION=${cp2k_version} >>${make_out} 2>&1
   if (( $? )); then
      # record the build log in the error summary and abort the run
      echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
      cat ${make_out} >>${error_description_file}
      echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
      echo "ERROR: ${make} -j $((maxtasks)) ${ARCH_SPEC} VERSION=${cp2k_version} failed" >>${error_description_file}
      cat "${error_description_file}"
      end_test 4 ${lockfile}
   else
      # count compiler warning lines (grep -c is equivalent to grep | wc -l)
      compile_warnings=$(grep -c "Warning" ${make_out})
      echo "${make} -j $((maxtasks)) ${ARCH_SPEC} VERSION=${cp2k_version} went fine (${compile_warnings} warnings)"
   fi
else
   echo "No build, continue regression testing"
fi

###################################################################################
#
# parse the TEST_TYPES file to do different kinds of test (done after svn update)
#
# tests grep for the last line in the file where a string matches (test_grep)
# and compares a numeric field at a given column (test_col)
#
# the format of the TEST_TYPES file is (notice the '!' as a field separator, to allow
# for spaces in the test_grep)
#
# Ntest_types
# test_grep_1 ! test_col_1
# test_grep_2 ! test_col_2
# ....
# followed by comment lines
#
###################################################################################
# first line of TEST_TYPES holds the number of defined test types;
# use the configurable ${awk} consistently (the first line previously
# hard-coded plain "awk")
Ntest_types=$(${awk} -v l=1 -v c=1 'BEGIN{FS="!"}{lr=lr+1;if (lr==l) print $c}' ${test_types_file})
# type 0 is the built-in "just ran to completion" test
test_grep[0]=""
test_col[0]=1
# type t is defined on line t+1 as "grep string ! column"
for ((t=1;t<=Ntest_types;t++)); do
  test_grep[t]=$(${awk} -v l=$t -v c=1 'BEGIN{FS="!"}{lr=lr+1;if (lr==l+1) print $c}' ${test_types_file})
  test_col[t]=$(${awk} -v l=$t -v c=2 'BEGIN{FS="!"}{lr=lr+1;if (lr==l+1) print $c}' ${test_types_file})
done

###################################################################################
#
# *** now start testing
# *** for a given directory we do a run on all files in TEST_FILES and
# *** do the test as indicated by the number
# *** files are run in order so that they can e.g. restart
#
###################################################################################
# global result counters (re-initialised per directory inside the workers)
n_runtime_error=0
n_wrong_results=0
n_correct=0
n_tests=0
n_new=0

printf "Copying tests into working directory ... "

# Copy the tests into the working regtest directory.
# Hard-linked copies are tried first (fast and space-saving);
# only when both variants fail is a plain recursive copy used.
if ! cp -al ${dir_base}/${cp2k_dir}/tests ${dir_out}/tests; then
  if ! cp -rpl ${dir_base}/${cp2k_dir}/tests ${dir_out}/tests; then
    (
      echo "Could not copy testing directory as hard links."
      echo "Using normal copy instead."
      sleep 10
      cp -rp ${dir_base}/${cp2k_dir}/tests/* ${dir_out}/tests
    )
  fi
fi

printf "done!\n"

#
# preprocess the restrict directories if the retest is enabled
#
last_test_dir=""
retest_dirs=""
if [[ $doretest == "yes" ]];
then
  # second-newest TEST-* directory: the newest entry is the directory just
  # created for the current run, so the previous results are one entry earlier
  last_test_dir=`ls ${dir_base} | grep ^TEST-${dir_triplet}-${cp2k_version}- | tail -2 | head -1`
  if [[ "${dir_out}" == "${dir_base}/${last_test_dir}" ]]
  then
    # only the current run's directory exists - nothing to compare against
    echo "No TEST directory with latest test results found. Nothing to test!"
    end_test 5 ${lockfile}
  elif [[ ! -f "${dir_base}/${last_test_dir}/error_summary" ]]
  then
    echo "No error summary exists in the last TEST directory. Nothing to test!"
    end_test 6 ${lockfile}
  else
    # for every failed test (.inp.out path in the old error summary) extract
    # the path components after the component matching /TEST/, then strip the
    # trailing slash and de-duplicate while keeping the first occurrence
    retest_dirs=`grep .inp.out ${dir_base}/${last_test_dir}/error_summary | awk -F/ '{ test = 0; for (i = 1; i < NF; i++) { if (test == 1) { printf "%s/", $i } if ($i ~ /TEST/) test = 1; } printf "\n" }' | sort | sed 's/\/$//' | awk '!x[$0]++'`
    # feed the failing directories into the regular restrict mechanism
    for t in $retest_dirs
    do
      let ndirstorestrict=ndirstorestrict+1;
      restrict_dirs[ndirstorestrict]=$t;
    done
    if (( ndirstorestrict == 0 )); then
       echo "No error occurred during the last run. Nothing to retest!"
       end_test 0 ${lockfile}
    fi
  fi
fi

#
# get a list of directories to be tested, taking care of the exclusions
#
dirs=$(grep -v "#" ${dir_out}/tests/TEST_DIRS)
# accumulator for the surviving directories
# (fixes the former "newdirs" typo that never matched the variable used below)
new_dirs=""
for dir in ${dirs}
do
  # Match to exclusion list
  match="no"
  for ((t=1;t<=ndirstoskip;t++)); do
     if [[ "${skip_dirs[t]}" == "${dir}" ]]; then
        match="yes"
     fi
  done
  # Match to the restrict list, if no restrict list is found, all dirs match
  if [ ${ndirstorestrict} -gt 0 ]; then
     restrictmatch="no"
     for ((t=1;t<=ndirstorestrict;t++)); do
        if [[ "${restrict_dirs[t]}" == "${dir}" ]]; then
           restrictmatch="yes"
        fi
     done
  else
    restrictmatch="yes"
  fi

  # If not excluded add to list of dirs
  if [[ "${match}" == "no" && "${restrictmatch}" == "yes" ]]; then
     new_dirs="$new_dirs $dir"
  fi
done
dirs=$new_dirs

#
# execute all regtests
#

# Just to be sure, clean possible existing status files.
cd ${dir_out}
mkdir ${dir_out}/status
# brace expansion generates the same three glob patterns as before
rm -f ${dir_out}/status/REGTEST_{RUNNING,TASK_RESULT,TASK_TESTS}-*

# total number of test directories and a running index for progress messages
(( ndir = $(echo ${dirs} | wc -w) ))
(( idir = 0 ))
for dir in ${dirs}; do
 (( idir++ ))
 #
 # tests in different dirs can run in parallel. We spawn processes up to a given maximum
 #
 # task name for the status files: the directory path with '/' flattened to '-'
 task=${dir//\//-}
 (
  # this whole subshell runs in the background (note the '&' after it); results
  # are handed back to the parent shell via files in ${dir_out}/status
  touch ${dir_out}/status/REGTEST_RUNNING-$task
  # per-directory counters; they shadow the globals inside this subshell and
  # are written to the REGTEST_TASK_RESULT file at the end
  n_runtime_error=0
  n_wrong_results=0
  n_correct=0
  n_tests=0
  n_new=0

  cd ${dir_out}/tests/${dir}
  mkdir -p ${dir_out}/${dir}
  mkdir -p ${dir_last}/${dir}
  touch ${dir_last}/${dir}/TEST_FILES_RESET

  #
  # first reset reference outputs that have become out-dated since the last run
  #
  if [[ ${noreset} != "noreset" ]]; then
     # lines starting with '<' in the diff are entries added to TEST_FILES_RESET
     # since the copy saved in ${dir_last} during the previous run
     diff TEST_FILES_RESET ${dir_last}/${dir}/TEST_FILES_RESET > ${dir_out}/${dir}/TEST_FILES_RESET.diff
     cp TEST_FILES_RESET ${dir_last}/${dir}/TEST_FILES_RESET
     nreset=`grep '<' ${dir_out}/${dir}/TEST_FILES_RESET.diff | grep -v '#' |  ${awk} '{c=c+1}END{print c}'`
     for ((itest=1;itest<=nreset;itest++));
     do
        # delete the stale reference output so the test is treated as NEW below
        reset_file=`grep '<' ${dir_out}/${dir}/TEST_FILES_RESET.diff | grep -v '#' | ${awk} -v itest=$itest '{c=c+1;if (c==itest) print $2}'`
        rm -f ${dir_last}/${dir}/${reset_file}.out
     done
  fi
  #
  # run the tests now
  #
  echo "Starting tests in ${dir_out}/tests/${dir} (${idir} of ${ndir})"
  echo ">>> ${dir_out}/tests/${dir}" > ${dir_out}/status/REGTEST_TASK_TESTS-$task
  ntest=`grep -v "#" TEST_FILES | ${awk} '{c=c+1}END{print c}'`
  t1=`date +%s`
  for ((itest=1;itest<=ntest;itest++));
  do
     n_tests=$((n_tests+1))
     this_test=""
     # TEST_FILES columns: input file name, test type, optional tolerance
     input_file=`grep -v "#" TEST_FILES | ${awk} -v itest=$itest '{c=c+1;if (c==itest) print $1}'`
     # just one test right now, but this should generalize
     test_types=`grep -v "#" TEST_FILES | ${awk} -v itest=$itest '{c=c+1;if (c==itest) print $2}'`
     # third field allows numerical tolerances to be read from the TEST_FILES
     # if value does not exist set to the default of 1.0E-14
     test_tolerance=`grep -v "#" TEST_FILES | ${awk} -v itest=$itest -v def_err_tol=$default_err_tolerance '{c=c+1;if (c==itest) if (NF == 3) { print $3 } else { print def_err_tol } }'`

     output_file=${dir_out}/${dir}/${input_file}.out
     output_last=${dir_last}/${dir}/${input_file}.out
     # run cp2k with a CPU-time limit of ${job_max_time} seconds
     ( ulimit -t ${job_max_time} ; ${cp2k_prefix} ${input_file} ${cp2k_postfix} &> ${output_file} ) >& /dev/null
     (( cp2k_exit_status = $? ))
     if (( cp2k_exit_status )); then
        # CP2K failed obviously
        if (( cp2k_exit_status == 137 )); then
           # SIGKILL = 9 ... exit code 9+128
           # usually caused by time-out
           this_test="KILLED"
        else
           this_test="RUNTIME FAIL"
        fi
        #
        echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
        echo ${output_file} >>${error_description_file}
        tail -40 ${output_file} >>${error_description_file}
        echo "EXIT CODE: " $cp2k_exit_status " MEANING: " $this_test >>${error_description_file}
        echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
        n_runtime_error=$((n_runtime_error+1))
        # NOTE(review): failed_tests is only visible inside this background
        # subshell and is not read after it exits - confirm this is intended
        failed_tests="${failed_tests} ${output_file}"
     else
        # ran but did not end !?
        grep -a "ENDED" ${output_file} &> /dev/null
        if (( $? )); then
           echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
           echo ${output_file} >>${error_description_file}
           tail -40 ${output_file} >>${error_description_file}
           echo "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" >>${error_description_file}
           this_test="RUNTIME FAIL"
           n_runtime_error=$((n_runtime_error+1))
           failed_tests="${failed_tests} ${output_file}"
        else
           # Still running, you must be joking...
           # see if we manage to pass the testing
           # but only if we can compare
           if [ -f ${output_last} ]; then
              for test_type in ${test_types};
              do
                 do_test ${test_type} ${test_tolerance} ${output_file} ${output_last} ${error_description_file}
                 if (( $? )); then
                    this_test="WRONG RESULT TEST ${test_type}"
                    n_wrong_results=$((n_wrong_results+1))
                    # No further testing
                    break;
                 else
                    n_correct=$((n_correct+1))
                    this_test="OK"
                 fi
              done
           else
              # no reference output yet: this run's output becomes the reference
              this_test="NEW"
              n_new=$((n_new+1))
           fi
        fi
     fi
     # Keep the output up-to-date
     timing=0
     case ${this_test} in
     "NEW" )
        cp ${output_file} ${output_last}
        # last field of the final "CP2K   " line is taken as the run time
        timing=`grep -a "CP2K   " ${output_file} | tail -n 1 | ${awk} '{printf("%7.2f",$NF)}'`
        this_test="${this_test} (${timing} sec)" ;;
     "OK" )
        timing=`grep -a "CP2K   " ${output_file} | tail -n 1 | ${awk} '{printf("%7.2f",$NF)}'`
        this_test="${this_test} (${timing} sec)" ;;
     esac
     # optional memory-leak detection on the test output
     leak_marker=""
     if [[ ${leakcheck} == "YES" ]]; then
        if [[ -n $(echo ${cp2k_prefix} | grep "valgrind") ]]; then
           # valgrind run: field 4 of its "ERROR SUMMARY:" line is the count
           leak_error_string="ERROR SUMMARY:"
           nerror=$(grep -i "${leak_error_string}" ${output_file} | awk '{print $4}')
           dum=""
           (( nerror > 0 )) && dum=${nerror}
        else
           # compiler-specific leak report marker in the output
           case ${FORT_C_NAME} in
              g95*     ) leak_error_string="Remaining memory";;
              gfortran*) leak_error_string="SUMMARY: LeakSanitizer:";;
           esac
           dum=`grep -l "${leak_error_string}" ${output_file}`
        fi
        if [[ ${dum} != "" ]]; then
           # flag the test with "!" and record the report in the memory summary
           leak_marker="!"
           echo "XXXXXXXX  ${output_file} XXXXXXX" >>${memory_description_file}
           grep -i "${leak_error_string}" ${output_file} >>${memory_description_file}
        fi
     fi
     printf "    %-50s %20s %s\n" "${input_file}" "${this_test}" "${leak_marker}" >> ${dir_out}/status/REGTEST_TASK_TESTS-$task
  done
  t2=`date +%s`
  timing_all=`echo "1+($t2)-($t1)" | bc -l`
  printf "%s %0.2f %s\n" "<<< ${dir_out}/tests/${dir} (${idir} of ${ndir}) done in" ${timing_all} "sec"  >> ${dir_out}/status/REGTEST_TASK_TESTS-$task
  # hand the counters back to the parent shell via the one-line result file:
  # "<failed> <wrong> <correct> <new> <total>"
  echo "${n_runtime_error} ${n_wrong_results} ${n_correct} ${n_new} ${n_tests}" > ${dir_out}/status/REGTEST_TASK_RESULT-$task
  cat ${dir_out}/status/REGTEST_TASK_TESTS-$task
  rm -f ${dir_out}/status/REGTEST_TASK_TESTS-$task ${dir_out}/status/REGTEST_RUNNING-$task
 )&

 #
 # Here we allow only a given maximum of tasks
 #
 # start high so the loop below always polls the RUNNING files at least once
 runningtasks=10000
 while (( runningtasks >= maxtasks/(numprocs*OMP_NUM_THREADS) )); do
    sleep 1
    runningtasks=`ls -1 ${dir_out}/status/REGTEST_RUNNING-* 2>/dev/null | wc -l`
 done

done

#
# wait for all tasks to finish
#
wait
#
# generate results: sum the per-directory counters written by the workers
#
for dir in ${dirs};
do
  task=${dir//\//-}
  file=${dir_out}/status/REGTEST_TASK_RESULT-$task
  # each result file holds one line: "<failed> <wrong> <correct> <new> <total>"
  # (a single read replaces the former five awk invocations per file)
  read -r tmp_err tmp_wrong tmp_ok tmp_new tmp_all < $file
  n_runtime_error=$((n_runtime_error+tmp_err))
  n_wrong_results=$((n_wrong_results+tmp_wrong))
  n_correct=$((n_correct+tmp_ok))
  n_new=$((n_new+tmp_new))
  n_tests=$((n_tests+tmp_all))
  rm -f $file
done

# print the collected error descriptions and the final per-category counts;
# everything after the Summary header is also appended to ${summary}
echo "--------------------------------------------------------------------------"
cat "${error_description_file}"
echo "--------------------------------- Summary --------------------------------"
printf "Number of FAILED  tests %d\n" ${n_runtime_error} | tee -a ${summary}
printf "Number of WRONG   tests %d\n" ${n_wrong_results} | tee -a ${summary}
printf "Number of CORRECT tests %d\n" ${n_correct}       | tee -a ${summary}
printf "Number of NEW     tests %d\n" ${n_new}           | tee -a ${summary}
printf "Total number of   tests %d\n" ${n_tests}         | tee -a ${summary}

if [[ ${leakcheck} == "YES" ]]; then
   echo "--------------------------------------------------------------------------" | tee -a ${summary}
   case ${FORT_C_NAME} in
      g95*     )
       # every non-header line of the memory summary is one leak report
       # (grep -vc is equivalent to grep -v | wc -l)
       n_leaks=$(grep -vc "XXXXXXXX" ${memory_description_file})
       ;;
      gfortran*)
       # one "XXXXXXXX" header per leaking test; the LeakSanitizer SUMMARY
       # lines carry the leaked allocation count in field 7
       n_leaking_tests=$(grep -c "XXXXXXXX" ${memory_description_file})
       n_leaks=$(grep "SUMMARY" ${memory_description_file} | ${awk} 'BEGIN{n=0}{n=n+$7}END{print n}')
       printf "Number of LEAKING tests %d\n" ${n_leaking_tests} | tee -a ${summary}
       ;;
   esac
   (( n_leaks == 0 )) && echo "No memory leaks detected" >>${memory_description_file}
   printf "Number of memory  leaks %d\n" ${n_leaks} | tee -a ${summary}
   # single machine-parseable result line for automated harvesting
   echo "GREPME ${n_runtime_error} ${n_wrong_results} ${n_correct} ${n_new} ${n_tests} ${n_leaks}"
else
   echo "No memory leak check was performed" >>${memory_description_file}
   echo "GREPME ${n_runtime_error} ${n_wrong_results} ${n_correct} ${n_new} ${n_tests} X"
fi

end_test 0 ${lockfile}
