added tests
86
dvm/tools/tester/trunk/main/configure-run.sh
Normal file
@@ -0,0 +1,86 @@
#!/bin/sh

# Common part
MAX_PPN=60
MAX_CPU_SHARING_FACTOR=4
MAX_CUDA_SHARING_FACTOR=16

# Default
NODE_COUNT=1
MAX_NODES_PER_TASK=1
INTERACTIVE=1
HAS_RES_MANAGER=0

# Specializations
if [ `hostname` = "k100" ]; then
    NODE_COUNT=64
    MAX_NODES_PER_TASK=8
    INTERACTIVE=0
    # Since launch isn't interactive, one must provide the is_launched, is_finished, get_elapsed_time, stdout_fn and stderr_fn calls
    get_task_dir() {
        local n
        for n in 1 2 3 4 5 6 7 8 9; do
            if [ -d "$1.$n" ]; then
                printf %s "$1.$n"
                return
            fi
        done
        printf %s "$1"
    }
    is_launched() {
        local STDOUT_FN
        STDOUT_FN="$1"
        local STDERR_FN
        STDERR_FN="$2"
        # Add handling for refusals from SUPPZ
        echo 1
    }
    is_finished() {
        if [ "$(tail -n 1 $(get_task_dir "$1")/manager.log)" = "Exiting..." ]; then
            echo 1
        else
            echo 0
        fi
    }
    get_elapsed_time() {
        local da
        local mo
        local ye
        local dat
        local tim
        local sec1
        local sec2
        local task_dir
        task_dir="$(get_task_dir "$1")"
        dat=`grep "started at" <"$task_dir/manager.log" | awk '{print $5}' | sed 's/\./ /g'`
        tim=`grep "started at" <"$task_dir/manager.log" | awk '{print $6}'`
        da=`echo "$dat" | awk '{print $1}'`
        mo=`echo "$dat" | awk '{print $2}'`
        ye=`echo "$dat" | awk '{print $3}'`
        dat="$ye-$mo-$da $tim"
        sec1=`date -d "$dat" +%s`
        dat=`grep "done at" <"$task_dir/manager.log" | awk '{print $6}' | sed 's/\./ /g'`
        tim=`grep "done at" <"$task_dir/manager.log" | awk '{print $7}'`
        da=`echo "$dat" | awk '{print $1}'`
        mo=`echo "$dat" | awk '{print $2}'`
        ye=`echo "$dat" | awk '{print $3}'`
        dat="$ye-$mo-$da $tim"
        sec2=`date -d "$dat" +%s`
        echo $(( sec2 - sec1 ))
    }
    stdout_fn() {
        echo "$(get_task_dir "$1")/output"
    }
    stderr_fn() {
        echo "$(get_task_dir "$1")/errors"
    }
    HAS_RES_MANAGER=1
    # Since the machine has a resource manager (task queue), one must provide the can_launch call
    can_launch() {
        if [ `mps 2>/dev/null | tail -n +3 | wc -l` -lt 6 ]; then
            echo 1
        else
            echo 0
        fi
    }
fi
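For illustration (not part of this commit): the k100 branch above shows the hooks a queue-managed machine must define, and task-processor.sh additionally sources a configure-run.sh from the invocation directory, so a site-local override only needs to redefine what differs. A minimal sketch for a hypothetical SLURM-based cluster; the host name "cluster-x", the job limit of 4 and the use of squeue are assumptions:

    #!/bin/sh
    # Hypothetical site-local configure-run.sh override; "cluster-x" and the limit of 4 are assumptions.
    if [ `hostname` = "cluster-x" ]; then
        NODE_COUNT=16
        MAX_NODES_PER_TASK=4
        HAS_RES_MANAGER=1
        # The machine has a task queue, so a can_launch call must be provided.
        can_launch() {
            # Allow a new submission only while fewer than 4 of our jobs are queued or running.
            if [ `squeue -h -u "$USER" 2>/dev/null | wc -l` -lt 4 ]; then
                echo 1
            else
                echo 0
            fi
        }
    fi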
26
dvm/tools/tester/trunk/main/default-test-analyzer.sh
Normal file
@@ -0,0 +1,26 @@
#!/bin/sh

# Requires variables: LAUNCH_EXIT_CODE, STDERR_FN
# Produces variables: TEST_PASSED, RESULT_COMMENT, ERROR_LEVEL

if [ `grep -E 'Assertion' <"$STDERR_FN" | wc -l` -gt 0 ]; then
    TEST_PASSED=0
    RESULT_COMMENT="Assertion failed"
    ERROR_LEVEL=3
elif [ `grep -E 'RTS fatal' <"$STDERR_FN" | wc -l` -gt 0 ]; then
    TEST_PASSED=0
    RESULT_COMMENT="RTS fatal"
    ERROR_LEVEL=2
elif [ `grep -E 'RTS err' <"$STDERR_FN" | wc -l` -gt 0 ]; then
    TEST_PASSED=0
    RESULT_COMMENT="RTS err"
    ERROR_LEVEL=1
elif [ $LAUNCH_EXIT_CODE -ne 0 ]; then
    TEST_PASSED=0
    RESULT_COMMENT="Launch failure"
    ERROR_LEVEL=4
else
    TEST_PASSED=1
    RESULT_COMMENT="OK"
    ERROR_LEVEL=0
fi
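For illustration (not part of this commit): perform-tests.sh lets a test directory replace this default analyzer by shipping its own test-analyzer.sh (see ANALYZER_FILE there); task-processor.sh sources it with LAUNCH_EXIT_CODE, STDOUT_FN and STDERR_FN set and expects TEST_PASSED, RESULT_COMMENT and ERROR_LEVEL back. A minimal sketch of such an override that also compares the program output against a reference file; the file name expected.out is an assumption:

    #!/bin/sh
    # Hypothetical per-directory test-analyzer.sh; "expected.out" is an assumed reference file.
    # Requires variables: LAUNCH_EXIT_CODE, STDOUT_FN, STDERR_FN
    # Produces variables: TEST_PASSED, RESULT_COMMENT, ERROR_LEVEL
    if [ $LAUNCH_EXIT_CODE -ne 0 ]; then
        TEST_PASSED=0
        RESULT_COMMENT="Launch failure"
        ERROR_LEVEL=4
    elif ! diff -q "$STDOUT_FN" expected.out >/dev/null 2>&1; then
        TEST_PASSED=0
        RESULT_COMMENT="Wrong output"
        ERROR_LEVEL=1
    else
        TEST_PASSED=1
        RESULT_COMMENT="OK"
        ERROR_LEVEL=0
    fi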
348
dvm/tools/tester/trunk/main/gen-report.sh
Normal file
@@ -0,0 +1,348 @@
#!/bin/bash
# Bash is required due to usage of associative arrays

MY_DIR=$(cd "$(dirname "$(which "$0")")" && pwd)
SAVE_DIR=`pwd`

TEST_SUITE="$1"
RESULTS_DIR="$2"
FULL_REP_URL="$3"
REV_NUMBER=$4

if [ -z "$FULL_REP_URL" ]; then
    FULL_REP_URL="full-report.html"
fi

if [ -z "$REV_NUMBER" ]; then
    REV_NUMBER=UNKNOWN
fi

MAX_LONELY_OK=50
REPORT_DIR="$RESULTS_DIR/report"
rm -rf "$REPORT_DIR"
RES_FILES=`mktemp`
find "$RESULTS_DIR" -mindepth 1 -type f | LC_ALL=C sort >$RES_FILES
BUGGY_FILE_DIR="$REPORT_DIR/sources"
mkdir -p "$BUGGY_FILE_DIR"
REPORT_FILE="$REPORT_DIR/brief-report.html"
FULL_REPORT_FILE="$REPORT_DIR/full-report.html"

COL_COUNT=2
PLATFORMS=
HAS_SUBTESTS=0
TOTAL_LAUNCHES=0
TOTAL_ERROR_LAUNCHES=0

while IFS= read -r f; do
    CUR_DEPTH=0
    TEST_SHORT_PATH=`basename "$f" .result`
    TMPSTR=`dirname "$f"`
    while [ "$TMPSTR" != "$RESULTS_DIR" ]; do
        CUR_DEPTH=$(( $CUR_DEPTH + 1 ))
        TEST_SHORT_PATH="$(basename "$TMPSTR")/$TEST_SHORT_PATH"
        TMPSTR=`dirname "$TMPSTR"`
    done
    IS_SUBTEST=0
    if [ ! -e "$TEST_SUITE/$TEST_SHORT_PATH" ]; then
        HAS_SUBTESTS=1
        IS_SUBTEST=1
    fi
    if [ $(( CUR_DEPTH + 2 )) -gt $COL_COUNT ]; then
        COL_COUNT=$(( $CUR_DEPTH + 2 ))
    fi
    if [ $IS_SUBTEST -eq 0 ]; then
        while IFS= read -r lin; do
            eval $lin
            if [ -z "$PLATFORMS" ]; then
                PLATFORMS=$PLATFORM
            else
                FOUND_FLAG=0
                for platf in $PLATFORMS; do
                    if [ $platf = $PLATFORM ]; then
                        FOUND_FLAG=1
                    fi
                done
                if [ $FOUND_FLAG -eq 0 ]; then
                    PLATFORMS="$PLATFORMS $PLATFORM"
                fi
            fi
            TOTAL_LAUNCHES=$(( $TOTAL_LAUNCHES + 1 ))
            if [ "$ERROR_LEVEL" != "0" ]; then
                TOTAL_ERROR_LAUNCHES=$(( $TOTAL_ERROR_LAUNCHES + 1 ))
            fi
        done <"$f"
    fi
done <$RES_FILES

CAT_COUNT=$(( COL_COUNT - 1 - HAS_SUBTESTS - 1 ))

exec 5>"$REPORT_FILE"
exec 6>"$FULL_REPORT_FILE"

echo "<html>" >& 5
echo "<html>" >& 6
echo "<head>" >& 5
echo "<head>" >& 6
echo "<title>Test results for DVM-system. Revision $REV_NUMBER.</title>" >& 5
echo "<title>Test results for DVM-system. Revision $REV_NUMBER.</title>" >& 6
echo "<style>" >& 6
cat "$MY_DIR/report.css" >& 6
echo "</style>" >& 6
echo "<script type='text/javascript'>" >& 6
cat "$MY_DIR/report.js" >& 6
echo "</script>" >& 6
echo "</head>" >& 5
echo "</head>" >& 6
echo "<body>" >& 5
echo "<body>" >& 6
echo "<h1 align=center>Test results for DVM-system. Revision $REV_NUMBER.</h1>" >& 5
echo "<h1 align=center>Test results for DVM-system. Revision $REV_NUMBER.</h1>" >& 6
echo "<h2 align=center>Tested on platforms: $PLATFORMS.</h2>" >& 5
echo "<h2 align=center>Tested on platforms: $PLATFORMS.</h2>" >& 6
echo "<h3 align=center>Full report can be seen on <a href='$FULL_REP_URL'>$FULL_REP_URL</a></h3>" >& 5
echo "<p align=center>Launches with errors: $TOTAL_ERROR_LAUNCHES / $TOTAL_LAUNCHES</p>" >& 5
echo "<p align=center>Launches with errors: $TOTAL_ERROR_LAUNCHES / $TOTAL_LAUNCHES</p>" >& 6
echo "<h3 align=center><a href='sources.tgz'>Download sources of buggy tests</a></h3>" >& 6
echo "<table border=1 cellspacing=0 align=center>" >& 5
echo "<table border=1 cellspacing=0 align=center>" >& 6
echo "<tr>" >& 5
echo "<tr>" >& 6
CUR_COL=0
while [ $CUR_COL -lt $CAT_COUNT ]; do
    echo "<th align=center>Category</th>" >& 5
    echo "<th>Category</th>" >& 6
    CUR_COL=$(( CUR_COL + 1 ))
done
echo "<th align=center>Test name</th>" >& 5
echo "<th>Test name</th>" >& 6
if [ $HAS_SUBTESTS -ne 0 ]; then
    echo "<th>Subtest</th>" >& 6
fi
echo "<th align=center>Test result</th>" >& 5
echo "<th>Test result</th>" >& 6
echo "</tr>" >& 5
echo "</tr>" >& 6

output_cat_recursive()
{
    if [ `basename "$1"` != "$1" ]; then
        output_cat_recursive `dirname "$1"`
    fi
    if [ $TO_BRIEF -ne 0 ]; then
        echo "<td align=center>" >& 5
        basename "$1" >& 5
        echo "</td>" >& 5
    fi
    echo "<td>" >& 6
    basename "$1" >& 6
    echo "</td>" >& 6
    FILLED_COLS=$(( FILLED_COLS + 1 ))
    if [ $FILLED_COLS -eq 1 -a `basename "$1"` = "Performance" ]; then
        FORCE_TABLE=1
    fi
}

output_cat()
{
    FILLED_COLS=0
    output_cat_recursive "$1"
    while [ $FILLED_COLS -lt $CAT_COUNT ]; do
        if [ $TO_BRIEF -ne 0 ]; then
            echo "<td> </td>" >& 5
        fi
        echo "<td> </td>" >& 6
        FILLED_COLS=$(( FILLED_COLS + 1 ))
    done
}

nextDetailsId=1

while IFS= read -r f; do
    CUR_DEPTH=0
    TEST_SHORT_PATH=`basename "$f" .result`
    TMPSTR=`dirname "$f"`
    while [ "$TMPSTR" != "$RESULTS_DIR" ]; do
        CUR_DEPTH=$(( $CUR_DEPTH + 1 ))
        TEST_SHORT_PATH="$(basename "$TMPSTR")/$TEST_SHORT_PATH"
        TMPSTR=`dirname "$TMPSTR"`
    done
    SUBTEST_NAME=
    if [ ! -e "$TEST_SUITE/$TEST_SHORT_PATH" ]; then
        SUBTEST_NAME=`basename "$TEST_SHORT_PATH"`
        TEST_SHORT_PATH=`dirname "$TEST_SHORT_PATH"`
    fi
    HAS_FAILS=0
    if [ `grep "TEST_PASSED=0" <"$f" | wc -l` -gt 0 ]; then
        HAS_FAILS=1
        if [ ! -e "$BUGGY_FILE_DIR/$TEST_SHORT_PATH" ]; then
            mkdir -p `dirname "$BUGGY_FILE_DIR/$TEST_SHORT_PATH"`
            cp -ur "$TEST_SUITE/$TEST_SHORT_PATH" "$BUGGY_FILE_DIR/$TEST_SHORT_PATH"
        fi
    fi
    TO_BRIEF=1
    if [ -n "$SUBTEST_NAME" -o $HAS_FAILS -eq 0 ]; then
        TO_BRIEF=0
    fi
    if [ $TO_BRIEF -ne 0 ]; then
        echo "<tr>" >& 5
    fi
    echo "<tr>" >& 6
    FORCE_TABLE=0
    output_cat `dirname "$TEST_SHORT_PATH"`
    if [ $TO_BRIEF -ne 0 ]; then
        echo "<td align=center>" >& 5
        echo `basename "$TEST_SHORT_PATH"` >& 5
        echo "</td>" >& 5
    fi
    if [ -n "$SUBTEST_NAME" ]; then
        echo "<td>" >& 6
        echo `basename "$TEST_SHORT_PATH"` >& 6
        echo "</td>" >& 6
        echo "<td>" >& 6
        echo "$SUBTEST_NAME" >& 6
        echo "</td>" >& 6
    else
        echo "<td colspan=$((1 + HAS_SUBTESTS))>" >& 6
        echo `basename "$TEST_SHORT_PATH"` >& 6
        echo "</td>" >& 6
    fi
    ERROR_LEVELS=$(
        while IFS= read -r lin; do
            eval $lin
            if [ -z "$ERROR_LEVEL" ]; then
                ERROR_LEVEL=0
            fi
            echo $ERROR_LEVEL
        done <"$f" | sort -unr)
    if [ $TO_BRIEF -ne 0 ]; then
        echo "<td align=center>" >& 5
    fi
    echo "<td>" >& 6
    LAUNCH_COUNT=`wc -l <"$f"`
    # echo "$LAUNCH_COUNT total" >& 5
    # echo "$LAUNCH_COUNT total" >& 6
    if [ -n "$ERROR_LEVELS" ]; then
        for el in $ERROR_LEVELS; do
            unset countByComment
            unset passedByComment
            declare -A countByComment
            declare -A passedByComment
            while IFS= read -r lin; do
                eval $lin
                if [ -z "$ERROR_LEVEL" ]; then
                    ERROR_LEVEL=0
                fi
                if [ "$ERROR_LEVEL" = "$el" ]; then
                    if [ -z "${countByComment["$RESULT_COMMENT"]}" ]; then
                        countByComment["$RESULT_COMMENT"]=0
                    fi
                    countByComment["$RESULT_COMMENT"]=$(( countByComment["$RESULT_COMMENT"] + 1 ))
                    passedByComment["$RESULT_COMMENT"]=$TEST_PASSED
                fi
            done <"$f"
            for cmt in "${!countByComment[@]}"; do
                if [ ${passedByComment["$cmt"]} -ne 0 ]; then
                    DIV_CLASS=passed
                    DIV_COLOR=green
                else
                    DIV_CLASS=failed
                    DIV_COLOR=red
                fi
                if [ $TO_BRIEF -ne 0 ]; then
                    echo "<div style='color: $DIV_COLOR'>" >& 5
                    echo "${countByComment[$cmt]} $cmt" >& 5
                    echo "</div>" >& 5
                fi
                echo "<div class=$DIV_CLASS>" >& 6
                if [ $HAS_FAILS -ne 0 -o $LAUNCH_COUNT -le $MAX_LONELY_OK -o $FORCE_TABLE -ne 0 ]; then
                    echo "<a href='#' class='details $DIV_CLASS' onclick='{ toggleElem(\"det$nextDetailsId\"); return false; }'>" >& 6
                    echo "<span class=details>${countByComment[$cmt]} $cmt</span>" >& 6
                    echo "</a>" >& 6
                    echo "<table border=1 cellspacing=0 class=details$FORCE_TABLE id='det$nextDetailsId'>" >& 6
                    echo "<tr>" >& 6
                    echo "<th>Platform</th>" >& 6
                    echo "<th>noH</th>" >& 6
                    echo "<th>autoTfm</th>" >& 6
                    echo "<th>Grid</th>" >& 6
                    echo "<th>CPUs</th>" >& 6
                    echo "<th>GPUs</th>" >& 6
                    echo "<th>Time</th>" >& 6
                    echo "</tr>" >& 6
                    while IFS= read -r lin; do
                        eval $lin
                        if [ -z "$ERROR_LEVEL" ]; then
                            ERROR_LEVEL=0
                        fi
                        if [ "$ERROR_LEVEL" = "$el" -a "$RESULT_COMMENT" = "$cmt" ]; then
                            echo "<tr>" >& 6
                            echo "<td>$PLATFORM</td>" >& 6
                            if [ $NOH_FLAG -ne 0 ]; then
                                echo "<td>+</td>" >& 6
                            else
                                echo "<td>-</td>" >& 6
                            fi
                            if [ $AUTOTFM_FLAG -ne 0 ]; then
                                echo "<td>+</td>" >& 6
                            else
                                echo "<td>-</td>" >& 6
                            fi
                            if [ -n "$PROC_GRID" ]; then
                                echo "<td>$PROC_GRID</td>" >& 6
                            else
                                echo "<td>N/A</td>" >& 6
                            fi
                            if [ -n "$CPUS_PER_PROC" ]; then
                                echo "<td>$CPUS_PER_PROC</td>" >& 6
                            else
                                echo "<td>N/A</td>" >& 6
                            fi
                            if [ -n "$CUDAS_PER_PROC" ]; then
                                echo "<td>$CUDAS_PER_PROC</td>" >& 6
                            else
                                echo "<td>N/A</td>" >& 6
                            fi
                            if [ -n "$CALC_TIME" ]; then
                                echo "<td>$CALC_TIME</td>" >& 6
                            else
                                echo "<td>N/A</td>" >& 6
                            fi
                            echo "</tr>" >& 6
                        fi
                    done <"$f"
                    echo "</table>" >& 6
                    nextDetailsId=$(( nextDetailsId + 1 ))
                else
                    echo "${countByComment[$cmt]} $cmt" >& 6
                fi
                echo "</div>" >& 6
            done
        done
    else
        if [ $TO_BRIEF -ne 0 ]; then
            echo " " >& 5
        fi
        echo " " >& 6
    fi
    if [ $TO_BRIEF -ne 0 ]; then
        echo "</td>" >& 5
        echo "</tr>" >& 5
    fi
    echo "</td>" >& 6
    echo "</tr>" >& 6
done <$RES_FILES

echo "</table>" >& 5
echo "</table>" >& 6
echo "</body>" >& 5
echo "</body>" >& 6
echo "</html>" >& 5
echo "</html>" >& 6

exec 5>&-
exec 6>&-

cd "$REPORT_DIR"
tar -czf "sources.tgz" "sources"
cd "$SAVE_DIR"

rm $RES_FILES
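For illustration (not part of this commit): each *.result file consumed above is a flat list of shell assignments, one launch per line, written by print_result_line in task-processor.sh; gen-report.sh simply eval's each line. A line looks roughly like the following, with all values illustrative:

    PLATFORM="LINUX" NOH_FLAG=0 AUTOTFM_FLAG=1 PROC_GRID=" 2 2" CPUS_PER_PROC=1 CUDAS_PER_PROC=1 CALC_TIME=42 TEST_PASSED=1 RESULT_COMMENT="OK" ERROR_LEVEL=0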
17
dvm/tools/tester/trunk/main/machine-config.sh
Normal file
@@ -0,0 +1,17 @@
#!/bin/sh

# Default
# Assuming several identical processors and not counting HT cores
CPUS_PER_NODE=$(( `cat /proc/cpuinfo | grep "cpu cores" | LC_ALL=C sort | uniq | awk '{ print $4 }'` * `cat /proc/cpuinfo | grep "physical id" | LC_ALL=C sort | uniq | wc -l` ))
which nvidia-smi >/dev/null 2>& 1
if [ $? -eq 0 ]; then
    CUDAS_PER_NODE=`nvidia-smi -L 2>/dev/null | wc -l`
else
    CUDAS_PER_NODE=0
fi

# Specializations
if [ `hostname` = "k100" ]; then
    CPUS_PER_NODE=12
    CUDAS_PER_NODE=3
fi
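For illustration (not part of this commit): perform-tests.sh and task-processor.sh also source a machine-config.sh from the directory they are started in, if one exists, so a run can pin the detected values without editing the tester itself. A minimal sketch with placeholder numbers:

    #!/bin/sh
    # Hypothetical ./machine-config.sh placed next to where the tester is invoked;
    # overrides the values auto-detected above.
    CPUS_PER_NODE=16
    CUDAS_PER_NODE=2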
352
dvm/tools/tester/trunk/main/perform-tests.sh
Normal file
@@ -0,0 +1,352 @@
#!/bin/bash
# Bash is required due to usage of arrays

SAVE_DIR=`pwd`
MY_DIR=$(cd "$(dirname "$(which "$0")")" && pwd)

DVMSYS_DIR="$1"
TEST_SUITE="$2"
TASK_PROCESSOR_FD=$3

. "$MY_DIR/machine-config.sh"

if [ -f "$SAVE_DIR/machine-config.sh" ]; then
    . "$SAVE_DIR/machine-config.sh"
fi

. "$MY_DIR/test-utils.sh"

PLATFORM_CMD=$(grep "PLATFORM=" <"$DVMSYS_DIR/bin/dvm_settings.sh" | sed -s 's/export //g')
eval $PLATFORM_CMD

SETTINGS_FILE=settings
ANALYZER_FILE=test-analyzer.sh

prepare_new_dir() {
    local TASK_DIR
    TASK_DIR=`mktemp -d`
    local COMP_OPTS
    COMP_OPTS="$1"
    local COMPILE_PID
    local COMPILE_RES
    cd "$TASK_DIR"
    echo "#!/bin/sh" >dvm
    echo "export dvmarithmloopsize=1000000" >>dvm
    echo "exec '$DVMSYS_DIR/bin/dvm_drv' \"\$@\"" >>dvm
    chmod a+x dvm
    cp "$DVMSYS_DIR/user/usr.par" ./
    set -m
    if [ -f "$TEST_FILE" ]; then
        cp "$TEST_FILE" "$TEST_NAME"
        ./dvm $LANG_COMP -shared-dvm $COMP_OPTS "$TEST_NAME" >"build.log" 2>& 1 &
        COMPILE_PID=$!
    else
        find "$TEST_FILE" -mindepth 1 -maxdepth 1 | xargs cp -r -t .
        PATH="$TASK_DIR:$PATH" ./compile.sh $COMP_OPTS >"build.log" 2>& 1 &
        COMPILE_PID=$!
    fi
    proc_killer -$COMPILE_PID 600 &
    KILLER_PID=$!
    disown
    wait $COMPILE_PID
    COMPILE_RES=$?
    kill -2 $KILLER_PID >/dev/null 2>& 1
    kill -15 $KILLER_PID >/dev/null 2>& 1
    kill -9 $KILLER_PID >/dev/null 2>& 1
    if [ ! -f "$TEST_FILE" ] && [ $COMPILE_RES -eq 0 ] && [ ! -f "$TEST_EXENAME" ]; then
        :> "$TEST_EXENAME"
    fi
    echo "$TASK_DIR"
}

do_test() {
    TEST_FILE="$1"
    TEST_NAME=`basename "$TEST_FILE"`
    TEST_SHORT_PATH="$TEST_NAME"
    TMPSTR=`dirname $TEST_FILE`
    while [ "$TMPSTR" != "$TEST_SUITE" ]; do
        TEST_SHORT_PATH="$(basename $TMPSTR)/$TEST_SHORT_PATH"
        TMPSTR=`dirname $TMPSTR`
    done
    TEST_EXENAME="${TEST_NAME%.*}"
    case ${TEST_NAME##*.} in
        c|cdv) IS_FORTRAN=0;;
        f|f90|fdv) IS_FORTRAN=1;;
    esac
    if [ $IS_FORTRAN -ne 0 ]; then
        LANG_COMP="f"
    else
        LANG_COMP="c"
    fi
    TEST_DIMS=
    if [ -n "$DIMENSION_COUNT" ]; then
        TEST_DIMS=$DIMENSION_COUNT
    else
        for t in $DIMENSION_MAP; do
            FN=`echo $t | sed 's/=/ /g' | awk '{print $1}'`
            DIM=`echo $t | sed 's/=/ /g' | awk '{print $2}'`
            if [ "$FN" = "$TEST_NAME" ]; then
                TEST_DIMS=$DIM
                break
            fi
        done
    fi
    if [ -z "$TEST_DIMS" ]; then
        # Try to extract the dimension count from the file name - it is the first digit in it.
        TEST_DIMS=`echo "$TEST_EXENAME" | sed 's/[^0-9]//g' | cut -c1`
    fi
    if [ -z "$TEST_DIMS" ]; then
        echo "Cannot find information about dimension count for test $TEST_FILE" >& 2
        TEST_DIMS=1
    fi
    if [ $MAX_DIM_PROC_COUNT -le 0 ]; then
        MAX_DIM_PROC_COUNT=$MAX_PROC_COUNT
    fi
    while true; do
        if [ -f "$SAVE_DIR/dvm-tester.pause" ] && [ "$(cat "$SAVE_DIR/dvm-tester.pause")" = "Immediate" ]; then
            echo "Paused explicitly (local)"
        elif [ -f "$MY_DIR/dvm-tester.pause" ] && [ "$(cat "$MY_DIR/dvm-tester.pause")" = "Immediate" ]; then
            echo "Paused explicitly (global)"
        else
            break
        fi
        sleep 60
    done
    echo "Compiling $TEST_SHORT_PATH on $PLATFORM platform"
    if [ $GPU_ONLY -eq 0 ]; then
        # Compile with noH
        NOH_DIR=`prepare_new_dir "-noH"`
        if [ -f "$NOH_DIR/$TEST_EXENAME" ]; then
            ISSUE_NOH=1
        else
            ISSUE_NOH=0
        fi
    fi
    if [ $DVM_ONLY -eq 0 ]; then
        # Compile without noH
        H_DIR=`prepare_new_dir ""`
        if [ -f "$H_DIR/$TEST_EXENAME" ]; then
            ISSUE_H=1
        else
            ISSUE_H=0
        fi
        # And with autoTfm
        AUTOTFM_DIR=`prepare_new_dir "-autoTfm"`
        if [ -f "$AUTOTFM_DIR/$TEST_EXENAME" ]; then
            ISSUE_AUTOTFM=1
        else
            ISSUE_AUTOTFM=0
        fi
    fi
    # cat "$H_DIR/build.log"
    echo "Generating tasks for $TEST_SHORT_PATH with $TEST_DIMS dimensions on $PLATFORM platform"
    COMMON_PART=$(
        echo -n "TASK_TYPE=1"
        echo -n " TEST_PLATFORM=$PLATFORM"
        echo -n " SHARE_RESOURCES=$SHARE_RESOURCES"
        echo -n " TEST_ANALYZER=\"$TEST_ANALYZER\""
        echo -n " TEST_SHORT_PATH=\"$TEST_SHORT_PATH\""
        echo -n " TASK_EXE=\"$TEST_EXENAME\""
        echo -n " TEST_MAX_TIME=$MAX_TIME"
    )
    # An additional size element with index 0 is added (it serves as the termination flag for the loop below)
    i=0
    while [ $i -le $TEST_DIMS ]; do
        sizes[$i]=1
        i=$(( i + 1 ))
    done
    counter=0
    totalSize=1
    while [ $(( sizes[0] )) -eq 1 ]; do
        PROC_GRID=
        if [ $IS_FORTRAN -eq 0 ]; then
            i=1
            while [ $i -le $TEST_DIMS ]; do
                PROC_GRID="$PROC_GRID $((sizes[i]))"
                i=$(( i + 1 ))
            done
        else
            i=$TEST_DIMS
            while [ $i -ge 1 ]; do
                PROC_GRID="$PROC_GRID $((sizes[i]))"
                i=$(( i - 1 ))
            done
        fi
        if [ $GPU_ONLY -eq 0 ]; then
            if [ $ISSUE_NOH -ne 0 ]; then
                echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
                echo -n " TASK_DIR=\"$NOH_DIR\"" >&$TASK_PROCESSOR_FD
                echo -n " TASK_NOH_FLAG=1" >&$TASK_PROCESSOR_FD
                echo -n " CPUS_PER_PROC=1" >&$TASK_PROCESSOR_FD
                echo -n " PROC_GRID=\"$PROC_GRID\"" >&$TASK_PROCESSOR_FD
                counter=$(( counter + 1 ))
                echo >&$TASK_PROCESSOR_FD
            fi
        fi
        if [ $DVM_ONLY -eq 0 ]; then
            # Single-device and single-threaded configurations
            if [ $GPU_ONLY -eq 0 ]; then
                if [ $ISSUE_H -ne 0 ]; then
                    echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
                    echo -n " TASK_DIR=\"$H_DIR\"" >&$TASK_PROCESSOR_FD
                    echo -n " CPUS_PER_PROC=1" >&$TASK_PROCESSOR_FD
                    echo -n " PROC_GRID=\"$PROC_GRID\"" >&$TASK_PROCESSOR_FD
                    counter=$(( counter + 1 ))
                    echo >&$TASK_PROCESSOR_FD
                fi
            fi
            if [ $CUDAS_PER_NODE -gt 0 ]; then
                if [ $ISSUE_H -ne 0 ]; then
                    echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
                    echo -n " TASK_DIR=\"$H_DIR\"" >&$TASK_PROCESSOR_FD
                    echo -n " CUDAS_PER_PROC=1" >&$TASK_PROCESSOR_FD
                    echo -n " PROC_GRID=\"$PROC_GRID\"" >&$TASK_PROCESSOR_FD
                    counter=$(( counter + 1 ))
                    echo >&$TASK_PROCESSOR_FD
                fi
                if [ $ISSUE_AUTOTFM -ne 0 ]; then
                    echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
                    echo -n " TASK_DIR=\"$AUTOTFM_DIR\"" >&$TASK_PROCESSOR_FD
                    echo -n " TASK_AUTOTFM_FLAG=1" >&$TASK_PROCESSOR_FD
                    echo -n " CUDAS_PER_PROC=1" >&$TASK_PROCESSOR_FD
                    echo -n " PROC_GRID=\"$PROC_GRID\"" >&$TASK_PROCESSOR_FD
                    counter=$(( counter + 1 ))
                    echo >&$TASK_PROCESSOR_FD
                fi
            fi
            # Multi-device and multi-threaded configurations
            MAX_DEVS_PER_PROC=$((sizes[1]))
            DEVS_PER_PROC=2
            while [ $DEVS_PER_PROC -le $MAX_DEVS_PER_PROC ]; do
                if [ $(( MAX_DEVS_PER_PROC % DEVS_PER_PROC )) -ne 0 ]; then
                    DEVS_PER_PROC=$(( $DEVS_PER_PROC + 1 ))
                    continue
                fi
                if [ $IS_FORTRAN -eq 0 ]; then
                    MD_PROC_GRID=" $((MAX_DEVS_PER_PROC / DEVS_PER_PROC))"
                    i=2
                    while [ $i -le $TEST_DIMS ]; do
                        MD_PROC_GRID="$MD_PROC_GRID $((sizes[i]))"
                        i=$(( i + 1 ))
                    done
                else
                    MD_PROC_GRID=
                    i=$TEST_DIMS
                    while [ $i -ge 2 ]; do
                        MD_PROC_GRID="$MD_PROC_GRID $((sizes[i]))"
                        i=$(( i - 1 ))
                    done
                    MD_PROC_GRID="$MD_PROC_GRID $((MAX_DEVS_PER_PROC / DEVS_PER_PROC))"
                fi
                if [ $GPU_ONLY -eq 0 ]; then
                    if [ $ISSUE_H -ne 0 ]; then
                        echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
                        echo -n " TASK_DIR=\"$H_DIR\"" >&$TASK_PROCESSOR_FD
                        echo -n " CPUS_PER_PROC=$DEVS_PER_PROC" >&$TASK_PROCESSOR_FD
                        echo -n " PROC_GRID=\"$MD_PROC_GRID\"" >&$TASK_PROCESSOR_FD
                        counter=$(( counter + 1 ))
                        echo >&$TASK_PROCESSOR_FD
                    fi
                fi
                if [ $ALLOW_MULTIDEV -ne 0 ] && [ $CUDAS_PER_NODE -gt 0 ]; then
                    for ((GPUS_PER_PROC=1; GPUS_PER_PROC<=$DEVS_PER_PROC; GPUS_PER_PROC++)); do
                        if [ $ISSUE_H -ne 0 ]; then
                            echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
                            echo -n " TASK_DIR=\"$H_DIR\"" >&$TASK_PROCESSOR_FD
                            echo -n " CPUS_PER_PROC=$(($DEVS_PER_PROC - $GPUS_PER_PROC))" >&$TASK_PROCESSOR_FD
                            echo -n " CUDAS_PER_PROC=$GPUS_PER_PROC" >&$TASK_PROCESSOR_FD
                            echo -n " PROC_GRID=\"$MD_PROC_GRID\"" >&$TASK_PROCESSOR_FD
                            counter=$(( counter + 1 ))
                            echo >&$TASK_PROCESSOR_FD
                        fi
                        if [ $ISSUE_AUTOTFM -ne 0 ]; then
                            echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
                            echo -n " TASK_DIR=\"$AUTOTFM_DIR\"" >&$TASK_PROCESSOR_FD
                            echo -n " TASK_AUTOTFM_FLAG=1" >&$TASK_PROCESSOR_FD
                            echo -n " CPUS_PER_PROC=$(($DEVS_PER_PROC - $GPUS_PER_PROC))" >&$TASK_PROCESSOR_FD
                            echo -n " CUDAS_PER_PROC=$GPUS_PER_PROC" >&$TASK_PROCESSOR_FD
                            echo -n " PROC_GRID=\"$MD_PROC_GRID\"" >&$TASK_PROCESSOR_FD
                            counter=$(( counter + 1 ))
                            echo >&$TASK_PROCESSOR_FD
                        fi
                    done
                fi
                DEVS_PER_PROC=$(( $DEVS_PER_PROC + 1 ))
            done
        fi
        # Advance to the next configuration
        i=$TEST_DIMS
        while [ $i -ge 0 ]; do
            sizes[$i]=$(( sizes[i] + 1 ))
            totalSize=1
            j=1
            while [ $j -le $TEST_DIMS ]; do
                totalSize=$(( totalSize * sizes[j] ))
                j=$(( j + 1 ))
            done
            if [ $(( sizes[i] )) -le $MAX_DIM_PROC_COUNT -a $totalSize -le $MAX_PROC_COUNT ]; then
                break
            elif [ $i -gt 0 ]; then
                sizes[$i]=1
            fi
            i=$(( i - 1 ))
        done
    done
    echo "Generated $counter tasks"
    COMMON_PART=$(
        echo -n "TASK_TYPE=0"
        echo -n " TEST_PLATFORM=$PLATFORM"
        echo -n " TEST_SHORT_PATH=\"$TEST_SHORT_PATH\""
        echo -n " TASK_EXE=\"$TEST_EXENAME\""
    )
    if [ $GPU_ONLY -eq 0 ]; then
        echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
        echo -n " TASK_DIR=\"$NOH_DIR\"" >&$TASK_PROCESSOR_FD
        echo -n " TASK_NOH_FLAG=1" >&$TASK_PROCESSOR_FD
        echo >&$TASK_PROCESSOR_FD
    fi
    if [ $DVM_ONLY -eq 0 ]; then
        echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
        echo -n " TASK_DIR=\"$H_DIR\"" >&$TASK_PROCESSOR_FD
        echo >&$TASK_PROCESSOR_FD
        echo -n "$COMMON_PART" >&$TASK_PROCESSOR_FD
        echo -n " TASK_DIR=\"$AUTOTFM_DIR\"" >&$TASK_PROCESSOR_FD
        echo -n " TASK_AUTOTFM_FLAG=1" >&$TASK_PROCESSOR_FD
        echo >&$TASK_PROCESSOR_FD
    fi
}

traverse_tests() {
    CUR_DIR="$1"
    if [ -f "$CUR_DIR/$SETTINGS_FILE" ]; then
        . "$CUR_DIR/$SETTINGS_FILE"
    fi
    if [ -f "$CUR_DIR/$ANALYZER_FILE" ]; then
        TEST_ANALYZER="$CUR_DIR/$ANALYZER_FILE"
    fi
    TESTS=`mktemp`
    find "$CUR_DIR" -mindepth 1 -maxdepth 1 -regex '.*[.]\(c\|cdv\|f\|f90\|fdv\)' | LC_ALL=C sort >$TESTS
    DIRS=`mktemp`
    find "$CUR_DIR" -mindepth 1 -maxdepth 1 -type d -regex '.*/[^.]*' | LC_ALL=C sort >$DIRS
    while IFS= read -r f; do
        ( do_test "$f" )
    done <$TESTS
    while IFS= read -r d; do
        ( traverse_tests "$d" )
    done <$DIRS
    rm $DIRS $TESTS
}

set_default_settings() {
    MAX_PROC_COUNT=1
    MAX_DIM_PROC_COUNT=0
    SHARE_RESOURCES=0
    ALLOW_MULTIDEV=1
    DVM_ONLY=0
    GPU_ONLY=0
    TEST_ANALYZER="$MY_DIR/default-test-analyzer.sh"
    MAX_TIME=300
}

set_default_settings
(traverse_tests "$TEST_SUITE")
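For illustration (not part of this commit): every generated task is written to the task-processor file descriptor as one line of shell assignments built from COMMON_PART plus the per-launch fields; task-processor.sh later eval's it. A compile-and-run task (TASK_TYPE=1) looks roughly like the following single line, with all values illustrative:

    TASK_TYPE=1 TEST_PLATFORM=LINUX SHARE_RESOURCES=0 TEST_ANALYZER="/path/to/default-test-analyzer.sh" TEST_SHORT_PATH="jacobi/jac2d.cdv" TASK_EXE="jac2d" TEST_MAX_TIME=300 TASK_DIR="/tmp/tmp.AbCdEf" CPUS_PER_PROC=1 PROC_GRID=" 2 2"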
24
dvm/tools/tester/trunk/main/report.css
Normal file
@@ -0,0 +1,24 @@
th, td {
    text-align: center;
}
div.passed, a.passed {
    color: green;
}
div.failed, a.failed {
    color: red;
}
a.details {
    text-decoration: none;
    font-size: 50%;
    border-bottom: 1px dashed;
}
span.details {
    font-size: 200%;
    line-height: normal;
}
table.details0 {
    display: none;
}
table.details1 {
    display: block;
}
7
dvm/tools/tester/trunk/main/report.js
Normal file
@@ -0,0 +1,7 @@
function toggleElem(id) {
    var e = document.getElementById(id);
    if (e.style.display == 'block')
        e.style.display = 'none';
    else
        e.style.display = 'block';
}
366
dvm/tools/tester/trunk/main/task-processor.sh
Normal file
@@ -0,0 +1,366 @@
#!/bin/bash
# Bash is required due to usage of the 'disown' command

SAVE_DIR=`pwd`
MY_DIR=$(cd "$(dirname "$(which "$0")")" && pwd)

RESULTS_DIR="$1"

. "$MY_DIR/machine-config.sh"

if [ -f "$SAVE_DIR/machine-config.sh" ]; then
    . "$SAVE_DIR/machine-config.sh"
fi

. "$MY_DIR/configure-run.sh"

if [ -f "$SAVE_DIR/configure-run.sh" ]; then
    . "$SAVE_DIR/configure-run.sh"
fi

. "$MY_DIR/test-utils.sh"

if [ $INTERACTIVE -ne 0 ]; then
    stdout_fn() {
        echo "$1.stdout"
    }
    stderr_fn() {
        echo "$1.stderr"
    }
fi

if [ $HAS_RES_MANAGER -eq 0 ]; then
    RES_MAN_DIR=`mktemp -d`
fi

resources_freed() {
    FN=`mktemp`
    if [ $SHARE_RESOURCES -eq 0 ]; then
        FREED_CPUS=$(( CPUS_PER_NODE * MAX_CPU_SHARING_FACTOR ))
        FREED_CUDAS=$(( CUDAS_PER_NODE * MAX_CUDA_SHARING_FACTOR ))
    else
        FREED_CPUS=$(( totalProcs * CPUS_PER_PROC ))
        FREED_CUDAS=$(( totalProcs * CUDAS_PER_PROC ))
    fi
    echo "FREED_CPUS=$FREED_CPUS" >>$FN
    echo "FREED_CUDAS=$FREED_CUDAS" >>$FN
    # echo "rm $FN" >>$FN
    mv $FN $RES_MAN_DIR/
}

interactive_launcher() {
    cd "$LAUNCH_DIR"
    STDOUT_FN=`stdout_fn "$LAUNCH_NAME"`
    STDERR_FN=`stderr_fn "$LAUNCH_NAME"`
    :>$STDOUT_FN
    :>$STDERR_FN
    set -m
    # echo ./dvm run $PROC_GRID "$TASK_EXE"
    START_T=`date +%s`
    if [ -f "run.sh" ]; then
        PATH="$LAUNCH_DIR:$PATH" PROC_GRID="$PROC_GRID" DVMH_PPN=$LAUNCH_PPN DVMH_NUM_THREADS=$CPUS_PER_PROC DVMH_NUM_CUDAS=$CUDAS_PER_PROC ./run.sh </dev/null >"$STDOUT_FN" 2>"$STDERR_FN" &
        LAUNCH_PID=$!
    else
        DVMH_PPN=$LAUNCH_PPN DVMH_NUM_THREADS=$CPUS_PER_PROC DVMH_NUM_CUDAS=$CUDAS_PER_PROC ./dvm run $PROC_GRID "$TASK_EXE" </dev/null >"$STDOUT_FN" 2>"$STDERR_FN" &
        LAUNCH_PID=$!
    fi
    if [ $TEST_MAX_TIME -gt 0 ]; then
        # echo "Setting proc_killer to process $LAUNCH_PID for $TEST_MAX_TIME"
        proc_killer -$LAUNCH_PID $TEST_MAX_TIME </dev/null >/dev/null 2>& 1 &
        KILLER_PID=$!
        disown
    fi
    wait $LAUNCH_PID
    START_RES=$?
    END_T=`date +%s`
    CALC_TIME=$(( END_T - START_T ))
    if [ $TEST_MAX_TIME -gt 0 ]; then
        kill -2 $KILLER_PID >/dev/null 2>& 1
        kill -15 $KILLER_PID >/dev/null 2>& 1
        kill -9 $KILLER_PID >/dev/null 2>& 1
    fi
    if [ $HAS_RES_MANAGER -eq 0 ]; then
        resources_freed
    fi
    echo "$START_RES $CALC_TIME" >"$TASK_EXE.finished"
}

non_interactive_launcher() {
    cd "$LAUNCH_DIR"
    STDOUT_FN=`mktemp`
    STDERR_FN=`mktemp`
    # echo ./dvm run $PROC_GRID "$TASK_EXE"
    if [ $TEST_MAX_TIME -gt 0 ]; then
        export maxtime=$(( (TEST_MAX_TIME + 59) / 60))
    fi
    if [ -f "run.sh" ]; then
        PATH="$LAUNCH_DIR:$PATH" PROC_GRID="$PROC_GRID" DVMH_PPN=$LAUNCH_PPN DVMH_NUM_THREADS=$CPUS_PER_PROC DVMH_NUM_CUDAS=$CUDAS_PER_PROC ./run.sh >$STDOUT_FN 2>$STDERR_FN
        START_RES=$?
    else
        DVMH_PPN=$LAUNCH_PPN DVMH_NUM_THREADS=$CPUS_PER_PROC DVMH_NUM_CUDAS=$CUDAS_PER_PROC ./dvm run $PROC_GRID "$TASK_EXE" >$STDOUT_FN 2>$STDERR_FN
        START_RES=$?
    fi
    unset maxtime
    :>"$TASK_EXE.committed"
    IS_LAUNCHED=`is_launched $STDOUT_FN $STDERR_FN`
    rm $STDOUT_FN $STDERR_FN
    if [ $START_RES -eq 0 -a $IS_LAUNCHED -ne 0 ]; then
        while [ `is_finished "$LAUNCH_NAME"` -eq 0 ]; do
            sleep 1
        done
        CALC_TIME=`get_elapsed_time "$LAUNCH_NAME"`
    fi
    if [ $HAS_RES_MANAGER -eq 0 ]; then
        resources_freed
    fi
    echo "$START_RES $CALC_TIME" >"$TASK_EXE.finished"
}

already_analyzed() {
    # echo -n "PLATFORM=\"$TEST_PLATFORM\""
    # echo -n " NOH_FLAG=$TASK_NOH_FLAG"
    # echo -n " AUTOTFM_FLAG=$TASK_AUTOTFM_FLAG"
    # echo -n " PROC_GRID=\"$PROC_GRID\""
    # echo -n " CPUS_PER_PROC=$CPUS_PER_PROC"
    # echo -n " CUDAS_PER_PROC=$CUDAS_PER_PROC"
    local res
    res=0
    if [ -f "$RESULTS_DIR/$TEST_SHORT_PATH.result" ]; then
        if [ $( cat "$RESULTS_DIR/$TEST_SHORT_PATH.result" | grep "PLATFORM=\"$TEST_PLATFORM\"" | grep "NOH_FLAG=$TASK_NOH_FLAG" | grep "AUTOTFM_FLAG=$TASK_AUTOTFM_FLAG" | grep "PROC_GRID=\"$PROC_GRID\"" | grep "CPUS_PER_PROC=$CPUS_PER_PROC" | grep "CUDAS_PER_PROC=$CUDAS_PER_PROC" | wc -l ) -gt 0 ]; then
            res=1
        fi
    fi
    echo $res
}

launcher() {
    counter=0
    if [ $HAS_RES_MANAGER -eq 0 ]; then
        if [ $MAX_NODES_PER_TASK -gt 1 ]; then
            echo "Can manage resources only for a one-node system"
            MAX_NODES_PER_TASK=1
        fi
        FREE_CPUS=$(( CPUS_PER_NODE * MAX_CPU_SHARING_FACTOR ))
        FREE_CUDAS=$(( CUDAS_PER_NODE * MAX_CUDA_SHARING_FACTOR ))
    fi
    exec 4>$1
    while IFS= read -r TASK_SPEC; do
        TEST_PLATFORM=Unknown
        TASK_NOH_FLAG=0
        TASK_AUTOTFM_FLAG=0
        PROC_GRID=0
        CPUS_PER_PROC=0
        CUDAS_PER_PROC=0
        eval $TASK_SPEC
        LAUNCHED_FLAG=0
        ALREADY_ANALYZED=$( already_analyzed )
        if [ $TASK_TYPE -eq 1 -a $ALREADY_ANALYZED -eq 0 ]; then
            CAN_CPUS=$CPUS_PER_NODE
            CAN_CUDAS=$CUDAS_PER_NODE
            if [ $SHARE_RESOURCES -ne 0 ]; then
                CAN_CPUS=$(( CAN_CPUS * MAX_CPU_SHARING_FACTOR ))
                CAN_CUDAS=$(( CAN_CUDAS * MAX_CUDA_SHARING_FACTOR ))
            fi
            LAUNCH_PPN=$MAX_PPN
            CUR_PPN=$LAUNCH_PPN
            if [ $CPUS_PER_PROC -gt 0 ]; then
                CUR_PPN=$(( CAN_CPUS / $CPUS_PER_PROC ))
            fi
            if [ $CUR_PPN -lt $LAUNCH_PPN ]; then
                LAUNCH_PPN=$CUR_PPN
            fi
            if [ $CUDAS_PER_PROC -gt 0 ]; then
                CUR_PPN=$(( CAN_CUDAS / $CUDAS_PER_PROC ))
            fi
            if [ $CUR_PPN -lt $LAUNCH_PPN ]; then
                LAUNCH_PPN=$CUR_PPN
            fi
            totalProcs=1
            for proc in $PROC_GRID; do
                totalProcs=$(( totalProcs * proc ))
            done
            if [ $LAUNCH_PPN -gt 0 ]; then
                USE_NODES=$(( ( totalProcs + LAUNCH_PPN - 1 ) / LAUNCH_PPN ))
            else
                LAUNCH_PPN=1
                USE_NODES=$(( MAX_NODES_PER_TASK + 1 ))
            fi
            NEED_CPUS=$(( totalProcs * CPUS_PER_PROC ))
            NEED_CUDAS=$(( totalProcs * CUDAS_PER_PROC ))
            if [ $USE_NODES -le $MAX_NODES_PER_TASK ]; then
                # Launch
                counter=$(( counter + 1 ))
                LAUNCH_DIR=`mktemp -d`
                cp -r $TASK_DIR/* $LAUNCH_DIR/
                TASK_SPEC=$( echo -n "$TASK_SPEC" ; echo " LAUNCH_DIR=\"$LAUNCH_DIR\"" )
                if [ $HAS_RES_MANAGER -eq 0 ]; then
                    LAUNCH_NAME="$LAUNCH_DIR/$TASK_EXE"
                else
                    LAUNCH_NAME="$LAUNCH_DIR/$TASK_EXE.$totalProcs.1"
                fi
                TASK_SPEC=$( echo -n "$TASK_SPEC" ; echo " LAUNCH_NAME=\"$LAUNCH_NAME\"" )
                while true; do
                    if [ -f "$SAVE_DIR/dvm-tester.pause" ] && [ "$(cat "$SAVE_DIR/dvm-tester.pause")" = "Immediate" ]; then
                        :
                    elif [ -f "$MY_DIR/dvm-tester.pause" ] && [ "$(cat "$MY_DIR/dvm-tester.pause")" = "Immediate" ]; then
                        :
                    else
                        break
                    fi
                    sleep 60
                done
                if [ $HAS_RES_MANAGER -ne 0 ]; then
                    while [ `can_launch` -eq 0 ]; do
                        sleep 1
                    done
                else
                    if [ $SHARE_RESOURCES -eq 0 ]; then
                        NEED_CPUS=$(( CPUS_PER_NODE * MAX_CPU_SHARING_FACTOR ))
                        NEED_CUDAS=$(( CUDAS_PER_NODE * MAX_CUDA_SHARING_FACTOR ))
                    fi
                    cd "$RES_MAN_DIR"
                    while [ $FREE_CPUS -lt $NEED_CPUS -o $FREE_CUDAS -lt $NEED_CUDAS ]; do
                        FOUND_SMTH=0
                        for f in `ls`; do
                            FREED_CPUS=
                            FREED_CUDAS=
                            . ./$f
                            if [ -n "$FREED_CPUS" -a -n "$FREED_CUDAS" ]; then
                                FOUND_SMTH=1
                                FREE_CPUS=$(( FREE_CPUS + FREED_CPUS ))
                                FREE_CUDAS=$(( FREE_CUDAS + FREED_CUDAS ))
                                rm $f
                            fi
                        done
                        if [ $FOUND_SMTH -eq 0 ]; then
                            sleep 1
                        fi
                    done
                    FREE_CPUS=$(( FREE_CPUS - NEED_CPUS ))
                    FREE_CUDAS=$(( FREE_CUDAS - NEED_CUDAS ))
                fi
                # Actually launch
                if [ $INTERACTIVE -ne 0 ]; then
                    interactive_launcher &
                else
                    non_interactive_launcher &
                    if [ $HAS_RES_MANAGER -ne 0 ]; then
                        while [ ! -f "$LAUNCH_DIR/$TASK_EXE.committed" ]; do
                            sleep 1
                        done
                    fi
                fi
                LAUNCHED_FLAG=1
            else
                # Cannot launch such a big task
                echo "Discarding too big task: $TASK_SPEC"
            fi
        elif [ $TASK_TYPE -eq 0 ]; then
            LAUNCHED_FLAG=1
        else
            echo "Discarding task: $TASK_SPEC"
        fi
        if [ $LAUNCHED_FLAG -ne 0 ]; then
            echo "$TASK_SPEC" >& 4
        fi
    done
    echo ":" >& 4
    exec 4>&-
    echo "Total tasks launched: $counter"
}

print_result_line() {
    echo -n "PLATFORM=\"$TEST_PLATFORM\""
    echo -n " NOH_FLAG=$TASK_NOH_FLAG"
    echo -n " AUTOTFM_FLAG=$TASK_AUTOTFM_FLAG"
    echo -n " PROC_GRID=\"$PROC_GRID\""
    echo -n " CPUS_PER_PROC=$CPUS_PER_PROC"
    echo -n " CUDAS_PER_PROC=$CUDAS_PER_PROC"
    echo -n " CALC_TIME=$TASK_CALC_TIME"
    echo -n " TEST_PASSED=$TEST_PASSED"
    echo -n " RESULT_COMMENT=\"$RESULT_COMMENT\""
    echo " ERROR_LEVEL=$ERROR_LEVEL"
}

analyzer() {
    counter=0
    FIFO_NAME="$1"
    while IFS= read -r TASK_SPEC; do
        if [ "$TASK_SPEC" = ":" ]; then
            break
        fi
        CPUS_PER_PROC=0
        CUDAS_PER_PROC=0
        TASK_NOH_FLAG=0
        TASK_AUTOTFM_FLAG=0
        eval $TASK_SPEC
        if [ $TASK_TYPE -eq 0 ]; then
            if [ ! -f "$TASK_DIR/$TASK_EXE" ]; then
                # Report a compilation error
                if [ `basename "$TEST_SHORT_PATH"` != "$TEST_SHORT_PATH" ]; then
                    mkdir -p "$RESULTS_DIR/$(dirname "$TEST_SHORT_PATH")"
                fi
                PROC_GRID=
                CPUS_PER_PROC=
                CUDAS_PER_PROC=
                TASK_CALC_TIME=
                TEST_PASSED=0
                RESULT_COMMENT="Compilation error"
                ERROR_LEVEL=255
                print_result_line >>"$RESULTS_DIR/$TEST_SHORT_PATH.result"
            fi
            # Clean up all the test's files
            rm -rf "$TASK_DIR"
        else
            counter=$(( counter + 1 ))
            cd "$LAUNCH_DIR"
            while [ ! -f "$TASK_EXE.finished" ]; do
                sleep 1
            done
            read LAUNCH_EXIT_CODE TASK_CALC_TIME <"$TASK_EXE.finished"
            STDOUT_FN=`stdout_fn "$LAUNCH_NAME"`
            STDERR_FN=`stderr_fn "$LAUNCH_NAME"`
            SUBTEST_COUNT=0
            . $TEST_ANALYZER
            if [ `basename "$TEST_SHORT_PATH"` != "$TEST_SHORT_PATH" ]; then
                mkdir -p "$RESULTS_DIR/$(dirname "$TEST_SHORT_PATH")"
            fi
            print_result_line >>"$RESULTS_DIR/$TEST_SHORT_PATH.result"
            if [ $SUBTEST_COUNT -gt 0 ]; then
                mkdir -p $RESULTS_DIR/$TEST_SHORT_PATH
                for i in `seq $SUBTEST_COUNT`; do
                    SUBTEST_NAME=$i
                    analyze_subtest $i
                    print_result_line >>"$RESULTS_DIR/$TEST_SHORT_PATH/$SUBTEST_NAME.result"
                done
            fi
            # if [ $LAUNCH_EXIT_CODE -ne 0 -o "$RESULT_COMMENT" = "Crash" ]; then
            #     echo "Test's $TEST_SHORT_PATH stdout:"
            #     cat "$STDOUT_FN"
            #     echo "Test's $TEST_SHORT_PATH stderr:"
            #     cat "$STDERR_FN"
            # fi
            rm -rf "$LAUNCH_DIR"
        fi
    done <$FIFO_NAME
    echo "Total tasks analyzed: $counter"
}

FIFO_NAME="$(mktemp -u).launch-fifo"
mkfifo $FIFO_NAME

analyzer $FIFO_NAME &
launcher $FIFO_NAME

wait

rm $FIFO_NAME

if [ $HAS_RES_MANAGER -eq 0 ]; then
    cd "$RES_MAN_DIR"
    for f in `ls`; do
        . ./$f
    done
    cd "$SAVE_DIR"
    rm -rf "$RES_MAN_DIR"
fi
103
dvm/tools/tester/trunk/main/test-system.sh
Normal file
@@ -0,0 +1,103 @@
#!/bin/sh

unset CDPATH

SAVE_DIR=`pwd`
MY_DIR=$(cd "$(dirname "$(which "$0")")" && pwd)

DVMSYS_DIR=
if [ -f ./dvm ]; then
    DVMSYS_DIR_CMD="DVMSYS_DIR=$(grep 'dvmdir=' <./dvm | sed -s 's/export //g' | sed -s 's/dvmdir=//g')"
    eval $DVMSYS_DIR_CMD
fi
TEST_SUITE=test-suite
RESULTS_DIR=
APPEND_RESULTS=0

parse_params() {
    while [ -n "$1" ]; do
        if [ "$1" = "--dvm_sys" ]; then
            DVMSYS_DIR="$2"
            shift
        elif [ "$1" = "--test-suite" ]; then
            TEST_SUITE="$2"
            shift
        elif [ "$1" = "--append-results" ]; then
            APPEND_RESULTS=1
            RESULTS_DIR="$2"
            shift
        else
            echo "Unknown option '$1'"
            exit 1
        fi
        shift
    done
}

parse_params $@ || exit 1

# Check settings
cd "$SAVE_DIR"
if [ ! -d "$DVMSYS_DIR" -o ! -d "$DVMSYS_DIR/user" -o ! -f "$DVMSYS_DIR/user/dvm" -o ! -d "$TEST_SUITE" ]; then
    MY_NAME=`basename "$0"`
    echo "Usage: $0 [<options>]"
    echo " --dvm_sys Directory of an already installed DVM-system. Note that it is the directory which contains the 'user' directory directly; usually it is .../dvm_current/dvm_sys. By default $MY_NAME searches for a 'dvm' file in the current directory and attempts to use its DVM-system"
    echo " --test-suite Directory with the test suite, which is laid out in a special way. By default the 'test-suite' directory is used."
    echo " --append-results Directory with partial results, which will be appended to. By default a new directory is created."
    echo "Exiting"
    exit 1
fi

# Make them global paths
DVMSYS_DIR=$(cd "$DVMSYS_DIR" && pwd)
TEST_SUITE=$(cd "$TEST_SUITE" && pwd)

if [ $APPEND_RESULTS -eq 0 ]; then
    RESULTS_DIR="$SAVE_DIR/$(basename "$TEST_SUITE").results"
else
    RESULTS_DIR=$(cd "$RESULTS_DIR" && pwd)
fi
export TMPDIR="$SAVE_DIR/$(basename "$TEST_SUITE").work"
if [ -d "/home/scratch" ]; then
    TEMPL_NAME="/home/scratch/$(basename "$TMPDIR").XXX"
    TMPDIR=$(mktemp -d "$TEMPL_NAME")
fi
mkdir -p "$TMPDIR"

# Launch the task processor
TASK_FIFO="$(mktemp -u).task-fifo"
mkfifo "$TASK_FIFO"
if [ $APPEND_RESULTS -eq 0 ]; then
    if [ -e "$RESULTS_DIR" ]; then
        echo -n "$RESULTS_DIR already exists. Do you want to overwrite it (Y/n)? "
        ans=n
        read ans
        if [ "$ans" != "y" -a "$ans" != "Y" ]; then
            echo "Exiting"
            exit 1
        fi
    fi
    rm -rf "$RESULTS_DIR"
fi
mkdir -p "$RESULTS_DIR"
cd "$SAVE_DIR"
"$MY_DIR/task-processor.sh" "$RESULTS_DIR" <"$TASK_FIFO" &

# Sequentially feed the task processor from our test suite
exec 4>"$TASK_FIFO"
cd "$SAVE_DIR"
"$MY_DIR/perform-tests.sh" "$DVMSYS_DIR" "$TEST_SUITE" 4
exec 4>&-

# Wait for the task processor to finish
wait

# Cleanup
rm "$TASK_FIFO"

# Generate the final report
cd "$SAVE_DIR"
"$MY_DIR/gen-report.sh" "$TEST_SUITE" "$RESULTS_DIR"

echo "Results can be seen in the $RESULTS_DIR directory"
rm -rf "$TMPDIR"
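For illustration (not part of this commit): a typical invocation of this driver script, with illustrative paths. The second command shows how --append-results reuses a partial results directory, so launches that already have a matching result line are skipped (see already_analyzed in task-processor.sh):

    # Full run against an installed DVM-system
    ./test-system.sh --dvm_sys /opt/dvm_current/dvm_sys --test-suite ./test-suite
    # Resume later, appending to the results produced by the first run
    ./test-system.sh --dvm_sys /opt/dvm_current/dvm_sys --test-suite ./test-suite --append-results ./test-suite.results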
19
dvm/tools/tester/trunk/main/test-utils.sh
Normal file
@@ -0,0 +1,19 @@
#!/bin/sh

proc_killer() {
    local PROC
    local TIMEOUT
    local counter
    PROC="$1"
    TIMEOUT=$2
    counter=0
    while [ $counter -lt $TIMEOUT ]; do
        sleep 10
        counter=$(( counter + 10 ))
    done
    kill -2 $PROC >/dev/null 2>& 1
    sleep 10
    kill -15 $PROC >/dev/null 2>& 1
    sleep 10
    kill -9 $PROC >/dev/null 2>& 1
}
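For illustration (not part of this commit): proc_killer is used as a detached watchdog. The caller passes the job's PID negated (so the whole process group is signalled; the scripts enable job control with 'set -m' first) plus a timeout in seconds, then cancels the watchdog once the job exits on its own, as perform-tests.sh and task-processor.sh do. A minimal usage sketch with a placeholder command:

    #!/bin/sh
    . ./test-utils.sh                 # assuming test-utils.sh is in the current directory
    set -m                            # give the background job its own process group
    ./long_running_job &              # placeholder for the guarded command
    JOB_PID=$!
    proc_killer -$JOB_PID 600 &       # escalate INT/TERM/KILL after roughly 600 seconds
    KILLER_PID=$!
    wait $JOB_PID
    kill -9 $KILLER_PID >/dev/null 2>&1   # job finished in time; cancel the watchdog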