early spring cleaning

ville rantanen
2015-01-20 10:28:21 +02:00
parent 8cd0604d02
commit 9ef188ff01
27 changed files with 0 additions and 1319 deletions

View File

@@ -1,16 +0,0 @@
#!/bin/bash
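# Summarize Slurm CPU allocation: prints one "index:CPUAlloc" pair per node reported by scontrol (index offset by 3).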
NODELIST=( $( scontrol show node|grep NodeName | sed -e 's,[^ ]\+=\([^ ]\+\) .*,\1,' ) )
NODERUNS=( )
# find node with least allocated CPUs
for e in ${NODELIST[@]}
do NODERUNS+=( $( scontrol show node $e | grep CPUAlloc | sed 's,.*CPUAlloc=\([0-9]\+\).*,\1,' ) )
done
for (( i=0; i<${#NODERUNS[@]}; i++ ))
do echo -n "$(( $i+3 )):${NODERUNS[$i]} "
done
echo ""

View File

@@ -1,45 +0,0 @@
#!/bin/bash
set -f
MAXDEPTH="-maxdepth 1"
HIDDEN="-not -path */\.*"
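# Default key transform: awk rewrites "/" to "_" in the Key column; the program string is word-split when $KEYS is expanded below.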
KEYS="awk {gsub(/\//,\"_\",\$1);print}"
ME=$( basename "$0" )
HELP='Create an array _index for Anduril of the given folder.
Usage: '$ME' [-anr] [path]
-a Include ".*" hidden files
-n Use number for key
-r Recursively find all files in path, exclude folder names from output
'
while getopts ":anrh" opt; do
case $opt in
a)
HIDDEN=""
;;
n)
KEYS='awk $1=NR'
;;
r)
MAXDEPTH="-type f"
;;
h)
echo "$HELP"
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
echo "$HELP" >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
INPATH="$1"
[ -z "$INPATH" ] && INPATH=.
echo -e '"Key"'"\t"'"File"'
find "$INPATH" -mindepth 1 $MAXDEPTH $HIDDEN -not -name _index -printf '"%P"\t"%P"\n' \
| sort -V | $KEYS

View File

@@ -1,105 +0,0 @@
#!/usr/bin/python
import sys
import os,shutil
import re
from argparse import ArgumentParser

LOGFILE="_global"
EXECFILE="_state"

def setup_options():
    parser=ArgumentParser(description="Remove execution folders recursively")
    parser.add_argument("-l",action="store_true",dest="logs",default=False,
                        help="Search for log folders [%(default)s]")
    parser.add_argument("-e",action="store_true",dest="execs",default=False,
                        help="Search for execution folders [%(default)s]")
    parser.add_argument("-f",action="store_true",dest="force",default=False,
                        help="Force deletion, otherwise just lists the entries. WARNING: THIS OPTION DELETES FILES RECURSIVELY WITHOUT ASKING. [%(default)s]")
    parser.add_argument("-L",action="store_true",dest="symlinks",default=False,
                        help="Follow symbolic links [%(default)s]")
    parser.add_argument("-m",type=str,action='store',dest="move",default="",
                        help="Move folders to a given subfolder instead of deleting. WARNING: MOVES FILES WITHOUT ASKING. Moving is only sensible within the same filesystem.")
    parser.add_argument('startpath', action="store",default='.', nargs='?')
    options=parser.parse_args()
    if options.move!="":
        options.force=False
        if not options.move.endswith("/"):
            options.move+="/"
    if (not options.logs) and (not options.execs):
        print("Nothing to do: neither log folders (-l) nor execution folders (-e) were requested")
        sys.exit(1)
    return options

def delete_folder(folder):
    # Delete folder if it exists
    if os.path.exists(folder):
        shutil.rmtree(folder)

def move_folder(folder,options):
    # Move a folder to a location, with the folder name flattened (/ -> _)
    if not os.path.exists(options.move):
        os.makedirs(options.move)
    new_name=folder
    while new_name.startswith(".") or new_name.startswith("/"):
        new_name=new_name[1:]
    new_name=new_name.replace("/","_")
    sys.stdout.write(new_name)
    new_name=os.path.join(options.move,new_name)
    if os.path.exists(new_name):
        sys.stdout.write(" Folder already exists! Not moving.")
        return
    shutil.move(folder, new_name)

def recurse(options):
    for path,dirs,files in os.walk(options.startpath,followlinks=options.symlinks):
        # do not move files in the move folder a second time
        if options.move!="":
            if os.path.exists(options.move) and os.path.samefile(options.move, path):
                del dirs[:]
                continue
        take_action=False
        if options.logs:
            if LOGFILE in files:
                take_action=True
        if EXECFILE in files:
            del dirs[:]
            # never descend into execution directories
            if options.execs:
                take_action=True
        if take_action:
            del dirs[:]
            sys.stdout.write(path)
            # only one of the following will be true:
            if options.force:
                sys.stdout.write(" Deleting")
                delete_folder(path)
            if options.move!="":
                sys.stdout.write(" -> "+options.move)
                move_folder(path,options)
            sys.stdout.write("\n")
        else:
            # Recurse with cleaned-out dirs
            dirs.sort()
            dirs=clean_dirs(dirs)
    return

def clean_dirs(dirs):
    # removes hidden folders
    for s in dirs[:]:
        if (s.startswith(".")):
            dirs.remove(s)
    return dirs

def main():
    options=setup_options()
    recurse(options)
    sys.exit(0)

main()

View File

@@ -1,21 +0,0 @@
#!/bin/bash
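# Mail a short Anduril run summary (plus the tail of the global log) to the user.
# NETWORK_TO_RUN, CLEANER_TITLE, RESULTS_IN and LOG_IN are expected in the caller's environment.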
(
echo 'Subject: Execution finished! '$NETWORK_TO_RUN'
'
echo "Pipeline: $CLEANER_TITLE"
echo -n "Started in: "
pwd
echo "Host: $HOSTNAME"
echo "Execution folder: $RESULTS_IN"
echo "Log folder: $LOG_IN"
[ -f "$LOG_IN"/_global ] && {
echo "===== Log tail ====="
head -n 1 "$LOG_IN"/_global
tail -n 20 "$LOG_IN"/_global
}
) | sendmail -r$USER@mappi.helsinki.fi $USER@mappi.helsinki.fi &

View File

@@ -1,19 +0,0 @@
#!/bin/bash
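# Report allocated CPUs per Slurm node; nodes in State=DOWN are shown as "DN".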
NODELIST=( $( scontrol show node|grep NodeName | sed -e 's,[^ ]\+=\([^ ]\+\) .*,\1,' ) )
NODERUNS=( )
# find node with least allocated CPUs
for e in ${NODELIST[@]}
do NODERUNS+=( $( scontrol show node $e | grep CPUAlloc | sed 's,.*CPUAlloc=\([0-9]\+\).*,\1,' ) )
done
echo -n "CPUAllocated: "
for (( i=0; i<${#NODERUNS[@]}; i++ ))
do scontrol -o show node ${NODELIST[$i]} | grep State=DOWN >/dev/null && NODERUNS[$i]=DN
echo -n "${NODELIST[$i]}:${NODERUNS[$i]} "
done
echo ""

View File

@@ -1,75 +0,0 @@
#!/bin/bash
help='CSC Virtual Machine Load Status
-s Short (default)
-l Long
-c Do not pretty print short output
-b [l|m] return the name of the node with l=least load, m=least used memory
-u [username] return a list of nodes with processes of given user
L=Load, M=used memory + cached / total, U=users with processes
'
mode="short"
postprocess=" | sort -V | ncsv -c -i' ' -d' ' "
while getopts "clshu:b:" OPTS;
do case ${OPTS} in
s)
mode="short"
;;
l)
mode="long"
;;
c)
postprocess=""
;;
b)
mode="best"
[ "$OPTARG" = "l" ] && bestcol=3
[ "$OPTARG" = "m" ] && bestcol=6
if [ -z "$bestcol" ]
then echo "Argument to -b not recognized"
echo "$help"
exit
fi
;;
u)
mode="user"
username=$OPTARG
[ -z "$username" ] && exit
;;
h)
echo "$help"
exit
;;
?)
echo "$help"
exit
;;
esac
done
if [ "$OPTIND" -gt 1 ]
then shift $(( $OPTIND-1 ))
fi
status_folder="/mnt/csc-gc5/vm_state"
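# Each VM publishes its *.short and *.long status files into this shared folder.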
if [ ! -d "$status_folder" ]
then echo "$status_folder not mounted"
exit
fi
if [ "$mode" = "short" ]
then eval "cat $status_folder/*short $postprocess"
fi
if [ "$mode" = "long" ]
then eval "cat $status_folder/*long"
fi
if [ "$mode" = "best" ]
then eval "cat $status_folder/*short" | sed 's,[|/:\+], ,g' | sort -k $bestcol -n | cut -d" " -f2 | head -n 1
fi
if [ "$mode" = "user" ]
then eval "grep $username $status_folder/*long" | sed -e 's,.*/,,' -e 's,\..*, ,' | tr -d [:cntrl:]
echo ""
fi

View File

@@ -1,183 +0,0 @@
#!/usr/bin/python
import os
import sys
import re
import csv
import shutil
from argparse import ArgumentParser

filesearch=re.compile('^_index$')
STATEFILE='_state'
DRYSTATEFILE='_stateDryRun'

def setup_options():
    usage='''%(prog)s [options]
Use rsync to copy your execution folder to a new place.
e.g. rsync -avP -e ssh user@server:/source/path/ target/path/
Then give a replacement pair -i and -o for each expected changed absolute path.
(multiple -i and -o are often required)
Each replacement pair is tried until a file with the replaced name is found.
'''
    parser=ArgumentParser(usage=usage)
    parser.add_argument("-d",type=str,dest="execpath",default=".",
                        help="Execution folder for anduril (location for _state), default: %(default)s")
    parser.add_argument("-i",type=str,dest="inabs",action="append",default=["."],
                        help="Input absolute path prefix. e.g. /home1/user1/project/")
    parser.add_argument("-o",type=str,dest="outabs",action="append",default=["."],
                        help="Output absolute path prefix. e.g. /home2/user2/different_project/")
    parser.add_argument("-a",action="store_false",dest="arrays",default=True,
                        help="Disable finding _index (array) files. Just modify _state and nothing else.")
    return parser.parse_args()

def check_options(opts):
    if not (os.path.isfile(os.path.join(opts.execpath,STATEFILE))):
        print(STATEFILE+' file not found in folder '+opts.execpath)
        sys.exit(1)
    if len(opts.inabs)!=len(opts.outabs):
        print('A matching pair must be given for each -i/-o argument')
        sys.exit(1)
    if not opts.execpath.endswith('/'):
        opts.execpath=opts.execpath+'/'
    for i in xrange(len(opts.inabs)):
        if not opts.inabs[i].endswith('/'):
            opts.inabs[i]=opts.inabs[i]+'/'
    for i in xrange(len(opts.outabs)):
        if not opts.outabs[i].endswith('/'):
            opts.outabs[i]=opts.outabs[i]+'/'
    return opts

def getpathlist(path):
    ''' Returns a list of subfolders '''
    list=os.listdir(path)
    paths=[]
    for d in list:
        if (os.path.isdir(os.path.join(path,d))):
            paths.append(d+'/')
    return paths

def getfilelist(path):
    ''' Returns a list of files that might require change '''
    list=os.listdir(path)
    files=[]
    for f in list:
        if (filesearch.match(f)) and (os.path.isfile(os.path.join(path,f))):
            files.append(f)
    return files

def statefile(opts,path):
    print('Parsing _state file')
    shutil.copy2(os.path.join(path,STATEFILE), os.path.join(path,STATEFILE+'.bkp'))
    statereader=csv.reader(open(os.path.join(path,STATEFILE),'rb'),
                           delimiter='\t',
                           doublequote=False,
                           escapechar='\\',
                           quoting=csv.QUOTE_NONE)
    stateout=[]
    for row in statereader:
        rowout=row
        if row[3].startswith('INPUT '):
            newinput=row[3]
            rowpstart=row[3].index(' P path=')
            rowtstart=row[3].index(' TS in=')
            rowpath=row[3][(rowpstart+7):rowtstart]
            rowtime=row[3][(rowtstart+7):-1]
            # time has a space at the end (and three zeros...)
            print('INPUT found: "'+row[3]+'"')
            found=False
            for i in xrange(len(opts.inabs)):
                newpath=rowpath.replace('='+opts.inabs[i],'='+opts.outabs[i])
                if os.path.exists(newpath[1:]):
                    found=True
                    newtime=str(int(os.path.getmtime(newpath[1:])))+'000'
                    newinput=row[3].replace(rowpath,newpath,1).replace(rowtime,newtime,1)
                    print('NEW INPUT : "'+newinput+'"')
                    rowout[3]=newinput
                    break
            if not found:
                print('WARN: Could not find new INPUT, check your -i and -o arguments')
        stateout.append(rowout)
    statewriter=csv.writer(open(os.path.join(path,STATEFILE),'wb'),
                           delimiter='\t',
                           doublequote=False,
                           escapechar='\\',
                           quoting=csv.QUOTE_NONE)
    statewriter.writerows(stateout)
    return

def arrayreplace(path,filelist,opts):
    header=['Key','File']
    for f in filelist:
        print('Modifying array: '+os.path.join(path,f))
        arrayreader=csv.DictReader(open(os.path.join(path,f),'rb'),
                                   delimiter='\t',
                                   quotechar='"',
                                   quoting=csv.QUOTE_ALL)
        arrayout=[]
        for row in arrayreader:
            rowout=row
            if os.path.exists(os.path.join(path,row['File'])):
                # File is a relative path, and exists - next iteration.
                arrayout.append(rowout)
                continue
            rowpath=row['File']
            found=False
            for i in xrange(len(opts.inabs)):
                newpath=rowpath.replace(opts.inabs[i],opts.outabs[i],1)
                if os.path.exists(newpath):
                    found=True
                    rowout['File']=newpath
                    break
            if not found:
                print('WARN: Could not find File '+rowpath+' in '+os.path.join(path,f)+', check your -i and -o arguments')
            arrayout.append(rowout)
        writer = csv.DictWriter(open(os.path.join(path,f),'wb'),
                                header,
                                delimiter='\t',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        writer.writerow(dict(zip(header,header)))
        writer.writerows(arrayout)
    return

def statequery(path):
    statereader=csv.reader(open(os.path.join(path,DRYSTATEFILE),'rb'),
                           delimiter='\t',
                           doublequote=False,
                           escapechar='\\',
                           quoting=csv.QUOTE_NONE)
    for row in statereader:
        if row[1]=='NO':
            print('Instance will run: '+row[0])
        else:
            print("Instance won't run: "+row[0])
    return

def traverse(opts,path):
    pathlist=getpathlist(path)
    filelist=getfilelist(path)
    arrayreplace(path,filelist,opts)
    for p in pathlist:
        traverse(opts,os.path.join(path,p))
    return

def main():
    opts=setup_options()
    opts=check_options(opts)
    if opts.arrays:
        traverse(opts,opts.execpath)
    statefile(opts,opts.execpath)
    return

main()

View File

@@ -1,53 +0,0 @@
#!/bin/bash
USAGE="Usage: "$( basename $0 )" [options] [--] [command including srun options]
Sends the command to the Slurm queue with srun
Version (c) Ville.Rantanen@helsinki.fi
Options:
--possess Possess the whole node for the run
-c N Request N CPUs [default 4]
--mem M Request M GB of memory [default 20]
-h This help
Use -- to separate slurp's own options from the command and its arguments.
"
if [ -z "$1" ]
then echo -e "$USAGE"
exit 1
fi
SHARE=-s
CPU=4
MEM=20
declare -a argv=("$@")
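# Strip slurp's own options (--possess, -c, --mem) from the argument list; everything that remains is passed to srun verbatim.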
for (( i=1; i<=${#} ;i++ ))
do [[ ${!i} == "--possess" ]] && {
echo Will possess the whole node.
SHARE=""
unset argv[$(( $i - 1 ))]
}
[[ ${!i} == "-c" ]] && {
CPU=${argv[$i]}
unset argv[$(( $i - 1 ))]
unset argv[$(( $i ))]
}
[[ ${!i} == "--mem" ]] && {
MEM=${argv[$i]}
unset argv[$(( $i - 1 ))]
unset argv[$(( $i ))]
continue
}
[[ ${!i} == "--mem="* ]] && {
MEM=${argv[$(( $i - 1 ))]#*=}
unset argv[$(( $i - 1 ))]
}
[[ ${!i} == "-h" ]] && {
echo -e "$USAGE"
exit 1
}
[[ ${!i} == "--" ]] && {
break
}
done
MEM=$(( 1024*$MEM ))
echo Running command: srun $SHARE -c $CPU --mem $MEM "${argv[@]}"
srun -v $SHARE -c $CPU --mem $MEM "${argv[@]}"

View File

@@ -1,27 +0,0 @@
#!/bin/bash
[ -z "$2" ] && { echo "Usage: $( basename "$0" ) file command [args...]
Runs the given command once the file exists; waits at most 5 minutes."
exit
}
fname="$1"
shift 1
otime=1
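# Poll once per second; give up after roughly 300 attempts (about 5 minutes).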
while :
do [ -e "$fname" ] || {
echo -n "File: $fname not found! "
date
}
[ -e "$fname" ] && {
break
}
otime=$(( $otime + 1 ))
sleep 1
[ $otime -gt 300 ] && {
echo "File $fname still missing after 5 minutes!"
exit 1
}
done
eval "$@"

View File

@@ -1 +0,0 @@
slurm-maxfree

View File

@@ -1,131 +0,0 @@
#!/bin/bash
if [ -z "$1" ]
then echo "Provide the script to run"
exit 1
fi
if [ -z "${ANDURIL_NODELIST}" ]
then NODELIST=( $( scontrol show node|grep NodeName | sed -e 's,[^ ]\+=\([^ ]\+\) .*,\1,' ) )
else read -a NODELIST <<< "$ANDURIL_NODELIST"
fi
JOBROOT="$HOME/.srun"
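# Per-job wrapper scripts, statistics and captured stdin are stored under ~/.srun.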
if [ "$1" == "-h" ]
then echo -ne '
This prefix selects the node with the most free CPUs for a Slurm run.
Use it with anduril: --exec-mode prefix --prefix '$( basename $0 )'
To change the list of nodes (preferred order):
export ANDURIL_NODELIST="vm3 vm4 vm5"
Current nodelist: "'${NODELIST[@]}'"'"( ${#NODELIST[@]} nodes)\n"
exit
fi
NODERUNS=( )
for e in ${NODELIST[@]}
do NODERUNS+=( 0 )
done
# find node with max free CPUs
while [ true ]
do
MAX=0
INDEX=0
for (( i=0; i<${#NODERUNS[@]}; i++ ))
do NODERUNS[$i]=$(( $( scontrol -o show node ${NODELIST[$i]} | sed 's,.*CPUAlloc=\([0-9]\+\).*CPUTot=\([0-9]\+\).*,\2-\1 ,' ) ))
scontrol -o show node ${NODELIST[$i]} | grep State=DOWN > /dev/null && NODERUNS[$i]=-1
echo -n "${NODELIST[$i]}:${NODERUNS[$i]} "
[ ${NODERUNS[$i]} -gt $MAX ] && {
MAX=${NODERUNS[$i]}
INDEX=$i
}
done
[ $MAX -gt 0 ] && {
break
} || {
SECONDS=$(( 10 + $RANDOM/1000 ))
echo "waiting for free sockets for $SECONDS s."
sleep $SECONDS
}
done
echo "srun: Node ${NODELIST[$INDEX]}, Free sockets: ${NODERUNS[$INDEX]}" >&2
mkdir -p "$JOBROOT"
for (( i=1; i<=$#; i++ ))
do if [[ "${!i}" == */_command ]]
then JOBNAME=job_
COMPONENTNAME=$( grep ^metadata.componentName= "${!i}" | sed s,^metadata.componentName=,, | sed -e 's/[^A-Za-z0-9._-]/_/g' )
INSTANCENAME=$( grep ^metadata.instanceName= "${!i}" | sed s,^metadata.instanceName=,, | sed -e 's/[^A-Za-z0-9._-]/_/g' )
JOBNAME="$JOBNAME"$( grep ^metadata.sourceLocation= "${!i}" | sed s,^metadata.sourceLocation=,, | sed -e 's/[^A-Za-z0-9._-]/_/g' )
JOBNAME="$JOBNAME"_$INSTANCENAME
JOBNAME="$JOBNAME"_$COMPONENTNAME
JOBNAME="$JOBNAME"_$( date +"%y%m%d_%H%M%S" )
CPU=$( grep ^metadata.cpu= "${!i}" | sed s,^metadata.cpu=,, )
MEMORY=$( grep ^metadata.memory= "${!i}" | sed s,^metadata.memory=,, )
export USERDEFINED=$( grep ^metadata.userDefined= "${!i}" | sed s,^metadata.userDefined=,, )
fi
done
JOBPATH="$JOBROOT/$JOBNAME"
while [ -d "$JOBPATH" ]
do echo Jobpath "$JOBPATH" exists
JOBNAME=job_$( date +"%y%m%d_%H%M%S" )_$( echo $@ | md5sum | cut -f1 -d" " )
JOBPATH="$JOBROOT/$JOBNAME"
done
mkdir -p "$JOBPATH"
JOBFILE="$JOBPATH/job"
STATFILE="$JOBPATH/statistics"
STRMFILE="$JOBPATH/stream"
EXECPATH=$( pwd )
[[ -z "$CPU" ]] || export CPU="-c $CPU"
[[ -z "$MEMORY" ]] || export MEMORY="--mem $MEMORY"
# create the jobfile
echo '#!/bin/bash' > "$JOBFILE"
chmod 755 "$JOBFILE"
# Find _command file
for (( i=1; i<=$#; i++ ))
do if [[ "${!i}" == */_command ]]
then echo 'retrys=0' >> "$JOBFILE"
echo 'while :
do [ -f "'${!i}'" ] && {
break
} || {
echo Waiting for _command file '${!i}'
retrys=$(( $retrys + 1 ))
sleep 1; [ "$retrys" -gt 15 ] && break;
}
done ' >> "$JOBFILE"
fi
done
echo 'echo Node: $HOSTNAME'" >> \"$STATFILE\" " >> "$JOBFILE"
echo "pwd >> \"$STATFILE\" " >> "$JOBFILE"
echo "date +'Start: %s' >> \"$STATFILE\" " >> "$JOBFILE"
echo -n "/usr/bin/time -o \"$STATFILE\" --append " >> "$JOBFILE"
for (( i=1; i<=$#; i++ ))
do echo -n "\"${!i}\" " >> "$JOBFILE"
done
#Catch the input stream (for R launcher)
cat - >> "$STRMFILE"
echo -n ' < "'$STRMFILE'"' >> "$JOBFILE"
echo -e "\n" >> "$JOBFILE"
echo 'EC=$?' >> "$JOBFILE"
echo "date +'Stop: %s' >> \"$STATFILE\" " >> "$JOBFILE"
echo 'exit $EC' >> "$JOBFILE"
echo "The job file is in $JOBFILE"
# send the job
echo \#srun $CPU $MEMORY -J $INSTANCENAME --nodelist=${NODELIST[$INDEX]} slurm-cake "$JOBFILE" "$JOBFILE" >> "$JOBFILE"
srun $CPU $MEMORY -J $INSTANCENAME --nodelist=${NODELIST[$INDEX]} slurm-cake "$JOBFILE" "$JOBFILE"
EC=$?
# clear out old job definitions (semirandomly)
[[ -e "$JOBROOT"/.lastdel ]] || touch "$JOBROOT"/.lastdel
if test "$( find $JOBROOT/.lastdel -mmin +30 )"
then touch "$JOBROOT"/.lastdel
find "$JOBROOT" -maxdepth 2 -mindepth 2 -type f -mtime +20 -delete 2>/dev/null
find "$JOBROOT" -type d -depth -empty -delete 2>/dev/null
fi
exit $EC

View File

@@ -1,126 +0,0 @@
#!/bin/bash
if [ -z "$1" ]
then echo "Provide the script to run"
exit 1
fi
if [ -z "${ANDURIL_NODELIST}" ]
then NODELIST=( $( scontrol show node|grep NodeName | sed -e 's,[^ ]\+=\([^ ]\+\) .*,\1,' ) )
else read -a NODELIST <<< "$ANDURIL_NODELIST"
fi
JOBROOT="$HOME/.srun"
if [ "$1" == "-h" ]
then echo -ne '
This tool selects a random node for a Slurm run.
Use it with anduril: --exec-mode prefix --prefix '$( basename $0 )'
To change the list of nodes for randomization:
export ANDURIL_NODELIST="vm3 vm4 vm5"
Current nodelist: "'${NODELIST[@]}'"'"( ${#NODELIST[@]} nodes)\n"
exit
fi
NODERUNS=( )
TRY=0
for e in ${NODELIST[@]}
do NODERUNS+=( 0 )
done
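# Pick nodes at random until one with free CPUs is found; back off for a few seconds after repeated misses.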
while [ true ]
do
INDEX=$(( ( RANDOM % ${#NODELIST[@]} ) ))
NODERUNS=$(( $( scontrol -o show node ${NODELIST[$INDEX]} | sed 's,.*CPUAlloc=\([0-9]\+\).*CPUTot=\([0-9]\+\).*,\2-\1 ,' ) ))
scontrol -o show node ${NODELIST[$INDEX]} | grep State=DOWN > /dev/null && NODERUNS=-1
echo "srun: Node ${NODELIST[$INDEX]}, Free sockets: $NODERUNS" >&2
[ $NODERUNS -gt 0 ] && {
break
} || {
TRY=$(( $TRY + 1 ))
echo "try again $TRY"
[ $TRY -gt 3 ] && {
TRY=0
SECONDS=$(( ( RANDOM % 10 ) ))
echo "waiting for free sockets for $SECONDS s."
sleep $SECONDS
}
}
done
mkdir -p "$JOBROOT"
for (( i=1; i<=$#; i++ ))
do if [[ "${!i}" == */_command ]]
then JOBNAME=job_
COMPONENTNAME=$( grep ^metadata.componentName= "${!i}" | sed s,^metadata.componentName=,, | sed -e 's/[^A-Za-z0-9._-]/_/g' )
INSTANCENAME=$( grep ^metadata.instanceName= "${!i}" | sed s,^metadata.instanceName=,, | sed -e 's/[^A-Za-z0-9._-]/_/g' )
JOBNAME="$JOBNAME"$( grep ^metadata.sourceLocation= "${!i}" | sed s,^metadata.sourceLocation=,, | sed -e 's/[^A-Za-z0-9._-]/_/g' )
JOBNAME="$JOBNAME"_$INSTANCENAME
JOBNAME="$JOBNAME"_$COMPONENTNAME
JOBNAME="$JOBNAME"_$( date +"%y%m%d_%H%M%S" )
CPU=$( grep ^metadata.cpu= "${!i}" | sed s,^metadata.cpu=,, )
MEMORY=$( grep ^metadata.memory= "${!i}" | sed s,^metadata.memory=,, )
export USERDEFINED=$( grep ^metadata.userDefined= "${!i}" | sed s,^metadata.userDefined=,, )
fi
done
JOBPATH="$JOBROOT/$JOBNAME"
while [ -d "$JOBPATH" ]
do echo Jobpath "$JOBPATH" exists
JOBNAME=job_$( date +"%y%m%d_%H%M%S" )_$( echo $@ | md5sum | cut -f1 -d" " )
JOBPATH="$JOBROOT/$JOBNAME"
done
mkdir -p "$JOBPATH"
JOBFILE="$JOBPATH/job"
STATFILE="$JOBPATH/statistics"
STRMFILE="$JOBPATH/stream"
EXECPATH=$( pwd )
[[ -z "$CPU" ]] || export CPU="-c $CPU"
[[ -z "$MEMORY" ]] || export MEMORY="--mem $MEMORY"
# create the jobfile
echo '#!/bin/bash' > "$JOBFILE"
chmod 755 "$JOBFILE"
# Find _command file
for (( i=1; i<=$#; i++ ))
do if [[ "${!i}" == */_command ]]
then echo 'retrys=0' >> "$JOBFILE"
echo 'while :
do [ -f "'${!i}'" ] && {
break
} || {
echo Waiting for _command file '${!i}'
retrys=$(( $retrys + 1 ))
sleep 1; [ "$retrys" -gt 15 ] && break;
}
done ' >> "$JOBFILE"
fi
done
echo 'echo Node: $HOSTNAME'" >> \"$STATFILE\" " >> "$JOBFILE"
echo "pwd >> \"$STATFILE\" " >> "$JOBFILE"
echo "date +'Start: %s' >> \"$STATFILE\" " >> "$JOBFILE"
echo -n "/usr/bin/time -o \"$STATFILE\" --append " >> "$JOBFILE"
for (( i=1; i<=$#; i++ ))
do echo -n "\"${!i}\" " >> "$JOBFILE"
done
#Catch the input stream (for R launcher)
cat - >> "$STRMFILE"
echo -n ' < "'$STRMFILE'"' >> "$JOBFILE"
echo -e "\n" >> "$JOBFILE"
echo 'EC=$?' >> "$JOBFILE"
echo "date +'Stop: %s' >> \"$STATFILE\" " >> "$JOBFILE"
echo 'exit $EC' >> "$JOBFILE"
echo "The job file is in $JOBFILE"
# send the job
echo \#srun $CPU $MEMORY -J $INSTANCENAME --nodelist=${NODELIST[$INDEX]} slurm-cake "$JOBFILE" "$JOBFILE" >> "$JOBFILE"
srun $CPU $MEMORY -J $INSTANCENAME --nodelist=${NODELIST[$INDEX]} slurm-cake "$JOBFILE" "$JOBFILE"
EC=$?
# clear out old job definitions (semirandomly)
[[ -e "$JOBROOT"/.lastdel ]] || touch "$JOBROOT"/.lastdel
if test "$( find $JOBROOT/.lastdel -mmin +30 )"
then touch "$JOBROOT"/.lastdel
find "$JOBROOT" -maxdepth 2 -mindepth 2 -type f -mtime +20 -delete 2>/dev/null
find "$JOBROOT" -type d -depth -empty -delete 2>/dev/null
fi
exit $EC

View File

@@ -1 +0,0 @@
slurm

View File

@@ -1,47 +0,0 @@
#!/bin/bash
if [ -z "$1" ]
then echo "Provide the script to run"
exit 1
fi
JOBPATH="$( pwd )/.slurpies"
mkdir -p "$JOBPATH"
JOBNAME=job_$( date +"%y%m%d%H%M%S%N" )
JOBFILE="$JOBPATH/$JOBNAME"
DONEFILE="$JOBPATH/$JOBNAME.done"
COMPPATH=$3
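# The wrapped job appends a file listing of COMPPATH to DONEFILE when it finishes;
# the loop below then waits until every listed file is visible locally (NFS sync).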
echo Job: $JOBFILE
echo "#!/bin/sh" > "$JOBFILE"
for (( i=1; i<=$#; i++ ))
do if [ $i -eq 5 ]
then echo -n \" >> "$JOBFILE"
eval echo -n ${!i} | sed s,\",\\\\\",g >> "$JOBFILE"
echo -n "\" " >> "$JOBFILE"
elif [ $i -eq 6 ]
then echo -n ${!i} >> "$JOBFILE"
else echo -n "\"${!i}\" " >> "$JOBFILE"
fi
done
echo "" >> "$JOBFILE"
echo 'find "'$COMPPATH'" > "'$DONEFILE'"' >> "$JOBFILE"
chmod 775 "$JOBFILE"
slurp "$JOBFILE"
sleep 1
# pause to make sure NFS has time to sync
while [ ! -e "$DONEFILE" ]
do sleep 5
done
DONE=0
IFS=$'\n'
while [ $DONE -eq 0 ]
do sleep 10
DONEITER=1
for f in $( cat "$DONEFILE" )
do [[ -a "$f" ]] || DONEITER=0
done
[[ $DONEITER -eq 1 ]] && DONE=1
done

View File

@@ -1,49 +0,0 @@
#!/bin/bash
if [ -z "${ANDURIL_NODELIST}" ]
then NODELIST=( $( scontrol show node|grep NodeName | sed -e 's,[^ ]\+=\([^ ]\+\) .*,\1,' ) )
else read -a NODELIST <<< "$ANDURIL_NODELIST"
fi
if [ "$1" == "-h" ]
then echo -ne '
This tool selects the node with the most free CPUs for a Slurm run.
To use your own preferred list of nodes:
export ANDURIL_NODELIST="vm3 vm4 vm5"
Current nodelist: "'${NODELIST[@]}'"'"( ${#NODELIST[@]} nodes)\n"
exit
fi
NODERUNS=( )
# find node with max free CPUs
for e in ${NODELIST[@]}
do NODERUNS+=( 0 )
done
while [ true ]
do
MAX=0
INDEX=0
for (( i=0; i<${#NODERUNS[@]}; i++ ))
do NODERUNS[$i]=$(( $( scontrol -o show node ${NODELIST[$i]} | sed 's,.*CPUAlloc=\([0-9]\+\).*CPUTot=\([0-9]\+\).*,\2-\1 ,' ) ))
scontrol -o show node ${NODELIST[$i]} | grep State=DOWN > /dev/null && NODERUNS[$i]=-1
echo -n "${NODELIST[$i]}:${NODERUNS[$i]} "
[ ${NODERUNS[$i]} -gt $MAX ] && {
MAX=${NODERUNS[$i]}
INDEX=$i
}
done
[ $MAX -gt 0 ] && {
break
} || {
SECONDS=$(( ( RANDOM % 30 ) ))
echo "waiting for free sockets for $SECONDS s."
sleep $SECONDS
}
done
echo "srun: Node ${NODELIST[$INDEX]}, Free sockets: ${NODERUNS[$INDEX]}" >&2
srun --nodelist=${NODELIST[$INDEX]} "$@"

View File

@@ -1,46 +0,0 @@
#!/bin/bash
if [ -z "${ANDURIL_NODELIST}" ]
then NODELIST=( $( scontrol show node|grep NodeName | sed -e 's,[^ ]\+=\([^ ]\+\) .*,\1,' ) )
else read -a NODELIST <<< "$ANDURIL_NODELIST"
fi
if [ "$1" == "-h" ]
then echo -ne '
This tool selects a random node with free CPUs for a Slurm run.
To use your own preferred list of nodes:
export ANDURIL_NODELIST="vm3 vm4 vm5"
Current nodelist: "'${NODELIST[@]}'"'"( ${#NODELIST[@]} nodes)\n"
exit
fi
NODERUNS=( )
TRY=0
# pick a random node and check that it has free CPUs
for e in ${NODELIST[@]}
do NODERUNS+=( 0 )
done
while [ true ]
do
INDEX=$(( ( RANDOM % ${#NODELIST[@]} ) ))
NODERUNS=$(( $( scontrol -o show node ${NODELIST[$INDEX]} | sed 's,.*CPUAlloc=\([0-9]\+\).*CPUTot=\([0-9]\+\).*,\2-\1 ,' ) ))
scontrol -o show node ${NODELIST[$INDEX]} | grep State=DOWN > /dev/null && NODERUNS=-1
echo "srun: Node ${NODELIST[$INDEX]}, Free sockets: $NODERUNS" >&2
[ $NODERUNS -gt 0 ] && {
break
} || {
TRY=$(( $TRY + 1 ))
echo "try again $TRY"
[ $TRY -gt 3 ] && {
TRY=0
SECONDS=$(( ( RANDOM % 10 ) ))
echo "waiting for free sockets for $SECONDS s."
sleep $SECONDS
}
}
done
srun --nodelist=${NODELIST[$INDEX]} "$@"

View File

@@ -1,33 +0,0 @@
#!/usr/bin/python
# echo client program
import socket
import sys

if len(sys.argv)<2:
    message='HELO'
else:
    message=sys.argv[1]

hosts=['vm1',
       'vm2',
       'vm3',
       'vm4',
       'vm5',
       'vm6',
       'narsil']
PORT = 50774 # The same port as used by the server

for HOST in hosts:
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((HOST, PORT))
        s.settimeout(10)
        s.send(message)
        data = s.recv(1024)
        sys.stdout.write(data)
        s.close()
    except:
        sys.stdout.write('|'+HOST+' does not answer. ')

View File

@@ -1,58 +0,0 @@
#!/usr/bin/python
# echo server program
import socket
import subprocess

help_message="""Recognized commands:
HELO (default)
SHORT
"""
HOST = '' # Symbolic name meaning all available interfaces
PORT = 50774 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))

while 1:
    s.listen(1)
    conn, addr = s.accept()
    #print 'Connected by', addr
    while 1:
        data = conn.recv(1024)
        if not data: break
        message=""
        if (data=="HELO"):
            uptime=subprocess.Popen(['uptime'], stdout=subprocess.PIPE)
            mem=subprocess.Popen(['free -tg | head -n 2'], stdout=subprocess.PIPE,shell=True)
            users=subprocess.Popen(['who -q | head -n 1'], stdout=subprocess.PIPE,shell=True)
            message='='*10+socket.gethostname()+'='*10+'\n'+uptime.stdout.read()+mem.stdout.read()+users.stdout.read()
            uptime.kill()
            mem.kill()
            users.kill()
        elif (data=="SHORT"):
            load=open('/proc/loadavg','r').read()
            load=load.split(" ")
            cpu=open('/proc/cpuinfo','r').read()
            cpucount=0
            for row in cpu.split("\n"):
                if row.startswith('processor'):
                    cpucount+=1
            loadpercent=int(100*float(load[0])/float(cpucount))
            mem=open('/proc/meminfo','r').read()
            memory={'total':0, 'free':0, 'cache':0, 'percent':0}
            for row in mem.split("\n"):
                if row.startswith('MemTotal'):
                    memory['total']=float(''.join(c for c in row if c.isdigit()))
                if row.startswith('MemFree'):
                    memory['free']=float(''.join(c for c in row if c.isdigit()))
                if row.startswith('Cached'):
                    memory['cache']=float(''.join(c for c in row if c.isdigit()))
            memory['percent']=int(100 - (100*(memory['free']+memory['cache'])/memory['total']))
            message='|'+socket.gethostname()+' L/M:'+str(loadpercent).zfill(3)+'/'+str(memory['percent']).zfill(3)+'% '
        else:
            message=help_message
        conn.send(message)
    conn.close()

View File

@@ -1,54 +0,0 @@
#!/bin/bash
function return_error {
echo "$1"
exit 1
}
ANDURIL_ENV=$HOME/.config/anduril/environment.sh
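# ANDURIL_HOME and ANDURIL_BUNDLES may be set in this optional per-user environment file.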
if [ -f "$ANDURIL_ENV" ]
then . "$ANDURIL_ENV"
echo Read "$ANDURIL_ENV"
fi
[ -d "$ANDURIL_HOME" ] || return_error "ANDURIL_HOME is not set! You may set it also in file: $ANDURIL_ENV"
[ -d "$ANDURIL_BUNDLES" ] || return_error "ANDURIL_BUNDLES is not set! (folder of bundles, may also be same as ANDURIL_HOME)"
echo ANDURIL_HOME="$ANDURIL_HOME"
echo ANDURIL_BUNDLES="$ANDURIL_BUNDLES"
if [ ! -z "$1" ]
then search=$( echo $1 | tr A-Z a-z )
fi
MICROHOME="$ANDURIL_HOME/microarray"
bundles=( )
echo -n "Looking for bundles: "
for e in $( ls $ANDURIL_BUNDLES )
do [[ -e ${ANDURIL_BUNDLES}/$e/bundle.xml ]] || continue
bname=$( cat ${ANDURIL_BUNDLES}/$e/bundle.xml | tr -d [:cntrl:] | grep '<name>' | sed 's/.*<name>\(.*\)<\/name>.*/\1/' | tr A-Z a-z )
bundles+=( $bname )
echo -n "$bname "
if [ "$search" = "$bname" ]
then BUNDLESTRING="-b ${ANDURIL_BUNDLES}/$e"
fi
if [ "$1" = "all" ]
then BUNDLESTRING="-b ${ANDURIL_BUNDLES}/$e $BUNDLESTRING"
fi
if [ "$bname" = "microarray" ]
then MICROHOME="${ANDURIL_BUNDLES}/$e"
fi
done
echo ""
if [ -z "$1" ]
then return_error "Give a bundle name to create component docs! 'all' matches all bundles in ANDURIL_BUNDLES"
fi
[ -d "$MICROHOME" ] || return_error "Could not find Microarray bundle home: $MICROHOME"
[ -z "$BUNDLESTRING" ] && return_error "Could not find given bundle: $search"
echo $ANDURIL_HOME/bin/anduril build-doc result_doc --log log_doc "$BUNDLESTRING" -t $MICROHOME/datatypes.xml
eval $ANDURIL_HOME/bin/anduril build-doc result_doc --log log_doc "$BUNDLESTRING" -t $MICROHOME/datatypes.xml

View File

@@ -1,70 +0,0 @@
#!/bin/bash
function return_error {
echo "$1"
exit 1
}
ANDURIL_ENV=$HOME/.config/anduril/environment.sh
if [ -f "$ANDURIL_ENV" ]
then . "$ANDURIL_ENV"
echo Read "$ANDURIL_ENV"
fi
[ -d "$ANDURIL_HOME" ] || return_error "ANDURIL_HOME is not set! You may set it also in file: $ANDURIL_ENV"
[ -d "$ANDURIL_BUNDLES" ] || return_error "ANDURIL_BUNDLES is not set! (folder of bundles, may also be same as ANDURIL_HOME)"
echo ANDURIL_HOME="$ANDURIL_HOME"
echo ANDURIL_BUNDLES="$ANDURIL_BUNDLES"
echo -n "Looking for bundles: "
b="$ANDURIL_HOME/builtin/bundle.xml"
BUNDLESTRING=" -b $( readlink -f $( dirname ${b} ) )"
bname=$( cat "$b" | tr -d [:cntrl:] | grep '<name>' | sed 's/.*<name>\(.*\)<\/name>.*/\1/' )
echo -n "$bname "
[ -d "$( dirname ${b} )/components" ] && \
list=$( find $( dirname ${b} )/components -maxdepth 2 -mindepth 2 -name component.xml -printf '%P\\n' | sed 's,/component.xml,,g' )
[ -d "$( dirname ${b} )/functions" ] && \
list=${list}$( find $( dirname ${b} )/functions -maxdepth 2 -mindepth 2 -name component.xml -printf '%P\\n' | sed 's,/component.xml,,g' )
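# Collect component and function names from every bundle under ANDURIL_BUNDLES (test bundles excluded below).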
for b in $( find -L "$ANDURIL_BUNDLES" -maxdepth 2 -name bundle.xml | grep -Ev '/testsystem/|/techtest/' ); do
bname=$( cat "$b" | tr -d [:cntrl:] | grep '<name>' | sed 's/.*<name>\(.*\)<\/name>.*/\1/' )
echo -n "$bname "
[ -d "$( dirname ${b} )/components" ] && \
list=${list}$( find $( dirname ${b} )/components -maxdepth 2 -mindepth 2 -name component.xml -printf '%P\\n' | sed 's,/component.xml,,g' )
[ -d "$( dirname ${b} )/functions" ] && \
list=${list}$( find $( dirname ${b} )/functions -maxdepth 2 -mindepth 2 -name component.xml -printf '%P\\n' | sed 's,/component.xml,,g' )
BUNDLESTRING=${BUNDLESTRING}" -b $( dirname ${b} )"
done
echo ""
list=( $( echo -e $list | sort ))
if [ -z "$1" ]
then
for (( n=0; n < ${#list[@]} ; n++ ))
do echo $n: ${list[$n]}
done
echo "all: ALL"
echo "clean: delete log and exec folders"
exit 0
fi
if [[ "$1" == ?([0-9]*) ]]
then # is numeric
echo test ${list[$1]}
echo $ANDURIL_HOME/bin/anduril test ${list[$1]} --log log_component -d result_component ${BUNDLESTRING}
$ANDURIL_HOME/bin/anduril test ${list[$1]} --log log_component -d result_component ${BUNDLESTRING}
else # is not numeric
if [ $1 = "all" ]
then for (( n=0; n < ${#list[@]} ; n++ ))
do echo running: ${list[$n]}
$ANDURIL_HOME/bin/anduril test ${list[$n]} --log log_component -d result_component ${BUNDLESTRING}
done
elif [ $1 = "clean" ]
then rm -vrf log_component result_component
else
echo test $1
echo $ANDURIL_HOME/bin/anduril test $1 --log log_component -d result_component ${BUNDLESTRING}
$ANDURIL_HOME/bin/anduril test $1 --log log_component -d result_component ${BUNDLESTRING}
fi
fi

View File

@@ -1,63 +0,0 @@
#!/bin/bash
function return_error {
echo "$1"
exit 1
}
ANDURIL_ENV=$HOME/.config/anduril/environment.sh
if [ -f "$ANDURIL_ENV" ]
then . "$ANDURIL_ENV"
echo Read "$ANDURIL_ENV"
fi
[ -d "$ANDURIL_HOME" ] || return_error "ANDURIL_HOME is not set! You may set it also in file: $ANDURIL_ENV"
[ -d "$ANDURIL_BUNDLES" ] || return_error "ANDURIL_BUNDLES is not set! (folder of bundles, may also be same as ANDURIL_HOME)"
echo -n "Looking for bundles: "
b="$ANDURIL_HOME/builtin/bundle.xml"
BUNDLESTRING=" -b $( readlink -f $( dirname ${b} ) )"
bname=$( cat "$b" | tr -d [:cntrl:] | grep '<name>' | sed 's/.*<name>\(.*\)<\/name>.*/\1/' )
echo -n "$bname "
[ -d "$( dirname ${b} )/test-networks" ] && \
list=$( find $( dirname ${b} )/test-networks -maxdepth 2 -mindepth 2 -name network.and -printf '%P\\n' | sed 's,/network.and,,g' )
for b in $( find -L "$ANDURIL_BUNDLES" -maxdepth 2 -name bundle.xml | grep -v testsystem ); do
BUNDLESTRING=${BUNDLESTRING}" -b $( readlink -f $( dirname ${b} ) )"
bname=$( cat "$b" | tr -d [:cntrl:] | grep '<name>' | sed 's/.*<name>\(.*\)<\/name>.*/\1/' )
echo -n "$bname "
[ -d "$( dirname ${b} )/test-networks" ] && \
list=${list}$( find $( dirname ${b} )/test-networks -maxdepth 2 -mindepth 2 -name network.and -printf '%P\\n' | sed 's,/network.and,,g' )
done
echo ""
list=( $( echo -e $list | sort ))
if [ -z "$1" ]
then
for (( n=0; n < ${#list[@]} ; n++ ))
do echo $n: ${list[$n]}
done
echo "all: ALL"
echo "clean: delete log and exec folders"
exit 0
fi
if [[ "$1" == ?([0-9]*) ]]
then # is numeric
echo test ${list[$1]}
echo $ANDURIL_HOME/bin/anduril test-networks ${list[$1]} --log log_network -d result_network ${BUNDLESTRING}
$ANDURIL_HOME/bin/anduril test-networks ${list[$1]} --log log_network -d result_network ${BUNDLESTRING}
else # is not numeric
if [ $1 = "all" ]
then for (( n=0; n < ${#list[@]} ; n++ ))
do echo running: ${list[$n]}
$ANDURIL_HOME/bin/anduril test-networks ${list[$n]} --log log_network -d result_network ${BUNDLESTRING}
done
elif [ $1 = "clean" ]
then rm -vrf log_network result_network
else
echo test $1
$ANDURIL_HOME/bin/anduril test-networks $1 --log log_network -d result_network ${BUNDLESTRING}
fi
fi

View File

@@ -1,48 +0,0 @@
#!/bin/bash
if [ -z "$2" ]
then
echo "
Copies Anduril testcase results to the original source location.
Use it when testcases fail, but you know the results to be correct.
After copying you must commit the changes in the checkout.
Usage: testcasecopy executionfolder/ComponentFolder /path/to/bundle/root
example: testcasecopy results/CSVJoin ~/anduril/trunk/microarray/
Any extra arguments are given to the copy command: cp -a [args] source target
Recommended: -i to prompt before overwriting and -v for verbose output
"
exit 0
fi
IFS=$'\n'
component=$( basename "$1" )
execfolder=$( dirname "$1" )
source=$1
bundle=$2
shift 2
if [ ! -d "$source" ]
then echo "$source folder not found"
exit 1
fi
if [ ! -d "$bundle" ]
then echo "$bundle folder not found"
exit 1
fi
#echo $execfolder $component
cases=$( find "$source" -mindepth 1 -maxdepth 1 -type d )
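# For each test case directory found under the execution folder, copy the produced outputs over the stored expected-output.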
for case in $cases
do
casebase=$( basename "$case" )
tgt="${bundle}/components/${component}/testcases/${casebase}/expected-output"
if [ -d "$tgt" ]
then
for output in $( ls "${case}/component" | grep -v ^_ )
do
cp -a $@ "${case}/component/${output}" "${tgt}/"
done
else
echo "$component $casebase does not have an expected-output folder"
fi
done

View File

@@ -1,44 +0,0 @@
#!/bin/bash
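# Publish this host's load/memory/user summary into the shared vm_state folder (read by the VM load status tool).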
cores=$( grep -c processor /proc/cpuinfo )
# verbose mode
tgtf=/mnt/csc-gc5/vm_state/${HOSTNAME}.long
# debug
if [ -z "$1" ]
then touch "$tgtf"
fi
if [ -f "$tgtf" ]
then echo "======== $HOSTNAME =======" > "$tgtf"
TZ="Europe/Helsinki" uptime >> "$tgtf"
echo "Cores: $cores" >> "$tgtf"
free -tg | head -n 2 >> "$tgtf"
ps a --format user| grep -v -e root -e USER | sort -u | tr '\n' ' ' >> "$tgtf"
echo "" >> "$tgtf"
fi
# concise mode
tgtf=/mnt/csc-gc5/vm_state/${HOSTNAME}.short
# debug
if [ -z "$1" ]
then touch "$tgtf"
fi
if [ -f "$tgtf" ]
then echo -n "|$HOSTNAME L:" > "$tgtf"
load=$( cat /proc/loadavg | cut -d" " -f3 | tr -d [:cntrl:] )
echo -n "$load/$cores M:" >> "$tgtf"
# used + cached = total - free
free=$(( $( grep ^MemFree /proc/meminfo | tr -d -c [:digit:] ) / 1048576 ))
cache=$(( $( grep ^Cached /proc/meminfo | tr -d -c [:digit:] ) / 1048576 ))
total=$(( $( grep ^MemTotal /proc/meminfo | tr -d -c [:digit:] ) / 1048576 ))
used=$(( $total -$free -$cache ))
echo -n "$used+$cache/$total U:" >> "$tgtf"
ps a --format user| grep -v -e root -e USER | sort -u | wc -l | tr -c -d [:digit:] >> "$tgtf"
TZ="Europe/Helsinki" date "+ @%H:%M" >> "$tgtf"
# save history
echo -e "$HOSTNAME\t$load\t$used\t"$( date +%s ) >> /mnt/csc-gc5/vm_state/history/${HOSTNAME}
fi

View File

@@ -1 +0,0 @@
../anduril/Folder2Array

View File

@@ -1 +0,0 @@
../anduril/testing/anduril-build-component-docs

View File

@@ -1 +0,0 @@
../anduril/testing/anduril-test-component

View File

@@ -1 +0,0 @@
../anduril/testing/anduril-test-network