diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 927a58826c..4bf152df89 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -4,6 +4,7 @@ on:
push:
branches:
- main
+ - testharness_minimal
pull_request:
branches:
- main
@@ -19,19 +20,19 @@ jobs:
matrix:
name: [ "Build Bionic", "Build Focal", "Build Impish" ]
include:
-
+
- name: "Build Bionic"
release: bionic
-
+
- name: "Build Focal"
release: focal
-
+
- name: "Build Impish"
release: impish
steps:
- - name: Check Out Repo
+ - name: Check Out Repo
uses: actions/checkout@v2
- name: Login to Docker Hub
@@ -39,7 +40,7 @@ jobs:
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
-
+
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
@@ -61,51 +62,51 @@ jobs:
matrix:
name: [ "Unit Bionic", "Short Bionic", "Medium Bionic", "Unit Focal", "Short Focal", "Medium Focal", "Unit Impish", "Short Impish", "Medium Impish" ]
include:
-
+
- name: "Unit Bionic"
release: bionic
command: "make unittest"
- output: "test_results_unittests.xml"
-
+ output: "unittests.xml"
+
- name: "Short Bionic"
release: bionic
- command: "make THREADS=2 test"
- output: "test_results.xml"
+ command: "make CORES=2 test"
+ output: "shorttests.xml"
- name: "Medium Bionic"
release: bionic
- command: "make THREADS=2 mediumtest"
- output: "test_results_medium.xml"
+ command: "make CORES=2 mediumtest"
+ output: "mediumtests.xml"
- name: "Unit Focal"
release: focal
command: "make unittest"
- output: "test_results_unittests.xml"
-
+ output: "unittests.xml"
+
- name: "Short Focal"
release: focal
- command: "make THREADS=2 test"
- output: "test_results.xml"
+ command: "make CORES=2 test"
+ output: "shorttests.xml"
- name: "Medium Focal"
release: focal
- command: "make THREADS=2 mediumtest"
- output: "test_results_medium.xml"
+ command: "make CORES=2 mediumtest"
+ output: "mediumtests.xml"
- name: "Unit Impish"
release: impish
command: "make unittest"
- output: "test_results_unittests.xml"
-
+ output: "unittests.xml"
+
- name: "Short Impish"
release: impish
- command: "make THREADS=2 test"
- output: "test_results.xml"
+ command: "make CORES=2 test"
+ output: "shorttests.xml"
- name: "Medium Impish"
release: impish
- command: "make THREADS=2 mediumtest"
- output: "test_results_medium.xml"
+ command: "make CORES=2 mediumtest"
+ output: "mediumtests.xml"
steps:
@@ -143,7 +144,7 @@ jobs:
"gyre_parallel",
"Stokes_square_convection_1e4_vv_p1p1",
"Stokes_square_convection_1e4_p1p1_Ra_Test",
- "Stokes_square_convection_1e4_vv_gauss_p2p1",
+ "Stokes_square_convection_1e4_vv_gauss_p2p1",
"viscous_fs_drunkensailor",
"cylinder-3d-drag",
"viscosity_2d_p0_adaptive_parallel",
diff --git a/Makefile.in b/Makefile.in
index 65e67a8b05..ffe7119402 100755
--- a/Makefile.in
+++ b/Makefile.in
@@ -1,5 +1,5 @@
# Copyright (C) 2006 Imperial College London and others.
-#
+#
# Please see the AUTHORS file in the main source directory for a full list
# of copyright holders.
#
@@ -9,7 +9,7 @@
# Imperial College London
#
# amcgsoftware@imperial.ac.uk
-#
+#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
@@ -88,23 +88,23 @@ ifeq (@MBA3D@,yes)
LIBMBA3D = lib/libmba3d.a
endif
-# Thread count for make test.
-THREADS=1
+# Logical cores available for make test.
+CORES=8
EXCLUDE_TAGS =
ifneq (@HAVE_ZOLTAN@,yes)
- EXCLUDE_TAGS := $(EXCLUDE_TAGS) -e zoltan
+ EXCLUDE_TAGS := $(EXCLUDE_TAGS) -o zoltan
else
- EXCLUDE_TAGS := $(EXCLUDE_TAGS) -e nozoltan
+ EXCLUDE_TAGS := $(EXCLUDE_TAGS) -o nozoltan
endif
ifneq (@MBA2D@,yes)
- EXCLUDE_TAGS := $(EXCLUDE_TAGS) -e 2dadapt
+ EXCLUDE_TAGS := $(EXCLUDE_TAGS) -o 2dadapt
endif
ifneq (@HAVE_EXODUSII@,yes)
- EXCLUDE_TAGS := $(EXCLUDE_TAGS) -e exodusii
+ EXCLUDE_TAGS := $(EXCLUDE_TAGS) -o exodusii
endif
ifneq (@HAVE_ASSESS@,yes)
- EXCLUDE_TAGS := $(EXCLUDE_TAGS) -e assess
+ EXCLUDE_TAGS := $(EXCLUDE_TAGS) -o assess
endif
.SUFFIXES: .f90 .F90 .c .cpp .o .a
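Note on the hunk above: the -e to -o rename lines up with the option names of the rewritten tools/testharness.py. The parser definition itself is outside this excerpt, so the following is only a hedged argparse sketch of how those repeatable flags plausibly behave. The attribute names (length, omit_tags, tags, exec_type) come from filter_tests further down; the option spellings beyond -o/-t/-l/-n and the sched_getaffinity default for -n are assumptions suggested by the Makefile and the harness imports.

from argparse import ArgumentParser
from os import sched_getaffinity

parser = ArgumentParser(description="testharness CLI sketch (assumed, not the real parser)")
parser.add_argument("-l", "--length", action="append",
                    help="repeatable, e.g. -l vshort -l short")
parser.add_argument("-o", "--omit-tags", dest="omit_tags", action="append",
                    default=[], help="skip tests carrying any of these tags")
parser.add_argument("-t", "--tags", action="append", default=[],
                    help="run only tests carrying all of these tags")
parser.add_argument("-p", "--exec-type", dest="exec_type", default="any",
                    choices=["serial", "parallel", "any"])
parser.add_argument("-n", "--cores", type=int,
                    default=len(sched_getaffinity(0)))  # available logical cores

args = parser.parse_args(["-o", "zoltan", "-o", "2dadapt", "-n", "2"])
assert args.omit_tags == ["zoltan", "2dadapt"] and args.cores == 2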
@@ -190,14 +190,14 @@ static: fluidity_library
shared: lib/shared/$(SLIB_FLUIDITY).1
-lib/shared/$(SLIB_FLUIDITY).1: fluidity_library
+lib/shared/$(SLIB_FLUIDITY).1: fluidity_library
@echo "BUILD shared libfluidity"
@rm -rf tmp
@mkdir -p tmp lib/shared
@cp $(LIB_FLUIDITY) tmp
@cd tmp; ar x lib$(FLUIDITY).a; rm lib$(FLUIDITY).a; cd ..
@echo " LD lib$(FLUIDITY).so"
- @$(EVAL) $(LINKER) -shared -Wl,-soname,$(SLIB_FLUIDITY).1 -o lib/shared/$(SLIB_FLUIDITY).1 tmp/* -L./lib -lvtkfortran
+ @$(EVAL) $(LINKER) -shared -Wl,-soname,$(SLIB_FLUIDITY).1 -o lib/shared/$(SLIB_FLUIDITY).1 tmp/* -L./lib -lvtkfortran
@rm -rf tmp
@cd lib/shared; ln -sf $(SLIB_FLUIDITY).1 $(SLIB_FLUIDITY); cd ../..
@@ -299,7 +299,7 @@ endif
-fldecomp: fluidity_library
+fldecomp: fluidity_library
@echo "BUILD fldecomp"
@echo " MKDIR bin"
@mkdir -p bin
@@ -381,11 +381,11 @@ endif
clean-debian:
@echo " CLEAN debian"
- @cd debian; rm -rf files tmp fluidity python-fluidity *.substvars *.debhelper*
+ @cd debian; rm -rf files tmp fluidity python-fluidity *.substvars *.debhelper*
-clean-test:
+clean-test:
@echo " CLEAN tests"
- @cd tests; PYTHONPATH=../python ../tools/testharness.py --clean >/dev/null
+ @tools/testharness.py --clean >/dev/null
@cd tests/data; $(MAKE) clean
clean-unittest:
@@ -404,13 +404,9 @@ clean-unittest:
@echo " CLEAN ocean_forcing/tests"
@cd ocean_forcing/tests; $(MAKE) clean
-clean-all-tests: clean-test
- @echo " CLEAN parallel/special/long tests"
- @PYTHONPATH=python tools/testharness.py --parallelism=parallel --clean >/dev/null
- @PYTHONPATH=python tools/testharness.py --length=special --clean >/dev/null
- @PYTHONPATH=python tools/testharness.py --length=special --parallelism=parallel --clean >/dev/null
- @PYTHONPATH=python tools/testharness.py --length=long --clean >/dev/null
- @PYTHONPATH=python tools/testharness.py --length=long --parallelism=parallel --clean >/dev/null
+clean-all-tests: clean-test
+ @echo " CLEAN long and vlong tests"
+ @tools/testharness.py -l long -l vlong --clean >/dev/null
distclean: clean
@echo " DISTCLEAN"
@@ -451,13 +447,11 @@ distclean: clean
@find ./ \( -name make.log \) -exec rm -f {} \; > /dev/null
@rm -f Makefile > /dev/null
-test: serialtest
-
-serialtest: fltools bin/$(FLUIDITY)
- @cd tests; ../bin/testharness -x test_results.xml -l short $(EXCLUDE_TAGS) -n $(THREADS)
+test: fltools bin/$(FLUIDITY)
+ @cd tests; ../bin/testharness -x shorttests.xml -l vshort -l short $(EXCLUDE_TAGS) -n $(CORES)
mediumtest: fltools bin/$(FLUIDITY) manual spudtools
- @cd tests; ../bin/testharness -x test_results_medium.xml -l medium $(EXCLUDE_TAGS) -n $(THREADS)
+ @cd tests; ../bin/testharness -x mediumtests.xml -l medium $(EXCLUDE_TAGS) -n $(CORES)
.PHONY: spudtools
@@ -465,7 +459,7 @@ spudtools:
@cd libspud ; $(MAKE) install-spudtools
@echo " INSTALL spudtools"
-setuputs:
+setuputs:
@echo "SETUP tests"
@echo " RMDIR bin/tests"
@rm -rf bin/tests
@@ -494,7 +488,7 @@ endif
unittest: build_unittest
@echo "RUN bin/tests"
- @bin/unittestharness -d bin/tests -x tests/test_results_unittests.xml
+ @bin/unittestharness -d bin/tests -x tests/unittests.xml
bin/spud-preprocess:
@echo "Installing spudtools"
@@ -543,14 +537,14 @@ endif
@cd diagnostics; ../bin/create_makefile --exclude \
"Diagnostic_Fields_Interfaces.F90 Diagnostic_Fields_New.F90" $(TESTOPTS)
@cd diagnostics; $(MAKE) Diagnostic_Fields_Interfaces.o \
- Diagnostic_Fields_New.o
+ Diagnostic_Fields_New.o
@echo " Generating main dependencies"
@cd main; ../bin/create_makefile --exclude test_coupler.F90 $(TESTOPTS)
@echo " Cleaning up the mess"
$(MAKE) clean-light
@echo " Congratulations, make makefiles succeeded!"
-install: default fltools
+install: default fltools
@mkdir -p $(DESTDIR)$(bindir) $(DESTDIR)$(docdir)/fluidity
find bin/ -maxdepth 1 -type f -exec cp '{}' $(DESTDIR)$(bindir) \;
rm -f $(DESTDIR)$(bindir)/spud-* $(DESTDIR)$(bindir)/diamond $(DESTDIR)$(bindir)/silenteval
@@ -566,7 +560,7 @@ install: default fltools
cd $(DESTDIR)$(docdir)/fluidity ; tar -czvf $(DESTDIR)$(docdir)/fluidity/examples.tar.gz examples/
rm -rf $(DESTDIR)$(docdir)/fluidity/examples/
-install-diamond:
+install-diamond:
cd libspud; ./configure --prefix=@prefix@; cd ../..
cd libspud; $(MAKE) clean; cd ../..
cd libspud; $(MAKE) install-diamond; cd ../..
diff --git a/python/fluidity/regressiontest.py b/python/fluidity/regressiontest.py
deleted file mode 100755
index 04870486cc..0000000000
--- a/python/fluidity/regressiontest.py
+++ /dev/null
@@ -1,352 +0,0 @@
-#!/usr/bin/env python3
-import sys
-import os
-import copy
-import random
-import xml.dom.minidom
-import traceback
-import time
-import glob
-import threading
-import traceback
-from io import StringIO
-
-try:
- from junit_xml import TestCase
-except ImportError:
- class TestCase(object):
- def __init__(self,*args,**kwargs):
- pass
- def add_failure_info(self,*args,**kwargs):
- pass
-
-class TestProblem:
- """A test records input information as well as tests for the output."""
- def __init__(self, filename, verbose=False, replace=None, genpbs=False):
- """Read a regression test from filename and record its details."""
- self.name = ""
- self.command = replace
- self.command_line = ""
- self.length = ""
- self.nprocs = 1
- self.verbose = verbose
- self.variables = []
- self.pass_tests = []
- self.warn_tests = []
- self.pass_status = []
- self.warn_status = []
- self.filename = filename.split('/')[-1]
- self.genpbs = genpbs
- self.xml_reports=[]
- # add dir to import path
- sys.path.insert(0, os.path.dirname(filename))
-
- dom = xml.dom.minidom.parse(filename)
-
- probtag = dom.getElementsByTagName("testproblem")[0]
-
- for child in probtag.childNodes:
- try:
- tag = child.tagName
- except AttributeError:
- continue
-
- if tag == "name":
- self.name = child.childNodes[0].nodeValue
- elif tag == "problem_definition":
- self.length = child.getAttribute("length")
- self.nprocs = int(child.getAttribute("nprocs"))
- xmlcmd = child.getElementsByTagName("command_line")[0].childNodes[0].nodeValue
- if self.command is not None:
- self.command_line = self.command(xmlcmd)
- elif tag == "variables":
- for var in child.childNodes:
- try:
- self.variables.append(Variable(name=var.getAttribute("name"), language=var.getAttribute("language"),
- code=var.childNodes[0].nodeValue.strip()))
- except AttributeError:
- continue
- elif tag == "pass_tests":
- for test in child.childNodes:
- try:
- self.pass_tests.append(Test(name=test.getAttribute("name"), language=test.getAttribute("language"),
- code=test.childNodes[0].nodeValue.strip()))
- except AttributeError:
- continue
- elif tag == "warn_tests":
- for test in child.childNodes:
- try:
- self.warn_tests.append(Test(name=test.getAttribute("name"), language=test.getAttribute("language"),
- code=test.childNodes[0].nodeValue.strip()))
- except AttributeError:
- continue
-
- self.random_string()
-
- def log(self, str):
- if self.verbose == True:
- print(self.filename[:-4] + ": " + str)
-
- def random_string(self):
- letters = "abcdefghijklmnopqrstuvwxyz"
- letters += letters.upper() + "0123456789"
-
- str = self.filename[:-4]
- for i in range(10):
- str += random.choice(letters)
-
- self.random = str
-
- def call_genpbs(self, dir):
- cmd = "genpbs \"" + self.filename[:-4] + "\" \"" + self.command_line + "\" \"" + str(self.nprocs) + "\" \"" + self.random + "\""
- self.log("cd "+dir+"; "+cmd)
- ret = os.system("cd "+dir+"; "+cmd)
-
- if ret != 0:
- self.log("Calling genpbs failed.")
- raise Exception
-
- def is_finished(self):
- if self.genpbs:
- file = os.environ["HOME"] + "/lock/" + self.random
- try:
- os.remove(file)
- return True
- except OSError:
- return False
- else:
- return True
-
- def clean(self):
- self.log("Cleaning")
-
- try:
- os.stat("Makefile")
- self.log("Calling 'make clean':")
- ret = os.system("make clean")
- if not ret == 0:
- self.log("No clean target")
- except OSError:
- self.log("No Makefile, not calling make")
-
- def run(self, dir):
- self.log("Running")
-
- run_time=0.0
- start_time=time.process_time()
- wall_time=time.time()
-
- try:
- os.stat(dir+"/Makefile")
- self.log("Calling 'make input':")
- ret = os.system("cd "+dir+"; make input")
- assert(ret == 0)
- except OSError:
- self.log("No Makefile, not calling make")
-
- if self.genpbs:
- ret = self.call_genpbs(dir)
- self.log("cd "+dir+"; qsub " + self.filename[:-4] + ".pbs: " + self.command_line)
- os.system("cd "+dir+"; qsub " + self.filename[:-4] + ".pbs")
- else:
- self.log(self.command_line)
- os.system("cd "+dir+"; "+self.command_line)
- run_time=time.process_time()-start_time
-
- self.xml_reports.append(TestCase(self.name,
- '%s.%s'%(self.length,
- self.filename[:-4]),
- elapsed_sec=time.time()-wall_time))
-
- return run_time
-
- def fl_logs(self, nLogLines = None):
- logs = glob.glob("fluidity.log*")
- errLogs = glob.glob("fluidity.err*")
-
- if nLogLines is None or nLogLines > 0:
- for filename in logs:
- log = open(filename, "r").read().split("\n")
- if not nLogLines is None:
- log = log[-nLogLines:]
- self.log("Log: " + filename)
- for line in log:
- self.log(line)
-
- for filename in errLogs:
- self.log("Log: " + filename)
- log = open(filename, "r").read().split("\n")
- for line in log:
- self.log(line)
-
- return
-
- def test(self):
- def Trim(string):
- if len(string) > 4096:
- return string[:4096] + " ..."
- else:
- return string
-
- varsdict = {}
- self.log("Assigning variables:")
- for var in self.variables:
- tmpdict = {}
- try:
- var.run(tmpdict)
- except:
- self.log("failure.")
- self.pass_status.append('F')
- tc=TestCase(self.name,
- '%s.%s'%(self.length,
- self.filename[:-4]))
- tc.add_failure_info("Failure" )
- self.xml_reports.append(tc)
- return self.pass_status
-
- varsdict[var.name] = tmpdict[var.name]
- self.log("Assigning %s = %s" % (str(var.name), Trim(str(varsdict[var.name]))))
-
- if len(self.pass_tests) != 0:
- self.log("Running failure tests: ")
- for test in self.pass_tests:
- self.log("Running %s:" % test.name)
- log = StringIO()
- original_stdout = sys.stdout
- sys.stdout = log
- status = test.run(varsdict)
- tc=TestCase(test.name,
- '%s.%s'%(self.length,
- self.filename[:-4]))
- if status == True:
- self.log("success.")
- self.pass_status.append('P')
- elif status == False:
- self.log("failure.")
- self.pass_status.append('F')
- tc.add_failure_info( "Failure" )
- else:
- self.log("failure (info == %s)." % status)
- self.pass_status.append('F')
- tc.add_failure_info( "Failure", status )
- self.xml_reports.append(tc)
- sys.stdout = original_stdout
- log.seek(0)
- tc.stdout = log.read()
- print(tc.stdout)
-
- if len(self.warn_tests) != 0:
- self.log("Running warning tests: ")
- for test in self.warn_tests:
- self.log("Running %s:" % test.name)
- status = test.run(varsdict)
- if status == True:
- self.log("success.")
- self.warn_status.append('P')
- elif status == False:
- self.log("warning.")
- self.warn_status.append('W')
- else:
- self.log("warning (info == %s)." % status)
- self.warn_status.append('W')
-
- self.log(''.join(self.pass_status + self.warn_status))
- return self.pass_status + self.warn_status
-
-class TestOrVariable:
- """Tests and variables have a lot in common. This code unifies the commonalities."""
- def __init__(self, name, language, code):
- self.name = name
- self.language = language
- self.code = code
-
- def run(self, varsdict):
- func = getattr(self, "run_" + self.language)
- return func(varsdict)
-
-class Test(TestOrVariable):
- """A test for the model output"""
- def run_bash(self, varsdict):
-
- varstr = ""
- for var in varsdict.keys():
- varstr = varstr + ("export %s=\"%s\"; " % (var, varsdict[var]))
-
- retcode = os.system(varstr + self.code)
- if retcode == 0: return True
- else: return False
-
- def run_python(self, varsdict):
- tmpdict = copy.copy(varsdict)
- try:
- exec(self.code, tmpdict)
- return True
- except AssertionError:
- # in case of an AssertionError, we assume the test has just failed
- return False
- except:
- # tell us what else went wrong:
- traceback.print_exc()
- return False
-
-class Variable(TestOrVariable):
- """A variable definition for use in tests"""
- def run_bash(self, varsdict):
- cmd = "bash -c \"%s\"" % self.code
- fd = os.popen(cmd, "r")
- exec(self.name + "=" + fd.read(), varsdict)
- if self.name not in varsdict.keys():
- raise Exception
-
- def run_python(self, varsdict):
- try:
- print(self.code)
- exec(self.code, varsdict)
- except:
- print("Variable computation raised an exception")
- print("-" * 80)
- for (lineno, line) in enumerate(self.code.split('\n')):
- print("%3d %s" % (lineno+1, line))
- print("-" * 80)
- traceback.print_exc()
- print("-" * 80)
- raise Exception
-
- if self.name not in varsdict.keys():
- print("self.name == ", self.name)
- print("varsdict.keys() == ", varsdict.keys())
- print("self.name not found: does the variable define the right name?")
- raise Exception
-
-class ThreadIterator(list):
- '''A thread-safe iterator over a list.'''
- def __init__(self, seq):
- self.list=list(seq)
-
- self.lock=threading.Lock()
-
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self.next()
-
- def next(self):
-
- if len(self.list)==0:
- raise StopIteration
-
- self.lock.acquire()
- ans=self.list.pop()
- self.lock.release()
-
- return ans
-
-
-if __name__ == "__main__":
- prob = TestProblem(filename=sys.argv[1], verbose=True)
- prob.run()
- while not prob.is_finished():
- time.sleep(60)
- print(prob.test())
diff --git a/tests/mms_les_second_order_p1dgp2/mms_les_second_order_p1dgp2.xml b/tests/mms_les_second_order_p1dgp2/mms_les_second_order_p1dgp2.xml
index ca7ffae968..90d54b3862 100644
--- a/tests/mms_les_second_order_p1dgp2/mms_les_second_order_p1dgp2.xml
+++ b/tests/mms_les_second_order_p1dgp2/mms_les_second_order_p1dgp2.xml
@@ -5,13 +5,13 @@
  <name>mms_les_second_order_p2p1</name>
  <tags>flml</tags>
-
+
-    <command_line>fluidity -v3 MMS_A.flml > MMS_A.log; fluidity -v3 MMS_B.flml > MMS_B.log; fluidity -v3 MMS_C.flml > MMS_C.log; fluidity -v3 MMS_D.flml > MMS_D.log</command_line>
+    <command_line>fluidity -v3 MMS_A.flml > MMS_A.log; fluidity -v3 MMS_B.flml > MMS_B.log; fluidity -v3 MMS_C.flml > MMS_C.log</command_line>
-
+
-
+
from fluidity_tools import stat_parser as stat
from math import log
@@ -31,7 +31,7 @@ ab_ratio_x = a_error_x / b_error_x
ab_ratio_y = a_error_y / b_error_y
ab_convergence_vel = [log(ab_ratio_x, 2), log(ab_ratio_y, 2)]
-
+
-
+
from fluidity_tools import stat_parser as stat
from math import log
@@ -63,7 +63,7 @@ print(b_error)
ab_ratio = a_error / b_error
ab_convergence_ev = log(ab_ratio, 2)
-
+
from fluidity_tools import stat_parser as stat
from math import log
@@ -115,14 +115,14 @@ print(c_error)
bc_ratio = b_error / c_error
bc_convergence_ev = log(bc_ratio, 2)
-
+
import os
files = os.listdir("./")
solvers_converged = not "matrixdump" in files and not "matrixdump.info" in files
-
+
assert(ab_convergence_vel[0] > 2.0)
@@ -130,17 +130,17 @@ assert(ab_convergence_vel[0] > 2.0)
assert(ab_convergence_vel[1] > 2.0)
-
+
-
+
assert(ab_convergence_ev > 0.9)
-
+
assert(bc_convergence_vel[0] > 2.0)
@@ -157,14 +157,14 @@ assert(bc_convergence_p > 1.65)
assert(bc_convergence_ev > 0.9)
-
+
assert(solvers_converged)
-
+
-
+
-
+
diff --git a/tests/wetting_and_drying_balzano1_cg/plotfs_detec.py b/tests/wetting_and_drying_balzano1_cg/plotfs_detec.py
deleted file mode 100755
index 6651e10420..0000000000
--- a/tests/wetting_and_drying_balzano1_cg/plotfs_detec.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env python3
-
-import vtktools
-import sys
-import math
-import re
-import matplotlib.pyplot as plt
-import getopt
-
-from scipy.special import erf
-from numpy import poly1d
-from matplotlib.pyplot import figure, show
-from numpy import pi, sin, linspace
-from matplotlib.mlab import stineman_interp
-from numpy import exp, cos
-from fluidity_tools import stat_parser
-
-
-
-
-def mirror(x):
- return 13800-x
-
-
-def usage():
- print('Usage:')
- print('plotfs_detec.py [-w] --file=detector_filename --save=filename')
- print('--save=... saves the plots as images instead of plotting them on the screen.')
- print('-w plots the wetting procedure (drying is default).')
-
-
-# should be copied from the diamond extrude function. X is 2 dimensional
-def bathymetry_function(X):
- return -5.0*X/13800
-
-################# Main ###########################
-def main(argv=None):
-
- filename=''
- timestep_ana=0.0
- dzero=0.01
- save='' # If nonempty, we save the plots as images instead if showing them
- wetting=False
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], ":w", ['file=','save='])
- except getopt.GetoptError:
- usage()
- sys.exit(2)
- for opt, arg in opts:
- if opt == '--file':
- filename=arg
- elif opt == '--save':
- save=arg
- elif opt == '-w':
- wetting=True
- if filename=='':
- print('No filename specified. You have to give the detectors filename.')
- usage()
- sys.exit(2)
-
-
- ####################### Print time plot ###########################
- print('Generating time plot')
-
- s = stat_parser(filename)
-
- timesteps=s["ElapsedTime"]["value"]
- timestep=timesteps[1]-timesteps[0]
- print("Found ", len(timesteps), " timesteps with dt=", timestep)
- if timestep_ana==0.0:
- timestep_ana=timestep
-
-
- fs=s["water"]["FreeSurface"]
- print("Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ").")
-
-
- # Get and plot results
- plt.ion() # swith on interactive mode
- fig2 = figure()
- ax2 = fig2.add_subplot(111)
-
- if wetting:
- ##plot_start=90 # in timesteps
- plot_start=18 # in timesteps, after 18 timesteps the waterlevel reaches its lowest point
- ##plot_end=114 # in timesteps
- plot_end=54 # in timesteps
- plot_name='Wetting'
- else:
- plot_start=54 # in timesteps
- plot_end=89 # in timesteps
- plot_name='Drying'
-
-
-
- for t in range(0,len(timesteps)):
- # ignore the first waveperiod
- if t<plot_start or t>plot_end:
- continue
- fsvalues=[]
- xcoords=[]
- for name, item in fs.iteritems():
- #print name
- xcoords.append(mirror(s[name]['position'][0][0]))
- #print xcoord
- fsvalues.append(fs[name][t])
-
- # Plot result of one timestep
- ax2.plot(xcoords,fsvalues,'r,', label='Numerical solution')
-
- # Plot Analytical solution
- fsvalues_ana=[]
-
- offset=-bathymetry_function(0.0)+dzero
-
- xcoords.sort()
- for x in xcoords:
- fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
-
- # Plot vertical line in bathmetry on right boundary
- xcoords.append(xcoords[len(xcoords)-1]+0.000000001)
- fsvalues_ana.append(2.1)
-
- ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry')
-
- #plt.legend()
- if t==plot_end:
- # change from meters in kilometers in the x-axis
- # return locs, labels where locs is an array of tick locations and
- # labels is an array of tick labels.
- locs, labels = plt.xticks()
- for i in range(0,len(locs)):
- labels[i]=str(locs[i]/1000)
- plt.xticks(locs, labels)
-
- plt.ylim(-2.2,1.4)
- #plt.title(plot_name)
- plt.xlabel('Position [km]')
- plt.ylabel('Free surface [m]')
-
- if save=='':
- plt.draw()
- raw_input("Please press Enter")
- else:
- plt.savefig(save+'_'+plot_name+'.pdf', facecolor='white', edgecolor='black', dpi=100)
- plt.cla()
- t=t+1
-
-
-# Make video from the images:
-
-# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
-
-
-
-
-
-if __name__ == "__main__":
- main()
-
-
diff --git a/tests/wetting_and_drying_balzano1_cg/wetting_and_drying_balzano1_cg.xml b/tests/wetting_and_drying_balzano1_cg/wetting_and_drying_balzano1_cg.xml
index fc385a56e9..3497d1dc57 100644
--- a/tests/wetting_and_drying_balzano1_cg/wetting_and_drying_balzano1_cg.xml
+++ b/tests/wetting_and_drying_balzano1_cg/wetting_and_drying_balzano1_cg.xml
@@ -3,7 +3,7 @@
  <name>balzano1_2plus1</name>
-  <command_line>fluidity balzano1_2plus1.flml; ./plotfs_detec.py --file=balzano1_2plus1.detectors -w --save=fluidity; ./plotfs_detec.py --file=balzano1_2plus1.detectors --save=fluidity</command_line>
+  <command_line>fluidity balzano1_2plus1.flml</command_line>
diff --git a/tests/wetting_and_drying_balzano2_cg/plotfs_detec.py b/tests/wetting_and_drying_balzano2_cg/plotfs_detec.py
deleted file mode 100755
index 3b37c8db22..0000000000
--- a/tests/wetting_and_drying_balzano2_cg/plotfs_detec.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python3
-
-import vtktools
-import sys
-import math
-import re
-import matplotlib.pyplot as plt
-import getopt
-
-from scipy.special import erf
-from numpy import poly1d
-from matplotlib.pyplot import figure, show
-from numpy import pi, sin, linspace
-from matplotlib.mlab import stineman_interp
-from numpy import exp, cos
-from fluidity_tools import stat_parser
-
-
-
-
-def mirror(x):
- return 13800-x
-
-
-def usage():
- print('Usage:')
- print('plotfs_detec.py [-w] --file=detector_filename --save=filename')
- print('--save=... saves the plots as images instead of plotting them on the screen.')
- print('-w plots the wetting procedure (drying is default).')
-
-
-# should be copied from the diamond extrude function. X is 2 dimensional
-def bathymetry_function(X):
- if X<=3600 or X>6000:
- return -X/2760
- elif X>3600 and X<=4800:
- return -30.0/23
- elif X>4800 and X<=6000:
- return -X/1380+50.0/23
-
-################# Main ###########################
-def main(argv=None):
-
- filename=''
- timestep_ana=0.0
- dzero=0.01
- save='' # If nonempty, we save the plots as images instead if showing them
- wetting=False
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], ":w", ['file=','save='])
- except getopt.GetoptError:
- usage()
- sys.exit(2)
- for opt, arg in opts:
- if opt == '--file':
- filename=arg
- elif opt == '--save':
- save=arg
- elif opt == '-w':
- wetting=True
- if filename=='':
- print('No filename specified. You have to give the detectors filename.')
- usage()
- sys.exit(2)
-
-
- ####################### Print time plot ###########################
- print('Generating time plot')
-
- s = stat_parser(filename)
-
- timesteps=s["ElapsedTime"]["value"]
- timestep=timesteps[1]-timesteps[0]
- print("Found ", len(timesteps), " timesteps with dt=", timestep)
- if timestep_ana==0.0:
- timestep_ana=timestep
-
-
- fs=s["water"]["FreeSurface"]
- print("Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ").")
-
-
- # Get and plot results
- plt.ion() # swith on interactive mode
- fig2 = figure()
- ax2 = fig2.add_subplot(111)
-
- if wetting:
- ##plot_start=90 # in timesteps
- plot_start=22 # in timesteps, after 18 timesteps the waterlevel reaches its lowest point
- ##plot_end=114 # in timesteps
- plot_end=54 # in timesteps
- plot_name='Wetting'
- else:
- plot_start=54 # in timesteps
- plot_end=90 # in timesteps
- plot_name='Drying'
-
-
-
- for t in range(0,len(timesteps)):
- # ignore the first waveperiod
- if t<plot_start or t>plot_end:
- continue
- fsvalues=[]
- xcoords=[]
- for name, item in fs.iteritems():
- #print name
- xcoords.append(mirror(s[name]['position'][0][0]))
- #print xcoord
- fsvalues.append(fs[name][t])
-
- # Plot result of one timestep
- ax2.plot(xcoords,fsvalues,'r,', label='Numerical solution')
-
- # Plot Analytical solution
- fsvalues_ana=[]
-
- offset=-bathymetry_function(0.0)+dzero
-
- xcoords.sort()
- for x in xcoords:
- fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
-
- # Plot vertical line in bathmetry on right boundary
- xcoords.append(xcoords[len(xcoords)-1]+0.000000001)
- fsvalues_ana.append(2.1)
-
- ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry')
-
- #plt.legend()
- if t==plot_end:
- plt.ylim(-2.2,1.4)
- # change from meters in kilometers in the x-axis
- # return locs, labels where locs is an array of tick locations and
- # labels is an array of tick labels.
- locs, labels = plt.xticks()
- for i in range(0,len(locs)):
- labels[i]=str(locs[i]/1000)
- plt.xticks(locs, labels)
-
-
- #plt.title(plot_name)
- plt.xlabel('Position [km]')
- plt.ylabel('Free surface [m]')
-
- if save=='':
- plt.draw()
- raw_input("Please press Enter")
- else:
- plt.savefig(save+'_'+plot_name+'.pdf', facecolor='white', edgecolor='black', dpi=100)
- plt.cla()
- t=t+1
-
-
-# Make video from the images:
-
-# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
-
-
-
-
-
-if __name__ == "__main__":
- main()
-
-
diff --git a/tests/wetting_and_drying_balzano2_cg/wetting_and_drying_balzano2_cg.xml b/tests/wetting_and_drying_balzano2_cg/wetting_and_drying_balzano2_cg.xml
index ddfd5c6798..a0cf3bdd0b 100644
--- a/tests/wetting_and_drying_balzano2_cg/wetting_and_drying_balzano2_cg.xml
+++ b/tests/wetting_and_drying_balzano2_cg/wetting_and_drying_balzano2_cg.xml
@@ -3,7 +3,7 @@
  <name>balzano2_2plus1</name>
-  <command_line>fluidity balzano2_2plus1.flml; ./plotfs_detec.py --file=balzano2_2plus1.detectors -w --save=fluidity; ./plotfs_detec.py --file=balzano2_2plus1.detectors --save=fluidity</command_line>
+  <command_line>fluidity balzano2_2plus1.flml</command_line>
diff --git a/tests/wetting_and_drying_balzano3_cg_parallel/plotfs_detec.py b/tests/wetting_and_drying_balzano3_cg_parallel/plotfs_detec.py
deleted file mode 100755
index 2eb3f030af..0000000000
--- a/tests/wetting_and_drying_balzano3_cg_parallel/plotfs_detec.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python3
-
-import vtktools
-import sys
-import math
-import re
-import matplotlib.pyplot as plt
-import getopt
-
-from scipy.special import erf
-from numpy import poly1d
-from matplotlib.pyplot import figure, show
-from numpy import pi, sin, linspace
-from matplotlib.mlab import stineman_interp
-from numpy import exp, cos
-from fluidity_tools import stat_parser
-
-
-
-
-def mirror(x):
- return 13800-x
-
-
-def usage():
- print('Usage:')
- print('plotfs_detec.py --file=detector_filename --save=filename')
- print('--save=... saves the plots as images instead of plotting them on the screen.')
-
-
-# should be copied from the diamond extrude function. X is 2 dimensional
-def bathymetry_function(X):
- if X<=3600 or X>6000:
- return -X/2760
- elif X>3600 and X<=4800:
- return X/2760-60.0/23
- elif X>4800 and X<=6000:
- return -X/920+100.0/23
-
-################# Main ###########################
-def main(argv=None):
-
- filename=''
- timestep_ana=0.0
- dzero=0.01
- save='' # If nonempty, we save the plots as images instead if showing them
- wetting=False
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], "", ['file=','save='])
- except getopt.GetoptError:
- usage()
- sys.exit(2)
- for opt, arg in opts:
- if opt == '--file':
- filename=arg
- elif opt == '--save':
- save=arg
- if filename=='':
- print('No filename specified. You have to give the detectors filename.')
- usage()
- sys.exit(2)
-
-
- ####################### Print time plot ###########################
- print('Generating time plot')
-
- s = stat_parser(filename)
-
- timesteps=s["ElapsedTime"]["value"]
- timestep=timesteps[1]-timesteps[0]
- print("Found ", len(timesteps), " timesteps with dt=", timestep)
- if timestep_ana==0.0:
- timestep_ana=timestep
-
-
- fs=s["water"]["FreeSurface"]
- print("Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ").")
-
-
- # Get and plot results
- plt.ion() # swith on interactive mode
- plt.rcParams['font.size'] = 22
- fig2 = figure(figsize=(8, 6.2))
- fig2.subplots_adjust(left=0.15, bottom=0.15)
- ax2 = fig2.add_subplot(111)
-
- plot_start=580 # in timesteps
- plot_end=581 # in timesteps
- plot_name=''
-
-
-
- for t in range(0,len(timesteps)):
- # ignore the first waveperiod
- if t<plot_start or t>plot_end:
- continue
- fsvalues=[]
- xcoords=[]
- for name, item in fs.iteritems():
- #print name
- xcoords.append(mirror(s[name]['position'][0][0]))
- #print xcoord
- fsvalues.append(fs[name][t])
-
- # Plot result of one timestep
- ax2.plot(xcoords,fsvalues,'b,', label='Numerical solution')
-
- # Plot Analytical solution
- fsvalues_ana=[]
-
- offset=-bathymetry_function(0.0)+dzero
-
- xcoords.sort()
- for x in xcoords:
- fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
-
- # Plot vertical line in bathmetry on right boundary
- xcoords.append(xcoords[len(xcoords)-1]+0.000000001)
- fsvalues_ana.append(2.1)
-
- ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry', linewidth=2.5)
-
- #plt.legend()
- if t==plot_end:
- # change from meters in kilometers in the x-axis
- # return locs, labels where locs is an array of tick locations and
- # labels is an array of tick labels.
- locs, labels = plt.xticks()
- for i in range(0,len(locs)):
- labels[i]=str(locs[i]/1000)
- plt.xticks(locs, labels)
-
- plt.ylim(-2.2,1.4)
- # plt.title(plot_name)
- plt.xlabel('Position [km]')
- plt.ylabel('Free surface [m]')
-
- if save=='':
- plt.draw()
- raw_input("Please press Enter")
- else:
- plt.savefig(save+'.pdf', facecolor='white', edgecolor='black', dpi=100)
- plt.cla()
- t=t+1
-
-
-# Make video from the images:
-
-# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
-
-
-
-
-
-if __name__ == "__main__":
- main()
-
-
diff --git a/tests/wetting_and_drying_balzano3_cg_parallel/wetting_and_drying_balzano3_cg_parallel.xml b/tests/wetting_and_drying_balzano3_cg_parallel/wetting_and_drying_balzano3_cg_parallel.xml
index bcfa57712f..4c163206db 100644
--- a/tests/wetting_and_drying_balzano3_cg_parallel/wetting_and_drying_balzano3_cg_parallel.xml
+++ b/tests/wetting_and_drying_balzano3_cg_parallel/wetting_and_drying_balzano3_cg_parallel.xml
@@ -5,7 +5,7 @@
  <tags>parallel zoltan</tags>
  <command_line>mpiexec ../../bin/flredecomp -i 1 -o 4 -v -l balzano1_2plus1 balzano1_2plus1_flredecomp &amp;&amp;
-mpiexec fluidity balzano1_2plus1_flredecomp.flml; ./plotfs_detec.py --file=balzano1_2plus1.detectors --save=fluidity</command_line>
+mpiexec fluidity balzano1_2plus1_flredecomp.flml</command_line>
import os
diff --git a/tools/testharness.py b/tools/testharness.py
index 89d1ef5359..79c89b7393 100755
--- a/tools/testharness.py
+++ b/tools/testharness.py
@@ -1,547 +1,678 @@
#!/usr/bin/env python3
+import json
+import re
+import subprocess
import sys
-import os
-import os.path
-import glob
-import time
-from io import StringIO
+from argparse import ArgumentParser
+from os import environ, sched_getaffinity
+from pathlib import Path
+from textwrap import dedent, indent
+from time import monotonic, sleep
+from xml.etree.ElementTree import parse
try:
- import fluidity.regressiontest as regressiontest
-except ImportError:
- # try again by adding the path "../python" relative to testharness' own location to sys.path
- head,tail = os.path.split(sys.argv[0])
- python_path = os.path.abspath(os.path.join(head,'..','python'))
- sys.path.append(python_path)
- import fluidity.regressiontest as regressiontest
-
-import traceback
-import multiprocessing
-try:
- import Queue
-except ImportError:
- import queue as Queue
-import xml.parsers.expat
-import string
+ from importlib.metadata import version
-try:
- from junit_xml import TestSuite, TestCase
-except ImportError:
- class TestSuite(object):
- def __init__(self, name, test_cases):
- self.test_cases=test_cases
- def to_file(self,*args):
- print("cannot generate xml report without junit_xml module.")
- class TestCase(object):
- def __init__(self,*args,**kwargs):
+ from junit_xml import TestCase, TestSuite
+
+ assert (
+ float(version("junit_xml")) >= 1.9
+ ), "ERROR: junit_xml version must be at least 1.9 — please update."
+except ImportError: # Provide dummy classes in the absence of junit_xml
+
+ class TestSuite(object):
+ def __init__(self, *args, **kwargs):
+ self.test_cases = []
+
+ def to_file(self, *args, **kwargs):
+ print("WARNING: junit_xml required to produce an XML output file.")
+
+ class TestCase(object):
+ def __init__(self, *args, **kwargs):
pass
- def add_failure_info(self,*args,**kwargs):
+
+ def add_error_info(self, *args, **kwargs):
pass
-# make sure we use the correct version of regressiontest
-sys.path.insert(0, os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), os.pardir, "python"))
-import fluidity.regressiontest as regressiontest
+ def add_failure_info(self, *args, **kwargs):
+ pass
-try:
- import xml.etree.ElementTree as etree
-except ImportError:
- import elementtree.ElementTree as etree
-
-class TestHarness:
- def __init__(self, length="any", parallel="any", exclude_tags=None,
- tags=None, file="", from_file=None,
- verbose=True, justtest=False,
- valgrind=False, genpbs=False, exit_fails=False, xml_outfile=""):
- self.tests = []
- self.verbose = verbose
- self.length = length
- self.parallel = parallel
- self.passcount = 0
- self.failcount = 0
- self.warncount = 0
- self.teststatus = []
- self.completed_tests = []
- self.justtest = justtest
- self.valgrind = valgrind
- self.genpbs = genpbs
- self.xml_parser=TestSuite('TestHarness',[])
- self.cwd=os.getcwd()
- self.iolock = multiprocessing.Lock()
- self.xml_outfile=xml_outfile
- self.exit_fails=exit_fails
-
- fluidity_command = self.decide_fluidity_command()
-
- if file == "":
- print("Test criteria:")
- print("-" * 80)
- print("length: ", length)
- print("parallel: ", parallel)
- print("tags to include: ", tags)
- print("tags to exclude: ", exclude_tags)
- print("-" * 80)
- print()
-
- # step 1. form a list of all the xml files to be considered.
-
- xml_files = []
- rootdir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), os.pardir))
- dirnames = []
- testpaths = ["examples", "tests", "longtests"]
- for directory in testpaths:
- if os.path.exists(os.path.join(rootdir, directory)):
- dirnames.append(directory)
- testdirs = [ os.path.join( rootdir, x ) for x in dirnames ]
- for directory in testdirs:
- subdirs = [ os.path.join(directory, x) for x in os.listdir(directory)]
- for subdir in subdirs:
- g = glob.glob1(subdir, "*.xml")
- for xml_file in g:
- try:
- p = etree.parse(os.path.join(subdir, xml_file))
- x = p.getroot()
- if x.tag == "testproblem":
- xml_files.append(os.path.join(subdir, xml_file))
- except xml.parsers.expat.ExpatError:
- print(("Warning: %s mal-formed" % xml_file))
- traceback.print_exc()
-
- # step 2. if the user has specified a particular file, let's use that.
-
- if file != "":
- files = [file]
- elif from_file:
- try:
- f = open(from_file, 'r')
- files = [line[:-1] for line in f.readlines()]
- except IOError as e:
- sys.stderr.write("Unable to read tests from file %s: %s" % (from_file, e))
- sys.exit(1)
- f.close()
- else:
- files = None
-
- if files:
- for (subdir, xml_file) in [os.path.split(x) for x in xml_files]:
- temp_files=files
- for file in temp_files:
- if xml_file == file:
- p = etree.parse(os.path.join(subdir,xml_file))
- prob_defn = p.findall("problem_definition")[0]
- prob_nprocs = int(prob_defn.attrib["nprocs"])
- testprob = regressiontest.TestProblem(filename=os.path.join(subdir, xml_file),
- verbose=self.verbose, replace=self.modify_command_line(prob_nprocs), genpbs=genpbs)
- self.tests.append((subdir, testprob))
- files.remove(xml_file)
- if files != []:
- print("Could not find the following specified test files:")
- for f in files:
- print(f)
- sys.exit(1)
- return
-
- # step 3. form a cut-down list of the xml files matching the correct length and the correct parallelism.
- working_set = []
- for xml_file in xml_files:
- p = etree.parse(xml_file)
- prob_defn = p.findall("problem_definition")[0]
- prob_length = prob_defn.attrib["length"]
- prob_nprocs = int(prob_defn.attrib["nprocs"])
- if prob_length == length or (length == "any" and prob_length not in ["special", "long"]):
- if self.parallel == "parallel":
- if prob_nprocs > 1:
- working_set.append(xml_file)
- elif self.parallel == "serial":
- if prob_nprocs == 1:
- working_set.append(xml_file)
- elif self.parallel == "any":
- working_set.append(xml_file)
-
- def get_xml_file_tags(xml_file):
- p = etree.parse(xml_file)
- p_tags = p.findall("tags")
- if len(p_tags) > 0 and not p_tags[0].text is None:
- xml_tags = p_tags[0].text.split()
- else:
- xml_tags = []
-
- return xml_tags
-
- # step 4. if there are any excluded tags, let's exclude tests that have
- # them
- if exclude_tags is not None:
- to_remove = []
- for xml_file in working_set:
- p_tags = get_xml_file_tags(xml_file)
- include = True
- for tag in exclude_tags:
- if tag in p_tags:
- include = False
- break
- if not include:
- to_remove.append(xml_file)
- for xml_file in to_remove:
- working_set.remove(xml_file)
-
- # step 5. if there are any tags, let's use them
- if tags is not None:
- tagged_set = []
- for xml_file in working_set:
- p_tags = get_xml_file_tags(xml_file)
-
- include = True
- for tag in tags:
- if tag not in p_tags:
- include = False
-
- if include is True:
- tagged_set.append(xml_file)
- else:
- tagged_set = working_set
-
- for (subdir, xml_file) in [os.path.split(x) for x in tagged_set]:
- # need to grab nprocs here to pass through to modify_command_line
- p = etree.parse(os.path.join(subdir,xml_file))
- prob_defn = p.findall("problem_definition")[0]
- prob_nprocs = int(prob_defn.attrib["nprocs"])
- testprob = regressiontest.TestProblem(filename=os.path.join(subdir, xml_file),
- verbose=self.verbose, replace=self.modify_command_line(prob_nprocs))
- self.tests.append((subdir, testprob))
-
- if len(self.tests) == 0:
- print("Warning: no matching tests.")
-
- def length_matches(self, filelength):
- if self.length == filelength: return True
- if self.length == "medium" and filelength == "short": return True
- return False
-
- def decide_fluidity_command(self):
- bindir = os.environ["PATH"].split(':')[0]
-
- for binaryBase in ["dfluidity", "fluidity"]:
- binary = binaryBase
- debugBinary = binaryBase + "-debug"
- try:
- fluidity_mtime = os.stat(os.path.join(bindir, binary))[-2]
- have_fluidity = True
- except OSError:
- fluidity_mtime = 1e30
- have_fluidity = False
-
- try:
- debug_mtime = os.stat(os.path.join(bindir, debugBinary))[-2]
- have_debug = True
- except OSError:
- debug_mtime = 1e30
- have_debug = False
-
- if have_fluidity is True or have_debug is True:
- if have_fluidity is False and have_debug is True:
- flucmd = debugBinary
-
- elif have_fluidity is True and have_debug is False:
- flucmd = binary
-
- elif fluidity_mtime > debug_mtime:
- flucmd = binary
- else:
- flucmd = debugBinary
-
- # no longer valid since debugging doesn't change the name - any suitable alternative tests?
- # if self.valgrind is True:
- # if flucmd != debugBinary:
- # print("Error: you really should compile with debugging for use with valgrind!")
- # sys.exit(1)
-
- return flucmd
-
- return None
-
- def modify_command_line(self, nprocs):
- flucmd = self.decide_fluidity_command()
- print(flucmd)
- def f(s):
- if not flucmd in [None, "fluidity"]:
- s = s.replace('fluidity ', flucmd + ' ')
-
- if self.valgrind:
- s = "valgrind --tool=memcheck --leak-check=full -v" + \
- " --show-reachable=yes --num-callers=8 --error-limit=no " + \
- "--log-file=test.log " + s
-
- # when calling genpbs, genpbs should take care of inserting the right -n magic
- if not self.genpbs:
- s = s.replace('mpiexec ', 'mpiexec -n %(nprocs)d ' % {'nprocs': nprocs})
-
- return s
-
- return f
-
-
- def log(self, str):
- if self.verbose == True:
- print(str)
-
- def clean(self):
- self.log(" ")
- for t in self.tests:
- os.chdir(t[0])
- t[1].clean()
-
- return
-
- def run(self):
- self.log(" ")
- if not self.justtest:
- threadlist=[]
- self.test_exception_ids = multiprocessing.Queue()
- tests_by_nprocs={}
- for test_id in range(len(self.tests)):
- # sort tests by number of processes requested
- tests_by_nprocs.setdefault(self.tests[test_id][1].nprocs,
- []).append(test_id)
- serial_tests = multiprocessing.Queue()
- for test in tests_by_nprocs.get(1, []):
- # collect serial tests to pass to worker threads
- serial_tests.put(test)
- for nprocs in sorted(list(tests_by_nprocs.keys()), reverse=True):
- for i in range(len(threadlist),
- max(0, options.thread_count-nprocs)):
- # spin up enough new workers to fully subscribe thread count
- threadlist.append(multiprocessing.Process(target=self.threadrun, args=[serial_tests]))
- threadlist[-1].start()
- if nprocs==1:
- # remaining tests are serial. Join the workers
- self.threadrun(serial_tests)
- else:
- tests = tests_by_nprocs[nprocs]
- queue = Queue.Queue()
- for test in tests:
- queue.put(test)
-
- # run the parallel queue on master thread
- self.threadrun(queue)
- for t in threadlist:
- '''Wait until all threads finish'''
- t.join()
-
- exceptions = []
- while True:
- try:
- test_id, lines = self.test_exception_ids.get(timeout=0.1)
- exceptions.append((self.tests[test_id], lines))
- except Queue.Empty:
- break
- for e, lines in exceptions:
- tc=TestCase(e[1].name,
- '%s.%s'%(e[1].length,
- e[1].filename[:-4]))
- tc.add_failure_info("Failure", lines)
- self.xml_parser.test_cases+= [tc]
- self.tests.remove(e)
- self.completed_tests += [e[1]]
-
- count = len(self.tests)
- while True:
- for t in self.tests:
- if t is None: continue
- test = t[1]
- os.chdir(t[0])
- if test.is_finished():
- if test.length == "long":
- test.fl_logs(nLogLines = 20)
- else:
- test.fl_logs(nLogLines = 0)
- try:
- self.teststatus += test.test()
- except:
- self.log("Error: %s raised an exception while testing:" % test.filename)
- lines = traceback.format_exception( sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2] )
- for line in lines:
- self.log(line)
- self.teststatus += ['F']
- test.pass_status = ['F']
- self.completed_tests += [test]
- self.xml_parser.test_cases+=test.xml_reports
- t = None
- count -= 1
-
- if count == 0: break
- time.sleep(60)
- else:
- for t in self.tests:
- test = t[1]
- os.chdir(t[0])
- if self.length == "long":
- test.fl_logs(nLogLines = 20)
- else:
- test.fl_logs(nLogLines = 0)
- self.teststatus += test.test()
- self.completed_tests += [test]
-
- self.xml_parser.test_cases+=test.xml_reports
-
- self.passcount = self.teststatus.count('P')
- self.failcount = self.teststatus.count('F')
- self.warncount = self.teststatus.count('W')
-
- if self.failcount + self.warncount > 0:
- print()
- print("Summary of test problems with failures or warnings:")
- for t in self.completed_tests:
- if t.pass_status.count('F')+t.warn_status.count('W')>0:
- print(t.filename+':', ''.join(t.pass_status+t.warn_status))
- print()
-
- if self.passcount + self.failcount + self.warncount > 0:
- print("Passes: %d" % self.passcount)
- print("Failures: %d" % self.failcount)
- print("Warnings: %d" % self.warncount)
-
- if self.xml_outfile!="":
- fd=open(self.cwd+'/'+self.xml_outfile,'w')
- self.xml_parser.to_file(fd,[self.xml_parser])
- fd.close()
-
- if self.exit_fails:
- sys.exit(self.failcount)
-
-
- def threadrun(self, queue):
- '''This is the portion of the loop which actually runs the
- tests. This is split out so that it can be threaded.
- Each thread runs tests from the queue until it is exhausted.'''
-
- # We use IO locking to attempt to keep output understandable
- # That means writing to a buffer to minimise interactions
- main_stdout = sys.stdout
-
- while True:
- buf = StringIO()
- sys.stdout = buf
- try:
- #pull a test number from the queue
- test_id = queue.get(timeout=0.1)
- (dir, test) = self.tests[test_id]
- except Queue.Empty:
- # If the queue is empty, we're done.
- sys.stdout = main_stdout
- buf.seek(0)
- with self.iolock:
- print (buf.read())
- break
+ def add_skipped_info(self, *args, **kwargs):
+ pass
- try:
- runtime=test.run(dir)
- if self.length=="short" and runtime>30.0:
- self.log("Warning: short test ran for %f seconds which"+
- " is longer than the permitted 30s run time"%runtime)
- self.teststatus += ['W']
- test.pass_status = ['W']
-
- except:
- self.log("Error: %s raised an exception while running:" % test.filename)
- lines = traceback.format_exception( sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2] )
- for line in lines:
- self.log(line)
- test.pass_status = ['F']
- self.test_exception_ids.put((test_id, lines))
- finally:
- sys.stdout = main_stdout
- buf.seek(0)
- with self.iolock:
- print(buf.read())
-
- def list(self):
- for (subdir, test) in self.tests:
- print(os.path.join(subdir, test.filename))
-
-
-if __name__ == "__main__":
- import optparse
-
- parser = optparse.OptionParser()
- parser.add_option("-l", "--length", dest="length", help="length of problem (default=any)", default="any")
- parser.add_option("-p", "--parallelism", dest="parallel", help="parallelism of problem: options are serial, parallel or any (default=any)",
- default="any")
- parser.add_option("-e", "--exclude-tags", dest="exclude_tags", help="run only tests that do not have specific tags (takes precidence over -t)", default=[], action="append")
- parser.add_option("-t", "--tags", dest="tags", help="run tests with specific tags", default=[], action="append")
- parser.add_option("-f", "--file", dest="file", help="specific test case to run (by filename)", default="")
- parser.add_option("--from-file", dest="from_file", default=None,
- help="run tests listed in FROM_FILE (one test per line)")
- parser.add_option("-n", "--threads", dest="thread_count", type="int",
- help="number of tests to run at the same time", default=1)
- parser.add_option("-v", "--valgrind", action="store_true", dest="valgrind")
- parser.add_option("-c", "--clean", action="store_true", dest="clean", default = False)
- parser.add_option("--just-test", action="store_true", dest="justtest", default=False)
- parser.add_option("--just-list", action="store_true", dest="justlist")
- parser.add_option("--genpbs", action="store_true", dest="genpbs")
- parser.add_option("-x","--xml-output", dest="xml_outfile", default="", help="filename for xml output")
- parser.add_option("--exit-failure-count", action="store_true", dest="exit_fails", help="Return failure count on exit")
- (options, args) = parser.parse_args()
-
- if len(args) > 0: parser.error("Too many arguments.")
-
- if options.parallel not in ['serial', 'parallel', 'any']:
- parser.error("Specify parallelism as either serial, parallel or any.")
-
- os.environ["PATH"] = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "bin")) + ":" + os.environ["PATH"]
- try:
- os.environ["PYTHONPATH"] = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "python")) + ":" + os.environ["PYTHONPATH"]
- except KeyError:
- os.putenv("PYTHONPATH", os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "python")))
- try:
- os.environ["LD_LIBRARY_PATH"] = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "lib")) + ":" + os.environ["LD_LIBRARY_PATH"]
- except KeyError:
- os.putenv("LD_LIBRARY_PATH", os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "lib")))
+def filter_tests(xml_files, test_suite):
+ """Iterate through all found XML files and retain only the ones that match the
+ program input arguments. Update the JUnit record with skipped tests."""
+ tests = {}
+ for xml_file in xml_files:
+ # Obtain basic information about the test
+ parsed_xml = parse(xml_file).getroot()
+ if parsed_xml.tag != "testproblem":
+ print(f"Skipping {parsed_xml} — root tag differs from 'testproblem'.")
+ continue
+ prob_def = parsed_xml.find("problem_definition")
+ prob_length = prob_def.get("length")
+ prob_nprocs = int(prob_def.get("nprocs"))
+ # Manually insert the required number of cores for parallel tests
+ prob_command = prob_def.find("command_line").text.replace(
+ "mpiexec", f"mpiexec -n {prob_nprocs}"
+ )
+ # Indicate if the test belongs to 'examples' or 'tests' and specify its length.
+ prob_classname = f"{xml_file.parts[-3]}.{prob_length}"
+ try:
+ xml_tags = parsed_xml.find("tags").text.split()
+ except AttributeError: # If no tags are present, assign an empty list
+ xml_tags = []
+ # Conditions to exclude a test
+ length_condition = (
+ False if args.length is None else prob_length not in args.length
+ )
+ nprocs_condition = (args.exec_type == "parallel" and prob_nprocs <= 1) or (
+ args.exec_type == "serial" and prob_nprocs != 1
+ )
+ omitted_tags_condition = set(xml_tags).intersection(args.omit_tags)
+ required_tags_condition = set(args.tags).difference(xml_tags)
+ # Skip the test if any exclusion condition is met and update the JUnit record
+ if (
+ length_condition
+ or nprocs_condition
+ or omitted_tags_condition
+ or required_tags_condition
+ ):
+ test_case = TestCase(
+ name=xml_file.stem, classname=prob_classname, status="Skipped"
+ )
+ test_case.add_skipped_info(message="Test suite conditions unmet.")
+ test_suite.test_cases.append(test_case)
+ continue
+ # Populate dictionary with test information
+ tests[xml_file] = {
+ "id": prob_classname,
+ "length": prob_length,
+ "nprocs": prob_nprocs,
+ "command": prob_command,
+ "variables": {},
+ "pass_tests": {},
+ "warn_tests": {},
+ "elapsed_time": 0,
+ "stdout": "\n",
+ "stderr": "\n",
+ }
+ for var in parsed_xml.iter("variable"):
+ assert var.get("language").strip() == "python"
+ tests[xml_file]["variables"][var.get("name")] = dedent(var.text)
+ for test in parsed_xml.find("pass_tests"):
+ assert test.get("language").strip() == "python"
+ tests[xml_file]["pass_tests"][test.get("name")] = dedent(test.text)
+ try:
+ for test in parsed_xml.find("warn_tests"):
+ assert test.get("language").strip() == "python"
+ tests[xml_file]["warn_tests"][test.get("name")] = dedent(test.text)
+ except TypeError: # This test does not contain Warn Tests
+ pass
+ return tests
+
+
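For reference, filter_tests above expects testproblem documents shaped as follows. The sample is hypothetical: the element and attribute names are taken from the lookups in the function, not from a real test in the suite.

from xml.etree.ElementTree import fromstring

sample = """\
<testproblem>
  <name>example_case</name>
  <problem_definition length="short" nprocs="2">
    <command_line>mpiexec fluidity example.flml</command_line>
  </problem_definition>
  <tags>flml zoltan</tags>
  <variables>
    <variable name="final_time" language="python">final_time = 1.0</variable>
  </variables>
  <pass_tests>
    <test name="completed" language="python">assert final_time &gt; 0</test>
  </pass_tests>
</testproblem>"""

root = fromstring(sample)
prob_def = root.find("problem_definition")
# The same mpiexec rewrite performed in filter_tests
command = prob_def.find("command_line").text.replace(
    "mpiexec", f"mpiexec -n {int(prob_def.get('nprocs'))}"
)
assert command == "mpiexec -n 2 fluidity example.flml"
assert root.find("tags").text.split() == ["flml", "zoltan"]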
+def gather_tests():
+ """Look for tests given the program input arguments."""
+ if args.file: # Specific test requested
+ # xml_files = [Path(args.file)]
+ xml_files = [xml_file for xml_file in fluidity_root.rglob(args.file)]
+ elif args.from_file: # Specific list of tests requested
+ with open(args.from_file, "r") as fid:
+ xml_files = [Path(test_name.rstrip()) for test_name in fid]
+ else: # Gather all XML files that can be found
+ test_paths = ["examples", "tests", "longtests"]
+ xml_files = [
+ xml_file
+ for test_path in test_paths
+ if (fluidity_root / test_path).exists()
+ for xml_file in (fluidity_root / test_path).glob("*/*.xml")
+ ]
+ return xml_files
+
+
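A note on the patterns above: glob("*/*.xml") only matches files exactly one directory below each test path, whereas the --file branch uses rglob to search the whole tree. A tiny illustration with a hypothetical checkout path:

from pathlib import Path

fluidity_root = Path("/path/to/fluidity")  # hypothetical location
shallow = (fluidity_root / "tests").glob("*/*.xml")  # tests/<case>/<case>.xml only
recursive = fluidity_root.rglob("my_test.xml")       # searches every subdirectory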
+def generate_python_test_string(test_type, assertion_status, test, test_str):
+ """Encapsulate within a try/except structure the Python code to execute to properly
+ catch potential exceptions and obtain tracebacks."""
+ return f"""
+print("# {test_type} Test: {test}")
+try:
+{indent(test_str.strip(), " ")}
+ print("--- Success")
+except AssertionError:
+ from sys import stderr
+ from traceback import print_exc
+ print("--- {assertion_status} !!!")
+ print_exc()
+ print("~~~ End of Traceback ~~~", file=stderr)
+except Exception:
+ from sys import stderr
+ from traceback import print_exc
+ print("--- Error !!!")
+ print_exc()
+ print("~~~ End of Traceback ~~~", file=stderr)"""
+
+
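A usage sketch for generate_python_test_string, assuming the function above is in scope; the wrapped snippet is a deliberately failing assertion. Running the generated code shows the stdout markers and the stderr sentinel that process_error parses further down.

import sys
from subprocess import run

code = generate_python_test_string("Pass", "Failure", "check norm", "assert 1 == 2")
result = run([sys.executable, "-c", code], capture_output=True, text=True)
# result.stdout contains "# Pass Test: check norm" followed by "--- Failure !!!";
# result.stderr ends the traceback with the "~~~ End of Traceback ~~~" sentinel.
print(result.stdout, result.stderr)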
+def poll_processes(
+ running_procs,
+ core_counter,
+ serial,
+ return_list,
+ task_string,
+ process_interpreter,
+ test_suite,
+):
+ """Check if running processes have terminated and deal with results."""
+ proc_status = [
+ tests[running_xml]["running_proc"].poll() for running_xml in running_procs
+ ]
+ # Reversed to be able to remove items from lists without skipping any item
+ for status, running_xml in zip(reversed(proc_status), reversed(running_procs)):
+ if status is not None: # Test has terminated
+ current_test = tests[running_xml]
+ # Measure an upper bound for the test elapsed time
+ current_test["elapsed_time"] += monotonic() - current_test["create_time"]
+ # Update objects that keep track of the current activity
+ task_ncores = 1 if serial else current_test["nprocs"]
+ core_counter -= task_ncores
+ running_procs.remove(running_xml)
+ # Recover standard output and error streams
+ stdout = "".join([line for line in read_stream(running_xml, "stdout_file")])
+ stderr = "".join([line for line in read_stream(running_xml, "stderr_file")])
+ current_test["stdout"] += f"{task_string}\n{stdout}\n"
+ current_test["stderr"] += f"{task_string}\n{stderr}\n"
+ # Deal with successful processes
+ if "No rule to make target 'input'" in stderr or (
+ status == 0
+ and all(
+ x not in stderr for x in ["Traceback", "/bin/bash", "MPI_ABORT"]
+ )
+ ):
+ return_list.append(running_xml)
+ else: # Deal with errors
+ process_error(
+ running_xml, process_interpreter, stdout, stderr, test_suite
+ )
+ sleep(0.1) # Avoid high-intensity polling
+ return core_counter
+
+
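poll_processes above is a non-blocking scheduler sweep built on Popen.poll(). Below is a stripped-down, self-contained illustration of the same pattern; the two commands are stand-ins, not tests from the suite.

from subprocess import Popen
from time import sleep

procs = {"ok": Popen(["true"]), "bad": Popen(["false"])}
passed, failed = [], []
while procs:
    for name, proc in list(procs.items()):
        status = proc.poll()
        if status is None:  # still running; revisit on the next sweep
            continue
        del procs[name]
        (passed if status == 0 else failed).append(name)
    sleep(0.1)  # avoid high-intensity polling, as above
assert passed == ["ok"] and failed == ["bad"]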
+def process_error(test_xml, process_interpreter, stdout, stderr, test_suite):
+ """Process tests that did not complete successfully and update the JUnit record
+ accordingly."""
+ # Add an entry to the XML parser
+ test_case = TestCase(
+ name=test_xml.stem,
+ classname=tests[test_xml]["id"],
+ elapsed_sec=tests[test_xml]["elapsed_time"],
+ stdout=tests[test_xml]["stdout"],
+ stderr=tests[test_xml]["stderr"],
+ allow_multiple_subelements=True,
+ )
+ # Errors generated in make input or while running command(s)
+ if process_interpreter == "Shell":
+ error_list.append(test_xml.stem)
+ test_case.status = "Error"
+ test_case.add_error_info(
+ message=f"{test_xml.stem}: Shell script failed.", output=f"\n{stderr}"
+ )
+ if tests[test_xml]["running_proc"].returncode == 0:
+ tests[test_xml]["running_proc"].returncode = "Failed command"
+ # Errors/Failures generated in Python variables/tests
+ elif process_interpreter == "Python":
+ python_lines = tests[test_xml]["running_proc"].args[-1].split("\n")
+ # Error while parsing Python test string
+ if any(
+ error in stderr for error in ["IndentationError", "SyntaxError", "TabError"]
+ ):
+ error_list.append(test_xml.stem)
+ test_case.status = "Error"
+ test_case.add_error_info(
+ message=f"{test_xml.stem}: Parsing failed", output=f"\n{stderr}"
+ )
+ # Failure(s) within actual test(s)
+ elif any(f"# {kind} Test" in stdout for kind in ["Pass", "Warn"]):
+ # Identify unsuccessful Python tests through stdout
+ regex = (
+ r"(?<=^#\s)"
+ ".+"
+ "(?=(\n^[^#].+|\n^#(?! Pass Test| Warn Test).+){0,}"
+ "\n--- (Error|Failure|Warning))"
+ )
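+ # e.g. in "# Pass Test: check_norm\n--- Failure !!!", the regex captures
+ # "Pass Test: check_norm", which is then split into test type and test name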
+ failed_tests = [
+ match.group().split(": ", maxsplit=1)
+ for match in re.finditer(regex, stdout, re.MULTILINE)
+ ]
+ # Split stderr into individual tracebacks
+ regex = "(^Traceback).+(\n.+)+?(?=\n~~~ End of Traceback ~~~)"
+ tracebacks = [
+ match.group() for match in re.finditer(regex, stderr, re.MULTILINE)
+ ]
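+ # each traceback ends at the '~~~ End of Traceback ~~~' sentinel written to
+ # stderr by the snippets built in generate_python_test_string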
+ flag_pass, flag_warn = True, True
+ for traceback, (test_type, test_name) in zip(tracebacks, failed_tests):
+ if flag_pass and test_type == "Pass Test":
+ failure_list.append(test_xml.stem)
+ test_case.status = "Failure"
+ flag_pass = False
+ elif flag_warn and test_type == "Warn Test":
+ warning_list.append(test_xml.stem)
+ test_case.status = "Warning" if flag_pass else "Failure and Warning"
+ flag_warn = False
+ line_nb = re.search(
+ '(?<="<string>", line )[0-9]+(?=, in <module>)', traceback
+ ).group()
+ python_error_line = python_lines[int(line_nb) - 1][4:]
+ traceback = traceback.replace(
+ f'File "<string>", line {line_nb}, in <module>',
+ f"\n Caught exception at '{python_error_line.strip()}'\n",
+ )
+ failure_type = "failure" if "Pass" in test_type else "warning"
+ test_case.add_failure_info(
+ message=f"{test_xml.stem}: Test '{test_name}' failed",
+ output=f"\n{traceback}",
+ failure_type=failure_type,
+ )
+ else: # Error within variable
+ error_list.append(test_xml.stem)
+ test_case.status = "Error"
+ try:
+ line_nb = re.search(
+ '(?<="<string>", line )[0-9]+(?=, in <module>)', stderr
+ ).group()
+ python_error_line = python_lines[int(line_nb) - 1]
+ traceback = stderr.replace(
+ f'File "<string>", line {line_nb}, in <module>',
+ f"\n Caught exception at '{python_error_line.strip()}'\n",
+ )
+ except AttributeError:
+ traceback = stderr
+ var_name = list(
+ re.finditer(r"(?<=^#\sVariable:\s).+", stdout, re.MULTILINE)
+ )[-1].group()
+ test_case.add_error_info(
+ message=f"{test_xml.stem}: Variable '{var_name}' failed.",
+ output=f"\n{traceback}",
+ )
+ if tests[test_xml]["running_proc"].returncode == 0:
+ tests[test_xml]["running_proc"].returncode = "Python exception"
+ test_suite.test_cases.append(test_case)
+ # Print relevant information regarding the error
+ print(f"\n* ERROR: {test_xml.stem} exited with a non-zero exit code.")
+ print(f"* Exit status: {tests[test_xml]['running_proc'].returncode}")
+ print(f"* Output:\n{stdout.strip()}")
+ print(f"* Stderr output:\n{stderr}")
+
+
+def read_stream(test_xml, stream_key):
+ """Read content of provided stream."""
+ # Set file object’s position to the beginning of the file
+ tests[test_xml][stream_key].seek(0)
+ stream = tests[test_xml][stream_key].readlines()
+ if not stream: # Check if stream is empty
+ stream = ["No output generated.\n"]
+ tests[test_xml][stream_key].close() # Close file object
+ return stream
+
+
+def run_tasks(
+ task_string, tests_list, serial, process_interpreter, task_function, test_suite
+):
+ """Iterate through the test list and execute the provided function in parallel."""
+ return_list, running_procs, core_counter = [], [], 0
+ print(task_string)
+ # Start tasks whilst there are queuing tests
+ while tests_list:
+ # Remove and obtain a test from the end of the queue
+ test_xml = tests_list.pop()
+ current_test = tests[test_xml]
+ # Determine the amount of cores required
+ task_ncores = 1 if serial else current_test["nprocs"]
+ # Check if there are sufficient resources available; allow tests requesting a
+ # higher number of cores than available to run by oversubscribing nodes (MPI),
+ # with only one oversubscribed test at a time
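+ # (e.g. with 8 cores available and 4 in use, a 12-core test may still start;
+ # the counter then exceeds the limit, deferring everything else until the
+ # load drops)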
+ if core_counter >= core_avail or (
+ core_counter + task_ncores > core_avail and task_ncores <= core_avail
+ ):
+ # Check if some processes have terminated
+ core_counter = poll_processes(
+ running_procs,
+ core_counter,
+ serial,
+ return_list,
+ task_string,
+ process_interpreter,
+ test_suite,
+ )
+ # Check if sufficient resources have become available
+ if core_counter >= core_avail or (
+ core_counter + task_ncores > core_avail and task_ncores <= core_avail
+ ):
+ # Re-insert the test at the beginning of the queue
+ tests_list.insert(0, test_xml)
+ # Skip the remainder of the loop iteration
+ continue
+ print(f"\t-> New test: {test_xml.stem}")
+ # Open streams to re-direct stdout and stderr
+ current_test["stdout_file"] = open(test_xml.parent / "stdout", "w+")
+ current_test["stderr_file"] = open(test_xml.parent / "stderr", "w+")
+ # Register the starting time of the task
+ current_test["create_time"] = monotonic()
+ # Submit task and update objects that keep track of the current load
+ task_function(test_xml)
+ core_counter += task_ncores
+ running_procs.append(test_xml)
+ # Once the queue is empty, wait for the processes that are still running
+ while running_procs:
+ core_counter = poll_processes(
+ running_procs,
+ core_counter,
+ serial,
+ return_list,
+ task_string,
+ process_interpreter,
+ test_suite,
+ )
+
+ # Check that the objects which track the current load have returned to their
+ # nominal values
+ assert core_counter == 0 and not running_procs
+
+ return return_list
+
+
+def set_environment_variable(env_var, env_path):
+ """Set or prepend to the requested environment variable."""
try:
- os.mkdir(os.environ["HOME"] + os.sep + "lock")
- except OSError:
- pass
-
- if len(options.exclude_tags) == 0:
- exclude_tags = None
- else:
- exclude_tags = options.exclude_tags
-
- if len(options.tags) == 0:
- tags = None
- else:
- tags = options.tags
-
- testharness = TestHarness(length=options.length, parallel=options.parallel,
- exclude_tags=exclude_tags, tags=tags,
- file=options.file, verbose=True,
- justtest=options.justtest,
- valgrind=options.valgrind,
- from_file=options.from_file,
- genpbs=options.genpbs,
- exit_fails=options.exit_fails,
- xml_outfile=options.xml_outfile)
-
- if options.justlist:
- testharness.list()
- elif options.clean:
- testharness.clean()
- else:
- print("-" * 80)
- which = os.popen("which %s" % testharness.decide_fluidity_command()).read()
- if len(which) > 0:
- print("which %s: %s" % ("fluidity", which), end=' ')
- versio = os.popen("%s -V" % testharness.decide_fluidity_command()).read()
- if len(versio) > 0:
- print(versio)
- print("-" * 80)
-
- if options.valgrind is True:
- print("-" * 80)
- print("I see you are using valgrind!")
- print("A couple of points to remember.")
- print("a) The log file will be produced in the directory containing the tests.")
- print("b) Valgrind typically takes O(100) times as long. I hope your test is short.")
- print("-" * 80)
-
- testharness.run()
+ environ[env_var] = f"{env_path}:{environ[env_var]}"
+ except KeyError: # If the environment variable does not exist, create it
+ environ[env_var] = str(env_path)
+
+
+def task_make_input(test_xml):
+ """Execute `make input`."""
+ tests[test_xml]["running_proc"] = subprocess.Popen(
+ ["make", "input"],
+ cwd=test_xml.parent,
+ encoding="utf-8",
+ stdout=tests[test_xml]["stdout_file"],
+ stderr=tests[test_xml]["stderr_file"],
+ )
+
+
+def task_run_commands(test_xml):
+ """Execute test instructions."""
+ tests[test_xml]["running_proc"] = subprocess.Popen(
+ tests[test_xml]["command"],
+ cwd=test_xml.parent,
+ shell=True,
+ encoding="utf-8",
+ stdout=tests[test_xml]["stdout_file"],
+ stderr=tests[test_xml]["stderr_file"],
+ )
+
+
+def task_run_tests(test_xml):
+ """Calculate Python variables specific to the test and assess their values."""
+ # Join variable strings together
+ var_string = "\n".join(
+ [
+ f"print('# Variable: {var}')\n{var_str.strip()}"
+ for var, var_str in tests[test_xml]["variables"].items()
+ ]
+ )
+ # Join pass-test strings together
+ pass_string = "\n".join(
+ [
+ generate_python_test_string("Pass", "Failure", test, test_str)
+ for test, test_str in tests[test_xml]["pass_tests"].items()
+ ]
+ )
+ # Join warn-test strings together
+ warn_string = "\n".join(
+ [
+ generate_python_test_string("Warn", "Warning", test, test_str)
+ for test, test_str in tests[test_xml]["warn_tests"].items()
+ ]
+ )
+ # Join all three strings
+ test_string = "\n".join([var_string, pass_string, warn_string])
+ tests[test_xml]["running_proc"] = subprocess.Popen(
+ [sys.executable, "-c", test_string],
+ cwd=test_xml.parent,
+ encoding="utf-8",
+ stdout=tests[test_xml]["stdout_file"],
+ stderr=tests[test_xml]["stderr_file"],
+ )
+
+
+parser = ArgumentParser(description="Fluidity Test Harness")
+parser.add_argument(
+ "--clean", action="store_true", help="call `make clean` for each test found"
+)
+parser.add_argument(
+ "-e",
+ "--exec-type",
+ default="any",
+ choices=["serial", "parallel"],
+ help="specify which kind of tests to run; choose either serial or parallel",
+ metavar="TYPE",
+)
+parser.add_argument("-f", "--file", help="run a single test — expects an XML file")
+parser.add_argument(
+ "--from-file",
+ help="path to a file where to read which tests to run — one XML filepath per line",
+ metavar="FILE",
+)
+parser.add_argument(
+ "--just-list",
+ nargs="?",
+ const=True,
+ default=False,
+ help="print which tests were found and save the list to a JSON file if provided",
+ metavar="FILE",
+)
+parser.add_argument(
+ "--just-test",
+ action="store_true",
+ help="execute Python instructions without re-running test core commands",
+)
+parser.add_argument(
+ "-l",
+ "--length",
+ action="append",
+ choices=["vshort", "short", "medium", "long", "vlong"],
+ help="test length(s) to be run; choose from vshort, short, medium, long or vlong",
+)
+parser.add_argument(
+ "-n",
+ "--ncores",
+ type=int,
+ help="number of logical cores to target",
+ metavar="CORES",
+)
+parser.add_argument(
+ "-o",
+ "--omit-tags",
+ action="append",
+ default=[],
+ help="tags identifying which tests to exclude",
+)
+parser.add_argument(
+ "-t",
+ "--tags",
+ action="append",
+ default=[],
+ help="tags identifying which tests to run",
+)
+parser.add_argument("-v", "--valgrind", action="store_true", help="enable Valgrind")
+parser.add_argument("-x", "--xml-output", help="XML output filename", metavar="OUTPUT")
+args = parser.parse_args()
+
+# Obtain the path to the root of the Fluidity directory
+fluidity_root = Path(sys.argv[0]).resolve().parent.parent
+
+set_environment_variable("PATH", fluidity_root / "bin")
+set_environment_variable("PATH", fluidity_root / "libspud" / "bin")
+set_environment_variable("PYTHONPATH", fluidity_root / "python")
+set_environment_variable("PYTHONPATH", fluidity_root / "libspud" / "diamond")
+set_environment_variable("LD_LIBRARY_PATH", fluidity_root / "lib")
+
+print(
+ f"""*** Test criteria
+\t-> length: {"any" if args.length is None else " ".join(args.length)}
+\t-> parallel: {args.exec_type}
+\t-> tags to include: {args.tags}
+\t-> tags to exclude: {args.omit_tags}
+"""
+)
+
+xml_files = gather_tests()
+assert xml_files, "No tests were found."
+test_suite = TestSuite("Test Harness")
+tests = filter_tests(xml_files, test_suite)
+assert tests, "No tests matched the provided test criteria."
+
+if args.just_list:
+ print("*** Found tests that match the input criteria")
+ for test_xml in sorted(tests.keys()):
+ print(f"\t-> {test_xml.stem}")
+ nb_tests = len(tests.keys())
+ print(f"{nb_tests} test{'s' if nb_tests > 1 else ''} found.")
+ if isinstance(args.just_list, str):
+ with open(args.just_list, "w") as fid:
+ json.dump([test.name for test in tests.keys()], fid)
+ raise SystemExit
+elif args.clean:
+ print("*** Cleaning")
+ for test_xml in tests.keys():
+ print(f"\t-> {test_xml.stem}: Calling `make clean`.")
+ try:
+ subprocess.run(
+ ["make", "clean"],
+ cwd=test_xml.parent,
+ encoding="utf-8",
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ check=True,
+ )
+ except subprocess.CalledProcessError as test_error:
+ if "No rule to make target 'clean'" in test_error.stderr:
+ pass
+ else:
+ raise test_error
+ print("Cleaning done.")
+ raise SystemExit
+
+fluidity_version = subprocess.run(
+ ["fluidity", "-V"],
+ check=True,
+ encoding="utf-8",
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+)
+print(
+ f"""{"-" * 80}
+Output of "fluidity -V"
+{fluidity_version.stderr.strip()}
+{"-" * 80}
+"""
+)
+if args.valgrind:
+ # Make sure Fluidity has been compiled in debug mode
+ assert (
+ "debugging" in fluidity_version.stderr
+ ), "Please compile Fluidity in debug mode to use valgrind."
+ print(
+ f"""{"-" * 80}
+I see you are using Valgrind!
+Keep the following in mind:
+- The log file will be produced in the directory containing the tests.
+- Valgrind typically takes O(100) times as long. I hope your test is short.
+{"-" * 80}
+"""
+ )
+ # Prepend valgrind command
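+ # (`--log-file=test.log` is resolved in each test's directory, since test
+ # commands run with cwd set to the test folder)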
+ for test_xml in tests.keys():
+ # Limitation: when a command contains several commands delimited by ";",
+ # only the first one is wrapped by Valgrind
+ tests[test_xml][
+ "command"
+ ] = f"""valgrind --tool=memcheck \
+--leak-check=full -v --show-reachable=yes --num-callers=8 --error-limit=no \
+--log-file=test.log {tests[test_xml]["command"]}"""
+
+core_avail = len(sched_getaffinity(0))
+if args.ncores is not None:
+ core_avail = min(args.ncores, core_avail)
+error_list, failure_list, warning_list = [], [], []
+
+tests_list = list(tests.keys())
+if not args.just_test:
+ tests_list = run_tasks(
+ "*** Executing 'make input'",
+ tests_list,
+ True,
+ "Shell",
+ task_make_input,
+ test_suite,
+ )
+ if tests_list:
+ tests_list = run_tasks(
+ "*** Executing test commands from XML files",
+ tests_list,
+ False,
+ "Shell",
+ task_run_commands,
+ test_suite,
+ )
+if tests_list:
+ tests_list = run_tasks(
+ "*** Executing Python tests",
+ tests_list,
+ True,
+ "Python",
+ task_run_tests,
+ test_suite,
+ )
+
+for test_xml in tests_list:
+ test_case = TestCase(
+ name=test_xml.stem,
+ classname=tests[test_xml]["id"],
+ elapsed_sec=tests[test_xml]["elapsed_time"],
+ status="Success",
+ )
+ test_case.stdout = tests[test_xml]["stdout"]
+ test_case.stderr = tests[test_xml]["stderr"]
+ test_suite.test_cases.append(test_case)
+
+if args.xml_output:
+ with open(args.xml_output, "w") as fid:
+ test_suite.to_file(fid, [test_suite])
+
+if any([error_list, failure_list, warning_list]):
+ if error_list:
+ print("Summary of test problems that produced errors:")
+ for test in error_list:
+ print(f"-> {test}")
+ if failure_list:
+ print("Summary of test problems that produced failures:")
+ for test in failure_list:
+ print(f"-> {test}")
+ if warning_list:
+ print("Summary of test problems that produced warnings:")
+ for test in warning_list:
+ print(f"-> {test}")
+else:
+ print("Test suite completed successfully.")