diff --git a/config/kub/kub_plots.json b/config/kub/kub_plots.json
index 2d8c5237..e209817e 100644
--- a/config/kub/kub_plots.json
+++ b/config/kub/kub_plots.json
@@ -2,13 +2,27 @@
     "plots": [
         {
-            "title": "Absolute performance",
-            "plot_types": [ "grouped_bar", "table" ],
+            "title": "General Relative performance",
+            "plot_types": [ "stacked_bar", "grouped_bar" ],
+            "transformation": "relative_performance",
+            "variables": [ "constructor", "simulation", "postprocess" ],
+            "names": ["Constructor", "Simulation", "PostProcess"],
+            "xaxis": {
+                "parameter": "resources.tasks",
+                "label": "Number of tasks"
+            },
+            "yaxis": {
+                "label": "execution time (s)"
+            }
+        },
+        {
+            "title": "General Absolute performance",
+            "plot_types": [ "grouped_bar","stacked_bar"],
             "transformation": "performance",
-            "variables": [ "execute", "postprocess", "constructor", "updateForUse", "simulation" ],
-            "names": [],
+            "variables": [ "constructor", "simulation", "postprocess" ],
+            "names": ["Constructor", "Simulation", "PostProcess"],
             "xaxis": {
-                "parameter": "nb_tasks.tasks_per_node",
+                "parameter": "resources.tasks",
                 "label": "Number of tasks"
             },
             "yaxis": {
@@ -16,13 +30,71 @@
             }
         },
         {
-            "title": "Relative performance",
-            "plot_types": [ "stacked_bar", "grouped_bar" ],
-            "transformation": "relative_performance",
-            "variables": [ "execute", "postprocess", "constructor", "updateForUse", "simulation" ],
-            "names": [],
+            "title": "General Speedup",
+            "plot_types": [ "scatter" ],
+            "transformation": "speedup",
+            "variables": [ "constructor", "simulation","postprocess" ],
+            "names": ["Constructor", "Simulation", "PostProcess"],
+            "xaxis": {
+                "parameter": "resources.tasks",
+                "label": "Number of tasks"
+            },
+            "yaxis": {
+                "label": "Speedup"
+            }
+        },
+        {
+            "title": "Constructor UpdateForUse Absolute performance",
+            "plot_types": [ "grouped_bar" ],
+            "transformation": "performance",
+            "variables": [
+                "constructor.updateForUse.SunModel", "constructor.updateForUse.updateMeshBuilding", "constructor.updateForUse.weather",
+                "constructor.updateForUse.buildingMap", "constructor.updateForUse.gisMeshInit", "constructor.updateForUse.gisPartition",
+                "constructor.updateForUse.initBuildingModels", "constructor.updateForUse.scenarios"
+            ],
+            "names": [
+                "SunModel", "updateMeshBuilding", "weather",
+                "buildingMap", "gisMeshInit", "gisPartition",
+                "initBuildingModels", "scenarios"
+            ],
+            "xaxis": {
+                "parameter": "resources.tasks",
+                "label": "Number of tasks"
+            },
+            "yaxis": {
+                "label": "execution time (s)"
+            }
+        },
+        {
+            "title": "Constructor Speedup",
+            "plot_types": [ "scatter" ],
+            "transformation": "speedup",
+            "variables": [
+                "constructor.updateForUse.SunModel", "constructor.updateForUse.updateMeshBuilding", "constructor.updateForUse.weather",
+                "constructor.updateForUse.buildingMap", "constructor.updateForUse.gisMeshInit", "constructor.updateForUse.gisPartition",
+                "constructor.updateForUse.initBuildingModels", "constructor.updateForUse.scenarios"
+            ],
+            "names": [
+                "SunModel", "updateMeshBuilding", "weather",
+                "buildingMap", "gisMeshInit", "gisPartition",
+                "initBuildingModels", "scenarios"
+            ],
+            "xaxis": {
+                "parameter": "resources.tasks",
+                "label": "Number of tasks"
+            },
+            "yaxis": {
+                "label": "Speedup"
+            }
+        },
"nb_tasks.tasks_per_node", + "parameter": "resources.tasks", "label": "Number of tasks" }, "yaxis": { @@ -30,13 +102,13 @@ } }, { - "title": "Speedup", + "title": "Simulation Speedup", "plot_types": [ "scatter" ], "transformation": "speedup", - "variables": [ "execute", "postprocess", "constructor", "updateForUse", "simulation" ], - "names": [], + "variables": [ "simulation.solarMasks", "simulation.buildingExecuteStep" ], + "names": ["Solar Masks","Building Execute Step"], "xaxis": { - "parameter": "nb_tasks.tasks_per_node", + "parameter": "resources.tasks", "label": "Number of tasks" }, "yaxis": { diff --git a/config/kub/poznan.json b/config/kub/poznan.json index 0f9b26a3..9d7094a7 100644 --- a/config/kub/poznan.json +++ b/config/kub/poznan.json @@ -1,44 +1,53 @@ { "executable": "feelpp_kub_cem", - "output_directory": "{{machine.output_app_dir}}/kub", + "output_directory": "{{machine.output_app_dir}}/javier_test/kub", "use_case_name": "poznan", "timeout":"0-0:15:0", + "resources":{ + "tasks":"{{parameters.resources.tasks.value}}", + "exclusive_access":"{{parameters.resources.exclusive_access.value}}" + }, "platforms": { "apptainer":{ "image": { + "remote":"oras://ghcr.io/feelpp/ktirio-urban-building:master-sif", "name":"{{machine.containers.apptainer.image_base_dir}}/kub.sif" }, - "input_dir":"/input_data/", + "input_dir":"/input_data", "options": [ "--home {{machine.output_app_dir}}", - "--bind {{machine.input_dataset_base_dir}}/kub/cfg/:{{platforms.apptainer.input_dir}}" + "--bind {{machine.input_dataset_base_dir}}:{{platforms.apptainer.input_dir}}", + "--env OMP_NUM_THREADS=1" ], "append_app_option":[] }, "builtin":{ - "input_dir":"{{machine.input_dataset_base_dir}}/kub/cfg/", + "input_dir":"{{machine.input_dataset_base_dir}}", "append_app_option":[] } }, + "input_file_dependencies":{ + "case_cfg":"kub/cfg/Poznan.cfg" + }, "options": [ - "--config-file {{platforms.{{machine.platform}}.input_dir}}/Poznan.cfg", - "--cem.database.directory {{output_directory}}/{{use_case_name}}/{{instance}}" + "--config-file {{platforms.{{machine.platform}}.input_dir}}/{{input_file_dependencies.case_cfg}}", + "--cem.database.directory {{output_directory}}/{{instance}}/{{use_case_name}}", + "--directory {{output_directory}}/{{instance}}/{{use_case_name}}" ], - "outputs": [], "scalability": { - "directory": "{{output_directory}}/{{use_case_name}}/{{instance}}/cem/", + "directory": "{{output_directory}}/{{instance}}/{{use_case_name}}/cem/", "stages": [ { "name":"", - "filepath": "instances/np_{{parameters.nb_tasks.tasks.value}}/logs/timers.json", + "filepath": "instances/np_{{parameters.resources.tasks.value}}/logs/timers.json", "format": "json", - "variables_path":"*" + "variables_path":["*.timeElapsed.max","execute.subtimers.*.subtimers.*.timeElapsed.max","execute.subtimers.*.timeElapsed.max"] }, { "name":"", "filepath": "logs/timers.json", "format": "json", - "variables_path":"*" + "variables_path":["*.timeElapsed.max","*.subtimers.*.subtimers.*.timeElapsed.max"] } ] }, @@ -48,14 +57,14 @@ }, "parameters": [ { - "name": "nb_tasks", + "name": "resources", "sequence": [ - {"tasks_per_node" : 8, "tasks":8, "exclusive_access":true}, - {"tasks_per_node" : 16, "tasks":16, "exclusive_access":true}, - {"tasks_per_node" : 32, "tasks":32, "exclusive_access":true}, - {"tasks_per_node" : 64, "tasks":64, "exclusive_access":true}, - {"tasks_per_node" : 128, "tasks":128, "exclusive_access":true}, - {"tasks_per_node" : 128, "tasks":256, "exclusive_access":true} + {"tasks":32, "exclusive_access":true}, + {"tasks":64, 
"exclusive_access":true}, + {"tasks":128, "exclusive_access":true}, + {"tasks":256, "exclusive_access":true}, + {"tasks":384, "exclusive_access":true}, + {"tasks":512, "exclusive_access":true} ] } ] diff --git a/config/machines/gaya.json b/config/machines/gaya.json index 47f15c64..072c4823 100644 --- a/config/machines/gaya.json +++ b/config/machines/gaya.json @@ -1,18 +1,18 @@ { "machine": "gaya", - "targets":["production:apptainer:"], - "execution_policy": "serial", + "targets":["public:builtin:hpcx"], + "execution_policy": "async", "reframe_base_dir":"$PWD/build/reframe", "reports_base_dir":"$PWD/reports/", //If input_user_dir exists, input_dataset_base_dir is supposed to be temporary. Will be deleted after test. // If input_user_dir DOES NOT exist, input_datates_base_dir will not be deleted "input_dataset_base_dir":"/data/scratch/cladellash/feelppdb/input_data", - "input_user_dir":"/home/u2/cladellash/largedataexample/input_data/", + // "input_user_dir":"/home/u2/cladellash/largedataexample/input_data/", "output_app_dir":"/data/scratch/cladellash/feelppdb", "containers":{ "apptainer":{ - // "cachedir": "/home/u2/cladellash/.apptainer/cache", - // "tmpdir": "/data/scratch/cladellash/images/tmp", + "cachedir": "/home/u2/cladellash/.apptainer/cache", + "tmpdir": "/data/scratch/cladellash/images/tmp", "image_base_dir":"/data/scratch/cladellash/images", "options":[ "--sharens", diff --git a/config/toolbox_heat/heat_plots.json b/config/toolbox_heat/heat_plots.json index 2fac1b4d..0978a1cf 100644 --- a/config/toolbox_heat/heat_plots.json +++ b/config/toolbox_heat/heat_plots.json @@ -28,7 +28,7 @@ "label": "Measures" }, "aggregations":[ - {"column":"resources.tasks","agg":"filter:640"} + {"column":"resources.tasks","agg":"filter:768"} ] }, { @@ -57,6 +57,7 @@ "label":"Performance variable" }, "aggregations":[ + {"column":"repeat", "agg":"min"}, {"column":"discretization","agg":"filter:P1"} ] }, @@ -204,6 +205,58 @@ "aggregations":[ {"column":"discretization","agg":"filter:P3"} ] + }, + { + "title": "Algebraic-solve execution time", + "plot_types": [ "grouped_bar" ], + "transformation": "performance", + "variables": [ + "Solve_algebraic-solve" + ], + "names": [ ], + "xaxis": { + "parameter": "resources.tasks", + "label": "Number of tasks" + }, + "yaxis": { + "label": "execution time (s)" + }, + "color_axis":{ + "parameter":"mesh", + "label":"Mesh" + }, + "secondary_axis":{ + "parameter":"discretization", + "label":"Discretization" + }, + "aggregations":[ + {"column":"performance_variable","agg":"filter:Solve_algebraic-solve"} + ] + }, + { + "title":"Number of iterations of GMRES", + "plot_types":["scatter"], + "transformation":"performance", + "variables":["Solve_ksp-niter"], + "names":[], + "xaxis":{ + "parameter":"resources.tasks", + "label":"Number of tasks" + }, + "yaxis":{ + "label":"Number of iterations" + }, + "color_axis":{ + "parameter":"mesh", + "label":"Mesh" + }, + "secondary_axis":{ + "parameter":"discretization", + "label":"Discretization" + }, + "aggregations":[ + {"column":"performance_variable","agg":"filter:Solve_ksp-niter"} + ] } ] } \ No newline at end of file diff --git a/config/toolbox_heat/thermal_bridges_case_3.json b/config/toolbox_heat/thermal_bridges_case_3.json index 2ec2d92b..11075106 100644 --- a/config/toolbox_heat/thermal_bridges_case_3.json +++ b/config/toolbox_heat/thermal_bridges_case_3.json @@ -48,17 +48,12 @@ "--case.discretization {{parameters.discretization.value}}", "--heat.json.patch='{\"op\": \"replace\",\"path\": 
\"/Meshes/heat/Import/filename\",\"value\": \"{{platforms.{{machine.platform}}.input_dir}}/{{input_file_dependencies.mesh_json}}\" }'" ], - "outputs": [ - { - "filepath": "{{output_directory}}/{{instance}}/{{use_case_name}}/heat.measures/values.csv", - "format": "csv" - } - ], "additional_files":{ "parameterized_descriptions_filepath":"{{output_directory}}/{{instance}}/{{use_case_name}}/heat.information.adoc" }, "scalability": { "directory": "{{output_directory}}/{{instance}}/{{use_case_name}}", + "clean_directory":false, "stages": [ { "name": "Constructor", @@ -73,7 +68,19 @@ { "name": "Solve", "filepath": "heat.scalibility.HeatSolve.data", - "format": "tsv" + "format": "tsv", + "units":{ + "*":"s", + "ksp-niter":"iter" + } + }, + { + "name":"Outputs", + "filepath": "heat.measures/values.csv", + "format": "csv", + "units":{ + "*":"W" + } } ] }, @@ -87,7 +94,7 @@ "zip":[ { "name":"tasks", - "sequence":[128,256,384,512,640,768,896,1024,1152,1280] + "sequence":[64] }, { "name":"exclusive_access", @@ -112,9 +119,9 @@ "name":"discretization", "sequence":["P1","P2","P3"], "conditions":{ - "P1":[{ "resources.tasks":[128,256,384,512,640,768] }], - "P2":[{ "resources.tasks":[768,896,1024,1152,1280] }], - "P3":[{ "resources.tasks":[640,768,896,1024,1152,1280] }] + "P1":[{ "resources.tasks":[64] }], + "P2":[{ "resources.tasks":[] }], + "P3":[{ "resources.tasks":[] }] } }, { diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index 864e2231..4be9eeec 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -4,4 +4,4 @@ ifdef::env-github,env-browser,env-vscode[:imagesprefix: ../images/] = Benchmarking :page-layout: toolboxes :page-tags: catalog, catalog-index -:docdatetime: 2025-01-22T11:42:45 \ No newline at end of file +:docdatetime: 2025-02-10T15:33:05 \ No newline at end of file diff --git a/docs/modules/tutorial/pages/configurationfiles/plots.adoc b/docs/modules/tutorial/pages/configurationfiles/plots.adoc index dd9f1398..820a210d 100644 --- a/docs/modules/tutorial/pages/configurationfiles/plots.adoc +++ b/docs/modules/tutorial/pages/configurationfiles/plots.adoc @@ -120,7 +120,7 @@ plot_config = Plot(**{ "plot_types": [ "stacked_bar", "grouped_bar" ], "transformation": "performance", "variables": [ "computation_time","communication_time" ], - "names": ["Time"], + "names": ["Computation","Comunication"], "xaxis":{ "parameter":"nb_tasks.tasks", "label":"Number of tasks" @@ -148,7 +148,7 @@ plot_config = Plot(**{ "plot_types": [ "stacked_bar", "grouped_bar" ], "transformation": "relative_performance", "variables": [ "computation_time","communication_time" ], - "names": ["Time"], + "names": ["Computation","Comunication"], "xaxis":{ "parameter":"nb_tasks.tasks", "label":"Number of tasks" @@ -179,7 +179,7 @@ plot_config = Plot(**{ "plot_types": [ "stacked_bar", "grouped_bar" ], "transformation": "speedup", "variables": [ "computation_time","communication_time" ], - "names": ["Time"], + "names": ["Computation","Comunication"], "xaxis":{ "parameter":"nb_tasks.tasks", "label":"Number of tasks" @@ -208,7 +208,7 @@ figures = FigureFactory.create(Plot(**{ "plot_types": [ "scatter" ], "transformation": "performance", "variables": [ "computation_time","communication_time" ], - "names": ["Time"], + "names": ["Computation","Comunication"], "color_axis":{ "parameter":"performance_variable", "label":"Performance variable" @@ -236,7 +236,7 @@ figures = FigureFactory.create(Plot(**{ "plot_types": [ "stacked_bar" ], "transformation": "performance", 
"variables": [ "computation_time","communication_time" ], - "names": ["Time"], + "names": ["Computation","Comunication"], "color_axis":{ "parameter":"performance_variable", "label":"Performance variable" @@ -265,7 +265,7 @@ figures = FigureFactory.create(Plot(**{ "plot_types": [ "grouped_bar" ], "transformation": "performance", "variables": [ "computation_time","communication_time" ], - "names": ["Time"], + "names": ["Computation","Comunication"], "color_axis":{ "parameter":"performance_variable", "label":"Performance variable" @@ -293,7 +293,7 @@ figures = FigureFactory.create(Plot(**{ "plot_types": [ "table" ], "transformation": "performance", "variables": [ "computation_time","communication_time" ], - "names": ["Time"], + "names": ["Computation","Comunication"], "color_axis":{ "parameter":"performance_variable", "label":"Performance variable" diff --git a/src/feelpp/benchmarking/reframe/__main__.py b/src/feelpp/benchmarking/reframe/__main__.py index 8675c932..7bb54976 100644 --- a/src/feelpp/benchmarking/reframe/__main__.py +++ b/src/feelpp/benchmarking/reframe/__main__.py @@ -43,6 +43,18 @@ def main_cli(): app_reader.updateConfig(machine_reader.processor.flattenDict(machine_reader.config,"machine")) app_reader.updateConfig() #Update with own field + #PULL IMAGES + if not parser.args.dry_run: + for platform_name, platform_field in app_reader.config.platforms.items(): + if not platform_field.image or not platform_field.image.remote: + continue + + if platform_name == "apptainer": + subprocess.run(f"apptainer pull -F {platform_field.image.name} {platform_field.image.remote}", shell=True) + elif platform_name == "docker": + raise NotImplementedError("Pulling docker image is not yet implemented") + + reframe_cmd = cmd_builder.buildCommand( app_reader.config.timeout) exit_code = subprocess.run(reframe_cmd, shell=True) diff --git a/src/feelpp/benchmarking/reframe/config/configPlots.py b/src/feelpp/benchmarking/reframe/config/configPlots.py index 0ac89960..b94ae75c 100644 --- a/src/feelpp/benchmarking/reframe/config/configPlots.py +++ b/src/feelpp/benchmarking/reframe/config/configPlots.py @@ -29,6 +29,11 @@ class Plot(BaseModel): color_axis:Optional[PlotAxis] = None layout_modifiers: Optional[Dict] = {} + @model_validator(mode="after") + def checkNames(self): + if self.variables and self.names: + assert len(self.variables)==len(self.names) + return self @field_validator("xaxis","secondary_axis", mode="after") @classmethod diff --git a/src/feelpp/benchmarking/reframe/config/configSchemas.py b/src/feelpp/benchmarking/reframe/config/configSchemas.py index faf4fdfa..b269880a 100644 --- a/src/feelpp/benchmarking/reframe/config/configSchemas.py +++ b/src/feelpp/benchmarking/reframe/config/configSchemas.py @@ -13,6 +13,13 @@ class Stage(BaseModel): filepath:str format:Literal["csv","tsv","json"] variables_path:Optional[Union[str,List[str]]] = [] + units: Optional[Dict[str,str]] = {} + + @field_validator("units",mode="before") + @classmethod + def parseUnits(cls,v): + v["*"] = v.get("*","s") + return v @model_validator(mode="after") def checkFormatOptions(self): @@ -36,32 +43,18 @@ class Scalability(BaseModel): directory: str stages: List[Stage] custom_variables:Optional[List[CustomVariable]] = [] + clean_directory: Optional[bool] = False -class AppOutput(BaseModel): - filepath: str - format: str class Image(BaseModel): - protocol:Optional[Literal["oras","docker","library","local"]] = None + remote: Optional[str] = None name:str - @model_validator(mode="before") - def extractProtocol(self): - """ 
Extracts the image protocol (oras, docker, etc..) or if a local image is provided. - If local, checks if the image exists """ - - self["protocol"] = self["name"].split("://")[0] if "://" in self["name"] else "local" - - if self["protocol"] not in ["oras","docker","library","local"]: - raise ValueError("Unkown Protocol") - - return self - @field_validator("name", mode="after") @classmethod def checkImage(cls,v,info): - if info.data["protocol"] == "local" and not ("{{" in v or "}}" in v) : + if not info.data["remote"] and not ("{{" in v or "}}" in v) : if not os.path.exists(v): if info.context and info.context.get("dry_run", False): print(f"Dry Run: Skipping image check for {v}") @@ -107,7 +100,6 @@ class ConfigFile(BaseModel): use_case_name: str options: List[str] env_variables:Optional[Dict] = {} - outputs: List[AppOutput] input_file_dependencies: Optional[Dict[str,str]] = {} scalability: Scalability sanity: Sanity diff --git a/src/feelpp/benchmarking/reframe/config/machineConfigs/gaya.py b/src/feelpp/benchmarking/reframe/config/machineConfigs/gaya.py index bf9e4f2a..7c97c0bd 100644 --- a/src/feelpp/benchmarking/reframe/config/machineConfigs/gaya.py +++ b/src/feelpp/benchmarking/reframe/config/machineConfigs/gaya.py @@ -41,6 +41,41 @@ 'memory_per_node':500 } }, + { + 'name': 'public', + 'scheduler': 'squeue', + 'launcher': 'mpiexec', + 'max_jobs': 8, + 'access': ['--partition=public'], + 'environs': ['default','hpcx'], + 'prepare_cmds': [ + 'source /etc/profile.d/modules.sh', + "export PATH=/opt/apptainer/v1.3.5/apptainer/bin/:$PATH" + ], + 'processor': { + 'num_cpus': 128 + }, + 'resources': [ + { + 'name':'launcher_options', + 'options':['-bind-to','core'] + } + ], + 'devices': [ + { + 'type': 'cpu', + 'num_devices': 6 + } + ], + 'container_platforms':[ + { + 'type': 'Apptainer' + } + ], + 'extras':{ + 'memory_per_node':500 + } + }, ] } ], @@ -50,14 +85,14 @@ 'modules': [], 'cc': 'clang', 'cxx': 'clang++', - 'target_systems': ['gaya:production'] + 'target_systems': ['gaya:production','gaya:public'] }, { 'name': 'hpcx', 'modules': ['hpcx'], 'cc': 'clang', 'cxx': 'clang++', - 'target_systems': ['gaya:production'] + 'target_systems': ['gaya:production','gaya:public'] } ] } diff --git a/src/feelpp/benchmarking/reframe/outputs.py b/src/feelpp/benchmarking/reframe/outputs.py index 7922fb43..bf396aca 100644 --- a/src/feelpp/benchmarking/reframe/outputs.py +++ b/src/feelpp/benchmarking/reframe/outputs.py @@ -1,75 +1,2 @@ -import reframe.utility.sanity as sn -import os, re, shutil - - -class OutputsHandler: - """Class to handle application outputs and convert them to reframe readable objects""" - def __init__(self,outputs_config,additional_files_config = None): - self.config = outputs_config - self.additional_files_config = additional_files_config - - def getOutputs(self): - """ Opens and parses the all the outputs files provided on the configuration - Returns: - dict[str,performance_function] : Dictionary with deferrable functions containing the value of the outputs. 
- """ - rfm_outputs = {} - for output_info in self.config: - if output_info.format == "csv": - number_regex = re.compile(r'^-?\d+(\.\d+)?([eE][-+]?\d+)?$') - rows = sn.extractall( - r'^(?!\s*$)(.*?)[\s\r\n]*$', - output_info.filepath, - 0, - conv=lambda x: [float(col.strip()) if number_regex.match(col.strip()) else col.strip() for col in x.split(',') if col.strip()] - ) - header = rows[0] - rows = rows[1:] - - assert all ( len(header.evaluate()) == len(row) for row in rows), f"CSV File {output_info.filepath} is incorrectly formatted" - - for line in range(len(rows.evaluate())): - for i,col in enumerate(header): - rfm_outputs.update({ f"{col}" : sn.make_performance_function(rows[line][i],unit="") }) - else: - raise NotImplementedError(f"Output extraction not implemented for format {output_info.format}") - - return rfm_outputs - - def copyDescription(self,dir_path, name): #TODO: This can be redesigned... or factor it at least - """ Searches the file on the additional_files.description_filepath configuration and copies it inside dir_path/partials - Args: - dir_path (str) : Directory where the reframe report is exported to - name(str): name of the new file (without extension) - """ - if self.additional_files_config and self.additional_files_config.description_filepath: - file_extension = self.additional_files_config.description_filepath.split(".")[-1] if "." in self.additional_files_config.description_filepath else None - - outdir = os.path.join(dir_path,"partials") - if not os.path.exists(outdir): - os.mkdir(outdir) - - filename = f"{name}.{file_extension}" if file_extension else name - - shutil.copy2( self.additional_files_config.description_filepath, os.path.join(outdir,filename) ) - - - - def copyParametrizedDescriptions(self,dir_path,name): - """ Searches the files on the additional_files.parameterized_descriptions_filepath configuration and copy them inside dir_path/partials - Args: - dir_path (str) : Directory where the reframe report is exported to - name(str): name of the new file (without extension) - """ - - if self.additional_files_config and self.additional_files_config.parameterized_descriptions_filepath: - file_extension = self.additional_files_config.parameterized_descriptions_filepath.split(".")[-1] if "." 
in self.additional_files_config.parameterized_descriptions_filepath else None - - outdir = os.path.join(dir_path,"partials") - if not os.path.exists(outdir): - os.mkdir(outdir) - - filename = f"{name}.{file_extension}" if file_extension else name - - shutil.copy2( self.additional_files_config.parameterized_descriptions_filepath, os.path.join(outdir,filename) ) +import os, shutil diff --git a/src/feelpp/benchmarking/reframe/regression.py b/src/feelpp/benchmarking/reframe/regression.py index 28a295f0..3b0d7d8b 100644 --- a/src/feelpp/benchmarking/reframe/regression.py +++ b/src/feelpp/benchmarking/reframe/regression.py @@ -2,7 +2,6 @@ from feelpp.benchmarking.reframe.setup import ReframeSetup from feelpp.benchmarking.reframe.validation import ValidationHandler from feelpp.benchmarking.reframe.scalability import ScalabilityHandler -from feelpp.benchmarking.reframe.outputs import OutputsHandler import shutil, os @@ -16,7 +15,6 @@ class RegressionTest(ReframeSetup): def initHandlers(self): self.validation_handler = ValidationHandler(self.app_setup.reader.config.sanity) self.scalability_handler = ScalabilityHandler(self.app_setup.reader.config.scalability) - self.outputs_handler = OutputsHandler(self.app_setup.reader.config.outputs,self.app_setup.reader.config.additional_files) @run_after('run') def executionGuard(self): @@ -60,13 +58,17 @@ def setPerfVars(self): self.perf_variables.update( self.scalability_handler.getCustomPerformanceVariables(self.perf_variables) ) - self.perf_variables.update( - self.outputs_handler.getOutputs() - ) @run_before('performance') def copyParametrizedFiles(self): - self.outputs_handler.copyParametrizedDescriptions(self.report_dir_path,self.hashcode) + self.app_setup.reset(self.machine_setup.reader.config) + self.app_setup.updateConfig({ "instance" : str(self.hashcode) }) + self.app_setup.copyParametrizedDescriptionFile(self.report_dir_path,name=self.hashcode) + + @run_before("cleanup") + def removeDirectories(self): + if self.app_setup.reader.config.scalability.clean_directory: + self.app_setup.cleanupDirectories() @sanity_function def sanityCheck(self): diff --git a/src/feelpp/benchmarking/reframe/scalability.py b/src/feelpp/benchmarking/reframe/scalability.py index 4acaa3fb..8477ce2a 100644 --- a/src/feelpp/benchmarking/reframe/scalability.py +++ b/src/feelpp/benchmarking/reframe/scalability.py @@ -3,16 +3,146 @@ from feelpp.benchmarking.reframe.config.configReader import TemplateProcessor -#TODO: Factor this with outputs. 
Consider strategy pattern for formats +class Extractor: + def __init__(self,filepath,stage_name, units): + self.filepath = filepath + self.stage_name = stage_name + self.units = units + + def _getPerfVars(self,columns,vars): + perf_variables = {} + nb_rows = len(vars.evaluate()) + for line in range(nb_rows): + for i, col in enumerate(columns): + perfvar_name = f"{self.stage_name}_{col}" if self.stage_name else col + if nb_rows > 1: + perfvar_name = f"{perfvar_name}_{line}" + perf_variables[perfvar_name] = sn.make_performance_function(vars[line][i],unit=self.units.get(col,self.units["*"])) + + return perf_variables + + def _extractVariables(self): + raise NotImplementedError("Not to be called from base class") + + def extract(self): + columns,vars = self._extractVariables() + return self._getPerfVars(columns,vars) + +class TsvExtractor(Extractor): + def __init__(self,filepath,stage_name,units,index): + super().__init__(filepath,stage_name,units) + self.index = index + + def _getFileContent(self): + with open(self.filepath,"r") as f: + content = f.readlines() + return content + + def _extractVariables(self): + content = self._getFileContent() + #WARNING: This assumes that index is in column 0 + columns = re.sub("\s+"," ",content[0].replace("# ","")).strip().split(" ") + vars = sn.extractall_s( + patt=rf'^{self.index}[\s]+' + r'([0-9e\-\+\.]+)[\s]+'*(len(columns)-1), + string="\n".join(content[1:]), + conv=float, + tag=range(1,len(columns)) + )[0] + return columns[1:],sn.defer([vars]) + + +class CsvExtractor(Extractor): + def __init__(self, filepath, stage_name, units): + super().__init__(filepath, stage_name, units) + + def _extractVariables(self): + number_regex = re.compile(r'^-?\d+(\.\d+)?([eE][-+]?\d+)?$') + vars = sn.extractall( + r'^(?!\s*$)(.*?)[\s\r\n]*$', + self.filepath, + 0, + conv=lambda x: [float(col.strip()) if number_regex.match(col.strip()) else col.strip() for col in x.split(',') if col.strip()] + ) + columns = vars[0] + vars = vars[1:] + assert all ( len(columns.evaluate()) == len(row) for row in vars), f"CSV File {self.filepath} is incorrectly formatted" + return columns,vars + + +class JsonExtractor(Extractor): + def __init__(self, filepath, stage_name, units, variables_path): + super().__init__(filepath, stage_name, units) + self.variables_path = variables_path + + def _getFileContent(self): + with open(self.filepath,"r") as f: + content = json.load(f) + return content + + def _recursiveExtract(self,varpath, content, fields=None, prefix=""): + """ Extract values from a dictionary following a path (varpath) containing multiple wildcards""" + if fields is None: + fields = {} + + splitted_keys = varpath.split("*", 1) + left_keys = splitted_keys[0].strip(".").split(".") + + j = content + for left_key in left_keys: + if left_key: + j = j[left_key] + + if len(splitted_keys) == 1: + fields[left_keys[-1]] = j + else: + right_keys = splitted_keys[1].strip(".") + if "*" in right_keys: + for wildcard, subcontent in j.items(): + self._recursiveExtract(right_keys, subcontent, fields, prefix=f"{prefix}{wildcard}.") + else: + right_keys = right_keys.split(".") + for wildcard, subcontent in j.items(): + value = subcontent + for right_key in right_keys: + if right_key: + value = value[right_key] + fields[f"{prefix}{wildcard}"] = value + + return fields + + def _extractVariables(self): + items = {} + content = self._getFileContent() + for varpath in self.variables_path: + fields = self._recursiveExtract(varpath,content) + fields = TemplateProcessor.flattenDict(fields) + 
items.update(fields) + return items.keys(),sn.defer([[sn.defer(v) for v in items.values()]]) + + +class ExtractorFactory: + """Factory class for extractor strategies""" + @staticmethod + def create(stage,directory,index=None): + filepath = os.path.join(directory,stage.filepath) + if stage.format == "csv": + return CsvExtractor(filepath=filepath, stage_name = stage.name, units=stage.units) + elif stage.format == "tsv": + return TsvExtractor(filepath=filepath,stage_name = stage.name,index=index, units=stage.units) + elif stage.format == "json": + return JsonExtractor(filepath=filepath,stage_name = stage.name, variables_path=stage.variables_path, units=stage.units) + else: + raise NotImplementedError + + class ScalabilityHandler: """ Class to handle scalability related attributes""" def __init__(self,scalability_config): self.directory = scalability_config.directory self.stages = scalability_config.stages self.custom_variables = scalability_config.custom_variables - self.filepaths = {k.name if k.name else k.filepath : os.path.join(self.directory,k.filepath) for k in self.stages} - def getPerformanceVariables(self,index): + def getPerformanceVariables(self,index=None): """ Opens and parses the performance variable values depending on the config setup. Args: index (numerical | string). Key/index to find in the scalability file, depending on the format. @@ -20,81 +150,22 @@ def getPerformanceVariables(self,index): """ perf_variables = {} for stage in self.stages: - if stage.format == "csv": - pass - elif stage.format == "tsv": - #WARNING: This assumes that index is in column 0 - with open(self.filepaths[stage.name if stage.name else stage.filepath],"r") as f: - lines = f.readlines() - - columns = re.sub("\s+"," ",lines[0].replace("# ","")).strip().split(" ") - - vars = sn.extractall_s( - patt=rf'^{index}[\s]+' + r'([0-9e\-\+\.]+)[\s]+'*(len(columns)-1), - string="\n".join(lines[1:]), - conv=float, - tag=range(1,len(columns)) - )[0] - - - for i, col in enumerate(columns[1:]): #UNIT TEMPORARY HOTFIX - perf_variables.update( { f"{stage.name}_{col}" if stage.name else col: sn.make_performance_function(vars[i],unit="iter" if col.endswith("-niter") else "s") }) - elif stage.format == "json": - for varpath in stage.variables_path: - splitted_keys = varpath.split("*") - - if len(splitted_keys) > 2: - raise NotImplementedError(f"More than one wildcard is not supported. 
Number of wildcards: {len(splitted_keys)}")
-
-                    left_keys = splitted_keys[0].strip(".").split(".")
-
-
-                    with open(self.filepaths[stage.name if stage.name else stage.filepath],"r") as f:
-                        j = json.load(f)
-
-                    for left_key in left_keys:
-                        if left_key:
-                            j = j[left_key]
-
-                    fields = {}
-                    if len(splitted_keys) == 1:
-                        fields[left_keys[-1]] = j
-                    else:
-                        right_keys = splitted_keys[1].strip(".").split(".")
-
-                        wildcards = j.keys()
-                        for wildcard in wildcards:
-                            fields[wildcard] = j[wildcard]
-                            for right_key in right_keys:
-                                if right_key:
-                                    fields[wildcard] = fields[wildcard][right_key]
-
-                    fields = TemplateProcessor.flattenDict(fields)
-
-                    for k,v in fields.items():
-                        perf_variables.update( {
-                            f"{stage.name}_{k}" if stage.name else k
-                            : sn.make_performance_function(sn.defer(v),unit="s")
-                        })
-
-            else:
-                raise NotImplementedError
+            extractor = ExtractorFactory.create(stage,self.directory,index)
+            perf_variables.update( extractor.extract() )
 
         return perf_variables
 
     @staticmethod
     def aggregateCustomVar(op,column_values):
-        if op == "sum":
-            return sum(column_values)
-        elif op == "min":
-            return min(column_values)
-        elif op =="max":
-            return max(column_values)
-        elif op == "mean":
-            return sum(column_values)/len(column_values)
-        else:
+        ops = {
+            "sum":sum,
+            "min":min,
+            "max":max,
+            "mean": lambda v : sum(v)/len(v)
+        }
+        if op not in ops:
             raise NotImplementedError(f"Operation {op} is not implemented")
-
+        return ops[op](column_values)
 
     def getCustomPerformanceVariables(self,perfvars):
         """ Creates custom aggregated performance variables from existing ones
@@ -108,7 +179,6 @@
 
         computed_vars = {}
 
-
         def evaluateCustomVariable(custom_var):
             if custom_var.name in computed_vars:
                 return computed_vars[custom_var.name]
@@ -127,7 +197,6 @@
 
             return custom_var_value
 
-
         for custom_var in self.custom_variables:
             custom_perfvars[custom_var.name] = sn.make_performance_function(
                 sn.defer(evaluateCustomVariable(custom_var)),unit=custom_var.unit
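`aggregateCustomVar` is now table-driven (sum, min, max, mean), and `getCustomPerformanceVariables` resolves custom variables that may reference other custom variables by name. A self-contained sketch of that resolution, using plain floats instead of deferred ReFrame expressions and an invented configuration; unlike the real `evaluateCustomVariable`, which recurses, this sketch assumes each custom variable is listed after the ones it references:

```python
class CustomVar:
    """Stand-in for the CustomVariable config entry (name, columns, op, unit)."""
    def __init__(self, name, columns, op, unit="s"):
        self.name, self.columns, self.op, self.unit = name, columns, op, unit

ops = {"sum": sum, "min": min, "max": max, "mean": lambda v: sum(v) / len(v)}

# Existing (already extracted) performance variables:
perfvars = {"Constructor_init": 2.0, "Solve_solve": 8.0}

custom_vars = [
    CustomVar("setup_total", ["Constructor_init", "Solve_solve"], "sum"),
    CustomVar("mean_of_totals", ["setup_total"], "mean"),  # references another custom var
]

computed = {}
for var in custom_vars:
    # Each column is either a real perfvar or a previously computed custom var.
    values = [perfvars.get(c, computed.get(c)) for c in var.columns]
    computed[var.name] = ops[var.op](values)

print(computed)  # {'setup_total': 10.0, 'mean_of_totals': 10.0}
```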
diff --git a/src/feelpp/benchmarking/reframe/setup.py b/src/feelpp/benchmarking/reframe/setup.py
index c0033f23..81055c1e 100644
--- a/src/feelpp/benchmarking/reframe/setup.py
+++ b/src/feelpp/benchmarking/reframe/setup.py
@@ -3,7 +3,6 @@
 from feelpp.benchmarking.reframe.config.configReader import ConfigReader
 from feelpp.benchmarking.reframe.config.configSchemas import ConfigFile
 from feelpp.benchmarking.reframe.config.configMachines import MachineConfig
-from feelpp.benchmarking.reframe.outputs import OutputsHandler
 from feelpp.benchmarking.reframe.resources import ResourceHandler
 
 
@@ -164,6 +163,28 @@ def cleanupDirectories(self):
         if os.path.exists(self.reader.config.scalability.directory):
             shutil.rmtree(self.reader.config.scalability.directory)
 
+    def copyFile(self,dir_path,name,filepath):
+        """ Copies the file from filepath to dir_path/name"""
+        if not filepath:
+            return
+        file_extension = filepath.split(".")[-1] if "." in filepath else None
+        outdir = os.path.join(dir_path,"partials")
+        if not os.path.exists(outdir):
+            os.mkdir(outdir)
+        filename = f"{name}.{file_extension}" if file_extension else name
+        shutil.copy2( filepath, os.path.join(outdir,filename) )
+
+    def copyDescriptionFile(self,dir_path,name):
+        """ copies the file from the description_filepath field"""
+        if self.reader.config.additional_files and self.reader.config.additional_files.description_filepath:
+            self.copyFile(dir_path,name,self.reader.config.additional_files.description_filepath)
+
+    def copyParametrizedDescriptionFile(self,dir_path,name):
+        """ copies the file from the parameterized_descriptions_filepath field"""
+        if self.reader.config.additional_files and self.reader.config.additional_files.parameterized_descriptions_filepath:
+            self.copyFile(dir_path,name,self.reader.config.additional_files.parameterized_descriptions_filepath)
+
+
     def setExecutable(self, rfm_test, machine_config):
         """ Sets the executable and executable_opts attrbiutes
@@ -244,9 +265,7 @@ def setupAfterInit(self):
         self.app_setup.setupAfterInit(self)
         self.machine_setup.setupAfterInit(self,self.app_setup.reader.config)
 
-        #Used only to copy description
-        temp_outputs_handler = OutputsHandler(self.app_setup.reader.config.outputs,self.app_setup.reader.config.additional_files)
-        temp_outputs_handler.copyDescription(self.report_dir_path,name="description")
+        self.app_setup.copyDescriptionFile(self.report_dir_path,name="description")
 
     @run_after('setup')
     def setupAfterSetup(self):
diff --git a/src/feelpp/benchmarking/report/atomicReports/atomicReport.py b/src/feelpp/benchmarking/report/atomicReports/atomicReport.py
index 049b1dc7..380f510b 100644
--- a/src/feelpp/benchmarking/report/atomicReports/atomicReport.py
+++ b/src/feelpp/benchmarking/report/atomicReports/atomicReport.py
@@ -195,9 +195,8 @@
         """
         hash_params_headers, flat_hash_params = self.parseHashMap()
 
-        model=AtomicReportModel( self.runs )
         view=AtomicReportView( self.plots_config )
-        controller=AtomicReportController(model,view)
+        controller=AtomicReportController(self.model,view)
 
         renderer.render(
             f"{base_dir}/{self.filename()}.adoc",
@@ -211,9 +210,7 @@
                 flat_hash_param_map = flat_hash_params,
                 hash_params_headers = hash_params_headers,
                 description_path = self.description_path,
-                figures = controller.generateData("html"),
-                figure_csvs = controller.generateData("csv"),
-                figure_pgfs = controller.generateData("pgf")
+                figures = controller.generateAll()
             )
         )
 
diff --git a/src/feelpp/benchmarking/report/base/baseComponent.py b/src/feelpp/benchmarking/report/base/baseComponent.py
index 5a5caf69..c250f43d 100644
--- a/src/feelpp/benchmarking/report/base/baseComponent.py
+++ b/src/feelpp/benchmarking/report/base/baseComponent.py
@@ -142,9 +142,7 @@ def createOverview(self,base_dir,renderer,parents,plots_config,master_df):
             data = dict(
                 parent_catalogs = "-".join([parent.id for parent in parents]),
                 parents = parents,
-                figures = controller.generateData("html"),
-                figure_csvs = controller.generateData("csv"),
-                figure_pgfs = controller.generateData("pgf")
+                figures = controller.generateAll()
             )
         )
 
diff --git a/src/feelpp/benchmarking/report/base/controller.py b/src/feelpp/benchmarking/report/base/controller.py
index 02b55e6e..582ac5a5 100644
--- a/src/feelpp/benchmarking/report/base/controller.py
+++ b/src/feelpp/benchmarking/report/base/controller.py
@@ -11,41 +11,23 @@ def __init__(self, model, view):
         self.model = model
         self.view = 
view - def generatePlotly(self): - """ Creates plotly figures for each plot specified on the view config file - Returns a list of plotly figures. - """ - return [ figure.createFigure(self.model.master_df) for figure in self.view.figures ] - - def generatePlotlyHtml(self): - """ Creates plotly figures in html for each plot specified on the view config file - Returns a list of plotly HTML figures - """ - return [ figure.createFigureHtml(self.model.master_df) for figure in self.view.figures ] - - def generateTikz(self): - """ Creates Tikz/Pgf figures for each plot specified on the view config file - Returns: - list[str] LaTeX pgf plots. - """ - return [ figure.createTex(self.model.master_df) for figure in self.view.figures ] - - def generateCsv(self): - """ Create a list containing the data for each plot specified on the view config in CSV format. - Returns (list[str]): List of csv data. - """ - return [ figure.createCsv(self.model.master_df) for figure in self.view.figures ] - - - def generateData(self,format): - """ Creates a list of data depending on the desired format, using the plot configuration and the model's master dataframe""" - if format == "plotly": - return self.generatePlotly() - elif format == "html": - return self.generatePlotlyHtml() - elif format in ["tikz", "pgf"]: - return self.generateTikz() - elif format == "csv": - return self.generateCsv() - else: - raise NotImplementedError \ No newline at end of file + def generateAll(self): + return [ + self.generateFigure(figure,plot_config.plot_types) + for figure,plot_config in zip(self.view.figures,self.view.plots_config) + ] + + def generateFigure(self,figure,plot_types): + return { + "plot_types": plot_types, + "subfigures": [self.generateSubfigure(subfigure) for subfigure in figure] + } + + def generateSubfigure(self, subfigure): + return { + "exports": [ + { "display_text":"CSV", "filename":"data.csv", "data":subfigure.createCsv(self.model.master_df) }, + { "display_text":"LaTeX", "filename":"figure.tex", "data":subfigure.createTex(self.model.master_df) }, + ], + "html": subfigure.createFigureHtml(self.model.master_df) + } \ No newline at end of file diff --git a/src/feelpp/benchmarking/report/base/view.py b/src/feelpp/benchmarking/report/base/view.py index 10f2d2db..45bd9fbb 100644 --- a/src/feelpp/benchmarking/report/base/view.py +++ b/src/feelpp/benchmarking/report/base/view.py @@ -9,7 +9,4 @@ def __init__(self,plots_config): plots_config list[dict]. List with dictionaries specifying plots configuration. """ self.plots_config = [Plot(**d) for d in plots_config] - - self.figures = [] - for plot_config in self.plots_config: - self.figures.extend(FigureFactory.create(plot_config)) \ No newline at end of file + self.figures = [FigureFactory.create(plot_config) for plot_config in self.plots_config] \ No newline at end of file diff --git a/src/feelpp/benchmarking/report/figures/base.py b/src/feelpp/benchmarking/report/figures/base.py index c1aa9bca..49923bef 100644 --- a/src/feelpp/benchmarking/report/figures/base.py +++ b/src/feelpp/benchmarking/report/figures/base.py @@ -21,7 +21,9 @@ def createSimpleFigure(self,df): raise NotImplementedError("Pure virtual function. 
Not to be called from the base class") def createCsv(self,df): - return self.transformation_strategy.calculate(df).to_csv() + df = self.transformation_strategy.calculate(df) + df = self.renameColumns(df) + return df.to_csv() def createFigure(self,df, **args): """ Creates a figure from the master dataframe @@ -31,7 +33,7 @@ def createFigure(self,df, **args): go.Figure: Plotly figure corresponding to the grouped Bar type """ df = self.transformation_strategy.calculate(df) - + df = self.renameColumns(df) if isinstance(df.index,MultiIndex): figure = self.createMultiindexFigure(df, **args) else: @@ -39,6 +41,12 @@ def createFigure(self,df, **args): return figure + def renameColumns(self,df): + if self.config.variables and self.config.names: + assert len(self.config.variables) == len(self.config.names) + df = df.rename(columns = {var:name for var,name in zip(self.config.variables,self.config.names)}) + + return df class CompositeFigure: def createFigure(self, df): diff --git a/src/feelpp/benchmarking/report/figures/plotlyFigures.py b/src/feelpp/benchmarking/report/figures/plotlyFigures.py index 19dc996f..1bdf904e 100644 --- a/src/feelpp/benchmarking/report/figures/plotlyFigures.py +++ b/src/feelpp/benchmarking/report/figures/plotlyFigures.py @@ -63,6 +63,7 @@ def createSliderAnimation(self,df): fig = go.Figure() return fig + def createMultiindexFigure(self,df): """ Creates a plotly figure from a multiIndex dataframe Args: diff --git a/src/feelpp/benchmarking/report/templates/atomicOverview.adoc.j2 b/src/feelpp/benchmarking/report/templates/atomicOverview.adoc.j2 index 363fd0e8..fd78da94 100644 --- a/src/feelpp/benchmarking/report/templates/atomicOverview.adoc.j2 +++ b/src/feelpp/benchmarking/report/templates/atomicOverview.adoc.j2 @@ -11,4 +11,14 @@ - {{parent.type.title()}} : {{parent.display_name}} {% endfor %} -{%include "figures.html.j2" %} \ No newline at end of file +++++ + + +{%include "figures.html.j2" %} + + +++++ diff --git a/src/feelpp/benchmarking/report/templates/benchmark.adoc.j2 b/src/feelpp/benchmarking/report/templates/benchmark.adoc.j2 index b9edd4f3..dadee9a5 100644 --- a/src/feelpp/benchmarking/report/templates/benchmark.adoc.j2 +++ b/src/feelpp/benchmarking/report/templates/benchmark.adoc.j2 @@ -21,7 +21,15 @@ include::ROOT:{{description_path}}[leveloffset=+1] * Total Tests: {{ session_info.num_cases }} * Failures: {{ session_info.num_failures }} - +++++ + +++++ +[.scrollable] +-- |=== {% for header in hash_params_headers %} | {% if header == 'partial_filepath' or header == 'logs_filepath' %} {% else %} {{ header }} {% endif %} {% endfor %} @@ -32,9 +40,18 @@ include::ROOT:{{description_path}}[leveloffset=+1] {% endfor %} |=== +-- {% if not empty %} +++++ + {%include "figures.html.j2" %} + +++++ {% endif %} \ No newline at end of file diff --git a/src/feelpp/benchmarking/report/templates/css/figures.css b/src/feelpp/benchmarking/report/templates/css/figures.css new file mode 100644 index 00000000..e0aed620 --- /dev/null +++ b/src/feelpp/benchmarking/report/templates/css/figures.css @@ -0,0 +1,78 @@ +.figure-container { + position: relative; + margin: 1.5rem auto; + padding: 1rem; + background-color: #fff; + border: 1px solid #ddd; + border-radius: 8px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.08); +} + +.subfigure-container { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + background-color: #f9f9f9; + padding: 1rem; + border: 1px solid #ddd; + border-top: none; + border-radius: 0 0 8px 8px; +} + +.subfigure-container.active { + position: relative; 
+ opacity: 1; + pointer-events: auto; + z-index: 1; +} + +.subfigure-container.inactive { + opacity: 0; + pointer-events: none; + z-index: 0; +} + +/* Tabs container: display buttons inline with a bottom border to indicate grouping */ +.tabs-container { + display: flex; + border-bottom: 2px solid #007acc; + margin-bottom: 0.5rem; +} + +/* Figure tab button styling */ +.figure-tab { + background: transparent; + border: none; + outline: none; + padding: 0.5rem 1rem; + margin-right: 0.3rem; + font-size: 1rem; + color: #007acc; + cursor: pointer; + border-radius: 4px 4px 0 0; + transition: background-color 0.2s ease, color 0.2s ease; +} + +.figure-tab:hover, +.figure-tab.active { + background-color: #007acc; + color: #fff; +} + +.export-container button { + background-color: #007acc; + color: #fff; + border: none; + padding: 0.4rem 0.8rem; + margin-right: 0.4rem; + border-radius: 4px; + cursor: pointer; + font-size: 0.9rem; + transition: background-color 0.2s ease; +} + +.export-container button:hover { + background-color: #005fa3; +} \ No newline at end of file diff --git a/src/feelpp/benchmarking/report/templates/figures.html.j2 b/src/feelpp/benchmarking/report/templates/figures.html.j2 index e857cb43..dd7bebd0 100644 --- a/src/feelpp/benchmarking/report/templates/figures.html.j2 +++ b/src/feelpp/benchmarking/report/templates/figures.html.j2 @@ -1,36 +1,29 @@ {% for figure in figures %} -++++ - - -{{figure}} -++++ -{% endfor %} +{% set figure_i = loop.index %} +
+ {% if figure.plot_types | length > 1%} +
+ {% for plot_type in figure.plot_types %} + {% set plot_type_i = loop.index %} + + {% endfor %} +
+ {% endif %} + {% for subfigure in figure.subfigures %} + {% set subfigure_i = loop.index %} +
+
+ {% for export in subfigure.exports %} + + {% endfor %} +
-++++ - -++++ \ No newline at end of file +{% endfor %} \ No newline at end of file diff --git a/src/feelpp/benchmarking/report/templates/js/figureHelpers.js b/src/feelpp/benchmarking/report/templates/js/figureHelpers.js new file mode 100644 index 00000000..c2cee0f5 --- /dev/null +++ b/src/feelpp/benchmarking/report/templates/js/figureHelpers.js @@ -0,0 +1,26 @@ + +function downloadString(data, filename, mimeType ) { + const blob = new Blob([data], { type: `${mimeType};charset=utf-8` }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = filename; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); +} + +function switchTab(figureIndex, tabIndex) { + let subfigures = document.querySelectorAll(`[id^="subfig_${figureIndex}_"]`); + subfigures.forEach(subfig => { + subfig.classList.remove('active'); + subfig.classList.add('inactive'); + }); + + let activeSubfig = document.getElementById(`subfig_${figureIndex}_${tabIndex}`); + if(activeSubfig){ + activeSubfig.classList.remove('inactive'); + activeSubfig.classList.add('active'); + } +} \ No newline at end of file diff --git a/tests/configtest/test_benchmark.py b/tests/configtest/test_benchmark.py index 45fc11ff..f1afc305 100644 --- a/tests/configtest/test_benchmark.py +++ b/tests/configtest/test_benchmark.py @@ -7,36 +7,6 @@ -class TestImage: - """Tests for the Image schema""" - - dummy_image_path = "tests/data/configs/mockAppConfig.json" - - def test_extractProtocol(self): - """Tests the correct extraction of the protocol from the image name""" - image = Image(name=self.dummy_image_path) - assert image.protocol == "local" - assert image.name == self.dummy_image_path - - with pytest.raises(ValueError,match="Unkown Protocol"): - image = Image(name="unkown_local://tests/data/configs") - - image = Image(name="docker://test_name") - assert image.protocol == "docker" - assert image.name == "docker://test_name" - - - def test_checkImage(self): - """Tests that checking if image exists is done correctly""" - - with pytest.raises(FileNotFoundError): - image = Image(name="nonexistant_image.sif") - - #Dry run - image = Image.model_validate({"name":"nonexistant_image.sif"},context={"dry_run":True}) - assert image.protocol == "local" - - class TestPlatform: pass diff --git a/tests/data/configs/mockAppConfig.json b/tests/data/configs/mockAppConfig.json index 1f5fedab..23da17c0 100644 --- a/tests/data/configs/mockAppConfig.json +++ b/tests/data/configs/mockAppConfig.json @@ -9,8 +9,6 @@ } }, "options": [], - "outputs": [], - "additional_files":{}, "scalability": { "directory":"", "stages":[] diff --git a/tests/scalability/test_appSetup.py b/tests/scalability/test_appSetup.py new file mode 100644 index 00000000..67e52f33 --- /dev/null +++ b/tests/scalability/test_appSetup.py @@ -0,0 +1,35 @@ +import pytest +import tempfile, os +from feelpp.benchmarking.reframe.config.configMachines import MachineConfig +from feelpp.benchmarking.reframe.config.configReader import ConfigReader + + +class AdditionalFilesMocker: + def __init__(self,description_filepath="",parameterized_descriptions_filepath=""): + self.description_filepath=description_filepath + self.parameterized_descriptions_filepath = parameterized_descriptions_filepath + +class TestAdditionalFilesCopy: + + def test_copyFile(self): + """Test the copyDescription method of the AppSetup class. 
+ It checks that a file is correctly copied as expected, with its content intact""" + + os.environ["MACHINE_CONFIG_FILEPATH"] = "./tests/data/configs/mockMachineConfig.json" + os.environ["APP_CONFIG_FILEPATH"] = "./tests/data/configs/mockAppConfig.json" + + from feelpp.benchmarking.reframe.setup import AppSetup + + with tempfile.NamedTemporaryFile() as file: + with open(file.name,"w") as f: + f.write("TEST DESCRIPTION FILE") + + app_setup = AppSetup("./tests/data/configs/mockAppConfig.json",ConfigReader("./tests/data/configs/mockMachineConfig.json",MachineConfig).config) + app_setup.reader.config.additional_files = True + + with tempfile.TemporaryDirectory() as tmp_dir: + app_setup.copyFile(dir_path=tmp_dir,name="test_description",filepath=file.name) + + assert os.path.isfile(os.path.join(tmp_dir,"partials","test_description")) + with open(os.path.join(tmp_dir,"partials","test_description"),"r") as f: + assert f.read() == "TEST DESCRIPTION FILE" \ No newline at end of file diff --git a/tests/scalability/test_outputsHandler.py b/tests/scalability/test_outputsHandler.py deleted file mode 100644 index c2032222..00000000 --- a/tests/scalability/test_outputsHandler.py +++ /dev/null @@ -1,82 +0,0 @@ -import pytest -import tempfile, os -from feelpp.benchmarking.reframe.outputs import OutputsHandler - - -class OutputsConfigMocker: - def __init__(self,filepath="",format=""): - self.filepath = filepath - self.format = format - -class AdditionalFilesMocker: - def __init__(self,description_filepath="",parameterized_descriptions_filepath=""): - self.description_filepath=description_filepath - self.parameterized_descriptions_filepath = parameterized_descriptions_filepath - -class TestOutputsHandler: - - @staticmethod - def buildCsvString(columns,values): - """Helper function to create the content of a CSV from a list of columns and a list of values""" - assert len(columns) == len(values) - return ",".join(columns) + "\n" + ",".join([str(v) for v in values]) - - - def test_getOutputsCsv(self): - """Tests the getOutputs method with csv format""" - file = tempfile.NamedTemporaryFile() - columns = ["col1","col2","col3"] - values = [1,2,3] - with open(file.name,"w") as f: - f.write(self.buildCsvString(columns,values)) - - outputs_handler = OutputsHandler( - [OutputsConfigMocker(filepath=file.name,format="csv")] - ) - - perfvars = outputs_handler.getOutputs() - - for i,column in enumerate(columns): - assert perfvars[column].evaluate() == values[i] - - file.close() - - def test_copyDescription(self): - """Test the copyDescription method of the OutputsHandler class. - It checks that a file is correctly copied as expected, with its content intact""" - with tempfile.NamedTemporaryFile() as file: - with open(file.name,"w") as f: - f.write("TEST DESCRIPTION FILE") - - outputs_handler = OutputsHandler( - outputs_config = [], - additional_files_config=AdditionalFilesMocker(description_filepath=file.name) - ) - - with tempfile.TemporaryDirectory() as tmp_dir: - outputs_handler.copyDescription(dir_path=tmp_dir,name="test_description") - - assert os.path.isfile(os.path.join(tmp_dir,"partials","test_description")) - with open(os.path.join(tmp_dir,"partials","test_description"),"r") as f: - assert f.read() == "TEST DESCRIPTION FILE" - - #TODO: This should be refactored when the OutputsHandler is reworked - def test_copyParametrizedDescriptions(self): - """Test the copyParametrizedDescriptions method of the OutputsHandler class. 
- It checks that a file is correctly copied as expected, with its content intact""" - with tempfile.NamedTemporaryFile() as file: - with open(file.name,"w") as f: - f.write("TEST PARAMETRIZED DESCRIPTION FILE") - - outputs_handler = OutputsHandler( - outputs_config = [], - additional_files_config=AdditionalFilesMocker(parameterized_descriptions_filepath=file.name) - ) - - with tempfile.TemporaryDirectory() as tmp_dir: - outputs_handler.copyParametrizedDescriptions(dir_path=tmp_dir,name="test_parametrized_description") - - assert os.path.isfile(os.path.join(tmp_dir,"partials","test_parametrized_description")) - with open(os.path.join(tmp_dir,"partials","test_parametrized_description"),"r") as f: - assert f.read() == "TEST PARAMETRIZED DESCRIPTION FILE" - diff --git a/tests/scalability/test_scalabilityHandler.py b/tests/scalability/test_scalabilityHandler.py index 63fd1568..f5d35e03 100644 --- a/tests/scalability/test_scalabilityHandler.py +++ b/tests/scalability/test_scalabilityHandler.py @@ -2,16 +2,16 @@ import pytest import tempfile, json -from feelpp.benchmarking.reframe.scalability import ScalabilityHandler -from unittest.mock import patch +from feelpp.benchmarking.reframe.scalability import ScalabilityHandler, CsvExtractor,TsvExtractor,JsonExtractor,Extractor,ExtractorFactory import numpy as np class StageMocker: - def __init__(self,format="",filepath="",name="",variables_path=[]): + def __init__(self,format="",filepath="",name="",variables_path=[],units={"*":"s"}): self.format = format self.filepath = filepath self.name = name self.variables_path = variables_path + self.units = units class CustomVariableMocker: def __init__(self, name="",columns=[],op="",unit="s"): @@ -27,7 +27,38 @@ def __init__(self, directory="",stages=[],custom_variables=[]): self.custom_variables = custom_variables -class TestScalabilityHandler: +class TestExtractors: + + @staticmethod + def buildCsvString(columns,values): + """Helper function to create the content of a CSV from a list of columns and a list of values. 
+        Args:
+            columns(list[str]): List of column values
+            values(list[list[any]]): List of lists containing the csv values
+        Returns
+            str: The built csv string
+        """
+        assert all(len(columns) == len(v) for v in values)
+        return ",".join(columns) + "\n" + "\n".join([",".join([str(r) for r in row]) for row in values])
+
+
+    @pytest.mark.parametrize(("values"),[
+        ([[1,2,3]]), ([[1,2,3],[4,5,6]])
+    ])
+    def test_extractCsv(self,values):
+        file = tempfile.NamedTemporaryFile()
+        columns = ["col1","col2","col3"]
+        with open(file.name,"w") as f:
+            f.write(self.buildCsvString(columns,values))
+
+        extractor = CsvExtractor(filepath=file.name,stage_name="",units={"*":"s"})
+        perfvars = extractor.extract()
+        for j,column in enumerate(columns):
+            for i in range(len(values)):
+                column_name = column if len(values) == 1 else f"{column}_{i}"
+                assert perfvars[column_name].evaluate() == values[i][j]
+
+        file.close()
 
     @staticmethod
     def buildTsvString(index, columns, values):
@@ -35,44 +66,21 @@ def buildTsvString(index, columns, values):
         tsv = "# nProc "+ " ".join(columns) + "\n" + f"{index} " + " ".join([str(v) for v in values]) + "\n"
         return tsv
 
-    def test_extractCsv(self):
-        """ Test performance variable extraction for CSV files"""
-        pass
-
     def test_extractTsv(self):
         """ Test performance variable extraction for special TSV files [WILL BE REMOVED]"""
         index = 32
-        file1 = tempfile.NamedTemporaryFile()
-        columns1 = ["col1","col2","col3"]
-        values1 = [1,2.5,1e-5]
-        with open(file1.name,"w") as f:
-            f.write(self.buildTsvString(index,columns1,values=values1))
-
-        file2 = tempfile.NamedTemporaryFile()
-        columns2 = ["col1","colX"]
-        values2 = [4,5.5]
-        with open(file2.name,"w") as f:
-            f.write(self.buildTsvString(index,columns2,values2))
-
-        scalability_handler = ScalabilityHandler(ScalabilityMocker(
-            directory="",
-            stages = [
-                StageMocker(format="tsv",filepath=file1.name,name="file1"),
-                StageMocker(format="tsv",filepath=file2.name,name="file2")
-            ]
-        ))
-
-        perf_vars = scalability_handler.getPerformanceVariables(index)
-        for i,col1 in enumerate(columns1):
-            print(perf_vars["file1_"+col1])
-            assert perf_vars[f"file1_{col1}"].evaluate() == values1[i]
+        file = tempfile.NamedTemporaryFile()
+        columns = ["col1","col2","col3"]
+        values = [1,2.5,1e-5]
+        with open(file.name,"w") as f:
+            f.write(self.buildTsvString(index,columns,values=values))
 
-        for j,col2 in enumerate(columns2):
-            assert perf_vars[f"file2_{col2}"].evaluate() == values2[j]
+        extractor = TsvExtractor(filepath=file.name,stage_name="file",index=index,units={"*":"s"})
+        perfvars = extractor.extract()
+        for i,col1 in enumerate(columns):
+            assert perfvars[f"file_{col1}"].evaluate() == values[i]
 
-        file1.close()
-        file2.close()
+        file.close()
 
     def test_extractJson(self):
@@ -92,25 +100,15 @@ def test_extractJson(self):
             json.dump(values,f)
 
         #Test no variables path
-        scalability_handler = ScalabilityHandler(ScalabilityMocker(
-            directory="",
-            stages = [
-                StageMocker(format="json",filepath=file.name,name=""),
-            ]
-        ))
-        perf_vars = scalability_handler.getPerformanceVariables(None)
-        assert perf_vars == {}
+        extractor = JsonExtractor(file.name,"",units={"*":"s"},variables_path=[])
+        perfvars = extractor.extract()
+        assert perfvars == {}
 
         #Test with *
-        scalability_handler = ScalabilityHandler(ScalabilityMocker(
-            directory="",
-            stages = [
-                StageMocker(format="json",filepath=file.name,name="",variables_path=["*"]),
-            ]
-        ))
-        perf_vars = scalability_handler.getPerformanceVariables(None)
-        for k,v in perf_vars.items():
+        extractor = 
JsonExtractor(file.name,"",units={"*":"s"},variables_path=["*"]) + perfvars = extractor.extract() + for k,v in perfvars.items(): path = k.split(".") dic = values for p in path: @@ -119,20 +117,79 @@ def test_extractJson(self): assert val == v.evaluate() #Test with specific paths - scalability_handler = ScalabilityHandler(ScalabilityMocker( - directory="", - stages = [ - StageMocker(format="json",filepath=file.name,name="",variables_path=["field2.field2_2.*","field1"]), - ] - )) - perf_vars = scalability_handler.getPerformanceVariables(None) - assert len(perf_vars.keys()) == 3 - assert perf_vars["field1"].evaluate() == values["field1"] - assert perf_vars["field2_2_1"].evaluate() == values["field2"]["field2_2"]["field2_2_1"] - assert perf_vars["field2_2_2"].evaluate() == values["field2"]["field2_2"]["field2_2_2"] + extractor = JsonExtractor(file.name,"",units={"*":"s"},variables_path=["field2.field2_2.*","field1"]) + perfvars = extractor.extract() + assert len(perfvars.keys()) == 3 + assert perfvars["field1"].evaluate() == values["field1"] + assert perfvars["field2_2_1"].evaluate() == values["field2"]["field2_2"]["field2_2_1"] + assert perfvars["field2_2_2"].evaluate() == values["field2"]["field2_2"]["field2_2_2"] file.close() + #Test with multiple wildcards + file = tempfile.NamedTemporaryFile() + values = { + "hardware": { + "gaya3": { + "mem": { + "available": { + "host": "527759648", + "physical": "442275", + "virtual": "51059" + }, + "total": { + "host": "527759648", + "physical": "515390", + "virtual": "51199" + } + } + }, + "gaya2":{ + "mem": { + "available": { + "host": "101010101", + "physical": "442275", + }, + "total": { + "host": "202020202", + "physical": "515390", + "virtual": "51199" + } + } + } + } + } + with open(file.name,"w") as f: + json.dump(values,f) + + + extractor = JsonExtractor(file.name,"",variables_path=["hardware.*.mem.*.host"],units={"*":"s"}) + perfvars = extractor.extract() + assert perfvars["gaya2.available"] == values["hardware"]["gaya2"]["mem"]["available"]["host"] + assert perfvars["gaya2.total"] == values["hardware"]["gaya2"]["mem"]["total"]["host"] + assert perfvars["gaya3.available"] == values["hardware"]["gaya3"]["mem"]["available"]["host"] + assert perfvars["gaya3.total"] == values["hardware"]["gaya3"]["mem"]["total"]["host"] + + + + extractor = JsonExtractor(file.name,"",variables_path=["hardware.*.mem.*"],units={"*":"s"}) + perfvars = extractor.extract() + assert perfvars["gaya2.available.host"] == values["hardware"]["gaya2"]["mem"]["available"]["host"] + assert perfvars["gaya2.available.physical"] == values["hardware"]["gaya2"]["mem"]["available"]["physical"] + assert perfvars["gaya2.total.host"] == values["hardware"]["gaya2"]["mem"]["total"]["host"] + assert perfvars["gaya2.total.physical"] == values["hardware"]["gaya2"]["mem"]["total"]["physical"] + + assert perfvars["gaya3.available.host"] == values["hardware"]["gaya3"]["mem"]["available"]["host"] + assert perfvars["gaya3.available.physical"] == values["hardware"]["gaya3"]["mem"]["available"]["physical"] + assert perfvars["gaya3.available.virtual"] == values["hardware"]["gaya3"]["mem"]["available"]["virtual"] + assert perfvars["gaya3.total.host"] == values["hardware"]["gaya3"]["mem"]["total"]["host"] + assert perfvars["gaya3.total.physical"] == values["hardware"]["gaya3"]["mem"]["total"]["physical"] + assert perfvars["gaya3.total.virtual"] == values["hardware"]["gaya3"]["mem"]["total"]["virtual"] + + file.close() + + +class TestScalabilityHandler: @pytest.mark.parametrize(("op","fct"),[ @@ 
-161,7 +218,7 @@ def test_evaluateCustomVariables(self): columns = ["col1","col2","col3"] values = [1,2,5.5] with open(file.name,"w") as f: - f.write(self.buildTsvString(index,columns,values=values)) + f.write(TestExtractors.buildTsvString(index,columns,values=values)) scalability_handler = ScalabilityHandler(ScalabilityMocker( directory="",
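For reference, the wildcard walk that the new `JsonExtractor` tests exercise can be reproduced standalone. This is the same recursion as `_recursiveExtract` in `scalability.py` above, minus the ReFrame deferral and the final `flattenDict` pass:

```python
def recursive_extract(varpath, content, fields=None, prefix=""):
    """Walk `content` following `varpath`, branching at each '*' wildcard."""
    if fields is None:
        fields = {}
    left, *rest = varpath.split("*", 1)
    node = content
    for key in left.strip(".").split("."):
        if key:
            node = node[key]
    if not rest:
        # No wildcard left: the remaining path addressed a single value.
        fields[left.strip(".").split(".")[-1]] = node
        return fields
    right = rest[0].strip(".")
    if "*" in right:
        # Another wildcard follows: recurse per matched key, extending the prefix.
        for wildcard, sub in node.items():
            recursive_extract(right, sub, fields, prefix=f"{prefix}{wildcard}.")
    else:
        # Terminal segment: walk the fixed tail under each matched key.
        for wildcard, sub in node.items():
            value = sub
            for key in right.split("."):
                if key:
                    value = value[key]
            fields[f"{prefix}{wildcard}"] = value
    return fields

data = {"hardware": {"gaya2": {"mem": {"total": {"host": "101"}}}}}
print(recursive_extract("hardware.*.mem.*.host", data))
# {'gaya2.total': '101'}  -- matching the key names asserted in the tests above
```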