diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..b4840547a --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,40 @@ +# Description + +- Please include a summary of the change and which issue is fixed. +- Please also include relevant motivation and context. +- List any dependencies that are required for this change. + + +- Fixes # (issue) +## Type of change + +Check relevant points. + +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] This change requires a documentation update + +# How Has This Been Tested? + +- Please describe the tests that you ran to verify your changes. +- Provide instructions so we can reproduce. +- Please also list any relevant details for your test configuration. + +- [ ] Test A +- [ ] Test B + +# Checklist: +Prepare items below using: +\[ :x: \] (markdown: `[ :x: ]`) for TODO items +\[ :white_check_mark: \] (markdown: `[ :white_check_mark: ]`) for DONE items +\[ N/A \] for items that are not applicable for this PR. + + +- [ ] updated version number in setup.py/pyproject.toml/environment.yml. +- [ ] updated the lock file. +- [ ] added changes to History.rst. +- [ ] updated the latest version in README file. +- [ ] I have added tests that prove my fix is effective or that my feature works. +- [ ] New and existing unit tests pass locally with my changes. +- [ ] documentation is updated. diff --git a/hydrolib/core/dflowfm/ext/models.py b/hydrolib/core/dflowfm/ext/models.py index 052a7e4e9..42aa02963 100644 --- a/hydrolib/core/dflowfm/ext/models.py +++ b/hydrolib/core/dflowfm/ext/models.py @@ -103,20 +103,19 @@ def forcing(self) -> Union[ForcingBase, None]: Returns: ForcingBase: The corresponding forcing data.
None when this boundary does not have a forcing file or when the data cannot be found. """ - - if self.forcingfile is None: - return None - - for forcing in self.forcingfile.forcing: - - if self.nodeid != forcing.name: - continue - - for quantity in forcing.quantityunitpair: - if quantity.quantity.startswith(self.quantity): - return forcing - - return None + result = None + if self.forcingfile is not None: + for forcing in self.forcingfile.forcing: + + if self.nodeid == forcing.name: + if any( + quantity.quantity.startswith(self.quantity) + for quantity in forcing.quantityunitpair + ): + result = forcing + break + + return result class Lateral(INIBasedModel): diff --git a/hydrolib/core/dflowfm/extold/models.py b/hydrolib/core/dflowfm/extold/models.py index bd24af00c..0864c527f 100644 --- a/hydrolib/core/dflowfm/extold/models.py +++ b/hydrolib/core/dflowfm/extold/models.py @@ -164,6 +164,21 @@ class ExtOldBoundaryQuantity(StrEnum): """Discharge-water level dependency""" +class ExtOldParametersQuantity(StrEnum): + """Enum class containing the valid values for the Spatial parameter category + of the external forcings. 
+ + for more details check D-Flow FM User Manual 1D2D, Chapter D.3.1, Table D.2 + https://content.oss.deltares.nl/delft3d/D-Flow_FM_User_Manual_1D2D.pdf + """ + + FrictionCoefficient = "frictioncoefficient" + HorizontalEddyViscosityCoefficient = "horizontaleddyviscositycoefficient" + HorizontalEddyDiffusivityCoefficient = "horizontaleddydiffusivitycoefficient" + AdvectionType = "advectiontype" + InfiltrationCapacity = "infiltrationcapacity" + + class ExtOldMeteoQuantity(StrEnum): # Meteorological fields diff --git a/hydrolib/core/dflowfm/inifield/models.py b/hydrolib/core/dflowfm/inifield/models.py index eb44942e5..7a86d611b 100644 --- a/hydrolib/core/dflowfm/inifield/models.py +++ b/hydrolib/core/dflowfm/inifield/models.py @@ -188,6 +188,9 @@ class InitialField(AbstractSpatialField): Initial condition field definition, represents an `[Initial]` block in an inifield file. Typically inside the definition list of a [FMModel][hydrolib.core.dflowfm.mdu.models.FMModel]`.geometry.inifieldfile.initial[..]` + + All lowercased attributes match with the initial field input as described in + [UM Sec.D.2](https://content.oss.deltares.nl/delft3dfm1d2d/D-Flow_FM_User_Manual_1D2D.pdf#subsection.D.2). """ _header: Literal["Initial"] = "Initial" diff --git a/hydrolib/tools/ext_old_to_new/base_converter.py b/hydrolib/tools/ext_old_to_new/base_converter.py deleted file mode 100644 index d6fa2307e..000000000 --- a/hydrolib/tools/ext_old_to_new/base_converter.py +++ /dev/null @@ -1,34 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any - -from hydrolib.core.dflowfm.extold.models import ExtOldForcing - - -class BaseConverter(ABC): - """Abstract base class for converting old external forcings blocks - to new blocks. - - Subclasses must implement the `convert` method, specific for the - type of model data in the various old external forcing blocks. 
- - Class ConverterFactory uses these subclasses to create the correct - converter, depending on the quantity of the forcing block. - """ - - def __init__(self): - pass - - @abstractmethod - def convert(self, data: ExtOldForcing) -> Any: - """Converts the data from the old external forcings format to - the proper/new model input block. - - Args: - data (ExtOldForcing): The data read from an old format - external forcings file. - - Returns: - Any: The converted data in the new format. Should be - included into some FileModel object by the caller. - """ - raise NotImplementedError("Subclasses must implement convert method") diff --git a/hydrolib/tools/ext_old_to_new/converter_factory.py b/hydrolib/tools/ext_old_to_new/converter_factory.py deleted file mode 100644 index 021d00dd8..000000000 --- a/hydrolib/tools/ext_old_to_new/converter_factory.py +++ /dev/null @@ -1,49 +0,0 @@ -from hydrolib.core.dflowfm.extold.models import ( - ExtOldBoundaryQuantity, - ExtOldInitialConditionQuantity, - ExtOldMeteoQuantity, -) -from hydrolib.tools.ext_old_to_new.base_converter import BaseConverter -from hydrolib.tools.ext_old_to_new.converters import ( - BoundaryConditionConverter, - InitialConditionConverter, - MeteoConverter, -) - - -def __contains__(cls, item): - try: - cls(item) - except ValueError: - return False - return True - - -class ConverterFactory: - """ - A factory class for creating converters based on the given quantity. - """ - - @staticmethod - def create_converter(quantity) -> BaseConverter: - """ - Create converter based on the given quantity. - - Args: - quantity: The quantity for which the converter needs to be created. - - Returns: - BaseConverter: An instance of a specific BaseConverter subclass - for the given quantity. - - Raises: - ValueError: If no converter is available for the given quantity. 
- """ - if __contains__(ExtOldMeteoQuantity, quantity): - return MeteoConverter() - elif __contains__(ExtOldInitialConditionQuantity, quantity): - return InitialConditionConverter() - elif __contains__(ExtOldBoundaryQuantity, quantity): - return BoundaryConditionConverter() - else: - raise ValueError(f"No converter available for QUANTITY={quantity}.") diff --git a/hydrolib/tools/ext_old_to_new/converters.py b/hydrolib/tools/ext_old_to_new/converters.py index e2ca5b260..4cbd3bd67 100644 --- a/hydrolib/tools/ext_old_to_new/converters.py +++ b/hydrolib/tools/ext_old_to_new/converters.py @@ -1,15 +1,57 @@ +from abc import ABC, abstractmethod +from typing import Any, Dict + from hydrolib.core.basemodel import DiskOnlyFileModel from hydrolib.core.dflowfm.bc.models import ForcingModel from hydrolib.core.dflowfm.ext.models import Boundary, Meteo -from hydrolib.core.dflowfm.extold.models import ExtOldForcing -from hydrolib.core.dflowfm.inifield.models import InitialField, InterpolationMethod -from hydrolib.tools.ext_old_to_new.enum_converters import ( +from hydrolib.core.dflowfm.extold.models import ( + ExtOldBoundaryQuantity, + ExtOldForcing, + ExtOldInitialConditionQuantity, + ExtOldMeteoQuantity, + ExtOldParametersQuantity, +) +from hydrolib.core.dflowfm.inifield.models import ( + InitialField, + InterpolationMethod, + ParameterField, +) +from hydrolib.tools.ext_old_to_new.utils import ( oldfiletype_to_forcing_file_type, oldmethod_to_averaging_type, oldmethod_to_interpolation_method, ) -from .base_converter import BaseConverter + +class BaseConverter(ABC): + """Abstract base class for converting old external forcings blocks + to new blocks. + + Subclasses must implement the `convert` method, specific for the + type of model data in the various old external forcing blocks. + + Class ConverterFactory uses these subclasses to create the correct + converter, depending on the quantity of the forcing block. 
+ """ + + def __init__(self): + """Initializes the BaseConverter object.""" + pass + + @abstractmethod + def convert(self, data: ExtOldForcing) -> Any: + """Converts the data from the old external forcings format to + the proper/new model input block. + + Args: + data (ExtOldForcing): The data read from an old format + external forcings file. + + Returns: + Any: The converted data in the new format. Should be + included into some FileModel object by the caller. + """ + raise NotImplementedError("Subclasses must implement convert method") class MeteoConverter(BaseConverter): @@ -80,12 +122,12 @@ def __init__(self): super().__init__() def convert(self, forcing: ExtOldForcing) -> Boundary: - """Convert an old external forcing block with meteo data to a boundary - forcing block suitable for inclusion in a new external forcings file. + """Convert an old external forcing block to a boundary forcing block + suitable for inclusion in a new external forcings file. This function takes a forcing block from an old external forcings file, represented by an instance of ExtOldForcing, and converts it - into a Meteo object. The Boundary object is suitable for use in new + into a boundary object. The Boundary object is suitable for use in new external forcings files, adhering to the updated format and specifications. @@ -96,7 +138,7 @@ def convert(self, forcing: ExtOldForcing) -> Boundary: required for the conversion process. Returns: - Boundary: A Boindary object that represents the converted forcing + Boundary: A Boundary object that represents the converted forcing block, ready to be included in a new external forcings file. The Boundary object conforms to the new format specifications, ensuring compatibility with updated systems and models. 
@@ -118,6 +160,51 @@ def convert(self, forcing: ExtOldForcing) -> Boundary: return new_block +def create_initial_cond_and_parameter_input_dict( + forcing: ExtOldForcing, +) -> Dict[str, str]: + """Create the input dictionary for the `InitialField` or `ParameterField` + + Args: + forcing: [ExtOldForcing] + External forcing block from the old external forcings file. + + Returns: + Dict[str, str]: + the input dictionary to the `InitialField` or `ParameterField` constructor + """ + block_data = { + "quantity": forcing.quantity, + "datafile": forcing.filename, + "datafiletype": oldfiletype_to_forcing_file_type(forcing.filetype), + } + if block_data["datafiletype"] == "polygon": + block_data["value"] = forcing.value + + if forcing.sourcemask != DiskOnlyFileModel(None): + raise ValueError( + f"Attribute 'SOURCEMASK' is no longer supported, cannot " + f"convert this input. Encountered for QUANTITY=" + f"{forcing.quantity} and FILENAME={forcing.filename}." + ) + block_data["interpolationmethod"] = oldmethod_to_interpolation_method( + forcing.method + ) + if block_data["interpolationmethod"] == InterpolationMethod.averaging: + block_data["averagingtype"] = oldmethod_to_averaging_type(forcing.method) + block_data["averagingrelsize"] = forcing.relativesearchcellsize + block_data["averagingnummin"] = forcing.nummin + block_data["averagingpercentile"] = forcing.percentileminmax + block_data["operand"] = forcing.operand + + if hasattr(forcing, "extrapolation"): + block_data["extrapolationmethod"] = ( + "yes" if forcing.extrapolation == 1 else "no" + ) + + return block_data + + class InitialConditionConverter(BaseConverter): def __init__(self): @@ -151,35 +238,88 @@ def convert(self, forcing: ExtOldForcing) -> InitialField: References: [Sec.D](https://content.oss.deltares.nl/delft3dfm1d2d/D-Flow_FM_User_Manual_1D2D.pdf#subsection.D) """ - block_data = { - "quantity": forcing.quantity, - "datafile": forcing.filename, - "datafiletype": 
oldfiletype_to_forcing_file_type(forcing.filetype), - } - if block_data["datafiletype"] == "polygon": - block_data["value"] = forcing.value + data = create_initial_cond_and_parameter_input_dict(forcing) + new_block = InitialField(**data) + + return new_block - if forcing.sourcemask != DiskOnlyFileModel(None): - raise ValueError( - f"Attribute 'SOURCEMASK' is no longer supported, cannot " - f"convert this input. Encountered for QUANTITY=" - f"{forcing.quantity} and FILENAME={forcing.filename}." - ) - block_data["interpolationmethod"] = oldmethod_to_interpolation_method( - forcing.method - ) - if block_data["interpolationmethod"] == InterpolationMethod.averaging: - block_data["averagingtype"] = oldmethod_to_averaging_type(forcing.method) - block_data["averagingrelsize"] = forcing.relativesearchcellsize - block_data["averagingnummin"] = forcing.nummin - block_data["averagingpercentile"] = forcing.percentileminmax - block_data["operand"] = forcing.operand - - if hasattr(forcing, "extrapolation"): - block_data["extrapolationmethod"] = ( - "yes" if forcing.extrapolation == 1 else "no" - ) - new_block = InitialField(**block_data) +class ParametersConverter(BaseConverter): + + def __init__(self): + super().__init__() + + def convert(self, forcing: ExtOldForcing) -> ParameterField: + """Convert an old external forcing block to a parameter forcing block + suitable for inclusion in an initial field and parameter file. + + This function takes a forcing block from an old external forcings + file, represented by an instance of ExtOldForcing, and converts it + into a ParameterField object. The ParameterField object is suitable for use in + an IniFieldModel, representing an initial field and parameter file, adhering + to the updated format and specifications. + + Args: + forcing (ExtOldForcing): The contents of a single forcing block + in an old external forcings file. 
This object contains all the + necessary information, such as quantity, values, and timestamps, + required for the conversion process. + + Returns: + ParameterField: + A ParameterField object that represents the converted forcing + block, ready to be included in an initial field and parameter file. The + ParameterField object conforms to the new format specifications, ensuring + compatibility with updated systems and models. + + Raises: + ValueError: If the forcing block contains a quantity that is not + supported by the converter, a ValueError is raised. This ensures + that only compatible forcing blocks are processed, maintaining + data integrity and preventing errors in the conversion process. + """ + data = create_initial_cond_and_parameter_input_dict(forcing) + new_block = ParameterField(**data) return new_block + + +class ConverterFactory: + """ + A factory class for creating converters based on the given quantity. + """ + + @staticmethod + def create_converter(quantity) -> BaseConverter: + """ + Create converter based on the given quantity. + + Args: + quantity: The quantity for which the converter needs to be created. + + Returns: + BaseConverter: An instance of a specific BaseConverter subclass + for the given quantity. + + Raises: + ValueError: If no converter is available for the given quantity. 
+ """ + if ConverterFactory.contains(ExtOldMeteoQuantity, quantity): + return MeteoConverter() + elif ConverterFactory.contains(ExtOldInitialConditionQuantity, quantity): + return InitialConditionConverter() + elif ConverterFactory.contains(ExtOldBoundaryQuantity, quantity): + return BoundaryConditionConverter() + elif ConverterFactory.contains(ExtOldParametersQuantity, quantity): + return ParametersConverter() + else: + raise ValueError(f"No converter available for QUANTITY={quantity}.") + + @staticmethod + def contains(quantity_class, quantity) -> bool: + try: + quantity_class(quantity) + except ValueError: + return False + + return True diff --git a/hydrolib/tools/ext_old_to_new/enum_converters.py b/hydrolib/tools/ext_old_to_new/enum_converters.py deleted file mode 100644 index 301351143..000000000 --- a/hydrolib/tools/ext_old_to_new/enum_converters.py +++ /dev/null @@ -1,106 +0,0 @@ -from typing import Union - -from hydrolib.core.dflowfm.ext.models import ( - MeteoForcingFileType, - MeteoInterpolationMethod, -) -from hydrolib.core.dflowfm.extold.models import ExtOldFileType -from hydrolib.core.dflowfm.inifield.models import ( - AveragingType, - DataFileType, - InterpolationMethod, -) - - -def oldfiletype_to_forcing_file_type( - oldfiletype: int, -) -> Union[MeteoForcingFileType, str]: - """Convert old external forcing `FILETYPE` integer value to valid - `forcingFileType` string value. - - Args: - oldfiletype (int): The FILETYPE value in an old external forcings file. - - Returns: - Union[MeteoForcingFileType,str]: Corresponding value for `forcingFileType`, - or "unknown" for invalid input. 
- """ - - forcing_file_type = "unknown" - - if oldfiletype == ExtOldFileType.TimeSeries: # 1 - forcing_file_type = MeteoForcingFileType.uniform - elif oldfiletype == ExtOldFileType.TimeSeriesMagnitudeAndDirection: # 2 - forcing_file_type = MeteoForcingFileType.unimagdir - elif oldfiletype == ExtOldFileType.SpatiallyVaryingWindPressure: # 3 - raise NotImplementedError( - "FILETYPE = 3 (spatially verying wind and pressure) is no longer supported." - ) - elif oldfiletype == ExtOldFileType.ArcInfo: # 4 - forcing_file_type = MeteoForcingFileType.meteogridequi - elif oldfiletype == ExtOldFileType.SpiderWebData: # 5 - forcing_file_type = MeteoForcingFileType.spiderweb - elif oldfiletype == ExtOldFileType.CurvilinearData: # 6 - forcing_file_type = MeteoForcingFileType.meteogridcurvi - elif oldfiletype == ExtOldFileType.Samples: # 7 - forcing_file_type = DataFileType.sample - elif oldfiletype == ExtOldFileType.TriangulationMagnitudeAndDirection: # 8 - raise NotImplementedError( - "FILETYPE = 8 (magnitude+direction timeseries on stations) is no longer supported." - ) - elif oldfiletype == ExtOldFileType.Polyline: # 9 - # Boundary polyline files no longer need a filetype of their own (intentionally no error raised) - pass - elif oldfiletype == ExtOldFileType.InsidePolygon: # 10 - forcing_file_type = DataFileType.polygon - elif oldfiletype == ExtOldFileType.NetCDFGridData: # 11 - forcing_file_type = MeteoForcingFileType.netcdf - - return forcing_file_type - - -def oldmethod_to_interpolation_method( - oldmethod: int, -) -> Union[InterpolationMethod, MeteoInterpolationMethod, str]: - """Convert old external forcing `METHOD` integer value to valid - `interpolationMethod` string value. - - Args: - oldmethod (int): The METHOD value in an old external forcings file. - - Returns: - Union[InterpolationMethod,str]: Corresponding value for `interpolationMethod`, - or "unknown" for invalid input. 
- """ - - if oldmethod in [1, 2, 3, 11]: - interpolation_method = MeteoInterpolationMethod.linearSpaceTime - elif oldmethod == 5: - interpolation_method = InterpolationMethod.triangulation - elif oldmethod == 4: - interpolation_method = InterpolationMethod.constant - elif oldmethod in range(6, 10): - interpolation_method = InterpolationMethod.averaging - else: - interpolation_method = "unknown" - return interpolation_method - - -def oldmethod_to_averaging_type( - oldmethod: int, -) -> Union[AveragingType, str]: - """Convert old external forcing `METHOD` integer value to valid - `averagingType` string value. - - Args: - oldmethod (int): The METHOD value in an old external forcings file. - - Returns: - Union[AveragingType,str]: Corresponding value for `averagingType`, - or "unknown" for invalid input. - """ - - if oldmethod == 6: - averaging_type = AveragingType.mean - else: - interpolation_method = "unknown" diff --git a/hydrolib/tools/ext_old_to_new/main_converter.py b/hydrolib/tools/ext_old_to_new/main_converter.py index 97564c6ff..7ec52b276 100644 --- a/hydrolib/tools/ext_old_to_new/main_converter.py +++ b/hydrolib/tools/ext_old_to_new/main_converter.py @@ -15,9 +15,8 @@ ) from hydrolib.core.dflowfm.mdu.legacy import LegacyFMModel from hydrolib.core.dflowfm.structure.models import Structure, StructureModel - -from .converter_factory import ConverterFactory -from .utils import ( +from hydrolib.tools.ext_old_to_new.converters import ConverterFactory +from hydrolib.tools.ext_old_to_new.utils import ( backup_file, construct_filemodel_new_or_existing, construct_filepath_with_postfix, @@ -51,7 +50,7 @@ def __init__( extold_model = self._read_old_file(extold_model) self._extold_model = extold_model - rdir = self._extold_model.filepath.parent + rdir = extold_model.filepath.parent # create the new models if not provided by the user in the same directory as the old external file path = ( @@ -78,7 +77,7 @@ def __init__( ) @property - def extold_model(self) -> ExtOldModel: + 
def extold_model(self) -> ExtOldModel: """old external forcing model.""" return self._extold_model @@ -133,11 +132,21 @@ def _read_old_file(extoldfile: PathOrStr) -> ExtOldModel: """Read a legacy D-Flow FM external forcings file (.ext) into an ExtOldModel object. + - The `_read_old_file` method instantiates an ExternalForcingConverter object with an ExtOldModel object and + a default set of new external forcing, initial field and structure models. + - The new models will be created in the same directory as the old external forcing file. + - The new external forcing file will be named new-external-forcing.ext, the new initial conditions file will be + named new-initial-conditions.ext and the new structure file will be named new-structure.ext. + - However, the user can change the paths to the new models by using the ``ext_model``, ``inifield_model`` and + ``structure_model`` setters. The new models will be created in the specified paths. + - The user can also set the paths to the new models using the `converter.ext_model.filepath = "mypath.ext"`. + Args: extoldfile (PathOrStr): path to the external forcings file (.ext) Returns: - ExtOldModel: object with all forcing blocks.
+ """ global _verbose if not isinstance(extoldfile, Path): extoldfile = Path(extoldfile) diff --git a/hydrolib/tools/ext_old_to_new/utils.py b/hydrolib/tools/ext_old_to_new/utils.py index 0c97df462..f8efab58b 100644 --- a/hydrolib/tools/ext_old_to_new/utils.py +++ b/hydrolib/tools/ext_old_to_new/utils.py @@ -1,7 +1,17 @@ from pathlib import Path -from typing import Type +from typing import Type, Union from hydrolib.core.basemodel import FileModel, PathOrStr +from hydrolib.core.dflowfm.ext.models import ( + MeteoForcingFileType, + MeteoInterpolationMethod, +) +from hydrolib.core.dflowfm.extold.models import ExtOldFileType +from hydrolib.core.dflowfm.inifield.models import ( + AveragingType, + DataFileType, + InterpolationMethod, +) def construct_filemodel_new_or_existing( @@ -57,3 +67,99 @@ def construct_filepath_with_postfix(filepath: PathOrStr, postfix: str) -> Path: """ file_as_path = Path(filepath) return file_as_path.with_stem(file_as_path.stem + postfix) + + +def oldfiletype_to_forcing_file_type( + oldfiletype: int, +) -> Union[MeteoForcingFileType, str]: + """Convert old external forcing `FILETYPE` integer value to valid + `forcingFileType` string value. + + Args: + oldfiletype (int): The FILETYPE value in an old external forcings file. + + Returns: + Union[MeteoForcingFileType,str]: Corresponding value for `forcingFileType`, + or "unknown" for invalid input. + """ + + forcing_file_type = "unknown" + + if oldfiletype == ExtOldFileType.TimeSeries: # 1 + forcing_file_type = MeteoForcingFileType.uniform + elif oldfiletype == ExtOldFileType.TimeSeriesMagnitudeAndDirection: # 2 + forcing_file_type = MeteoForcingFileType.unimagdir + elif oldfiletype == ExtOldFileType.SpatiallyVaryingWindPressure: # 3 + raise NotImplementedError( + "FILETYPE = 3 (spatially verying wind and pressure) is no longer supported." 
+ ) + elif oldfiletype == ExtOldFileType.ArcInfo: # 4 + forcing_file_type = MeteoForcingFileType.meteogridequi + elif oldfiletype == ExtOldFileType.SpiderWebData: # 5 + forcing_file_type = MeteoForcingFileType.spiderweb + elif oldfiletype == ExtOldFileType.CurvilinearData: # 6 + forcing_file_type = MeteoForcingFileType.meteogridcurvi + elif oldfiletype == ExtOldFileType.Samples: # 7 + forcing_file_type = DataFileType.sample + elif oldfiletype == ExtOldFileType.TriangulationMagnitudeAndDirection: # 8 + raise NotImplementedError( + "FILETYPE = 8 (magnitude+direction timeseries on stations) is no longer supported." + ) + elif oldfiletype == ExtOldFileType.Polyline: # 9 + # Boundary polyline files no longer need a filetype of their own (intentionally no error raised) + pass + elif oldfiletype == ExtOldFileType.InsidePolygon: # 10 + forcing_file_type = DataFileType.polygon + elif oldfiletype == ExtOldFileType.NetCDFGridData: # 11 + forcing_file_type = MeteoForcingFileType.netcdf + + return forcing_file_type + + +def oldmethod_to_interpolation_method( + oldmethod: int, +) -> Union[InterpolationMethod, MeteoInterpolationMethod, str]: + """Convert old external forcing `METHOD` integer value to valid + `interpolationMethod` string value. + + Args: + oldmethod (int): The METHOD value in an old external forcings file. + + Returns: + Union[InterpolationMethod,str]: Corresponding value for `interpolationMethod`, + or "unknown" for invalid input. 
+ """ + + if oldmethod in [1, 2, 3, 11]: + interpolation_method = MeteoInterpolationMethod.linearSpaceTime + elif oldmethod == 5: + interpolation_method = InterpolationMethod.triangulation + elif oldmethod == 4: + interpolation_method = InterpolationMethod.constant + elif oldmethod in range(6, 10): + interpolation_method = InterpolationMethod.averaging + else: + interpolation_method = "unknown" + return interpolation_method + + +def oldmethod_to_averaging_type( + oldmethod: int, +) -> Union[AveragingType, str]: + """Convert old external forcing `METHOD` integer value to valid + `averagingType` string value. + + Args: + oldmethod (int): The METHOD value in an old external forcings file. + + Returns: + Union[AveragingType,str]: Corresponding value for `averagingType`, + or "unknown" for invalid input. + """ + + if oldmethod == 6: + averaging_type = AveragingType.mean + else: + averaging_type = "unknown" + + return averaging_type diff --git a/pyproject.toml b/pyproject.toml index 8e65337e7..ba327908e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ homepage = "https://deltares.github.io/HYDROLIB-core" "issue tracker" = "https://github.com/Deltares/HYDROLIB-core/issues" [tool.poetry.dependencies] -python = "^3.9" +python = ">=3.9,<4" # exclude yanked netcdf versions 1.7.0 and 1.7.1, but include 1.7.2 (first with python 3.12 support) netCDF4 = "^1.5,!=1.7.0,!=1.7.1" # no caret here, since numpy v2 is required for future python 3.13 support, but this is not yet widely supported by packages numpy v1 support is also still required. 
diff --git a/tests/conftest.py b/tests/conftest.py index 9fe5bf5c0..2687f4092 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,6 +4,21 @@ import pytest +@pytest.fixture +def input_files_dir() -> Path: + return Path("tests/data/input") + + +@pytest.fixture +def output_files_dir() -> Path: + return Path("tests/data/output") + + +@pytest.fixture +def reference_files_dir() -> Path: + return Path("tests/data/reference") + + @pytest.fixture def initial_condition_quantities() -> List[str]: return [ @@ -86,26 +101,11 @@ def initial_cond_averaging_type() -> List[str]: ] -@pytest.fixture -def input_files_dir() -> Path: - return Path("tests/data/input") - - @pytest.fixture def polylines_dir() -> Path: return Path("tests/data/input/dflowfm_individual_files/polylines") -@pytest.fixture -def output_files_dir() -> Path: - return Path("tests/data/output") - - -@pytest.fixture -def reference_files_dir() -> Path: - return Path("tests/data/reference") - - @pytest.fixture def time_series_file(input_files_dir: Path) -> Path: return input_files_dir.joinpath("tim/single_data_for_timeseries.tim") @@ -198,3 +198,14 @@ def old_forcing_file_quantities() -> List[str]: @pytest.fixture def old_forcing_comment_len() -> int: return 63 + + +@pytest.fixture +def parameter_quantities() -> List[str]: + return [ + "frictioncoefficient", + "horizontaleddyviscositycoefficient", + "horizontaleddydiffusivitycoefficient", + "advectiontype", + "infiltrationcapacity", + ] diff --git a/tests/dflowfm/extold/__init__.py b/tests/dflowfm/ext/__init__.py similarity index 100% rename from tests/dflowfm/extold/__init__.py rename to tests/dflowfm/ext/__init__.py diff --git a/tests/dflowfm/ext/test_boundary.py b/tests/dflowfm/ext/test_boundary.py new file mode 100644 index 000000000..6eb01b786 --- /dev/null +++ b/tests/dflowfm/ext/test_boundary.py @@ -0,0 +1,173 @@ +""" +Test all methods contained in the +hydrolib.core.dflowfm.ext.models.Boundary class +""" + +from pathlib import Path + +import pytest + 
+from hydrolib.core.basemodel import DiskOnlyFileModel +from hydrolib.core.dflowfm.bc.models import ForcingModel +from hydrolib.core.dflowfm.ext.models import Boundary + + +def test_existing_file(): + polyline = "tests/data/input/boundary-conditions/tfl_01.pli" + data = { + "quantity": "waterlevelbnd", + "locationfile": polyline, + "forcingfile": ForcingModel(), + } + boundary_block = Boundary(**data) + assert boundary_block.locationfile == DiskOnlyFileModel(Path(polyline)) + assert boundary_block.quantity == "waterlevelbnd" + assert boundary_block.forcingfile == ForcingModel() + assert boundary_block.bndwidth1d is None + assert boundary_block.bndbldepth is None + + +def test_given_args_expected_values(): + # 1. Explicit declaration of parameters (to validate keys as they are written) + dict_values = { + "quantity": "42", + "nodeid": "aNodeId", + "locationfile": Path("aLocationFile"), + "forcingfile": ForcingModel(), + "bndwidth1d": 4.2, + "bndbldepth": 2.4, + } + + created_boundary = Boundary(**dict_values) + + # 3. Verify boundary values as expected. + created_boundary_dict = created_boundary.dict() + + compare_data = dict(dict_values) + expected_location_path = compare_data.pop("locationfile") + + for key, value in compare_data.items(): + assert created_boundary_dict[key] == value + + assert created_boundary_dict["locationfile"]["filepath"] == expected_location_path + + +def test_given_args_as_alias_expected_values(): + # 1. Explicit declaration of parameters (to validate keys as they are written) + dict_values = { + "quantity": "42", + "nodeid": "aNodeId", + "locationfile": Path("aLocationFile"), + "forcingFile": ForcingModel(), + "bndWidth1D": 4.2, + "bndBlDepth": 2.4, + } + + created_boundary = Boundary(**dict_values) + boundary_as_dict = created_boundary.dict() + # 3. Verify boundary values as expected. 
+ assert boundary_as_dict["quantity"] == dict_values["quantity"] + assert boundary_as_dict["nodeid"] == dict_values["nodeid"] + assert boundary_as_dict["locationfile"]["filepath"] == dict_values["locationfile"] + assert boundary_as_dict["forcingfile"] == dict_values["forcingFile"] + assert boundary_as_dict["bndwidth1d"] == dict_values["bndWidth1D"] + assert boundary_as_dict["bndbldepth"] == dict_values["bndBlDepth"] + + +class TestValidateRootValidator: + """ + Test class to validate the paradigms when evaluating + check_nodeid_or_locationfile_present. + """ + + @pytest.mark.parametrize( + "dict_values", + [ + pytest.param(dict(), id="No entries."), + pytest.param(dict(nodeid=None, locationfile=None), id="Entries are None."), + pytest.param(dict(nodeid="", locationfile=""), id="Entries are Empty."), + ], + ) + def test_given_no_values_raises_valueerror(self, dict_values: dict): + with pytest.raises(ValueError) as exc_mssg: + Boundary.check_nodeid_or_locationfile_present(dict_values) + + # 3. Verify final expectations. + expected_error_mssg = ( + "Either nodeId or locationFile fields should be specified." + ) + assert str(exc_mssg.value) == expected_error_mssg + + @pytest.mark.parametrize( + "dict_values", + [ + pytest.param(dict(nodeid="aNodeId"), id="NodeId present."), + pytest.param( + dict(locationfile=Path("aLocationFile")), + id="LocationFile present.", + ), + pytest.param( + dict(nodeid="bNodeId", locationfile="bLocationFile"), + id="Both present.", + ), + ], + ) + def test_given_dict_values_doesnot_raise(self, dict_values: dict): + return_values = Boundary.check_nodeid_or_locationfile_present(dict_values) + assert dict_values == return_values + + +class TestValidateFromCtor: + """ + Test class to validate the validation during default object creation. 
+ """ + + @pytest.mark.parametrize( + "dict_values", + [ + pytest.param(dict(), id="No entries."), + pytest.param(dict(nodeid=None, locationfile=None), id="Entries are None."), + pytest.param(dict(nodeid=""), id="NodeId is empty."), + ], + ) + def test_given_no_values_raises_valueerror(self, dict_values: dict): + required_values = dict(quantity="aQuantity") + test_values = {**dict_values, **required_values} + with pytest.raises(ValueError) as exc_mssg: + Boundary(**test_values) + + # 3. Verify final expectations. + expected_error_mssg = ( + "Either nodeId or locationFile fields should be specified." + ) + assert expected_error_mssg in str(exc_mssg.value) + + @pytest.mark.parametrize( + "dict_values", + [ + pytest.param(dict(nodeid="aNodeId"), id="NodeId present."), + pytest.param( + dict(locationfile=Path("aLocationFile")), + id="LocationFile present.", + ), + pytest.param( + dict(nodeid="bNodeId", locationfile=Path("bLocationFile")), + id="Both present.", + ), + ], + ) + def test_given_dict_values_doesnot_raise(self, dict_values: dict): + required_values = dict(quantity="aQuantity", forcingfile=ForcingModel()) + test_values = {**dict_values, **required_values} + created_boundary = Boundary(**test_values) + + expected_locationfile = test_values.pop("locationfile", None) + + for key, value in test_values.items(): + if key == "forcing_file": + value = value.dict() + assert created_boundary.dict()[key] == value + + assert ( + created_boundary.dict()["locationfile"]["filepath"] == expected_locationfile + ) diff --git a/tests/dflowfm/ext/test_ext.py b/tests/dflowfm/ext/test_ext.py new file mode 100644 index 000000000..bc55c8e9a --- /dev/null +++ b/tests/dflowfm/ext/test_ext.py @@ -0,0 +1,215 @@ +from pathlib import Path +from typing import List + +import numpy as np +import pytest +from pydantic.v1 import ValidationError + +from hydrolib.core.basemodel import DiskOnlyFileModel +from hydrolib.core.dflowfm.bc.models import ForcingModel +from 
hydrolib.core.dflowfm.ext.models import ( + ExtModel, + Meteo, + MeteoForcingFileType, + MeteoInterpolationMethod, +) +from hydrolib.core.dflowfm.tim.models import TimModel + + +class TestExtModel: + """Class to test all methods contained in the + hydrolib.core.dflowfm.ext.models.ExtModel class""" + + def test_construct_from_file_with_tim(self, input_files_dir: Path): + input_ext = input_files_dir.joinpath( + "e02/f006_external_forcing/c063_rain_tim/rainschematic.ext" + ) + + ext_model = ExtModel(input_ext) + + assert isinstance(ext_model, ExtModel) + assert len(ext_model.meteo) == 1 + assert ext_model.meteo[0].quantity == "rainfall_rate" + assert isinstance(ext_model.meteo[0].forcingfile, TimModel) + assert ext_model.meteo[0].forcingfiletype == MeteoForcingFileType.uniform + + assert len(ext_model.meteo[0].forcingfile.timeseries) == 14 + + def test_construct_from_file_with_bc(self, input_files_dir: Path): + input_ext = input_files_dir.joinpath( + "e02/f006_external_forcing/c069_rain_bc/rainschematic.ext" + ) + ext_model = ExtModel(input_ext) + + assert isinstance(ext_model, ExtModel) + assert len(ext_model.meteo) == 1 + assert ext_model.meteo[0].quantity == "rainfall_rate" + assert isinstance(ext_model.meteo[0].forcingfile, ForcingModel) + assert ext_model.meteo[0].forcingfiletype == MeteoForcingFileType.bcascii + + def test_construct_from_file_with_netcdf(self, input_files_dir: Path): + input_ext = input_files_dir.joinpath( + "e02/f006_external_forcing/c067_rain_netcdf_stations/rainschematic.ext" + ) + ext_model = ExtModel(input_ext) + + assert isinstance(ext_model, ExtModel) + assert len(ext_model.meteo) == 1 + assert ext_model.meteo[0].quantity == "rainfall" + assert isinstance(ext_model.meteo[0].forcingfile, DiskOnlyFileModel) + assert ext_model.meteo[0].forcingfiletype == MeteoForcingFileType.netcdf + + def test_ext_model_correct_default_serializer_config(self): + model = ExtModel() + + assert model.serializer_config.section_indent == 0 + assert 
model.serializer_config.property_indent == 0 + assert model.serializer_config.datablock_indent == 8 + assert model.serializer_config.float_format == "" + assert model.serializer_config.datablock_spacing == 2 + assert model.serializer_config.comment_delimiter == "#" + assert model.serializer_config.skip_empty_properties == True + + +class TestMeteo: + + def test_meteo_interpolation_methods(self, meteo_interpolation_methods: List[str]): + assert len(MeteoInterpolationMethod) == 3 + assert all( + quantity.value in meteo_interpolation_methods + for quantity in MeteoInterpolationMethod.__members__.values() + ) + + def test_meteo_forcing_file_type(self, meteo_forcing_file_type: List[str]): + assert len(MeteoForcingFileType) == 8 + assert all( + quantity.value in meteo_forcing_file_type + for quantity in MeteoForcingFileType.__members__.values() + ) + + def test_meteo_initialization(self): + data = { + "quantity": "rainfall", + "forcingfile": ForcingModel(), + "forcingfiletype": MeteoForcingFileType.bcascii, + "targetmaskfile": None, + "targetmaskinvert": False, + "interpolationmethod": None, + } + meteo = Meteo(**data) + assert meteo.quantity == "rainfall" + assert isinstance(meteo.forcingfile, ForcingModel) + assert meteo.forcingfiletype == MeteoForcingFileType.bcascii + + def test_default_values(self): + meteo = Meteo( + quantity="rainfall", + forcingfile=ForcingModel(), + forcingfiletype=MeteoForcingFileType.uniform, + ) + assert meteo.targetmaskfile is None + assert meteo.targetmaskinvert is None + assert meteo.interpolationmethod is None + assert meteo.operand == "O" + assert meteo.extrapolationAllowed is None + assert meteo.extrapolationSearchRadius is None + assert meteo.averagingType is None + assert meteo.averagingNumMin is None + assert meteo.averagingPercentile is None + + def test_setting_optional_fields(self): + meteo = Meteo( + quantity="rainfall", + forcingfile=ForcingModel(), + forcingfiletype=MeteoForcingFileType.uniform, + targetmaskfile=None, + 
targetmaskinvert=True, + interpolationmethod=MeteoInterpolationMethod.nearestnb, + operand="O", + extrapolationAllowed=True, + extrapolationSearchRadius=10, + averagingType=1, + averagingNumMin=0.5, + averagingPercentile=90, + ) + assert meteo.targetmaskfile is None + assert meteo.targetmaskinvert is True + assert meteo.interpolationmethod == MeteoInterpolationMethod.nearestnb + assert meteo.operand == "O" + assert meteo.extrapolationAllowed is True + assert meteo.extrapolationSearchRadius == 10 + assert meteo.averagingType == 1 + assert np.isclose(meteo.averagingNumMin, 0.5) + assert meteo.averagingPercentile == 90 + + def test_invalid_forcingfiletype(self): + with pytest.raises(ValueError): + Meteo( + quantity="rainfall", + forcingfile=ForcingModel(), + forcingfiletype="invalidType", + ) + + def test_invalid_interpolation_method(self): + with pytest.raises(ValueError): + Meteo( + quantity="rainfall", + forcingfile=ForcingModel(), + forcingfiletype=MeteoForcingFileType.uniform, + interpolationmethod="invalidMethod", + ) + + @pytest.mark.parametrize( + ("missing_field", "alias_field"), + [ + ("quantity", "quantity"), + ("forcingfile", "forcingFile"), + ("forcingfiletype", "forcingFileType"), + ], + ) + def test_missing_required_fields(self, missing_field, alias_field): + dict_values = { + "quantity": "rainfall", + "forcingfile": ForcingModel(), + "forcingfiletype": MeteoForcingFileType.bcascii, + "targetmaskfile": None, + "targetmaskinvert": False, + "interpolationmethod": None, + } + del dict_values[missing_field] + + with pytest.raises(ValidationError) as error: + Meteo(**dict_values) + + expected_message = f"{alias_field}\n field required " + assert expected_message in str(error.value) + + def test_is_intermediate_link(self): + meteo = Meteo( + quantity="rainfall", + forcingfile=ForcingModel(), + forcingfiletype=MeteoForcingFileType.uniform, + ) + assert meteo.is_intermediate_link() is True + + def test_initialize_with_boundary_condition_file( + self, 
boundary_condition_file: Path + ): + meteo = Meteo( + quantity="rainfall", + forcingfile=boundary_condition_file, + forcingfiletype=MeteoForcingFileType.bcascii, + ) + assert isinstance(meteo.forcingfile, ForcingModel) + assert meteo.forcingfile.filepath == boundary_condition_file + assert meteo.forcingfiletype == MeteoForcingFileType.bcascii + + def test_initialize_with_time_series_file(self, time_series_file: Path): + meteo = Meteo( + quantity="rainfall", + forcingfile=time_series_file, + forcingfiletype=MeteoForcingFileType.bcascii, + ) + assert isinstance(meteo.forcingfile, TimModel) + assert meteo.forcingfile.filepath == time_series_file + assert meteo.forcingfiletype == MeteoForcingFileType.bcascii diff --git a/tests/dflowfm/ext/test_laterals.py b/tests/dflowfm/ext/test_laterals.py new file mode 100644 index 000000000..24558183b --- /dev/null +++ b/tests/dflowfm/ext/test_laterals.py @@ -0,0 +1,414 @@ +""" +Class to test all methods contained in the +hydrolib.core.dflowfm.ext.models.Lateral class +""" + +from typing import Dict, List, Optional + +import numpy as np +import pytest +from pydantic.v1 import ValidationError + +from hydrolib.core.dflowfm.bc.models import Constant, ForcingModel, RealTime +from hydrolib.core.dflowfm.ext.models import ExtModel, Lateral +from hydrolib.core.dflowfm.ini.models import INIBasedModel +from tests.utils import test_data_dir + +LOCATION_ERROR: str = ( + "nodeId or branchId and chainage or xCoordinates, yCoordinates and numCoordinates should be provided" +) + + +class TestValidateCoordinates: + """ + Class to test the paradigms for validate_coordinates. 
+ """ + + def _create_valid_lateral_values(self) -> Dict: + values = dict( + id="randomId", + name="randomName", + numcoordinates=2, + xcoordinates=[1.1, 2.2], + ycoordinates=[1.1, 2.2], + discharge=1.234, + ) + + return values + + def test_given_no_numcoordinates_raises_valueerror(self): + values = self._create_valid_lateral_values() + del values["numcoordinates"] + + with pytest.raises(ValueError): + Lateral(**values) + + def test_given_wrong_numcoordinates_raises_assertionerror(self): + values = self._create_valid_lateral_values() + values["numcoordinates"] = 999 + + with pytest.raises(ValueError): + Lateral(**values) + + def test_given_correct_numcoordinates(self): + xcoordinates = [1, 2] + ycoordinates = [2, 3] + + values = self._create_valid_lateral_values() + values["xcoordinates"] = xcoordinates + values["ycoordinates"] = ycoordinates + values["numcoordinates"] = len(xcoordinates) + + lateral = Lateral(**values) + + assert lateral.xcoordinates == xcoordinates + assert lateral.ycoordinates == ycoordinates + + def test_given_fewer_coordinates_than_minimum_required_throws_valueerror( + self, + ): + values = self._create_valid_lateral_values() + values["numcoordinates"] = 0 + values["xcoordinates"] = [] + values["ycoordinates"] = [] + + with pytest.raises(ValueError): + Lateral(**values) + + +class TestValidateLocationType: + """ + Class to test the paradigms for validate_location_type + """ + + @pytest.mark.parametrize( + "value", + [ + pytest.param(""), + pytest.param(" "), + pytest.param("notAValidType"), + ], + ) + def test_given_wrong_location_type_raises_valueerror(self, value: str): + with pytest.raises(ValueError) as exc_mssg: + Lateral.validate_location_type(value) + assert ( + str(exc_mssg.value) + == f"Value given ({value}) not accepted, should be one of: 1d, 2d, all" + ) + + @pytest.mark.parametrize( + "location_type", + [ + pytest.param("1d"), + pytest.param("1D"), + pytest.param("2d"), + pytest.param("2D"), + pytest.param("all"), + 
pytest.param("All"), + pytest.param("ALL"), + ], + ) + def test_given_correct_locationtype(self, location_type: str): + return_value = Lateral.validate_location_type(location_type) + assert return_value == location_type + + +class TestValidateLocationTypeDependencies: + """ + Class to test the paradigms of validate_location_dependencies + """ + + @pytest.mark.parametrize( + "dict_values", + [ + pytest.param( + dict(nodeid=None, branchid=None, chainage=None), + id="All None", + ), + pytest.param( + dict(nodeid="", branchid="", chainage=None), + id="All Empty", + ), + ], + ) + def test_given_no_values_raises_valueerror(self, dict_values: dict): + with pytest.raises(ValueError) as exc_err: + Lateral.validate_that_location_specification_is_correct(values=dict_values) + assert str(exc_err.value) == LOCATION_ERROR + + @pytest.mark.parametrize( + "missing_coordinates", [("xCoordinates"), ("yCoordinates")] + ) + def test_given_numcoords_but_missing_coordinates(self, missing_coordinates: str): + test_dict = dict( + nodeid=None, + branchid=None, + chainage=None, + numcoordinates=2, + xcoordinates=[42, 24], + ycoordinates=[24, 42], + ) + test_dict[missing_coordinates.lower()] = None + with pytest.raises(ValueError) as exc_error: + Lateral.validate_that_location_specification_is_correct(test_dict) + assert str(exc_error.value) == LOCATION_ERROR + + def test_given_numcoordinates_and_valid_coordinates(self): + test_dict = dict( + nodeid=None, + branchid=None, + chainage=None, + numcoordinates=2, + xcoordinates=[42, 24], + ycoordinates=[24, 42], + ) + return_value = Lateral.validate_that_location_specification_is_correct( + test_dict + ) + assert return_value == test_dict + + def test_given_branchid_and_no_chainage_raises_valueerror(self): + with pytest.raises(ValueError) as exc_err: + Lateral.validate_that_location_specification_is_correct( + dict( + nodeid=None, + branchid="aBranchId", + chainage=None, + ) + ) + assert str(exc_err.value) == LOCATION_ERROR + + 
@pytest.mark.parametrize( + "dict_values", + [ + pytest.param(dict(nodeid="42"), id="Given nodeid"), + pytest.param( + dict(branchid="aBranchId", chainage=4.2), + id="Given branchid and chainage", + ), + ], + ) + def test_given_1d_args_and_location_type_other_then_raises_valueerror( + self, dict_values: dict + ): + test_values = dict( + locationtype="wrongType", + ) + test_dict = {**dict_values, **test_values} + with pytest.raises(ValueError) as exc_err: + Lateral.validate_that_location_specification_is_correct(test_dict) + assert str(exc_err.value) == "locationType should be 1d but was wrongType" + + @pytest.mark.parametrize( + "dict_values", + [ + pytest.param(dict(nodeid="24"), id="Given nodeid"), + pytest.param( + dict(branchid="aBranchId", chainage=4.2), + id="Given branchid and chainage.", + ), + ], + ) + def test_given_1d_args_and_1d_location_type(self, dict_values: dict): + test_values = dict( + locationtype="1d", + ) + test_dict = {**dict_values, **test_values} + return_value = Lateral.validate_that_location_specification_is_correct( + test_dict + ) + assert return_value == test_dict + + @pytest.mark.parametrize( + "test_dict", + [ + pytest.param(dict(nodeid="aNodeId"), id="With NodeId"), + pytest.param( + dict(branchid="aBranchId", chainage=42), + id="Witch branchid and chainage", + ), + ], + ) + @pytest.mark.parametrize( + "location_type", + [ + pytest.param("", id="Empty string"), + pytest.param(None, id="None string"), + ], + ) + def test_given_1d_args_but_no_locationtype_then_sets_value( + self, test_dict: dict, location_type: str + ): + test_dict["locationtype"] = location_type + return_value = Lateral.validate_that_location_specification_is_correct( + test_dict + ) + assert return_value["locationtype"] == "1d" + + +class TestValidateFromCtor: + @pytest.mark.parametrize( + "x_coord, y_coord", + [ + pytest.param(None, [42, 24], id="Only y-coord."), + pytest.param([42, 24], None, id="Only x-coord."), + ], + ) + def 
test_given_coordinates_but_no_numcoordinates_raises( + self, x_coord: Optional[List[int]], y_coord: Optional[List[int]] + ): + with pytest.raises(ValidationError) as exc_mssg: + Lateral( + id="42", + discharge=1.23, + numcoordinates=None, + xcoordinates=x_coord, + ycoordinates=y_coord, + ) + + expected_error_mssg = LOCATION_ERROR + assert expected_error_mssg in str(exc_mssg.value) + + @pytest.mark.parametrize( + "x_coord, y_coord", + [ + pytest.param([42, 24], [24], id="Y coord not matching."), + pytest.param([24], [42, 24], id="X coord not matching."), + ], + ) + def test_given_coordinates_not_matching_numcoordinates_raises( + self, x_coord: List[int], y_coord: List[int] + ): + with pytest.raises(ValidationError): + Lateral( + id="42", + discharge=1.23, + numcoordinates=2, + xcoordinates=x_coord, + ycoordinates=y_coord, + ) + + @pytest.mark.parametrize("missing_coord", [("xCoordinates"), ("yCoordinates")]) + def test_given_partial_coordinates_raises(self, missing_coord: str): + lateral_dict = dict( + id="42", + discharge=1.23, + numcoordinates=2, + xcoordinates=[42, 24], + ycoordinates=[24, 42], + locationtype="all", + ) + lateral_dict[missing_coord.lower()] = None + with pytest.raises(ValidationError) as exc_mssg: + Lateral(**lateral_dict) + expected_error_mssg = LOCATION_ERROR + assert expected_error_mssg in str(exc_mssg.value) + + def test_given_unknown_locationtype_raises(self): + with pytest.raises(ValidationError) as exc_mssg: + location_type = "loremIpsum" + Lateral( + id="42", + discharge=1.23, + numcoordinates=2, + xcoordinates=[42, 24], + ycoordinates=[24, 42], + locationtype=location_type, + ) + expected_error_mssg = ( + f"Value given ({location_type}) not accepted, should be one of: 1d, 2d, all" + ) + assert expected_error_mssg in str(exc_mssg.value) + + @pytest.mark.parametrize( + "location_values", + [ + pytest.param(dict(nodeid="aNodeId"), id="nodeid given."), + pytest.param( + dict(branchid="aBranchId", chainage=42), + id="branchid + chainage 
given.", + ), + pytest.param( + dict(nodeid="", branchid="aBranchId", chainage=42), + id="Empty nodeid.", + ), + ], + ) + def test_given_valid_location_args_constructs_lateral(self, location_values: dict): + # 1. Define test data. + default_values = dict( + id="42", + discharge=1.23, + locationtype="1d", + ) + test_dict = {**default_values, **location_values} + + # 2. Run test. + new_lateral = Lateral(**test_dict) + + # 3. Validate final expectations. + for key, value in location_values.items(): + assert new_lateral.dict()[key] == value + + @pytest.mark.parametrize( + "location_dict", + [ + pytest.param( + dict(locationtype="1d", nodeid="aNodeId"), id="1D-With NodeId" + ), + pytest.param( + dict(locationtype="1d", branchid="aBranchId", chainage=4.2), + id="1D-With BranchId and Chainage", + ), + pytest.param( + dict( + locationtype="2d", + xcoordinates=[42, 24], + ycoordinates=[24, 42], + numcoordinates=2, + ), + id="2D-With coordinates", + ), + pytest.param( + dict( + locationtype="all", + xcoordinates=[42, 24], + ycoordinates=[24, 42], + numcoordinates=2, + ), + id="All-With coordinates", + ), + ], + ) + def test_given_valid_args_validates_locationtype(self, location_dict: str): + # 1. Define test data. + default_values = dict( + id="42", + discharge="realtime", + ) + lateral_dict = {**default_values, **location_dict} + # 2. Run test. + lateral_cls = Lateral(**lateral_dict) + + # 3. Validate expectations. + assert isinstance(lateral_cls, INIBasedModel) + for key, value in lateral_dict.items(): + assert lateral_cls.dict()[key] == value + + +class TestValidateForcingData: + """ + Class to test the different types of discharge forcings. 
+ """ + + def test_dischargeforcings_fromfile(self): + + filepath = test_data_dir / "input/dflowfm_individual_files/FlowFM_bnd.ext" + m = ExtModel(filepath) + assert len(m.lateral) == 72 + assert m.lateral[0].discharge == RealTime.realtime + assert np.isclose(m.lateral[1].discharge, 1.23) + assert isinstance(m.lateral[3].discharge, ForcingModel) + assert isinstance(m.lateral[3].discharge.forcing[0], Constant) + assert m.lateral[3].discharge.forcing[0].name == "10637" diff --git a/tests/dflowfm/extold/test_boundary.py b/tests/dflowfm/extold/test_boundary.py deleted file mode 100644 index ac99bc49b..000000000 --- a/tests/dflowfm/extold/test_boundary.py +++ /dev/null @@ -1,182 +0,0 @@ -""" -Test all methods contained in the -hydrolib.core.dflowfm.ext.models.Boundary class -""" - -from pathlib import Path - -import pytest - -from hydrolib.core.basemodel import DiskOnlyFileModel -from hydrolib.core.dflowfm.bc.models import ForcingModel -from hydrolib.core.dflowfm.ext.models import Boundary - - -class TestBoundary: - - def test_existing_file(self): - polyline = "tests/data/input/boundary-conditions/tfl_01.pli" - data = { - "quantity": "waterlevelbnd", - "locationfile": polyline, - "forcingfile": ForcingModel(), - } - boundary_block = Boundary(**data) - assert boundary_block.locationfile == DiskOnlyFileModel(Path(polyline)) - assert boundary_block.quantity == "waterlevelbnd" - assert boundary_block.forcingfile == ForcingModel() - assert boundary_block.bndwidth1d is None - assert boundary_block.bndbldepth is None - - def test_given_args_expected_values(self): - # 1. Explicit declaration of parameters (to validate keys as they are written) - dict_values = { - "quantity": "42", - "nodeid": "aNodeId", - "locationfile": Path("aLocationFile"), - "forcingfile": ForcingModel(), - "bndwidth1d": 4.2, - "bndbldepth": 2.4, - } - - # 2. Create boundary. - created_boundary = Boundary(**dict_values) - - # 3. Verify boundary values as expected. 
- created_boundary_dict = created_boundary.dict() - - compare_data = dict(dict_values) - expected_location_path = compare_data.pop("locationfile") - - for key, value in compare_data.items(): - assert created_boundary_dict[key] == value - - assert ( - created_boundary_dict["locationfile"]["filepath"] == expected_location_path - ) - - def test_given_args_as_alias_expected_values(self): - # 1. Explicit declaration of parameters (to validate keys as they are written) - dict_values = { - "quantity": "42", - "nodeid": "aNodeId", - "locationfile": Path("aLocationFile"), - "forcingFile": ForcingModel(), - "bndWidth1D": 4.2, - "bndBlDepth": 2.4, - } - - # 2. Create boundary. - created_boundary = Boundary(**dict_values) - boundary_as_dict = created_boundary.dict() - # 3. Verify boundary values as expected. - assert boundary_as_dict["quantity"] == dict_values["quantity"] - assert boundary_as_dict["nodeid"] == dict_values["nodeid"] - assert ( - boundary_as_dict["locationfile"]["filepath"] == dict_values["locationfile"] - ) - assert boundary_as_dict["forcingfile"] == dict_values["forcingFile"] - assert boundary_as_dict["bndwidth1d"] == dict_values["bndWidth1D"] - assert boundary_as_dict["bndbldepth"] == dict_values["bndBlDepth"] - - class TestValidateRootValidator: - """ - Test class to validate the paradigms when evaluating - check_nodeid_or_locationfile_present. - """ - - @pytest.mark.parametrize( - "dict_values", - [ - pytest.param(dict(), id="No entries."), - pytest.param( - dict(nodeid=None, locationfile=None), id="Entries are None." - ), - pytest.param(dict(nodeid="", locationfile=""), id="Entries are Empty."), - ], - ) - def test_given_no_values_raises_valueerror(self, dict_values: dict): - with pytest.raises(ValueError) as exc_mssg: - Boundary.check_nodeid_or_locationfile_present(dict_values) - - # 3. Verify final expectations. - expected_error_mssg = ( - "Either nodeId or locationFile fields should be specified." 
- ) - assert str(exc_mssg.value) == expected_error_mssg - - @pytest.mark.parametrize( - "dict_values", - [ - pytest.param(dict(nodeid="aNodeId"), id="NodeId present."), - pytest.param( - dict(locationfile=Path("aLocationFile")), - id="LocationFile present.", - ), - pytest.param( - dict(nodeid="bNodeId", locationfile="bLocationFile"), - id="Both present.", - ), - ], - ) - def test_given_dict_values_doesnot_raise(self, dict_values: dict): - return_values = Boundary.check_nodeid_or_locationfile_present(dict_values) - assert dict_values == return_values - - class TestValidateFromCtor: - """ - Test class to validate the validation during default object creation. - """ - - @pytest.mark.parametrize( - "dict_values", - [ - pytest.param(dict(), id="No entries."), - pytest.param( - dict(nodeid=None, locationfile=None), id="Entries are None." - ), - pytest.param(dict(nodeid=""), id="NodeId is empty."), - ], - ) - def test_given_no_values_raises_valueerror(self, dict_values: dict): - required_values = dict(quantity="aQuantity") - test_values = {**dict_values, **required_values} - with pytest.raises(ValueError) as exc_mssg: - Boundary(**test_values) - - # 3. Verify final expectations. - expected_error_mssg = ( - "Either nodeId or locationFile fields should be specified." 
- ) - assert expected_error_mssg in str(exc_mssg.value) - - @pytest.mark.parametrize( - "dict_values", - [ - pytest.param(dict(nodeid="aNodeId"), id="NodeId present."), - pytest.param( - dict(locationfile=Path("aLocationFile")), - id="LocationFile present.", - ), - pytest.param( - dict(nodeid="bNodeId", locationfile=Path("bLocationFile")), - id="Both present.", - ), - ], - ) - def test_given_dict_values_doesnot_raise(self, dict_values: dict): - required_values = dict(quantity="aQuantity", forcingfile=ForcingModel()) - test_values = {**dict_values, **required_values} - created_boundary = Boundary(**test_values) - - expected_locationfile = test_values.pop("locationfile", None) - - for key, value in test_values.items(): - if key == "forcing_file": - value = value.dict() - assert created_boundary.dict()[key] == value - - assert ( - created_boundary.dict()["locationfile"]["filepath"] - == expected_locationfile - ) diff --git a/tests/dflowfm/extold/test_ext.py b/tests/dflowfm/extold/test_ext.py deleted file mode 100644 index 1d85ad37c..000000000 --- a/tests/dflowfm/extold/test_ext.py +++ /dev/null @@ -1,631 +0,0 @@ -from pathlib import Path -from typing import Dict, List, Optional - -import numpy as np -import pytest -from pydantic.v1 import ValidationError - -from hydrolib.core.basemodel import DiskOnlyFileModel -from hydrolib.core.dflowfm.bc.models import Constant, ForcingModel, RealTime -from hydrolib.core.dflowfm.ext.models import ( - ExtModel, - Lateral, - Meteo, - MeteoForcingFileType, - MeteoInterpolationMethod, -) -from hydrolib.core.dflowfm.ini.models import INIBasedModel -from hydrolib.core.dflowfm.tim.models import TimModel -from tests.utils import test_data_dir - - -class TestModels: - """Test class to test all classes and methods contained in the - hydrolib.core.dflowfm.ext.models.py module""" - - class TestLateral: - """Class to test all methods contained in the - hydrolib.core.dflowfm.ext.models.Lateral class""" - - location_error: str = ( - "nodeId or 
branchId and chainage or xCoordinates, yCoordinates and numCoordinates should be provided" - ) - - class TestValidateCoordinates: - """ - Class to test the paradigms for validate_coordinates. - """ - - def _create_valid_lateral_values(self) -> Dict: - values = dict( - id="randomId", - name="randomName", - numcoordinates=2, - xcoordinates=[1.1, 2.2], - ycoordinates=[1.1, 2.2], - discharge=1.234, - ) - - return values - - def test_given_no_numcoordinates_raises_valueerror(self): - values = self._create_valid_lateral_values() - del values["numcoordinates"] - - with pytest.raises(ValueError): - Lateral(**values) - - def test_given_wrong_numcoordinates_raises_assertionerror(self): - values = self._create_valid_lateral_values() - values["numcoordinates"] = 999 - - with pytest.raises(ValueError): - Lateral(**values) - - def test_given_correct_numcoordinates(self): - xcoordinates = [1, 2] - ycoordinates = [2, 3] - - values = self._create_valid_lateral_values() - values["xcoordinates"] = xcoordinates - values["ycoordinates"] = ycoordinates - values["numcoordinates"] = len(xcoordinates) - - lateral = Lateral(**values) - - assert lateral.xcoordinates == xcoordinates - assert lateral.ycoordinates == ycoordinates - - def test_given_fewer_coordinates_than_minimum_required_throws_valueerror( - self, - ): - values = self._create_valid_lateral_values() - values["numcoordinates"] = 0 - values["xcoordinates"] = [] - values["ycoordinates"] = [] - - with pytest.raises(ValueError): - Lateral(**values) - - class TestValidateLocationType: - """ - Class to test the paradigms for validate_location_type - """ - - @pytest.mark.parametrize( - "value", - [ - pytest.param(""), - pytest.param(" "), - pytest.param("notAValidType"), - ], - ) - def test_given_wrong_location_type_raises_valueerror(self, value: str): - with pytest.raises(ValueError) as exc_mssg: - Lateral.validate_location_type(value) - assert ( - str(exc_mssg.value) - == f"Value given ({value}) not accepted, should be one of: 1d, 2d, 
all" - ) - - @pytest.mark.parametrize( - "location_type", - [ - pytest.param("1d"), - pytest.param("1D"), - pytest.param("2d"), - pytest.param("2D"), - pytest.param("all"), - pytest.param("All"), - pytest.param("ALL"), - ], - ) - def test_given_correct_locationtype(self, location_type: str): - return_value = Lateral.validate_location_type(location_type) - assert return_value == location_type - - class TestValidateLocationTypeDependencies: - """ - Class to test the paradigms of validate_location_dependencies - """ - - @pytest.mark.parametrize( - "dict_values", - [ - pytest.param( - dict(nodeid=None, branchid=None, chainage=None), - id="All None", - ), - pytest.param( - dict(nodeid="", branchid="", chainage=None), - id="All Empty", - ), - ], - ) - def test_given_no_values_raises_valueerror(self, dict_values: dict): - with pytest.raises(ValueError) as exc_err: - Lateral.validate_that_location_specification_is_correct( - values=dict_values - ) - assert str(exc_err.value) == TestModels.TestLateral.location_error - - @pytest.mark.parametrize( - "missing_coordinates", [("xCoordinates"), ("yCoordinates")] - ) - def test_given_numcoords_but_missing_coordinates( - self, missing_coordinates: str - ): - test_dict = dict( - nodeid=None, - branchid=None, - chainage=None, - numcoordinates=2, - xcoordinates=[42, 24], - ycoordinates=[24, 42], - ) - test_dict[missing_coordinates.lower()] = None - with pytest.raises(ValueError) as exc_error: - Lateral.validate_that_location_specification_is_correct(test_dict) - assert str(exc_error.value) == TestModels.TestLateral.location_error - - def test_given_numcoordinates_and_valid_coordinates(self): - test_dict = dict( - nodeid=None, - branchid=None, - chainage=None, - numcoordinates=2, - xcoordinates=[42, 24], - ycoordinates=[24, 42], - ) - return_value = Lateral.validate_that_location_specification_is_correct( - test_dict - ) - assert return_value == test_dict - - def test_given_branchid_and_no_chainage_raises_valueerror(self): - with 
pytest.raises(ValueError) as exc_err: - Lateral.validate_that_location_specification_is_correct( - dict( - nodeid=None, - branchid="aBranchId", - chainage=None, - ) - ) - assert str(exc_err.value) == TestModels.TestLateral.location_error - - @pytest.mark.parametrize( - "dict_values", - [ - pytest.param(dict(nodeid="42"), id="Given nodeid"), - pytest.param( - dict(branchid="aBranchId", chainage=4.2), - id="Given branchid and chainage", - ), - ], - ) - def test_given_1d_args_and_location_type_other_then_raises_valueerror( - self, dict_values: dict - ): - test_values = dict( - locationtype="wrongType", - ) - test_dict = {**dict_values, **test_values} - with pytest.raises(ValueError) as exc_err: - Lateral.validate_that_location_specification_is_correct(test_dict) - assert ( - str(exc_err.value) == "locationType should be 1d but was wrongType" - ) - - @pytest.mark.parametrize( - "dict_values", - [ - pytest.param(dict(nodeid="24"), id="Given nodeid"), - pytest.param( - dict(branchid="aBranchId", chainage=4.2), - id="Given branchid and chainage.", - ), - ], - ) - def test_given_1d_args_and_1d_location_type(self, dict_values: dict): - test_values = dict( - locationtype="1d", - ) - test_dict = {**dict_values, **test_values} - return_value = Lateral.validate_that_location_specification_is_correct( - test_dict - ) - assert return_value == test_dict - - @pytest.mark.parametrize( - "test_dict", - [ - pytest.param(dict(nodeid="aNodeId"), id="With NodeId"), - pytest.param( - dict(branchid="aBranchId", chainage=42), - id="Witch branchid and chainage", - ), - ], - ) - @pytest.mark.parametrize( - "location_type", - [ - pytest.param("", id="Empty string"), - pytest.param(None, id="None string"), - ], - ) - def test_given_1d_args_but_no_locationtype_then_sets_value( - self, test_dict: dict, location_type: str - ): - test_dict["locationtype"] = location_type - return_value = Lateral.validate_that_location_specification_is_correct( - test_dict - ) - assert return_value["locationtype"] 
== "1d" - - class TestValidateFromCtor: - @pytest.mark.parametrize( - "x_coord, y_coord", - [ - pytest.param(None, [42, 24], id="Only y-coord."), - pytest.param([42, 24], None, id="Only x-coord."), - ], - ) - def test_given_coordinates_but_no_numcoordinates_raises( - self, x_coord: Optional[List[int]], y_coord: Optional[List[int]] - ): - with pytest.raises(ValidationError) as exc_mssg: - Lateral( - id="42", - discharge=1.23, - numcoordinates=None, - xcoordinates=x_coord, - ycoordinates=y_coord, - ) - - expected_error_mssg = TestModels.TestLateral.location_error - assert expected_error_mssg in str(exc_mssg.value) - - @pytest.mark.parametrize( - "x_coord, y_coord", - [ - pytest.param([42, 24], [24], id="Y coord not matching."), - pytest.param([24], [42, 24], id="X coord not matching."), - ], - ) - def test_given_coordinates_not_matching_numcoordinates_raises( - self, x_coord: List[int], y_coord: List[int] - ): - with pytest.raises(ValidationError): - Lateral( - id="42", - discharge=1.23, - numcoordinates=2, - xcoordinates=x_coord, - ycoordinates=y_coord, - ) - - @pytest.mark.parametrize( - "missing_coord", [("xCoordinates"), ("yCoordinates")] - ) - def test_given_partial_coordinates_raises(self, missing_coord: str): - lateral_dict = dict( - id="42", - discharge=1.23, - numcoordinates=2, - xcoordinates=[42, 24], - ycoordinates=[24, 42], - locationtype="all", - ) - lateral_dict[missing_coord.lower()] = None - with pytest.raises(ValidationError) as exc_mssg: - Lateral(**lateral_dict) - expected_error_mssg = TestModels.TestLateral.location_error - assert expected_error_mssg in str(exc_mssg.value) - - def test_given_unknown_locationtype_raises(self): - with pytest.raises(ValidationError) as exc_mssg: - location_type = "loremIpsum" - Lateral( - id="42", - discharge=1.23, - numcoordinates=2, - xcoordinates=[42, 24], - ycoordinates=[24, 42], - locationtype=location_type, - ) - expected_error_mssg = f"Value given ({location_type}) not accepted, should be one of: 1d, 2d, all" 
- assert expected_error_mssg in str(exc_mssg.value) - - @pytest.mark.parametrize( - "location_values", - [ - pytest.param(dict(nodeid="aNodeId"), id="nodeid given."), - pytest.param( - dict(branchid="aBranchId", chainage=42), - id="branchid + chainage given.", - ), - pytest.param( - dict(nodeid="", branchid="aBranchId", chainage=42), - id="Empty nodeid.", - ), - ], - ) - def test_given_valid_location_args_constructs_lateral( - self, location_values: dict - ): - # 1. Define test data. - default_values = dict( - id="42", - discharge=1.23, - locationtype="1d", - ) - test_dict = {**default_values, **location_values} - - # 2. Run test. - new_lateral = Lateral(**test_dict) - - # 3. Validate final expectations. - for key, value in location_values.items(): - assert new_lateral.dict()[key] == value - - @pytest.mark.parametrize( - "location_dict", - [ - pytest.param( - dict(locationtype="1d", nodeid="aNodeId"), id="1D-With NodeId" - ), - pytest.param( - dict(locationtype="1d", branchid="aBranchId", chainage=4.2), - id="1D-With BranchId and Chainage", - ), - pytest.param( - dict( - locationtype="2d", - xcoordinates=[42, 24], - ycoordinates=[24, 42], - numcoordinates=2, - ), - id="2D-With coordinates", - ), - pytest.param( - dict( - locationtype="all", - xcoordinates=[42, 24], - ycoordinates=[24, 42], - numcoordinates=2, - ), - id="All-With coordinates", - ), - ], - ) - def test_given_valid_args_validates_locationtype(self, location_dict: str): - # 1. Define test data. - default_values = dict( - id="42", - discharge="realtime", - ) - lateral_dict = {**default_values, **location_dict} - # 2. Run test. - lateral_cls = Lateral(**lateral_dict) - - # 3. Validate expectations. - assert isinstance(lateral_cls, INIBasedModel) - for key, value in lateral_dict.items(): - assert lateral_cls.dict()[key] == value - - class TestValidateForcingData: - """ - Class to test the different types of discharge forcings. 
- """ - - def test_dischargeforcings_fromfile(self): - - filepath = ( - test_data_dir / "input/dflowfm_individual_files/FlowFM_bnd.ext" - ) - m = ExtModel(filepath) - assert len(m.lateral) == 72 - assert m.lateral[0].discharge == RealTime.realtime - assert np.isclose(m.lateral[1].discharge, 1.23) - assert isinstance(m.lateral[3].discharge, ForcingModel) - assert isinstance(m.lateral[3].discharge.forcing[0], Constant) - assert m.lateral[3].discharge.forcing[0].name == "10637" - - -class TestExtModel: - """Class to test all methods contained in the - hydrolib.core.dflowfm.ext.models.ExtModel class""" - - def test_construct_from_file_with_tim(self, input_files_dir: Path): - input_ext = input_files_dir.joinpath( - "e02/f006_external_forcing/c063_rain_tim/rainschematic.ext" - ) - - ext_model = ExtModel(input_ext) - - assert isinstance(ext_model, ExtModel) - assert len(ext_model.meteo) == 1 - assert ext_model.meteo[0].quantity == "rainfall_rate" - assert isinstance(ext_model.meteo[0].forcingfile, TimModel) - assert ext_model.meteo[0].forcingfiletype == MeteoForcingFileType.uniform - - assert len(ext_model.meteo[0].forcingfile.timeseries) == 14 - - def test_construct_from_file_with_bc(self, input_files_dir: Path): - input_ext = input_files_dir.joinpath( - "e02/f006_external_forcing/c069_rain_bc/rainschematic.ext" - ) - ext_model = ExtModel(input_ext) - - assert isinstance(ext_model, ExtModel) - assert len(ext_model.meteo) == 1 - assert ext_model.meteo[0].quantity == "rainfall_rate" - assert isinstance(ext_model.meteo[0].forcingfile, ForcingModel) - assert ext_model.meteo[0].forcingfiletype == MeteoForcingFileType.bcascii - - def test_construct_from_file_with_netcdf(self, input_files_dir: Path): - input_ext = input_files_dir.joinpath( - "e02/f006_external_forcing/c067_rain_netcdf_stations/rainschematic.ext" - ) - ext_model = ExtModel(input_ext) - - assert isinstance(ext_model, ExtModel) - assert len(ext_model.meteo) == 1 - assert ext_model.meteo[0].quantity == "rainfall" - 
assert isinstance(ext_model.meteo[0].forcingfile, DiskOnlyFileModel) - assert ext_model.meteo[0].forcingfiletype == MeteoForcingFileType.netcdf - - def test_ext_model_correct_default_serializer_config(self): - model = ExtModel() - - assert model.serializer_config.section_indent == 0 - assert model.serializer_config.property_indent == 0 - assert model.serializer_config.datablock_indent == 8 - assert model.serializer_config.float_format == "" - assert model.serializer_config.datablock_spacing == 2 - assert model.serializer_config.comment_delimiter == "#" - assert model.serializer_config.skip_empty_properties == True - - -class TestMeteo: - - def test_meteo_interpolation_methods(self, meteo_interpolation_methods: List[str]): - assert len(MeteoInterpolationMethod) == 3 - assert all( - quantity.value in meteo_interpolation_methods - for quantity in MeteoInterpolationMethod.__members__.values() - ) - - def test_meteo_forcing_file_type(self, meteo_forcing_file_type: List[str]): - assert len(MeteoForcingFileType) == 8 - assert all( - quantity.value in meteo_forcing_file_type - for quantity in MeteoForcingFileType.__members__.values() - ) - - def test_meteo_initialization(self): - data = { - "quantity": "rainfall", - "forcingfile": ForcingModel(), - "forcingfiletype": MeteoForcingFileType.bcascii, - "targetmaskfile": None, - "targetmaskinvert": False, - "interpolationmethod": None, - } - meteo = Meteo(**data) - assert meteo.quantity == "rainfall" - assert isinstance(meteo.forcingfile, ForcingModel) - assert meteo.forcingfiletype == MeteoForcingFileType.bcascii - - def test_default_values(self): - meteo = Meteo( - quantity="rainfall", - forcingfile=ForcingModel(), - forcingfiletype=MeteoForcingFileType.uniform, - ) - assert meteo.targetmaskfile is None - assert meteo.targetmaskinvert is None - assert meteo.interpolationmethod is None - assert meteo.operand == "O" - assert meteo.extrapolationAllowed is None - assert meteo.extrapolationSearchRadius is None - assert 
meteo.averagingType is None - assert meteo.averagingNumMin is None - assert meteo.averagingPercentile is None - - def test_setting_optional_fields(self): - meteo = Meteo( - quantity="rainfall", - forcingfile=ForcingModel(), - forcingfiletype=MeteoForcingFileType.uniform, - targetmaskfile=None, - targetmaskinvert=True, - interpolationmethod=MeteoInterpolationMethod.nearestnb, - operand="O", - extrapolationAllowed=True, - extrapolationSearchRadius=10, - averagingType=1, - averagingNumMin=0.5, - averagingPercentile=90, - ) - assert meteo.targetmaskfile is None - assert meteo.targetmaskinvert is True - assert meteo.interpolationmethod == MeteoInterpolationMethod.nearestnb - assert meteo.operand == "O" - assert meteo.extrapolationAllowed is True - assert meteo.extrapolationSearchRadius == 10 - assert meteo.averagingType == 1 - assert np.isclose(meteo.averagingNumMin, 0.5) - assert meteo.averagingPercentile == 90 - - def test_invalid_forcingfiletype(self): - with pytest.raises(ValueError): - Meteo( - quantity="rainfall", - forcingfile=ForcingModel(), - forcingfiletype="invalidType", - ) - - def test_invalid_interpolation_method(self): - with pytest.raises(ValueError): - Meteo( - quantity="rainfall", - forcingfile=ForcingModel(), - forcingfiletype=MeteoForcingFileType.uniform, - interpolationmethod="invalidMethod", - ) - - @pytest.mark.parametrize( - ("missing_field", "alias_field"), - [ - ("quantity", "quantity"), - ("forcingfile", "forcingFile"), - ("forcingfiletype", "forcingFileType"), - ], - ) - def test_missing_required_fields(self, missing_field, alias_field): - dict_values = { - "quantity": "rainfall", - "forcingfile": ForcingModel(), - "forcingfiletype": MeteoForcingFileType.bcascii, - "targetmaskfile": None, - "targetmaskinvert": False, - "interpolationmethod": None, - } - del dict_values[missing_field] - - with pytest.raises(ValidationError) as error: - Meteo(**dict_values) - - expected_message = f"{alias_field}\n field required " - assert expected_message in 
str(error.value) - - def test_is_intermediate_link(self): - meteo = Meteo( - quantity="rainfall", - forcingfile=ForcingModel(), - forcingfiletype=MeteoForcingFileType.uniform, - ) - assert meteo.is_intermediate_link() is True - - def test_initialize_with_boundary_condition_file( - self, boundary_condition_file: Path - ): - meteo = Meteo( - quantity="rainfall", - forcingfile=boundary_condition_file, - forcingfiletype=MeteoForcingFileType.bcascii, - ) - assert isinstance(meteo.forcingfile, ForcingModel) - assert meteo.forcingfile.filepath == boundary_condition_file - assert meteo.forcingfiletype == MeteoForcingFileType.bcascii - - def test_initialize_with_time_series_file(self, time_series_file: Path): - meteo = Meteo( - quantity="rainfall", - forcingfile=time_series_file, - forcingfiletype=MeteoForcingFileType.bcascii, - ) - assert isinstance(meteo.forcingfile, TimModel) - assert meteo.forcingfile.filepath == time_series_file - assert meteo.forcingfiletype == MeteoForcingFileType.bcascii diff --git a/tests/dflowfm/test_extold.py b/tests/dflowfm/test_extold.py index 2fedccf78..22c60c39c 100644 --- a/tests/dflowfm/test_extold.py +++ b/tests/dflowfm/test_extold.py @@ -19,6 +19,7 @@ ExtOldInitialConditionQuantity, ExtOldMethod, ExtOldModel, + ExtOldParametersQuantity, ExtOldQuantity, ExtOldTracerQuantity, ) @@ -1048,3 +1049,14 @@ def test_the_missing_method_with_tracers(self, qunatity_name): """ quantity = ExtOldInitialConditionQuantity(qunatity_name) assert quantity.value == qunatity_name + + +def test_ext_old_parameter_quantity(parameter_quantities: List[str]): + """ + Test the number of parameter quantities in the ExtOldParametersQuantity enum. 
+ """ + assert len(ExtOldParametersQuantity) == len(parameter_quantities) + assert all( + quantity.value in parameter_quantities + for quantity in ExtOldParametersQuantity.__members__.values() + ) diff --git a/tests/tools/test_converters.py b/tests/tools/test_converters.py index 79ab743af..16d495c17 100644 --- a/tests/tools/test_converters.py +++ b/tests/tools/test_converters.py @@ -2,12 +2,20 @@ from hydrolib.core.basemodel import DiskOnlyFileModel from hydrolib.core.dflowfm.bc.models import ForcingModel -from hydrolib.core.dflowfm.ext.models import Boundary +from hydrolib.core.dflowfm.common.models import Operand +from hydrolib.core.dflowfm.ext.models import ( + Boundary, + Meteo, + MeteoForcingFileType, + MeteoInterpolationMethod, +) from hydrolib.core.dflowfm.extold.models import ExtOldForcing, ExtOldQuantity -from hydrolib.core.dflowfm.inifield.models import InitialField +from hydrolib.core.dflowfm.inifield.models import InitialField, ParameterField from hydrolib.tools.ext_old_to_new.converters import ( BoundaryConditionConverter, InitialConditionConverter, + MeteoConverter, + ParametersConverter, ) @@ -41,6 +49,44 @@ def test_polygon_data_file(self): assert np.isclose(new_quantity_block.value, 0.0) +class TestConvertParameters: + def test_sample_data_file(self): + forcing = ExtOldForcing( + quantity=ExtOldQuantity.FrictionCoefficient, + filename="iniwaterlevel.xyz", + filetype=7, # "Polyline" + method="5", # "Interpolate space", + operand="O", + ) + + new_quantity_block = ParametersConverter().convert(forcing) + assert isinstance(new_quantity_block, ParameterField) + assert new_quantity_block.datafiletype == "sample" + assert new_quantity_block.interpolationmethod == "triangulation" + + +class TestConvertMeteo: + def test_default(self): + forcing = ExtOldForcing( + quantity=ExtOldQuantity.WindX, + filename="windtest.amu", + filetype=4, + method="2", + operand="O", + ) + + new_quantity_block = MeteoConverter().convert(forcing) + assert 
isinstance(new_quantity_block, Meteo) + assert new_quantity_block.quantity == "windx" + assert new_quantity_block.operand == Operand.override + assert new_quantity_block.forcingfile == DiskOnlyFileModel("windtest.amu") + assert new_quantity_block.forcingfiletype == MeteoForcingFileType.meteogridequi + assert ( + new_quantity_block.interpolationmethod + == MeteoInterpolationMethod.linearSpaceTime + ) + + class TestBoundaryConverter: def test_default(self):